repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
vjdad4m/idk | [
"0ae633f9802b00170350108438100b1a6b0af4b3"
]
| [
"Perlin/Perlin.py"
]
| [
"import matplotlib.pyplot as plt\r\nfrom math import sin, cos\r\nimport numpy as np\r\n\r\ndef interpolate(a0, a1, w):\r\n return (a1 - a0) * w + a0\r\n\r\ndef random(ix, iy):\r\n r = 2920 * sin(ix * 21942 + iy * 171324 + 8912) * cos(ix * 23157 * iy * 217832 + 9758)\r\n return (cos(r), sin(r))\r\n\r\ndef dot(ix, iy, x, y):\r\n r = random(ix, iy)\r\n dx = x - float(ix)\r\n dy = y - float(iy)\r\n return (dx*r[0] + dy*r[1])\r\n\r\ndef perlin(x,y,scale=1):\r\n x0, y0 = int(x), int(y)\r\n x1 = x0 + 1\r\n y1 = y0 + 1\r\n \r\n sx = x - x0\r\n sy = y - y0\r\n \r\n n0 = dot(x0, y0, x, y)\r\n n1 = dot(x1, y0, x, y)\r\n ix0 = interpolate(n0, n1, sx)\r\n \r\n n0 = dot(x0, y1, x, y)\r\n n1 = dot(x1, y1, x, y)\r\n ix1 = interpolate(n0, n1, sx)\r\n\r\n val = interpolate(ix0, ix1, sy)\r\n return val * scale\r\n\r\ndT = 0.1\r\nstart = 0\r\nend = 300\r\nT = np.arange(start, end, dT)\r\nP = np.array([perlin(x,1) for x in T])\r\n\r\nfig, ax = plt.subplots()\r\nax.plot(T,P)\r\nax.set_title(f'[ dT: {dT} | start: {start} | end: {end} ]')\r\nax.set_xlabel('x')\r\nax.set_ylabel('value')\r\nplt.show()\r\n"
]
| [
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
]
|
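The script above evaluates `perlin(x, 1)` along a single lattice row, i.e. a 1-D slice of the 2-D noise field. As a usage sketch, the same function can render a full 2-D heightmap; this assumes the `perlin` definition from `Perlin/Perlin.py` above has been pasted into scope:

```python
# 2-D heightmap from the perlin() function defined in the snippet above
# (assumed in scope); the per-point Python loop is slow but clear.
import numpy as np
import matplotlib.pyplot as plt

xs = np.arange(0.0, 8.0, 0.05)
ys = np.arange(0.0, 8.0, 0.05)
Z = np.array([[perlin(x, y) for x in xs] for y in ys])

fig, ax = plt.subplots()
ax.imshow(Z, origin='lower', extent=(xs[0], xs[-1], ys[0], ys[-1]))
ax.set_title('2-D Perlin field (illustrative)')
plt.show()
```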
L-Net-1992/oneflow | [
"4dc08d65caea36fdd137841ac95551218897e730"
]
| [
"python/oneflow/test/exceptions/test_error_msg.py"
]
| [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport oneflow as flow\nimport oneflow.unittest\nimport oneflow.nn.functional as F\nimport torch\n\n\[email protected]_unless_1n1d()\nclass TestErrorMsg(flow.unittest.TestCase):\n def test_torch_error_msg(test_case):\n with test_case.assertRaises(RuntimeError) as exp:\n F.pad(torch.randn(2, 2))\n test_case.assertTrue(\"torch.Tensor\" in str(exp.exception))\n\n def test_numpy_error_msg(test_case):\n import numpy as np\n\n with test_case.assertRaises(RuntimeError) as exp:\n F.pad(np.random.randn(2, 2))\n test_case.assertTrue(\"numpy\" in str(exp.exception))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"torch.randn",
"numpy.random.randn"
]
]
|
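For contrast with the failing calls the test exercises, a minimal sketch of the accepted input type, assuming a working oneflow install and the torch-style `F.pad(input, pad)` signature that oneflow mirrors:

```python
# Non-raising counterpart to the test above: oneflow's functional pad
# accepts oneflow tensors. Sketch only; assumes oneflow is installed.
import oneflow as flow
import oneflow.nn.functional as F

x = flow.randn(2, 2)
y = F.pad(x, (1, 1))  # pad the last dimension by 1 on each side
print(y.shape)        # expected: (2, 4)
```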
ds7711/nlpaug | [
"9d4fb11dcff9980ebaec9d8e6cc7a0381f7db67b"
]
| [
"nlpaug/model/audio/vtlp.py"
]
| [
"import numpy as np\nimport librosa\nfrom nlpaug.model.audio import Audio\n\n\nclass Vtlp(Audio):\n # https://pdfs.semanticscholar.org/3de0/616eb3cd4554fdf9fd65c9c82f2605a17413.pdf\n def __init__(self, sampling_rate, zone=(0.2, 0.8), coverage=0.1, duration=None, factor=(0.9, 1.1), fhi=4800,\n stateless=True):\n \"\"\"\n :param int sampling_rate: Sampling rate of input audio. Mandatory if duration is provided.\n :param tuple zone: Assign a zone for augmentation. Default value is (0.2, 0.8) which means that no any\n augmentation\n will be applied in first 20% and last 20% of whole audio.\n :param float coverage: Portion of augmentation. Value should be between 0 and 1. If `1` is assigned, augment\n operation will be applied to target audio segment. For example, the audio duration is 60 seconds while\n zone and coverage are (0.2, 0.8) and 0.7 respectively. 42 seconds ((0.8-0.2)*0.7*60) audio will be\n augmented.\n :param int duration: Duration of augmentation (in second). Default value is None. If value is provided.\n `coverage` value will be ignored.\n :param int fhi: Boundary frequency. Default value is 4800.\n :param tuple factor: Warping factor\n \"\"\"\n super().__init__(zone=zone, coverage=coverage, duration=duration, sampling_rate=sampling_rate,\n stateless=stateless, factor=factor)\n self.fhi = fhi\n\n @classmethod\n def get_scale_factors(cls, freq_dim, sampling_rate, fhi=4800, alpha=0.9):\n factors = []\n freqs = np.linspace(0, 1, freq_dim)\n\n scale = fhi * min(alpha, 1)\n f_boundary = scale / alpha\n half_sr = sampling_rate / 2\n\n for f in freqs:\n f *= sampling_rate\n if f <= f_boundary:\n factors.append(f * alpha)\n else:\n warp_freq = half_sr - (half_sr - scale) / (half_sr - scale / alpha) * (half_sr - f)\n factors.append(warp_freq)\n\n return np.array(factors)\n\n # https://github.com/YerevaNN/Spoken-language-identification/blob/master/augment_data.py#L26\n def _manipulate(self, audio, sampling_rate, factor):\n stft = librosa.core.stft(audio)\n time_dim, freq_dim = stft.shape\n data_type = type(stft[0][0])\n\n factors = self.get_scale_factors(freq_dim, sampling_rate, alpha=factor)\n factors *= (freq_dim - 1) / max(factors)\n new_stft = np.zeros([time_dim, freq_dim], dtype=data_type)\n\n for i in range(freq_dim):\n # first and last freq\n if i == 0 or i + 1 >= freq_dim:\n new_stft[:, i] += stft[:, i]\n else:\n warp_up = factors[i] - np.floor(factors[i])\n warp_down = 1 - warp_up\n pos = int(np.floor(factors[i]))\n\n new_stft[:, pos] += warp_down * stft[:, i]\n new_stft[:, pos+1] += warp_up * stft[:, i]\n\n return librosa.core.istft(new_stft)\n\n def get_warping_level(self):\n return np.random.uniform(self.factor[0], self.factor[1])\n\n def manipulate(self, data):\n if self.duration is None:\n start_pos, end_pos = self.get_augment_range_by_coverage(data)\n else:\n start_pos, end_pos = self.get_augment_range_by_duration(data)\n\n factor = self.get_warping_level()\n aug_data = self._manipulate(data[start_pos:end_pos], sampling_rate=self.sampling_rate, factor=factor)\n\n if not self.stateless:\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.aug_factor = factor\n self.aug_data = aug_data\n\n return np.concatenate((data[:start_pos], aug_data, data[end_pos:]), axis=0).astype(type(data[0]))\n\n # if start_pos > 0:\n # aug_data = np.concatenate((data[:start_pos], aug_data), axis=0)\n # if end_pos < len(data):\n # aug_data = np.concatenate((aug_data, data[end_pos:]), axis=0)\n #\n # return aug_data.astype(type(data[0]))\n"
]
| [
[
"numpy.linspace",
"numpy.concatenate",
"numpy.floor",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
]
|
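The `Vtlp` class above is the low-level model; in nlpaug it is normally driven through the audio augmenter API. A minimal sketch, assuming the public wrapper `nlpaug.augmenter.audio.VtlpAug` exposes the same parameters (recent nlpaug versions may return a list from `augment`):

```python
# Illustrative VTLP usage via nlpaug's augmenter API (assumed wrapper
# around the Vtlp model above). Random noise stands in for real audio.
import numpy as np
import nlpaug.augmenter.audio as naa

sr = 16000
audio = np.random.randn(sr).astype(np.float32)  # 1 s of noise

aug = naa.VtlpAug(sampling_rate=sr, zone=(0.2, 0.8), coverage=0.1,
                  factor=(0.9, 1.1), fhi=4800)
augmented = aug.augment(audio)
```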
alefabris/information-retrieval-service-elasticsearch | [
"a44d6004a4b7e13e1bdc2276cd51eef6c34b22e2"
]
| [
"experiment0.py"
]
| [
"import string\r\nimport sys\r\nimport getopt\r\nimport os.path\r\nimport json\r\nimport subprocess\r\nimport pandas as pd\r\nimport nltk\r\nimport re\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom tqdm import tqdm\r\nfrom sklearn.cluster import KMeans\r\nfrom nltk.corpus import stopwords as sw\r\nfrom string import punctuation as pun\r\nfrom nltk.tokenize import TweetTokenizer as tt\r\nfrom nltk.stem.snowball import SnowballStemmer as ss\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom elasticsearch import Elasticsearch\r\nfrom sklearn.model_selection import ParameterGrid\r\nnltk.download(\"stopwords\")\r\n\r\n\r\nINDEXNAME = 'cv19index'\r\nVERBOSE = True\r\nSIZE = 1000\r\n\r\nFILEQUERY = \"query1.json\"\r\nFILERETRIEVALS = \"retrieved.txt\"\r\n\r\nes = Elasticsearch([{'host':'localhost','port':9200}])\r\n\r\ndef resToText(fileout,response,query = \"1\", n = 1000, tag = \"tag\"):\r\n rank = 0\r\n for hit in response[\"hits\"][\"hits\"]:\r\n rank += 1\r\n line = \"\\t\".join([str(query),\"Q0\",str(hit[\"_source\"]['cord_uid']),str(rank),str(hit['_score']),str(tag)+\"\\n\"])\r\n fileout.write(line)\r\n if rank > n: \r\n break\r\n\r\n\r\ndef pulisci_parola(s):\r\n return \"\".join(re.findall(\"[a-zA-Z0-9]+\", s))\r\n\r\ndef add_spaces(doc):\r\n string = \"\"\r\n for word in doc:\r\n string = string + word + \" \"\r\n return string[0:-1]\r\n\r\ndef get_stem_matrix(text):\r\n tokenizer = tt(preserve_case=False, reduce_len=True, strip_handles=True)\r\n text = [tokenizer.tokenize(t) for t in text]\r\n\r\n stop_words = sw.words(\"english\")+list(pun)\r\n stemmer = ss(\"english\")\r\n\r\n stemmed_text = []\r\n\r\n for i in range(len(text)):\r\n stemmed_text.append([])\r\n for j in range(len(text[i])):\r\n s = pulisci_parola(text[i][j])\r\n if s not in stop_words and s !=\"\":\r\n stemmed_text[i].append(stemmer.stem(s))\r\n \r\n\r\n vec = CountVectorizer()\r\n X = vec.fit_transform(add_spaces(doc) for doc in stemmed_text)\r\n X = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())\r\n return X\r\n \r\n\r\ndef standard_retrieval(index, body, size):\r\n return es.search(index = index, body = body, size = size)\r\n\r\n\r\ndef clustering_based_retrieval(index, body, size, K = 2, TR = 1):\r\n \r\n K = int(K)\r\n TR = int(TR)\r\n response = es.search(index = index, body = body, size = size)\r\n res = []\r\n onlytext = []\r\n for hit in response[\"hits\"][\"hits\"]:\r\n temp = []\r\n temp.append(hit[\"_id\"])\r\n temp.append(hit[\"_score\"])\r\n title = str(hit[\"_source\"][\"title\"])\r\n abstract = str(hit[\"_source\"][\"abstract\"])\r\n text = \"\"\r\n text += ( title + \" \") * TR\r\n text += abstract\r\n temp.append(title)\r\n temp.append(abstract)\r\n res.append(temp)\r\n onlytext.append(text)\r\n\r\n \r\n X = get_stem_matrix(onlytext)\r\n \r\n kmeans = KMeans(n_clusters = K, random_state = 123)\r\n kmeans.fit(X)\r\n clusters = kmeans.predict(X)\r\n freq_clusters = [0]*K\r\n for c in clusters: freq_clusters[c] += 1\r\n \r\n res = pd.DataFrame(res, columns = [\"id\", \"score\", \"title\", \"abstract\"])\r\n res[\"cluster_rel\"] = [freq_clusters[c] for c in clusters]\r\n res[\"score\"] = [1]*len(res)\r\n res = res.sort_values([\"cluster_rel\",\"score\"],ascending = [False,False]).reset_index(drop = True)\r\n \r\n response = dict()\r\n response[\"hits\"] = dict()\r\n response[\"hits\"][\"hits\"] = []\r\n for i in range(len(res)):\r\n temp_dict = dict()\r\n temp_dict['_score'] = res.iloc[i,1]\r\n temp_dict[\"_source\"] = dict()\r\n 
temp_dict[\"_source\"][\"cord_uid\"] = res.iloc[i,0]\r\n response[\"hits\"][\"hits\"].append(temp_dict)\r\n \r\n \r\n return response\r\n \r\n \r\n\r\n\r\ndef evaluate(retrieval_function = standard_retrieval):\r\n infile = open(FILEQUERY,'r')\r\n oufile = open(FILERETRIEVALS,'w')\r\n for tuttoIlFile in infile:\r\n queries = json.loads(tuttoIlFile)[\"topics\"][\"topic\"]\r\n for query in queries:\r\n querytext = query[\"query\"][\"#text\"] + query[\"question\"][\"#text\"]\r\n num = query[\"@number\"] \r\n query_dict = {\r\n \"query\": {\r\n \"bool\": {\r\n \"should\": [\r\n { \"match\": { \"title\"\t:\tquerytext } },\r\n { \"match\": { \"abstract\"\t:\tquerytext } }\r\n ]\r\n }\r\n }\r\n }\r\n response = retrieval_function(index=INDEXNAME,body=query_dict,size = SIZE) \r\n resToText(response=response, query=num, n=SIZE, tag=\"tag\", fileout=oufile)\r\n oufile.close() \r\n infile.close()\r\n\r\ndef evaluate_kmeans(K = 2, TR = 1):\r\n infile = open(FILEQUERY,'r')\r\n oufile = open(FILERETRIEVALS,'w')\r\n for tuttoIlFile in infile:\r\n queries = json.loads(tuttoIlFile)[\"topics\"][\"topic\"]\r\n for query in queries:\r\n querytext = query[\"query\"][\"#text\"] + query[\"question\"][\"#text\"]\r\n num = query[\"@number\"] \r\n query_dict = {\r\n \"query\": {\r\n \"bool\": {\r\n \"should\": [\r\n { \"match\": { \"title\"\t:\tquerytext } },\r\n { \"match\": { \"abstract\"\t:\tquerytext } }\r\n ]\r\n }\r\n }\r\n }\r\n response = clustering_based_retrieval(index = INDEXNAME, body = query_dict, size = SIZE, K = K, TR = TR) \r\n resToText(response=response, query=num, n=SIZE, tag=\"tag\", fileout=oufile)\r\n oufile.close() \r\n infile.close()\r\n \r\ndef get_map():\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"map\"], stdout=subprocess.PIPE)\r\n s = result.stdout.decode('utf-8')\r\n return float(s.split()[2])\r\n\r\ndef get_gm_map():\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"gm_map\"], stdout=subprocess.PIPE)\r\n s = result.stdout.decode('utf-8')\r\n return float(s.split()[2])\r\n\r\ndef get_recip_rank():\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"recip_rank\"], stdout=subprocess.PIPE)\r\n s = result.stdout.decode('utf-8')\r\n return float(s.split()[2])\r\n\r\ndef get_Rprec():\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"Rprec\"], stdout=subprocess.PIPE)\r\n s = result.stdout.decode('utf-8')\r\n return float(s.split()[2])\r\n\r\ndef get_bpref():\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"bpref\"], stdout=subprocess.PIPE)\r\n s = result.stdout.decode('utf-8')\r\n return float(s.split()[2])\r\n\r\ndef get_partial_precision():\r\n after = [5,10,15,20,30,100,200,500,1000]\r\n p = []\r\n for a in after:\r\n result = subprocess.run([\"./trec_eval\", \"qrels.txt\", FILERETRIEVALS, \"-m\", \"P.\" + str(a)], stdout=subprocess.PIPE)\r\n p.append(result.stdout.decode('utf-8').split()[2])\r\n return p,after\r\n\r\n\r\n######################################################################################\r\n\r\nK_val = np.arange(2,10)\r\nTR_val = np.arange(1,5)\r\nbest_map = float(\"-inf\")\r\nfor K in tqdm(K_val):\r\n for TR in TR_val:\r\n evaluate_kmeans(K,TR)\r\n m = get_map()\r\n if best_map < m:\r\n best_map = m\r\n best_K = K\r\n best_TR = TR\r\n\r\nprint(\"\\n#########\\nBest K: \" + str(best_K))\r\nprint(\"\\n#########\\nBestTR: \" + str(best_TR))\r\n\r\nprint(\"...\\n...\\n...\\nperforming es 
standard retrieval...\")\r\nevaluate()\r\nes_p,after = get_partial_precision()\r\n\r\nprint(\"...\\n...\\n...\\nperforming clustering based retrieval...\\n\")\r\nevaluate_kmeans(best_K, best_TR)\r\nkmeans_p,after = get_partial_precision()"
]
| [
[
"numpy.arange",
"pandas.DataFrame",
"sklearn.cluster.KMeans",
"sklearn.feature_extraction.text.CountVectorizer"
]
]
|
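`clustering_based_retrieval` above reduces to: vectorize the hit texts, cluster them with KMeans, and re-rank each document by the size of its cluster. That core, self-contained on toy documents (illustrative only):

```python
# Cluster-size re-ranking in miniature: CountVectorizer -> KMeans ->
# sort documents by how populous their cluster is.
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import CountVectorizer

docs = ["covid vaccine trial", "vaccine efficacy study",
        "stock market report", "market volatility news"]

X = CountVectorizer().fit_transform(docs)
clusters = KMeans(n_clusters=2, random_state=123, n_init=10).fit_predict(X)

freq = np.bincount(clusters)                      # documents per cluster
res = pd.DataFrame({"doc": docs, "cluster_rel": freq[clusters]})
print(res.sort_values("cluster_rel", ascending=False))
```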
juesato/jax_verify | [
"7a662be88a7787842a01c1ceb28f5c761a24c61b"
]
| [
"jax_verify/tests/model_zoo_test.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The jax_verify Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for propagating bounds through the networks defined in the Model Zoo.\n\nWe do not perform any check on the returned values but simply ensure that the\nbound propagation can be performed on those networks.\n\"\"\"\nimport pickle\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport jax_verify\nfrom jax_verify.src import bound_propagation\nfrom jax_verify.src import cvxpy_relaxation_solver\nfrom jax_verify.src import relaxation\nfrom jax_verify.tests import model_zoo\nimport numpy as np\n\n\nclass ModelZooModelTests(parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('SmallResidualModel', model_zoo.SmallResidualModel),\n ('ResidualModel', model_zoo.ResidualModel),\n ('TinyModel', model_zoo.TinyModel)\n )\n def test_ibp(self, model_cls):\n\n @hk.transform_with_state\n def model_pred(inputs, is_training, test_local_stats=False):\n model = model_cls()\n return model(inputs, is_training, test_local_stats)\n\n inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)\n params, state = model_pred.init(jax.random.PRNGKey(42), inps,\n is_training=True)\n\n def logits_fun(inputs):\n return model_pred.apply(params, state, None, inputs,\n False, test_local_stats=False)[0]\n\n input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)\n jax_verify.interval_bound_propagation(logits_fun, input_bounds)\n\n @parameterized.named_parameters(\n ('SmallResidualModel', model_zoo.SmallResidualModel),\n ('ResidualModel', model_zoo.ResidualModel),\n ('TinyModel', model_zoo.TinyModel)\n )\n def test_fastlin(self, model_cls):\n\n @hk.transform_with_state\n def model_pred(inputs, is_training, test_local_stats=False):\n model = model_cls()\n return model(inputs, is_training, test_local_stats)\n\n inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)\n params, state = model_pred.init(jax.random.PRNGKey(42), inps,\n is_training=True)\n\n def logits_fun(inputs):\n return model_pred.apply(params, state, None, inputs,\n False, test_local_stats=False)[0]\n\n input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)\n jax_verify.fastlin_bound_propagation(logits_fun, input_bounds)\n\n @parameterized.named_parameters(\n ('SmallResidualModel', model_zoo.SmallResidualModel),\n ('ResidualModel', model_zoo.ResidualModel),\n ('TinyModel', model_zoo.TinyModel)\n )\n def test_ibpfastlin(self, model_cls):\n\n @hk.transform_with_state\n def model_pred(inputs, is_training, test_local_stats=False):\n model = model_cls()\n return model(inputs, is_training, test_local_stats)\n\n inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)\n params, state = model_pred.init(jax.random.PRNGKey(42), inps,\n is_training=True)\n\n def logits_fun(inputs):\n return model_pred.apply(params, state, None, inputs,\n False, test_local_stats=False)[0]\n\n input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)\n 
jax_verify.ibpfastlin_bound_propagation(logits_fun, input_bounds)\n\n @parameterized.named_parameters(\n ('SmallResidualModel', model_zoo.SmallResidualModel),\n ('ResidualModel', model_zoo.ResidualModel),\n ('TinyModel', model_zoo.TinyModel)\n )\n def test_crownibp(self, model_cls):\n\n @hk.transform_with_state\n def model_pred(inputs, is_training, test_local_stats=False):\n model = model_cls()\n return model(inputs, is_training, test_local_stats)\n\n inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)\n params, state = model_pred.init(jax.random.PRNGKey(42), inps,\n is_training=True)\n\n def logits_fun(inputs):\n return model_pred.apply(params, state, None, inputs,\n False, test_local_stats=False)[0]\n\n input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)\n jax_verify.crownibp_bound_propagation(logits_fun, input_bounds)\n\n @parameterized.named_parameters(\n ('SmallResidualModel', model_zoo.SmallResidualModel),\n ('TinyModel', model_zoo.TinyModel))\n def test_cvxpy_relaxation(self, model_cls):\n\n @hk.transform_with_state\n def model_pred(inputs, is_training, test_local_stats=False):\n model = model_cls()\n return model(inputs, is_training, test_local_stats)\n\n inps = jnp.zeros((4, 28, 28, 1), dtype=jnp.float32)\n params, state = model_pred.init(jax.random.PRNGKey(42), inps,\n is_training=True)\n\n def logits_fun(inputs):\n return model_pred.apply(params, state, None, inputs,\n False, test_local_stats=False)[0]\n\n output = logits_fun(inps)\n input_bounds = jax_verify.IntervalBound(inps - 1.0, inps + 1.0)\n\n boundprop_transform = jax_verify.ibp_transform\n relaxation_transform = relaxation.RelaxationTransform(boundprop_transform)\n var, graph = bound_propagation.bound_propagation(\n relaxation_transform, logits_fun, input_bounds)\n\n objective_bias = 0.\n objective = jax.ops.index_update(jnp.zeros(output.shape[1:]), 0, 1)\n index = 0\n\n lower_bound, _ = relaxation.solve_relaxation(\n cvxpy_relaxation_solver.CvxpySolver, objective, objective_bias,\n var, graph.env, index)\n\n self.assertLessEqual(lower_bound, output[index, 0])\n\n\ndef _predict_mlp(params, inputs):\n # pylint: disable=invalid-name\n inputs = np.reshape(inputs, (inputs.shape[0], -1))\n for W, b in params[:-1]:\n outputs = jnp.dot(inputs, W) + b\n inputs = jnp.maximum(outputs, 0)\n W, b = params[-1]\n return jnp.dot(inputs, W) + b\n\n\nclass SavedModelTests(parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('PGDNN', 'models/raghunathan18_pgdnn.pkl', 20, 19),\n )\n def test_mnist_mlp(self, model_name, num_examples, expected_correct):\n with jax_verify.open_file('mnist/x_test_first100.npy', 'rb') as f:\n mnist_x = np.load(f)\n with jax_verify.open_file('mnist/y_test.npy', 'rb') as f:\n mnist_y = np.load(f)\n with jax_verify.open_file(model_name, 'rb') as f:\n params = pickle.load(f) # pytype: disable=wrong-arg-types # due to GFile\n logits = np.array(_predict_mlp(params, mnist_x[:num_examples]))\n pred_labels = np.argmax(logits, axis=1)\n num_correct = np.sum(np.equal(mnist_y[:num_examples], pred_labels))\n print(num_correct)\n assert num_correct == expected_correct, f'Number correct: {num_correct}'\n\n\nif __name__ == '__main__':\n absltest.main()\n"
]
| [
[
"numpy.reshape",
"numpy.load",
"numpy.argmax",
"numpy.equal"
]
]
|
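The tests above only check that bound propagation runs on the zoo models. A standalone sketch of what one such call computes, assuming the same `jax_verify.interval_bound_propagation` entry point used in the tests:

```python
# IBP on a tiny linear+ReLU function: propagate an input box and read
# off elementwise output bounds. Sketch; assumes jax_verify is installed.
import jax.numpy as jnp
import jax_verify

W = jnp.ones((3, 2))

def fun(x):
    return jnp.maximum(x @ W, 0.0)

x = jnp.zeros((1, 3))
bounds = jax_verify.IntervalBound(x - 1.0, x + 1.0)
out = jax_verify.interval_bound_propagation(fun, bounds)
print(out.lower, out.upper)
```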
TheSuperMyo/vnpy | [
"e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b"
]
| [
"examples/import_csv.py"
]
| [
"from vnpy.trader.constant import (Exchange, Interval)\nimport pandas as pd\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.object import (BarData,TickData)\nfrom datetime import datetime, timedelta, timezone\nimport sys\n\n# 封装函数\ndef move_df_to_mongodb(imported_data:pd.DataFrame,collection_name:str):\n ticks = []\n start = None\n count = 0\n utc_8 = timezone(timedelta(hours=8))\n for row in imported_data.itertuples():\n\n tick = TickData(\n symbol = row.symbol,\n exchange = row.exchange,\n datetime = row.datetime.replace(tzinfo=utc_8),\n #datetime = row.datetime,\n name = \"TickDataName\",\n volume = row.volume,\n open_interest = row.open_interest,\n turnover = row.turnover,\n last_price = row.last_price,\n last_volume = row.last_volume,\n last_amount = row.last_amount,\n limit_up = row.limit_up,\n limit_down = row.limit_down,\n open_price = row.open_price,\n high_price = row.high_price,\n low_price = row.low_price,\n pre_close = row.pre_close,\n bid_price_1 = row.bid_price_1,\n bid_price_2 = row.bid_price_2,\n bid_price_3 = row.bid_price_3,\n bid_price_4 = row.bid_price_4,\n bid_price_5 = row.bid_price_5,\n ask_price_1 = row.ask_price_1,\n ask_price_2 = row.ask_price_2,\n ask_price_3 = row.ask_price_3,\n ask_price_4 = row.ask_price_4,\n ask_price_5 = row.ask_price_5,\n bid_volume_1 = row.bid_volume_1,\n bid_volume_2 = row.bid_volume_2,\n bid_volume_3 = row.bid_volume_3,\n bid_volume_4 = row.bid_volume_4,\n bid_volume_5 = row.bid_volume_5,\n ask_volume_1 = row.ask_volume_1,\n ask_volume_2 = row.ask_volume_2,\n ask_volume_3 = row.ask_volume_3,\n ask_volume_4 = row.ask_volume_4,\n ask_volume_5 = row.ask_volume_5,\n gateway_name=\"DB\",\n )\n ticks.append(tick)\n\n # do some statistics\n count += 1\n if not start:\n start = tick.datetime\n end = tick.datetime\n\n # insert into database\n database_manager.save_tick_data(ticks, collection_name)\n print(f'Insert Tick: {count} from {start} - {end}')\n\nif __name__ == \"__main__\":\n #imported_data = pd.read_csv('D:\\Study\\数据\\PoboForVnpy\\cu7777\\cu7777_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:\\Study\\数据\\PoboForVnpy\\cu6666\\cu6666_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:/Study/数据/PoboForVnpy/al6666/al6666_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:/Study/数据/PoboForVnpy/al7777/al7777_20200907-20200911.csv',encoding='utf-8')\n \n sys_collection_name = sys.argv[1]\n sys_data_path = sys.argv[2]\n \n imported_data = pd.read_csv(sys_data_path,encoding='utf-8')\n \n \n # 将csv文件中 `市场代码`的 SC 替换成 Exchange.SHFE SHFE\n imported_data['exchange'] = Exchange.SHFE\n # 明确需要是float数据类型的列\n float_columns = ['volume','open_interest','last_price','last_volume','limit_up','limit_down','open_price','high_price','low_price','pre_close','bid_price_1','bid_price_2','bid_price_3','bid_price_4','bid_price_5','ask_price_1','ask_price_2','ask_price_3','ask_price_4','ask_price_5','bid_volume_1','bid_volume_2','bid_volume_3','bid_volume_4','bid_volume_5','ask_volume_1','ask_volume_2','ask_volume_3','ask_volume_4','ask_volume_5']\n for col in float_columns:\n imported_data[col] = imported_data[col].astype('float')\n # 明确时间戳的格式\n # %Y-%m-%d %H:%M:%S.%f 代表着你的csv数据中的时间戳必须是 2020-05-01 08:32:30.500000 格式\n datetime_format = '%Y-%m-%d %H:%M:%S.%f'\n imported_data['datetime'] = pd.to_datetime(imported_data['datetime'],format=datetime_format)\n\n\n #!!!!!!!!!!! 
记得改名\n #move_df_to_mongodb(imported_data,'cu7777')\n #move_df_to_mongodb(imported_data,'cu6666')\n #move_df_to_mongodb(imported_data,'al6666')\n #move_df_to_mongodb(imported_data,'al7777')\n \n move_df_to_mongodb(imported_data, sys_collection_name)\n"
]
| [
[
"pandas.read_csv",
"pandas.to_datetime"
]
]
|
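The script above parses timestamps with a fixed `%Y-%m-%d %H:%M:%S.%f` format and then attaches UTC+8 per row via `replace(tzinfo=...)`. The vectorized pandas equivalent of that step, self-contained with inline data instead of a CSV path:

```python
# Timestamp handling from import_csv.py in isolation: explicit format,
# then localize the whole column to UTC+8 in one call.
from datetime import timedelta, timezone
from io import StringIO
import pandas as pd

csv = StringIO("datetime\n2020-05-01 08:32:30.500000\n")
df = pd.read_csv(csv)
df["datetime"] = pd.to_datetime(df["datetime"], format="%Y-%m-%d %H:%M:%S.%f")

utc_8 = timezone(timedelta(hours=8))
df["datetime"] = df["datetime"].dt.tz_localize(utc_8)
print(df["datetime"].iloc[0])
```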
zonca/petsc4py | [
"33408c70b4211b801c24f8c3cdb859f5aaf59367"
]
| [
"test/test_dmplex.py"
]
| [
"from petsc4py import PETSc\nimport unittest\nimport numpy as np\n\n# --------------------------------------------------------------------\n\nclass BaseTestPlex(object):\n\n COMM = PETSc.COMM_WORLD\n DIM = 1\n CELLS = [[0, 1], [1, 2]]\n COORDS = [[0.], [0.5], [1.]]\n COMP = 1\n DOFS = [1, 0]\n\n def setUp(self):\n self.plex = PETSc.DMPlex().createFromCellList(self.DIM,\n self.CELLS,\n self.COORDS,\n comm=self.COMM)\n\n def tearDown(self):\n self.plex.destroy()\n self.plex = None\n\n def testTopology(self):\n dim = self.plex.getDimension()\n pStart, pEnd = self.plex.getChart()\n cStart, cEnd = self.plex.getHeightStratum(0)\n vStart, vEnd = self.plex.getDepthStratum(0)\n numDepths = self.plex.getLabelSize(\"depth\")\n coords_raw = self.plex.getCoordinates().getArray()\n coords = np.reshape(coords_raw, (vEnd - vStart, dim))\n self.assertEqual(dim, self.DIM)\n self.assertEqual(cEnd-cStart, len(self.CELLS))\n self.assertEqual(vEnd-vStart, len(self.COORDS))\n self.assertEqual(numDepths, self.DIM+1)\n self.assertTrue((coords == self.COORDS).all())\n\n def testClosure(self):\n pStart, pEnd = self.plex.getChart()\n for p in range(pStart, pEnd):\n closure = self.plex.getTransitiveClosure(p)[0]\n for c in closure:\n cone = self.plex.getCone(c)\n self.assertEqual(self.plex.getConeSize(c), len(cone))\n for i in cone:\n self.assertIn(i, closure)\n star = self.plex.getTransitiveClosure(p, useCone=False)[0]\n for s in star:\n support = self.plex.getSupport(s)\n self.assertEqual(self.plex.getSupportSize(s), len(support))\n for i in support:\n self.assertIn(i, star)\n\n def testSectionDofs(self):\n section = self.plex.createSection([self.COMP], [self.DOFS])\n size = section.getStorageSize()\n entity_dofs = [self.plex.getStratumSize(\"depth\", d) *\n self.DOFS[d] for d in range(self.DIM+1)]\n self.assertEqual(sum(entity_dofs), size)\n\n def testSectionClosure(self):\n section = self.plex.createSection([self.COMP], [self.DOFS])\n self.plex.setDefaultSection(section)\n vec = self.plex.createLocalVec()\n pStart, pEnd = self.plex.getChart()\n for p in range(pStart, pEnd):\n for i in range(section.getDof(p)):\n off = section.getOffset(p)\n vec.setValue(off+i, p)\n\n for p in range(pStart, pEnd):\n point_closure = self.plex.getTransitiveClosure(p)[0]\n dof_closure = self.plex.vecGetClosure(section, vec, p)\n for p in dof_closure:\n self.assertIn(p, point_closure)\n\n def testBoundaryLabel(self):\n self.plex.markBoundaryFaces(\"boundary\")\n self.assertTrue(self.plex.hasLabel(\"boundary\"))\n\n faces = self.plex.getStratumIS(\"boundary\", 1)\n for f in faces.getIndices():\n for p in self.plex.getTransitiveClosure(f)[0]:\n self.plex.setLabelValue(\"boundary\", p, 1)\n pStart, pEnd = self.plex.getChart()\n for p in range(pStart, pEnd):\n if self.plex.getLabelValue(\"boundary\", p) != 1:\n self.plex.setLabelValue(\"boundary\", p, -1)\n\n numBoundary = self.plex.getStratumSize(\"boundary\", 1)\n numInterior = self.plex.getStratumSize(\"boundary\", -1)\n self.assertNotEqual(numBoundary, pEnd - pStart)\n self.assertNotEqual(numInterior, pEnd - pStart)\n self.assertEqual(numBoundary + numInterior, pEnd - pStart)\n\n# --------------------------------------------------------------------\n\nclass BaseTestPlex_2D(BaseTestPlex):\n DIM = 2\n CELLS = [[0, 1, 3], [1, 3, 4], [1, 2, 4], [2, 4, 5],\n [3, 4, 6], [4, 6, 7], [4, 5, 7], [5, 7, 8]]\n COORDS = [[0.0, 0.0], [0.5, 0.0], [1.0, 0.0],\n [0.0, 0.5], [0.5, 0.5], [1.0, 0.5],\n [0.0, 1.0], [0.5, 1.0], [1.0, 1.0]]\n DOFS = [1, 0, 0]\n\nclass BaseTestPlex_3D(BaseTestPlex):\n DIM = 
3\n CELLS = [[0, 2, 3, 7], [0, 2, 6, 7], [0, 4, 6, 7],\n [0, 1, 3, 7], [0, 1, 5, 7], [0, 4, 5, 7]]\n COORDS = [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.],\n [0., 0., 1.], [1., 0., 1.], [0., 1., 1.], [1., 1., 1.]]\n DOFS = [1, 0, 0, 0]\n\n# --------------------------------------------------------------------\n\nclass TestPlex_1D(BaseTestPlex, unittest.TestCase):\n pass\n\nclass TestPlex_2D(BaseTestPlex_2D, unittest.TestCase):\n pass\n\nclass TestPlex_3D(BaseTestPlex_3D, unittest.TestCase):\n pass\n\nclass TestPlex_2D_P3(BaseTestPlex_2D, unittest.TestCase):\n DOFS = [1, 2, 1]\n\nclass TestPlex_3D_P3(BaseTestPlex_3D, unittest.TestCase):\n DOFS = [1, 2, 1, 0]\n\nclass TestPlex_3D_P4(BaseTestPlex_3D, unittest.TestCase):\n DOFS = [1, 3, 3, 1]\n\nimport sys\ntry:\n raise PETSc.Error\n PETSc.DMPlex().createBoxMesh(1, comm=PETSc.COMM_SELF).destroy()\nexcept PETSc.Error:\n pass\nelse:\n class TestPlex_2D_Box(BaseTestPlex_2D, unittest.TestCase):\n def setUp(self):\n self.plex = PETSc.DMPlex().createBoxMesh(self.DIM)\n\n class TestPlex_2D_Boundary(BaseTestPlex_2D, unittest.TestCase):\n def setUp(self):\n boundary = PETSc.DMPlex().create(self.COMM)\n boundary.createSquareBoundary([0., 0.], [1., 1.], [2, 2])\n boundary.setDimension(self.DIM-1)\n self.plex = PETSc.DMPlex().generate(boundary)\n\n class TestPlex_3D_Box(BaseTestPlex_3D, unittest.TestCase):\n def setUp(self):\n self.plex = PETSc.DMPlex().createBoxMesh(self.DIM)\n\n class TestPlex_3D_Boundary(BaseTestPlex_3D, unittest.TestCase):\n def setUp(self):\n boundary = PETSc.DMPlex().create(self.COMM)\n boundary.createCubeBoundary([0., 0., 0.], [1., 1., 1.], [1, 1, 1])\n boundary.setDimension(self.DIM-1)\n self.plex = PETSc.DMPlex().generate(boundary)\n\n# --------------------------------------------------------------------\n\nif __name__ == '__main__':\n unittest.main()\n"
]
| [
[
"numpy.reshape"
]
]
|
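The 1-D fixture those tests build can be reproduced standalone; a minimal sketch assuming a working petsc4py install with the same `createFromCellList` signature the tests use:

```python
# Two 1-D cells on three vertices, as in BaseTestPlex above.
from petsc4py import PETSc

plex = PETSc.DMPlex().createFromCellList(
    1,                       # topological dimension
    [[0, 1], [1, 2]],        # cells as vertex index lists
    [[0.0], [0.5], [1.0]])   # vertex coordinates

pStart, pEnd = plex.getChart()
cStart, cEnd = plex.getHeightStratum(0)  # height 0 = cells
print("points:", pEnd - pStart, "cells:", cEnd - cStart)
plex.destroy()
```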
fossabot/satsense | [
"b0fa650193995a30328f26a36ebab2437c0e37ef"
]
| [
"satsense/features/texton.py"
]
| [
"\"\"\"Texton feature implementation.\"\"\"\nimport logging\nfrom typing import Iterator\n\nimport numpy as np\nfrom scipy.signal import convolve\nfrom skimage.filters import gabor_kernel, gaussian\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom ..generators import FullGenerator\nfrom ..image import Image\nfrom .feature import Feature\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_texton_kernels():\n \"\"\"Create filter bank kernels.\"\"\"\n kernels = []\n angles = 8\n thetas = np.linspace(0, np.pi, angles)\n for theta in thetas:\n for sigma in (1, ):\n for frequency in (0.05, ):\n kernel = np.real(\n gabor_kernel(\n frequency, theta=theta, sigma_x=sigma, sigma_y=sigma))\n kernels.append(kernel)\n\n return kernels\n\n\ndef get_texton_descriptors(image: Image):\n \"\"\"Compute texton descriptors.\"\"\"\n logger.debug(\"Computing texton descriptors\")\n kernels = create_texton_kernels()\n\n # Prepare input image\n array = image['grayscale']\n mask = array.mask\n array = array.filled(fill_value=0)\n\n # Create result image\n shape = array.shape + (len(kernels) + 1, )\n result = np.ma.empty(shape, dtype=array.dtype)\n result.mask = np.zeros(result.shape, dtype=bool)\n\n for k, kernel in enumerate(kernels):\n result[:, :, k] = convolve(array, kernel, mode='same')\n result.mask[:, :, k] = mask\n\n result[:, :, -1] = gaussian(array, sigma=1) - gaussian(array, sigma=3)\n result.mask[:, :, -1] = mask\n\n logger.debug(\"Done computing texton descriptors\")\n return result\n\n\nImage.register('texton_descriptors', get_texton_descriptors)\n\n\ndef texton_cluster(images: Iterator[Image],\n n_clusters=32,\n max_samples=100000,\n sample_window=(8192, 8192)) -> MiniBatchKMeans:\n \"\"\"Compute texton clusters.\"\"\"\n nfeatures = int(max_samples / len(images))\n descriptors = []\n for image in images:\n image.precompute_normalization()\n\n chunk = np.minimum(image.shape, sample_window)\n\n generator = FullGenerator(image, chunk)\n generator.load_image('texton_descriptors', (chunk, ))\n\n max_features_per_window = int(nfeatures / np.prod(generator.shape))\n\n rand_state = np.random.RandomState(seed=0)\n\n for array in generator:\n array = array.reshape(-1, array.shape[-1])\n non_masked = ~array.mask.any(axis=-1)\n data = array.data[non_masked]\n if data.shape[0] > max_features_per_window:\n data = data[rand_state.choice(\n data.shape[0], max_features_per_window, replace=False)]\n descriptors.append(data)\n\n descriptors = np.vstack(descriptors)\n\n # Cluster the descriptors\n mbkmeans = MiniBatchKMeans(\n n_clusters=n_clusters, random_state=42).fit(descriptors)\n\n return mbkmeans\n\n\ndef texton(descriptors, kmeans: MiniBatchKMeans, normalized=True):\n \"\"\"Calculate the texton feature on the given window.\"\"\"\n n_clusters = kmeans.n_clusters\n\n shape = descriptors.shape\n descriptors = descriptors.reshape(shape[0] * shape[1], shape[2])\n\n codewords = kmeans.predict(descriptors)\n counts = np.bincount(codewords, minlength=n_clusters)\n\n # Perform normalization\n if normalized:\n counts = counts / n_clusters\n\n return counts\n\n\nclass Texton(Feature):\n \"\"\"\n Texton Feature Transform calculator\n\n First create a codebook of Texton features from the suplied images using\n `from_images`. 
Then we can compute the histogram of codewords for a given\n window.\n\n For more information see [1]_.\n\n Parameters\n ----------\n window_shapes: list\n The window shapes to calculate the feature on.\n kmeans : sklearn.cluster.MiniBatchKMeans\n The trained KMeans clustering from opencv\n normalized : bool\n If True normalize the feature by the total number of clusters\n\n Example\n -------\n Calculating the Texton feature on an image using a generator::\n\n from satsense import Image\n from satsense.generators import FullGenerator\n from satsense.extract import extract_feature\n from satsense.features import Texton\n\n windows = ((50, 50), )\n\n image = Image('test/data/source/section_2_sentinel.tif', 'quickbird')\n image.precompute_normalization()\n\n texton = Texton.from_images(windows, [image])\n\n generator = FullGenerator(image, (10, 10))\n\n feature_vector = extract_feature(texton, generator)\n print(feature_vector.shape)\n\n Notes\n -----\n .. [1] Arbelaez, Pablo, et al., \"Contour detection and hierarchical\n image segmentation,\" IEEE transactions on pattern analysis and\n machine intelligence (2011), vol. 33 no. 5, pp. 898-916.\n \"\"\"\n\n base_image = 'texton_descriptors'\n compute = staticmethod(texton)\n\n def __init__(self, windows, kmeans: MiniBatchKMeans, normalized=True):\n \"\"\"Create Texton feature.\"\"\"\n super().__init__(windows, kmeans=kmeans, normalized=normalized)\n self.size = kmeans.n_clusters\n\n @classmethod\n def from_images(cls,\n windows,\n images: Iterator[Image],\n n_clusters=32,\n max_samples=100000,\n sample_window=(8192, 8192),\n normalized=True):\n \"\"\"\n Create a codebook of Texton features from the suplied images.\n\n Using the images `max_samples` Texton features are extracted\n evenly from all images. These features are then clustered into\n `n_clusters` clusters. This codebook can then be used to\n calculate a histogram of this codebook.\n\n Parameters\n ----------\n windows : list[tuple]\n The window shapes to calculate the feature on.\n images : Iterator[satsense.Image]\n Iterable for the images to calculate the codebook no\n n_cluster : int\n The number of clusters to create for the codebook\n max_samples : int\n The maximum number of samples to use for creating the codebook\n normalized : bool\n Wether or not to normalize the resulting feature with regards to\n the number of clusters\n \"\"\"\n kmeans = texton_cluster(\n images, n_clusters, max_samples, sample_window=sample_window)\n return cls(windows, kmeans, normalized)\n"
]
| [
[
"numpy.minimum",
"numpy.linspace",
"numpy.ma.empty",
"numpy.bincount",
"numpy.prod",
"sklearn.cluster.MiniBatchKMeans",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.vstack",
"scipy.signal.convolve"
]
]
|
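The `texton` function above is a bag-of-visual-words histogram: assign each pixel descriptor to its nearest codeword and count. That step in isolation, with random data standing in for real filter responses (note the source normalizes by the number of clusters, not the number of samples):

```python
# Codeword histogram as in texton(): predict cluster ids, then bincount.
import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
descriptors = rng.rand(500, 9)   # 500 pixels x 9 filter responses (toy)

kmeans = MiniBatchKMeans(n_clusters=32, random_state=42, n_init=3)
codewords = kmeans.fit_predict(descriptors)
counts = np.bincount(codewords, minlength=kmeans.n_clusters)
print(counts / kmeans.n_clusters)  # as in texton(..., normalized=True)
```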
mohdzamrimurah/ftsm_technical_reports | [
"ea6bdbc4184e564d81ec0990af25a94a5abf010f"
]
| [
"learning_tensorflow/template_01.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 8 23:49:44 2017\n\n@author: zamri\n\"\"\"\n\nimport tensorflow as tf\n# initialize variables / model parameters# define the training loop operations\n\n\ndef inference(X):\n # compute inference model over data X and return the result\n\n return None\n\ndef loss(X,Y):\n # compute loss over training data X and expected outputs Y\n return None\n\ndef inputs():\n # read / generate input training data X and expected outputs Y\n return None\n\ndef train(total_loss):\n # train / adjust model parameters according to computed total loss\n return None\n\ndef evaluate(sess, X, Y):\n # evaluate the resulting trained model# Launch the graph in \n # a session, setup boilerplate\n return None\n\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n\n tf.initialize_all_variables().run()\n X, Y = inputs()\n total_loss = loss(X, Y)\n\n train_op = train(total_loss)\n\n coord = tf.train.Coordinator()\n\n threads = tf.train.start_queue_runners(sess = sess, \n coord = coord)\n\n # actual training loop\n training_steps = 1000\n for step in range(training_steps):\n sess.run([train_op]) \n # for debugging and learning purposes, \n # see how the loss gets decremented thru \n # training steps\n if step % 10 == 0:\n print(\"loss: %f\", sess.run([total_loss]))\n if step % 50 == 0:\n saver.save(sess, 'my-model', \n global_step=training_steps)\n\n evaluate(sess, X, Y)\n coord.request_stop()\n coord.join(threads)\n sess.close()\n"
]
| [
[
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.Saver"
]
]
|
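As written, the template's stubs return `None`, so its training loop cannot run (and `tf.train.Saver()` raises if no variables exist yet). One minimal way to fill the stubs, TF1-style to match the file; the linear-regression data and learning rate are illustrative only:

```python
# Illustrative stub implementations for the template above (TF1 API).
import tensorflow as tf

W = tf.Variable(tf.zeros([1, 1]), name="weights")
b = tf.Variable(0.0, name="bias")

def inference(X):
    return tf.matmul(X, W) + b

def loss(X, Y):
    return tf.reduce_sum(tf.squared_difference(Y, inference(X)))

def inputs():
    X = tf.constant([[1.0], [2.0]])
    Y = tf.constant([[2.0], [4.0]])
    return X, Y

def train(total_loss):
    return tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)
```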
Melvin95/Music-Generation-LSTM-RBM | [
"f6cb0b3f034be2862b2bacefd7f9647cb79270de"
]
| [
"lstm_rbm.py"
]
| [
"'''\r\nLSTM-RBM model to generate music\r\n'''\r\n\r\nimport tensorflow as tf\r\nfrom tqdm import tqdm\r\nimport utildata as ud\r\nimport numpy as np\r\nfrom music21 import*\r\nimport gc\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\nclass lstm_rbm(object):\r\n '''LSTM-RBM class'''\r\n def __init__(self,input=None,rbm_path=None,n_hidden=None,n_visible=None,num_timesteps=None,epochs=None,batch_size=None,pitch_dict=None,duration_dict=None,octave_dict=None,pitch_oct_dict=None):\r\n '''\r\n :param config: contains model's parameters(number of layers/timesteps/batch size/learning rate etc)\r\n :input: array of data\r\n '''\r\n #Music represented in an array\r\n self.dataset = input\r\n self.duration_dict = duration_dict\r\n self.pitch_dict = pitch_dict\r\n self.pitch_oct_dict = pitch_oct_dict\r\n self.octave_dict = octave_dict\r\n\r\n self.weights_path = rbm_path\r\n\r\n #LSTM-RBM HYPER-PARAMETERS\r\n self.num_timesteps = num_timesteps\r\n self.n_visible = n_visible\r\n self.n_hidden = n_hidden\r\n self.epochs = epochs\r\n\r\n #LSTM hidden unit size (number of neurons/width)\r\n self.n_hidden_lstm = 256\r\n\r\n #Learning rate placeholder, adjusted during training\r\n self.lr = 0.001\r\n\r\n #Batch size and learning rate VALUES\r\n self.batch_size_ = batch_size\r\n self.lr_ = 0.001\r\n\r\n '''\r\n Define variables for model\r\n '''\r\n #Input tensor with shape [?,n_visible], for visible layer\r\n self.x = tf.placeholder(tf.float32,[None,self.n_visible],name=\"x\")\r\n\r\n #RBM shared weights\r\n self.W = tf.Variable(tf.random_normal([self.n_visible,self.n_hidden],0.01),name=\"W\")\r\n\r\n '''communicate sequence history to the RBM hidden layer(determine bh_t)'''\r\n #Weights from LSTM hidden unit a t-1 to RBM hidden layer at t\r\n self.Wuh = tf.Variable(tf.random_normal([self.n_hidden_lstm,self.n_hidden],0.000001),name=\"Wuh\")\r\n #Bias from LSTM hidden unit a t-1 to RBM hidden layer at t\r\n self.bh = tf.Variable(tf.zeros([1,self.n_hidden],tf.float32),name=\"bh\")\r\n\r\n '''communicate sequence history to the RBM visible layer(determine bv_t)'''\r\n #Weigths from LSTM hidden unit at t-1 to RBM visible layer a t\r\n self.Wuv = tf.Variable(tf.random_normal([self.n_hidden_lstm,self.n_visible],0.00001),name=\"Wuv\")\r\n #Bias from LSTM hidden unit at t-1 to RBM visible layer a t\r\n self.bv = tf.Variable(tf.zeros([1,self.n_visible],tf.float32),name=\"bv\")\r\n\r\n '''LSTM hidden unit variables'''\r\n #Weights of the input/music from RBM visible layer at t to LSTM hidden unit at t\r\n self.Wvu = tf.Variable(tf.random_normal([4,self.n_visible,self.n_hidden_lstm],0.0001),name=\"Wvu\")\r\n #Weights between each LSTM hidden units through time\r\n self.Wuu = tf.Variable(tf.random_normal([4,self.n_hidden_lstm,self.n_hidden_lstm],0.0001),name=\"Wuu\")\r\n #Bias for LSTM hidden units through time\r\n self.bu = tf.Variable(tf.zeros([4,self.n_hidden_lstm],tf.float32),name=\"bu\")\r\n #Initialize LSTM with internal and external states\r\n self.u0 = tf.Variable(tf.zeros([1,self.n_hidden_lstm],tf.float32),name=\"u0\")\r\n self.c0 = tf.Variable(tf.zeros([1,self.n_hidden_lstm],tf.float32),name=\"c0\")\r\n\r\n '''RBM biases(bias for RBM at a particular time t)'''\r\n #Bias to propagate from visible->hidden for RBM t\r\n self.bv_t = tf.Variable(tf.ones([self.batch_size_,self.n_visible],tf.float32),name=\"bv_t\")\r\n #Bias to propagate from hidden->visible for RBM t\r\n self.bh_t = tf.Variable(tf.ones([self.batch_size_,self.n_hidden],tf.float32),name=\"bh_t\")\r\n\r\n #tensor of batch_size\r\n 
self.batch_size = tf.shape(self.x)[0]\r\n #Reshape bias matrices\r\n tf.assign(self.bh_t,tf.tile(self.bh_t,[self.batch_size,1]))\r\n tf.assign(self.bv_t,tf.tile(self.bv_t,[self.batch_size,1]))\r\n\r\n def lstm_recurrence(self,prev_t,xt):\r\n '''Function to get values for LSTM hidden unit a t\r\n given (prev_t) LSTM unit at t-1 and (xt) current input\r\n '''\r\n xt = tf.reshape(xt,[1,self.n_visible])\r\n\r\n #Two states in LSTM internal cell state(ct) and external state/output(st)\r\n #get previous states\r\n st_1,ct_1= prev_t[0],prev_t[1]\r\n\r\n #Input layer:decides if new information is relevant then lets it in\r\n i = tf.sigmoid(tf.matmul(xt,self.Wvu[0])+tf.matmul(st_1,self.Wuu[0])+self.bu[0])\r\n #forget layer:gets rid of irrelevant information\r\n f = tf.sigmoid(tf.matmul(xt,self.Wvu[1])+tf.matmul(st_1,self.Wuu[1])+self.bu[1])\r\n #output layer\r\n o = tf.sigmoid(tf.matmul(xt,self.Wvu[2])+tf.matmul(st_1,self.Wuu[2])+self.bu[2])\r\n #some layer\r\n g = tf.tanh(tf.matmul(xt,self.Wvu[3])+tf.matmul(st_1,self.Wuu[3])+self.bu[3])\r\n\r\n #update internal cell state\r\n ct = (ct_1*f)+(g*i)\r\n #update external state\r\n st = tf.tanh(ct)*o\r\n\r\n return [st,ct]\r\n\r\n def hidden_bias_recurrence(self,_,st_1):\r\n return tf.add(self.bh,tf.matmul(st_1,self.Wuh))\r\n\r\n def visible_bias_recurrence(self,_,st_1):\r\n return tf.add(self.bv,tf.matmul(st_1,self.Wuv))\r\n\r\n def sample(self,prob_dist):\r\n return tf.floor(prob_dist+tf.random_uniform(tf.shape(prob_dist),0,1))\r\n\r\n def initialize_model(self,sess):\r\n '''\r\n Pretrain RBM layer to initialize RBM parameters\r\n '''\r\n saver = tf.train.Saver([self.W,self.Wuh,self.Wuv,self.Wvu,self.Wuu,self.bh,self.bv,self.bu,self.u0,self.c0])\r\n\r\n #If model already initialized\r\n if self.weights_path:\r\n saver.restore(sess,self.weights_path)\r\n else:\r\n '''Contrastive Divergence Algorithm'''\r\n #Sample visible layer x\r\n x_sample = self.gibbs_sample(self.x,1)\r\n\r\n h = self.sample(tf.sigmoid(tf.matmul(self.x,self.W)+self.bh))\r\n\r\n h_sample = self.sample(tf.sigmoid(tf.matmul(x_sample,self.W)+self.bh))\r\n\r\n '''Update the weights and biases by using the difference\r\n '''\r\n batch_size = tf.cast(tf.shape(self.x)[0],tf.float32)\r\n dW = tf.multiply(self.lr_/batch_size,tf.subtract(tf.matmul(tf.transpose(self.x),h),tf.matmul(tf.transpose(x_sample),h_sample)))\r\n dbv = tf.multiply(self.lr_/batch_size,tf.reduce_sum(tf.subtract(self.x,x_sample),0,True))\r\n dbh = tf.multiply(self.lr_/batch_size,tf.reduce_sum(tf.subtract(h,h_sample),0,True))\r\n\r\n updt = [self.W.assign_add(dW),self.bv.assign_add(dbv),self.bh.assign_add(dbh)]\r\n\r\n #train on a single RBM\r\n sess.run(tf.global_variables_initializer())\r\n print(\"---Pretraining RBM Layer---\")\r\n for epoch in tqdm(range(self.epochs)):\r\n for batch in tqdm(range(0,len(self.dataset)-self.batch_size_,self.batch_size_)):\r\n batch_x = self.dataset[batch:batch+self.batch_size_]\r\n sess.run(updt,feed_dict={self.x:batch_x})\r\n saver.save(sess,'./TrainingData/Basic-RBM/w.ckpt')\r\n return sess\r\n\r\n #Gibbs sampling recursively from visible layer to hidden layer then back to visible layer\r\n def gibbs_sample(self,x,k):\r\n def gibbs_step(i,k,xk):\r\n '''Perform a SINGLE gibbs step\r\n :param i: current loop iteration\r\n :param k: number of gibbs step to perform\r\n :param xk: The output sampled from RBM\r\n '''\r\n #Feed the input x into the visible layer\r\n v = xk\r\n #Forward propagation to sample hk from the hidden layer\r\n hk = 
self.sample(tf.sigmoid(tf.matmul(v,self.W)+self.bh))\r\n #Backpropgate to sample xk from the visible layer\r\n xk = self.sample(tf.sigmoid(tf.matmul(hk,tf.transpose(self.W))+self.bv))\r\n return i+1,k,xk\r\n\r\n #Run k-gibbs steps and return the sample\r\n [_,_,x_sample] = tf.while_loop(lambda i,n,*args: i < n, gibbs_step, [0,k,x],\r\n parallel_iterations=1,back_prop=False)\r\n\r\n return tf.stop_gradient(x_sample)\r\n\r\n def contrastive_divergence(self,k,lr=0.001):\r\n '''Run k steps of the contrastive divergence '''\r\n #Sample visible layer x\r\n x_sample = self.gibbs_sample(k)\r\n\r\n h = self.sample(tf.sigmoid(tf.matmul(self.x,self.W)+self.bh))\r\n\r\n h_sample = self.sample(tf.sigmoid(tf.matmul(x_sample,self.W)+self.bh))\r\n\r\n '''Update the weights and biases by using the difference\r\n '''\r\n batch_size = tf.cast(tf.shape(self.x)[0],tf.float32)\r\n dW = tf.multiply(self.lr_/batch_size,tf.subtract(tf.matmul(tf.transpose(self.x),h),tf.matmul(tf.transpose(x_sample),h_sample)))\r\n dbv = tf.multiply(self.lr_/batch_size,tf.reduce_sum(tf.subtract(self.x,x_sample),0,True))\r\n dbh = tf.multiply(self.lr_/batch_size,tf.reduce_sum(tf.subtract(h,h_sample),0,True))\r\n\r\n return [self.W.assign_add(dW),self.bv.assign_add(dbv),self.bh.assign_add(dbh)]\r\n\r\n def free_energy_cost(self,k):\r\n '''Calculate the loss of the model, since the RBM is an energy based model,\r\n Calculate the free energy cost between input and sample\r\n '''\r\n x_sample = self.gibbs_sample(k)\r\n\r\n #Function to that returns free energy of v (visible layer)\r\n free_energy = lambda v: - tf.reduce_sum(tf.log(1+tf.exp(tf.matmul(v,self.W)+self.bh)),1)-tf.matmul(v,tf.transpose(self.bv))\r\n\r\n #Loss is difference in free energy between the sample and the original\r\n cost = tf.reduce_mean(tf.subtract(free_energy(self.x),free_energy(x_sample)))\r\n\r\n return cost\r\n\r\n def train(self):\r\n lstm_state = tf.scan(self.lstm_recurrence,self.x,initializer=[self.u0,self.c0])\r\n s_t, c_t = lstm_state[0],lstm_state[1]\r\n\r\n self.bh_t = tf.reshape(tf.scan(self.hidden_bias_recurrence,s_t,tf.zeros([1,self.n_hidden],tf.float32)),[self.batch_size,self.n_hidden])\r\n self.bv_t = tf.reshape(tf.scan(self.visible_bias_recurrence,s_t,tf.zeros([1,self.n_visible],tf.float32)),[self.batch_size,self.n_visible])\r\n\r\n saver = tf.train.Saver([self.W,self.Wuh,self.Wuv,self.Wvu,self.Wuu,self.bh,self.bv,self.bu,self.u0,self.c0])\r\n\r\n '''Free-energy cost'''\r\n x_sample = self.gibbs_sample(self.x,15)\r\n #Function to that returns free energy of v (visible layer)\r\n free_energy = lambda v: - tf.reduce_sum(tf.log(1+tf.exp(tf.matmul(v,self.W)+self.bh)),1)-tf.matmul(v,tf.transpose(self.bv))\r\n #Loss is difference in free energy between the sample and the original\r\n freecost = tf.reduce_mean(tf.subtract(free_energy(self.x),free_energy(x_sample)))\r\n\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\r\n gradients = optimizer.compute_gradients(freecost,[self.W,self.Wuh,self.Wuv,self.Wvu,self.Wuu,self.bh,self.bv,self.bu,self.u0,self.c0])\r\n appliedgrad = optimizer.apply_gradients(gradients)\r\n\r\n tf_metric, tf_metric_update = tf.metrics.accuracy(self.x, x_sample,name=\"my_metric\")\r\n running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope=\"my_metric\")\r\n running_vars_initializer = tf.variables_initializer(var_list=running_vars)\r\n\r\n logs_dir = \"./graphs\"\r\n loss_list = []\r\n epoch_list = []\r\n '''START TRAINING'''\r\n with tf.Session() as sess:\r\n writer = 
tf.summary.FileWriter(logs_dir,sess.graph)\r\n\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n\r\n sess = self.initialize_model(sess)\r\n sess.run(running_vars_initializer)\r\n prev_loss = 100\r\n print(\"---STARTED TRAINING---\")\r\n for epoch in tqdm(range(self.epochs)):\r\n loss_epoch = 0 #Track loss after each epoch\r\n for b in tqdm(range(0,len(self.dataset)-self.batch_size_,self.batch_size_)):\r\n batch_x = self.dataset[b:b+self.batch_size_]\r\n _,_,cost = sess.run([tf_metric_update,appliedgrad,freecost],feed_dict={self.x:batch_x})\r\n loss_epoch += abs(cost)\r\n loss_list.append(loss_epoch/len(self.dataset))\r\n epoch_list.append(epoch)\r\n print(\"\\nLoss\",loss_epoch/len(self.dataset),\"at epoch\",epoch)\r\n if (loss_epoch/len(self.dataset))<prev_loss:\r\n saver.save(sess,\"./TrainingData/LSTM-RBM/\"+\"TIMESTEPS\"+str(self.num_timesteps)+\"epoch\"+str(epoch)+\"$\"+str(loss_epoch/len(self.dataset))+\".ckpt\")\r\n prev_loss = loss_epoch/len(self.dataset)\r\n\r\n score = sess.run(tf_metric)\r\n print(\"[TF] SCORE: \", score)\r\n\r\n writer.close()\r\n plt.plot(epoch_list,loss_list)\r\n plt.title('LSTM-RBM Loss')\r\n plt.ylabel('Loss')\r\n plt.xlabel('Epoch')\r\n plt.show()\r\n\r\n def getRandomNotes(self):\r\n '''Generate n_visible number of notes'''\r\n a = np.zeros([1,self.n_visible])\r\n offset = 0\r\n for i in range(0,self.n_visible-(len(self.duration_dict)+len(self.pitch_oct_dict)),len(self.pitch_oct_dict)):\r\n a[0,i+np.random.randint(0,len(self.pitch_oct_dict))] = 1\r\n a[0,np.random.randint(0,len(self.duration_dict))+((len(self.pitch_oct_dict))*4)] = 1\r\n return a\r\n\r\n def test(self,training_weights):\r\n '''Tests the model trained saved at training_weights path (imporvises music)'''\r\n saver = tf.train.Saver([self.W,self.Wuh,self.Wuv,self.Wvu,self.Wuu,self.bh,self.bv,self.bu,self.u0,self.c0])\r\n\r\n #Random input for initialization of visible layer\r\n primer = self.dataset[5]#self.getRandomNotes()\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n saver.restore(sess,training_weights)\r\n\r\n for i in tqdm(range(1)):\r\n generated_music = sess.run(self.generate(),feed_dict={self.x:primer})\r\n\r\n #array of array: 4 notes + time/duration\r\n chord_list = []\r\n for visible_layer in generated_music:\r\n chord = []\r\n offset = len(self.pitch_oct_dict)\r\n prior = 0\r\n for i in range(4): #For notes/pitches\r\n chord.append(np.argmax(visible_layer[prior:offset]))\r\n prior = offset\r\n offset += len(self.pitch_oct_dict)\r\n #For duration\r\n prior = offset\r\n offset += len(self.duration_dict)\r\n chord.append(np.argmax(visible_layer[prior:offset]))\r\n #print(len(visible_layer[prior:offset])==len(self.duration_dict))\r\n chord_list.append(chord)\r\n self.create_midi(chord_list)\r\n\r\n def create_midi(self,prediction_output):\r\n\r\n inv_pitch = {pitch: pnum for pnum,pitch in self.pitch_dict.items()}\r\n inv_duration = {duration: dnum for dnum,duration in self.duration_dict.items()}\r\n inv_octave = {octave: onum for onum,octave in self.octave_dict.items()}\r\n inv_pitch_oct = {p_o: enc for enc,p_o in self.pitch_oct_dict.items()}\r\n\r\n import datetime\r\n fmt = '%Y%m%d%H%M%S'\r\n now_str = datetime.datetime.now().strftime(fmt)\r\n\r\n dirstr =\"./GeneratedMusic/LSTM_RBM_SONG\"+now_str+\".midi\"\r\n song = stream.Stream()\r\n\r\n for a_chord in prediction_output:\r\n d = inv_duration[a_chord[4]]\r\n gen_chord = []\r\n for i in range(4): #each pitch/octave encoding in chord\r\n encoding = 
inv_pitch_oct[a_chord[i]].split(';')\r\n p = inv_pitch[int(encoding[0])]\r\n o = inv_octave[int(encoding[1])]\r\n\r\n a_note = note.Note(str(p))\r\n a_note.octave = o\r\n a_note.duration.quarterLength = d\r\n gen_chord.append(a_note)\r\n\r\n song.append(chord.Chord(gen_chord))\r\n song.write('midi',fp=dirstr)\r\n\r\n def generate(self):\r\n '''Generates music by propagating through the LSTM and sampling from the RBM '''\r\n lstm_state = tf.scan(self.lstm_recurrence, self.x, initializer=[self.u0,self.c0])\r\n Uarr = lstm_state[0]\r\n\r\n U = Uarr[int(np.floor((self.num_timesteps-1)/self.num_timesteps)), :, :]\r\n ts = tf.TensorShape\r\n [_, _, _, _, music] = tf.while_loop(lambda count, num_iter, *args: count < num_iter,\r\n self.generate_recurrence, [tf.constant(1), tf.constant(self.num_timesteps*100), [U,lstm_state[1]],\r\n tf.zeros([1, self.n_visible], tf.float32),tf.zeros([1, self.n_visible], tf.float32)],\r\n shape_invariants=[ts([]), ts([]), [U.get_shape(),lstm_state[1].get_shape()], ts([1, self.n_visible]), ts([None, self.n_visible])])\r\n return music\r\n\r\n def generate_recurrence(self,count,k,prev_t,primer,music):\r\n #This function builds and runs the gibbs steps for each RBM in the chain to generate music\r\n #Get the bias vectors from the current state of the RNN\r\n st_1, ct_1 = prev_t[0],prev_t[1]\r\n\r\n self.bv_t = tf.add(self.bv, tf.matmul(st_1, self.Wuv))\r\n self.bh_t = tf.add(self.bh, tf.matmul(st_1, self.Wuh))\r\n\r\n #Run the Gibbs step to get the music output. Prime the RBM with the previous musical output.\r\n x_out = self.gibbs_sample(primer, k=25)\r\n\r\n #Update the RNN hidden state based on the musical output and current hidden state.\r\n\r\n #Input layer:decides if new information is relevant then lets it in\r\n i = tf.sigmoid(tf.matmul(x_out,self.Wvu[0])+tf.matmul(st_1,self.Wuu[0])+self.bu[0])\r\n #forget layer:gets rid of irrelevant information\r\n f = tf.sigmoid(tf.matmul(x_out,self.Wvu[1])+tf.matmul(st_1,self.Wuu[1])+self.bu[1])\r\n #output layer\r\n o = tf.sigmoid(tf.matmul(x_out,self.Wvu[2])+tf.matmul(st_1,self.Wuu[2])+self.bu[2])\r\n #some layer\r\n g = tf.tanh(tf.matmul(x_out,self.Wvu[3])+tf.matmul(st_1,self.Wuu[3])+self.bu[3])\r\n\r\n #update internal cell state\r\n ct = (ct_1*f)+(g*i)\r\n #update external state\r\n st = tf.tanh(ct)*o\r\n #Add the new output to the musical piece\r\n music = tf.concat([music, x_out],0)\r\n\r\n return count+1, k, [st_1,ct], x_out, music\r\n\r\n def compose(self):\r\n\r\n def compose_(i,k,prev_t,primer,pred):\r\n\r\n st_1, ct_1 = prev_t[0],prev_t[1]\r\n\r\n bv_t = tf.add(bv,tf.matmul(st_1,self.Wuv))\r\n bh_t = tf.add(bh,tf.matmul(st_1,self.Wuh))\r\n\r\n x_out =gibbs_sample(primer,self.W,self.bv_t,self.bh_t,k=25)\r\n\r\n #Propagate through the LSTM using the current output 'x_out' and the LSTM hidden unit at t-1, st_1, ct_1\r\n\r\n #Input layer:decides if new information is relevant then lets it in\r\n i = tf.sigmoid(tf.matmul(x_out,self.Wvu[0])+tf.matmul(st_1,self.Wuu[0])+self.bu[0])\r\n #forget layer:gets rid of irrelevant information\r\n f = tf.sigmoid(tf.matmul(x_out,self.Wvu[1])+tf.matmul(st_1,self.Wuu[1])+self.bu[1])\r\n #output layer\r\n o = tf.sigmoid(tf.matmul(x_out,self.Wvu[2])+tf.matmul(st_1,self.Wuu[2])+self.bu[2])\r\n #some layer\r\n g = tf.tanh(tf.matmul(x_out,self.Wvu[3])+tf.matmul(st_1,self.Wuu[3])+self.bu[3])\r\n\r\n #update internal cell state\r\n ct = (ct_1*f)+(g*i)\r\n #update external state\r\n st = tf.tanh(ct)*o\r\n\r\n #Append x_out to prediction\r\n pred = 
tf.concat(values=[pred,x_out],axis=0)\r\n\r\n return i+1,k,[st,ct],x_out,x,pred\r\n\r\n lstm_state = tf.scan(self.lstm_recurrence,self.x,initializer=[u0,c0])\r\n\r\n s_t,c_t = lstm_state[0],lstm_state[1]\r\n #s_t = s_t[int(np.floor(prime_timesteps/num_timesteps)),:,:]\r\n\r\n #lstm_state = [s_t,c_t]\r\n pred = tf.zeros([1,n_visible],tf.float32)\r\n\r\n ts = tf.TensorShape\r\n\r\n\r\n #Repeat compose_ whilst i<n is True\r\n ts = tf.TensorShape # To quickly define a TensorShape\r\n compose_loop_out = tf.while_loop(lambda i, n, *args: i < n, compose_, [tf.constant(1), tf.constant(song_timesteps), lstm_state,\r\n tf.zeros([1, self.n_visible], tf.float32), tf.zeros([1, self.n_visible], tf.float32)],\r\n shape_invariants=[ts([]), ts([]), [s_t.get_shape(),c_t.get_shape()], ts([1, self.n_visible]), ts([1,self.n_visible])])\r\n pred = compose_loop_out[10]\r\n return pred\r\n\r\n#Convert music_obj into input sequences for training(reshape into timesteps)\r\ndef get_input_sequences(dataset,num_timesteps,n_visible):\r\n visible_layer_inputs = []\r\n for song in dataset: #Traverse each song in set\r\n for chord_index in range(0,len(song)-num_timesteps): #get timesteps of chords in song\r\n chord_set = song[chord_index:chord_index+num_timesteps]\r\n chord_encoding = np.zeros(n_visible) #initialize 0xn_visible\r\n offset = 0\r\n for chord in chord_set:\r\n for note_index in range(4):\r\n chord_encoding[chord[note_index]+offset] = 1\r\n offset += len(pitch_oct_dict)\r\n chord_encoding[chord[4]+offset] = 1\r\n offset += len(duration_dict)\r\n visible_layer_inputs.append(chord_encoding)\r\n #Clear unreferenced memory\r\n gc.collect()\r\n print('done')\r\n return visible_layer_inputs\r\n\r\n\r\nif __name__ == '__main__':\r\n #Get music object(a set of songs represented in numeric format)\r\n dataset = ud.loadobj('./Files/BachChords')\r\n pitch_oct_dict = ud.loadobj('./Files/BachPitchOctave')\r\n duration_dict = ud.loadobj('./Files/BachDuration')\r\n octave_dict = ud.loadobj('./Files/BachOctaves')\r\n pitch_dict = ud.loadobj('./Files/BachPitch')\r\n print(len(pitch_oct_dict))\r\n num_timesteps = 4\r\n n_visible = ((len(pitch_oct_dict)*4)+len(duration_dict))*num_timesteps\r\n print(n_visible)\r\n print(((len(pitch_dict)*4)+len(duration_dict))*num_timesteps)\r\n print(len(octave_dict))\r\n n_hidden = int(n_visible*0.60)\r\n epochs = 1000\r\n batch_size = 50\r\n input_sequences = get_input_sequences(dataset,num_timesteps,n_visible)\r\n #print(duration_dict)\r\n #print(input_sequences)\r\n model = lstm_rbm(input_sequences,n_visible=n_visible,n_hidden=n_hidden,epochs=epochs,batch_size=batch_size,num_timesteps=num_timesteps,pitch_dict=pitch_dict,duration_dict=duration_dict,pitch_oct_dict=pitch_oct_dict,octave_dict=octave_dict)\r\n #print('hello')\r\n model.train()\r\n #model.test(\"./TrainingData/LSTM-RBM/TIMESTEPS8epoch1996$0.3799365885695821.ckpt\")\r\n"
]
| [
[
"tensorflow.scan",
"tensorflow.metrics.accuracy",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.variables_initializer",
"matplotlib.pyplot.plot",
"tensorflow.tanh",
"tensorflow.while_loop",
"tensorflow.get_collection",
"tensorflow.stop_gradient",
"tensorflow.subtract",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.zeros",
"tensorflow.tile",
"tensorflow.matmul",
"matplotlib.pyplot.title",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"numpy.floor",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.summary.FileWriter",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.ones",
"matplotlib.pyplot.xlabel",
"tensorflow.random_normal"
]
]
|
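The generation loop in the LSTM-RBM file above grows the composed piece one Gibbs sample at a time inside `tf.while_loop`, using `shape_invariants` to allow a tensor whose first axis changes between iterations. A minimal sketch of that pattern, assuming the TF 1.x API; `n_visible` and the zero tensor standing in for a Gibbs sample are hypothetical:

```python
# Growing a tensor inside tf.while_loop (assumed TF 1.x), as generate()/compose() do.
import tensorflow as tf

n_visible = 8  # hypothetical visible-layer width

def body(count, num_iter, music):
    step = tf.zeros([1, n_visible], tf.float32)  # stand-in for one Gibbs sample
    music = tf.concat([music, step], axis=0)     # grow the piece along axis 0
    return count + 1, num_iter, music

ts = tf.TensorShape
_, _, music = tf.while_loop(
    lambda count, num_iter, *args: count < num_iter,
    body,
    [tf.constant(0), tf.constant(10), tf.zeros([1, n_visible], tf.float32)],
    # The None dimension declares that the first axis may change per iteration.
    shape_invariants=[ts([]), ts([]), ts([None, n_visible])])
```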
onurbarut/Encrypted_Malware_Detection | [
"2d2323c1e9ea3313b76bc2e37b68a9126587c6cd"
]
| [
"vino_nsyss2020/utils/helper.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom tensorflow.python.framework.graph_util_impl import convert_variables_to_constants\n\n\ndef read_csv_dataset(fileName,\n drop_low_packets=False):\n # Read dataset from csv and shuffle it into random order\n data = pd.read_csv(fileName).sample(frac=1)\n\n # Drop flows with total_num_pkts < 2\n if drop_low_packets:\n data['total_num_pkts'] = data['num_pkts_in'] + data['num_pkts_out']\n data = data[~(data['total_num_pkts'] < 2)]\n data.pop('total_num_pkts')\n\n labels = data.pop('label')\n\n return data, labels\n\n\ndef collect_statistics(y_true, y_pred):\n cm = metrics.confusion_matrix(y_true, y_pred)\n detectionRate = cm[1, 1] / (cm[1, 0] + cm[1, 1])\n falseAlarmRate = cm[0, 1] / (cm[0, 0] + cm[0, 1])\n correct = np.sum(y_true == y_pred)\n accu = float(correct) / len(y_true) * 100\n class_report = classification_report(y_true, y_pred)\n\n return detectionRate, falseAlarmRate, accu, class_report\n\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n \"\"\"\n *Freezes the state of a session into a pruned computation graph.*\n Creates a new computation graph where variable nodes are replaced by\n constants taking their current value in the session. The new graph will be\n pruned so subgraphs that are not necessary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen, or None to freeze all the variables in the graph.\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n # Graph -> GraphDef ProtoBuf\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names)\n return frozen_graph\n\n\ndef encode_label(labels, class_label_pairs=None):\n\n unique_labels = []\n label_list = []\n clp = []\n if class_label_pairs is None:\n class_label_pairs = {}\n [unique_labels.append(label) for label in labels if label not in unique_labels]\n unique_labels.sort()\n l = 0\n for ul in unique_labels:\n class_label_pairs[ul] = l\n l += 1\n\n [label_list.append(class_label_pairs[label]) for label in labels]\n\n # for label in unique_labels:\n # print(label, labels.count(label))\n labelArray = np.asarray(label_list).reshape((-1,))\n\n return labelArray, class_label_pairs\n\n\ndef convertToOneHot(array):\n # Convert predictions to one-hot format\n for arr in array:\n if arr[0] > arr[1]:\n arr[0] = 1\n arr[1] = 0\n else:\n arr[0] = 0\n arr[1] = 1\n return array\n\n\ndef convertToDefault(array):\n # Convert from one-hot to default format\n new_arr = np.zeros(array.shape[0])\n for i in range(array.shape[0]):\n # Handle ANN converting\n if len(array[i]) < 2:\n if array[i][0] < 0.5:\n new_arr[i] = 0\n else:\n new_arr[i] = 1\n # Handle CNN Converting\n else:\n if array[i][0] > array[i][1]:\n new_arr[i] = 0\n else:\n new_arr[i] = 1\n return new_arr\n"
]
| [
[
"pandas.read_csv",
"numpy.asarray",
"tensorflow.global_variables",
"sklearn.metrics.confusion_matrix",
"tensorflow.python.framework.graph_util_impl.convert_variables_to_constants",
"sklearn.metrics.classification_report",
"numpy.sum",
"numpy.zeros"
]
]
|
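`collect_statistics` above derives detection rate and false-alarm rate directly from the confusion matrix. A quick self-contained check of those formulas, using made-up labels:

```python
# Re-deriving the detection-rate / false-alarm formulas from collect_statistics.
import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0, 1, 0, 1, 1, 0])
cm = metrics.confusion_matrix(y_true, y_pred)        # rows = true, cols = predicted
detection_rate = cm[1, 1] / (cm[1, 0] + cm[1, 1])    # TP / (TP + FN)
false_alarm_rate = cm[0, 1] / (cm[0, 0] + cm[0, 1])  # FP / (FP + TN)
print(detection_rate, false_alarm_rate)              # 0.666..., 0.333...
```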
alexells/PyAthena | [
"e365b3f4568ebc755b3d6d631dc5da43bca867b1"
]
| [
"tests/test_sqlalchemy_athena.py"
]
| [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport re\nimport unittest\nimport uuid\nfrom datetime import date, datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy\nfrom future.utils import PY2\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.exc import NoSuchTableError, OperationalError, ProgrammingError\nfrom sqlalchemy.sql import expression\nfrom sqlalchemy.sql.schema import Column, MetaData, Table\nfrom sqlalchemy.sql.sqltypes import (\n BIGINT,\n BINARY,\n BOOLEAN,\n DATE,\n DECIMAL,\n FLOAT,\n INTEGER,\n STRINGTYPE,\n TIMESTAMP,\n)\n\nfrom tests.conftest import ENV, SCHEMA\nfrom tests.util import with_engine\n\nif PY2:\n from urllib import quote_plus\nelse:\n from urllib.parse import quote_plus\n\n\nclass TestSQLAlchemyAthena(unittest.TestCase):\n \"\"\"Reference test case is following:\n\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/sqlalchemy_test_case.py\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_hive.py\n https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_presto.py\n \"\"\"\n\n def create_engine(self):\n conn_str = (\n \"awsathena+rest://athena.{region_name}.amazonaws.com:443/\"\n + \"{schema_name}?s3_staging_dir={s3_staging_dir}&s3_dir={s3_dir}\"\n + \"&compression=snappy\"\n )\n return create_engine(\n conn_str.format(\n region_name=ENV.region_name,\n schema_name=SCHEMA,\n s3_staging_dir=quote_plus(ENV.s3_staging_dir),\n s3_dir=quote_plus(ENV.s3_staging_dir),\n )\n )\n\n @with_engine()\n def test_basic_query(self, engine, conn):\n rows = conn.execute(\"SELECT * FROM one_row\").fetchall()\n self.assertEqual(len(rows), 1)\n self.assertEqual(rows[0].number_of_rows, 1)\n self.assertEqual(len(rows[0]), 1)\n\n @with_engine()\n def test_reflect_no_such_table(self, engine, conn):\n self.assertRaises(\n NoSuchTableError,\n lambda: Table(\"this_does_not_exist\", MetaData(bind=engine), autoload=True),\n )\n self.assertRaises(\n NoSuchTableError,\n lambda: Table(\n \"this_does_not_exist\",\n MetaData(bind=engine),\n schema=\"also_does_not_exist\",\n autoload=True,\n ),\n )\n\n @with_engine()\n def test_reflect_table(self, engine, conn):\n one_row = Table(\"one_row\", MetaData(bind=engine), autoload=True)\n self.assertEqual(len(one_row.c), 1)\n self.assertIsNotNone(one_row.c.number_of_rows)\n\n @with_engine()\n def test_reflect_table_with_schema(self, engine, conn):\n one_row = Table(\"one_row\", MetaData(bind=engine), schema=SCHEMA, autoload=True)\n self.assertEqual(len(one_row.c), 1)\n self.assertIsNotNone(one_row.c.number_of_rows)\n\n @with_engine()\n def test_reflect_table_include_columns(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine))\n version = float(\n re.search(r\"^([\\d]+\\.[\\d]+)\\..+\", sqlalchemy.__version__).group(1)\n )\n if version <= 1.2:\n engine.dialect.reflecttable(\n conn, one_row_complex, include_columns=[\"col_int\"], exclude_columns=[]\n )\n else:\n # https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html#\n # change-64ac776996da1a5c3e3460b4c0f0b257\n engine.dialect.reflecttable(\n conn,\n one_row_complex,\n include_columns=[\"col_int\"],\n exclude_columns=[],\n resolve_fks=True,\n )\n self.assertEqual(len(one_row_complex.c), 1)\n self.assertIsNotNone(one_row_complex.c.col_int)\n self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)\n\n @with_engine()\n def test_unicode(self, engine, conn):\n unicode_str = \"密林\"\n one_row = 
Table(\"one_row\", MetaData(bind=engine))\n returned_str = sqlalchemy.select(\n [expression.bindparam(\"あまぞん\", unicode_str)], from_obj=one_row,\n ).scalar()\n self.assertEqual(returned_str, unicode_str)\n\n @with_engine()\n def test_reflect_schemas(self, engine, conn):\n insp = sqlalchemy.inspect(engine)\n schemas = insp.get_schema_names()\n self.assertIn(SCHEMA, schemas)\n self.assertIn(\"default\", schemas)\n\n @with_engine()\n def test_get_table_names(self, engine, conn):\n meta = MetaData()\n meta.reflect(bind=engine)\n print(meta.tables)\n self.assertIn(\"one_row\", meta.tables)\n self.assertIn(\"one_row_complex\", meta.tables)\n\n insp = sqlalchemy.inspect(engine)\n self.assertIn(\n \"many_rows\", insp.get_table_names(schema=SCHEMA),\n )\n\n @with_engine()\n def test_has_table(self, engine, conn):\n self.assertTrue(Table(\"one_row\", MetaData(bind=engine)).exists())\n self.assertFalse(\n Table(\"this_table_does_not_exist\", MetaData(bind=engine)).exists()\n )\n\n @with_engine()\n def test_get_columns(self, engine, conn):\n insp = sqlalchemy.inspect(engine)\n actual = insp.get_columns(table_name=\"one_row\", schema=SCHEMA)[0]\n self.assertEqual(actual[\"name\"], \"number_of_rows\")\n self.assertTrue(isinstance(actual[\"type\"], INTEGER))\n self.assertTrue(actual[\"nullable\"])\n self.assertIsNone(actual[\"default\"])\n self.assertEqual(actual[\"ordinal_position\"], 1)\n self.assertIsNone(actual[\"comment\"])\n\n @with_engine()\n def test_char_length(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine), autoload=True)\n result = (\n sqlalchemy.select(\n [sqlalchemy.func.char_length(one_row_complex.c.col_string)]\n )\n .execute()\n .scalar()\n )\n self.assertEqual(result, len(\"a string\"))\n\n @with_engine()\n def test_reflect_select(self, engine, conn):\n one_row_complex = Table(\"one_row_complex\", MetaData(bind=engine), autoload=True)\n self.assertEqual(len(one_row_complex.c), 15)\n self.assertIsInstance(one_row_complex.c.col_string, Column)\n rows = one_row_complex.select().execute().fetchall()\n self.assertEqual(len(rows), 1)\n self.assertEqual(\n list(rows[0]),\n [\n True,\n 127,\n 32767,\n 2147483647,\n 9223372036854775807,\n 0.5,\n 0.25,\n \"a string\",\n datetime(2017, 1, 1, 0, 0, 0),\n date(2017, 1, 2),\n b\"123\",\n \"[1, 2]\",\n \"{1=2, 3=4}\",\n \"{a=1, b=2}\",\n Decimal(\"0.1\"),\n ],\n )\n self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)\n self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)\n self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)\n self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)\n self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)\n self.assertIsInstance(one_row_complex.c.col_string.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)\n self.assertIsInstance(one_row_complex.c.col_date.type, DATE)\n self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)\n self.assertIsInstance(one_row_complex.c.col_array.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_struct.type, type(STRINGTYPE))\n self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)\n\n @with_engine()\n def test_reserved_words(self, engine, conn):\n \"\"\"Presto uses double quotes, not 
backticks\"\"\"\n fake_table = Table(\n \"select\", MetaData(bind=engine), Column(\"current_timestamp\", STRINGTYPE)\n )\n query = str(fake_table.select(fake_table.c.current_timestamp == \"a\"))\n self.assertIn('\"select\"', query)\n self.assertIn('\"current_timestamp\"', query)\n self.assertNotIn(\"`select`\", query)\n self.assertNotIn(\"`current_timestamp`\", query)\n\n @with_engine()\n def test_retry_if_data_catalog_exception(self, engine, conn):\n dialect = engine.dialect\n exc = OperationalError(\n \"\", None, \"Database does_not_exist not found. Please check your query.\"\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"this_does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"this_does_not_exist\"\n )\n )\n\n exc = OperationalError(\n \"\", None, \"Namespace does_not_exist not found. Please check your query.\"\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"this_does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"this_does_not_exist\"\n )\n )\n\n exc = OperationalError(\n \"\", None, \"Table does_not_exist not found. Please check your query.\"\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"this_does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"this_does_not_exist\"\n )\n )\n\n exc = OperationalError(\"\", None, \"foobar.\")\n self.assertTrue(\n dialect._retry_if_data_catalog_exception(exc, \"foobar\", \"foobar\")\n )\n\n exc = ProgrammingError(\n \"\", None, \"Database does_not_exist not found. 
Please check your query.\"\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"does_not_exist\", \"this_does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"does_not_exist\"\n )\n )\n self.assertFalse(\n dialect._retry_if_data_catalog_exception(\n exc, \"this_does_not_exist\", \"this_does_not_exist\"\n )\n )\n\n @with_engine()\n def test_get_column_type(self, engine, conn):\n dialect = engine.dialect\n self.assertEqual(dialect._get_column_type(\"boolean\"), \"boolean\")\n self.assertEqual(dialect._get_column_type(\"tinyint\"), \"tinyint\")\n self.assertEqual(dialect._get_column_type(\"smallint\"), \"smallint\")\n self.assertEqual(dialect._get_column_type(\"integer\"), \"integer\")\n self.assertEqual(dialect._get_column_type(\"bigint\"), \"bigint\")\n self.assertEqual(dialect._get_column_type(\"real\"), \"real\")\n self.assertEqual(dialect._get_column_type(\"double\"), \"double\")\n self.assertEqual(dialect._get_column_type(\"varchar\"), \"varchar\")\n self.assertEqual(dialect._get_column_type(\"timestamp\"), \"timestamp\")\n self.assertEqual(dialect._get_column_type(\"date\"), \"date\")\n self.assertEqual(dialect._get_column_type(\"varbinary\"), \"varbinary\")\n self.assertEqual(dialect._get_column_type(\"array(integer)\"), \"array\")\n self.assertEqual(dialect._get_column_type(\"map(integer, integer)\"), \"map\")\n self.assertEqual(dialect._get_column_type(\"row(a integer, b integer)\"), \"row\")\n self.assertEqual(dialect._get_column_type(\"decimal(10,1)\"), \"decimal\")\n\n @with_engine()\n def test_contain_percents_character_query(self, engine, conn):\n query = sqlalchemy.sql.text(\n \"\"\"\n SELECT date_parse('20191030', '%Y%m%d')\n \"\"\"\n )\n result = engine.execute(query)\n self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30),)])\n\n @with_engine()\n def test_query_with_parameter(self, engine, conn):\n query = sqlalchemy.sql.text(\n \"\"\"\n SELECT :word\n \"\"\"\n )\n result = engine.execute(query, word=\"cat\")\n self.assertEqual(result.fetchall(), [(\"cat\",)])\n\n @with_engine()\n def test_contain_percents_character_query_with_parameter(self, engine, conn):\n query = sqlalchemy.sql.text(\n \"\"\"\n SELECT date_parse('20191030', '%Y%m%d'), :word\n \"\"\"\n )\n result = engine.execute(query, word=\"cat\")\n self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30), \"cat\")])\n\n query = sqlalchemy.sql.text(\n \"\"\"\n SELECT col_string FROM one_row_complex\n WHERE col_string LIKE 'a%' OR col_string LIKE :param\n \"\"\"\n )\n result = engine.execute(query, param=\"b%\")\n self.assertEqual(result.fetchall(), [(\"a string\",)])\n\n @with_engine()\n def test_nan_checks(self, engine, conn):\n dialect = engine.dialect\n self.assertFalse(dialect._is_nan(\"string\"))\n self.assertFalse(dialect._is_nan(1))\n self.assertTrue(dialect._is_nan(float(\"nan\")))\n\n @with_engine()\n def test_to_sql(self, engine, conn):\n # TODO Add binary column (After dropping support for Python 2.7)\n table_name = \"to_sql_{0}\".format(str(uuid.uuid4()).replace(\"-\", \"\"))\n df = pd.DataFrame(\n {\n \"col_int\": np.int32([1]),\n \"col_bigint\": np.int64([12345]),\n \"col_float\": np.float32([1.0]),\n \"col_double\": np.float64([1.2345]),\n \"col_string\": [\"a\"],\n \"col_boolean\": np.bool_([True]),\n \"col_timestamp\": [datetime(2020, 1, 1, 0, 0, 0)],\n \"col_date\": [date(2020, 12, 
31)],\n }\n )\n # Explicitly specify column order\n df = df[\n [\n \"col_int\",\n \"col_bigint\",\n \"col_float\",\n \"col_double\",\n \"col_string\",\n \"col_boolean\",\n \"col_timestamp\",\n \"col_date\",\n ]\n ]\n df.to_sql(\n table_name,\n engine,\n schema=SCHEMA,\n index=False,\n if_exists=\"replace\",\n method=\"multi\",\n )\n\n table = Table(table_name, MetaData(bind=engine), autoload=True)\n self.assertEqual(\n table.select().execute().fetchall(),\n [\n (\n 1,\n 12345,\n 1.0,\n 1.2345,\n \"a\",\n True,\n datetime(2020, 1, 1, 0, 0, 0),\n date(2020, 12, 31),\n )\n ],\n )\n"
]
| [
[
"numpy.int32",
"numpy.int64",
"numpy.float64",
"numpy.float32",
"numpy.bool_"
]
]
|
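The tests above build their engine from an `awsathena+rest` URL. A minimal sketch of that connection string; the region, schema, and staging bucket below are placeholders, and the PyAthena driver must be installed for SQLAlchemy to resolve the dialect:

```python
# Placeholder sketch of the awsathena+rest connection string the tests construct.
from urllib.parse import quote_plus
from sqlalchemy.engine import create_engine

conn_str = (
    "awsathena+rest://athena.{region_name}.amazonaws.com:443/"
    "{schema_name}?s3_staging_dir={s3_staging_dir}"
).format(
    region_name="us-east-1",                                   # placeholder
    schema_name="default",                                     # placeholder
    s3_staging_dir=quote_plus("s3://example-bucket/results/"), # placeholder
)
engine = create_engine(conn_str)
rows = engine.execute("SELECT 1").fetchall()
```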
prabhathur/CF | [
"20943f3f326e72ea7c5464bc2c3eee06703ed404"
]
| [
"tests/test_model_interface/test_keras_tensorflow_model.py"
]
| [
"import numpy as np\nimport pytest\n\nimport dice_ml\nfrom dice_ml.utils import helpers\n\ntf = pytest.importorskip(\"tensorflow\")\n\[email protected]\ndef tf_session():\n    if tf.__version__[0] == '1':\n        sess = tf.InteractiveSession()\n        return sess\n\[email protected]\ndef tf_model_object():\n    backend = 'TF'+tf.__version__[0]\n    ML_modelpath = helpers.get_adult_income_modelpath(backend=backend)\n    m = dice_ml.Model(model_path= ML_modelpath, backend=backend)\n    return m\n\ndef test_model_initiation(tf_model_object):\n    assert isinstance(tf_model_object, dice_ml.model_interfaces.keras_tensorflow_model.KerasTensorFlowModel)\n\ndef test_model_initiation_fullpath():\n    \"\"\"\n    Tests if model is initiated when full path to a model and explainer class is given to backend parameter.\n    \"\"\"\n    tf_version = tf.__version__[0]\n    backend = {'model': 'keras_tensorflow_model.KerasTensorFlowModel',\n               'explainer': 'dice_tensorflow'+tf_version+'.DiceTensorFlow'+tf_version}\n    ML_modelpath = helpers.get_adult_income_modelpath(backend=backend)\n    m = dice_ml.Model(model_path= ML_modelpath, backend=backend)\n    assert isinstance(m, dice_ml.model_interfaces.keras_tensorflow_model.KerasTensorFlowModel)\n\nclass TestKerasModelMethods:\n    @pytest.fixture(autouse=True)\n    def _get_model_object(self, tf_model_object, tf_session):\n        self.m = tf_model_object\n        self.sess = tf_session\n\n    def test_load_model(self):\n        self.m.load_model()\n        assert self.m.model is not None\n\n    @pytest.mark.parametrize(\"input_instance, prediction\",[(np.array([[0.5]*29], dtype=np.float32), 0.747)])\n    def test_model_output(self, input_instance, prediction):\n        self.m.load_model()\n        if tf.__version__[0] == '1':\n            input_instance_tf = tf.Variable(input_instance, dtype=tf.float32)\n            output_instance = self.m.get_output(input_instance_tf)\n            output = self.sess.run(output_instance, feed_dict={input_instance_tf:input_instance})[0][0]\n        else:\n            output = self.m.get_output(input_instance).numpy()[0][0]\n        assert output == pytest.approx(prediction, abs=1e-3)\n"
]
| [
[
"numpy.array"
]
]
|
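`test_model_output` above compares a float prediction against an expected value; note that `pytest.approx` is inert unless its comparison is actually asserted. A tiny standalone illustration of that shape (the numbers are made up):

```python
# pytest.approx only has effect inside an assert; comparing and discarding
# the result checks nothing.
import pytest

def test_close_enough():
    expected = 0.747
    observed = 0.7472
    assert observed == pytest.approx(expected, abs=1e-3)
```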
ylhz/Adversarial-attack-on-Person-ReID-With-Deep-Mis-Ranking | [
"2c20cd398be39dd8bba3e676275ef8459bb54820",
"2c20cd398be39dd8bba3e676275ef8459bb54820"
]
| [
"models/HACNN.py",
"util/local_dist.py"
]
| [
"from __future__ import absolute_import\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\n\n__all__ = ['HACNN']\n\nclass ConvBlock(nn.Module):\n \"\"\"Basic convolutional block:\n convolution + batch normalization + relu.\n\n Args (following http://pytorch.org/docs/master/nn.html#torch.nn.Conv2d):\n in_c (int): number of input channels.\n out_c (int): number of output channels.\n k (int or tuple): kernel size.\n s (int or tuple): stride.\n p (int or tuple): padding.\n \"\"\"\n def __init__(self, in_c, out_c, k, s=1, p=0):\n super(ConvBlock, self).__init__()\n self.conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)\n self.bn = nn.BatchNorm2d(out_c)\n\n def forward(self, x):\n return F.relu(self.bn(self.conv(x)))\n\nclass InceptionA(nn.Module):\n \"\"\"\n Args:\n in_channels (int): number of input channels\n out_channels (int): number of output channels AFTER concatenation\n \"\"\"\n def __init__(self, in_channels, out_channels):\n super(InceptionA, self).__init__()\n single_out_channels = out_channels // 4\n\n self.stream1 = nn.Sequential(\n ConvBlock(in_channels, single_out_channels, 1),\n ConvBlock(single_out_channels, single_out_channels, 3, p=1),\n )\n self.stream2 = nn.Sequential(\n ConvBlock(in_channels, single_out_channels, 1),\n ConvBlock(single_out_channels, single_out_channels, 3, p=1),\n )\n self.stream3 = nn.Sequential(\n ConvBlock(in_channels, single_out_channels, 1),\n ConvBlock(single_out_channels, single_out_channels, 3, p=1),\n )\n self.stream4 = nn.Sequential(\n nn.AvgPool2d(3, stride=1, padding=1),\n ConvBlock(in_channels, single_out_channels, 1),\n )\n\n def forward(self, x):\n s1 = self.stream1(x)\n s2 = self.stream2(x)\n s3 = self.stream3(x)\n s4 = self.stream4(x)\n y = torch.cat([s1, s2, s3, s4], dim=1)\n return y\n\nclass InceptionB(nn.Module):\n \"\"\"\n Args:\n in_channels (int): number of input channels\n out_channels (int): number of output channels AFTER concatenation\n \"\"\"\n def __init__(self, in_channels, out_channels):\n super(InceptionB, self).__init__()\n single_out_channels = out_channels // 4\n\n self.stream1 = nn.Sequential(\n ConvBlock(in_channels, single_out_channels, 1),\n ConvBlock(single_out_channels, single_out_channels, 3, s=2, p=1),\n )\n self.stream2 = nn.Sequential(\n ConvBlock(in_channels, single_out_channels, 1),\n ConvBlock(single_out_channels, single_out_channels, 3, p=1),\n ConvBlock(single_out_channels, single_out_channels, 3, s=2, p=1),\n )\n self.stream3 = nn.Sequential(\n nn.MaxPool2d(3, stride=2, padding=1),\n ConvBlock(in_channels, single_out_channels*2, 1),\n )\n\n def forward(self, x):\n s1 = self.stream1(x)\n s2 = self.stream2(x)\n s3 = self.stream3(x)\n y = torch.cat([s1, s2, s3], dim=1)\n return y\n\nclass SpatialAttn(nn.Module):\n \"\"\"Spatial Attention (Sec. 3.1.I.1)\"\"\"\n def __init__(self):\n super(SpatialAttn, self).__init__()\n self.conv1 = ConvBlock(1, 1, 3, s=2, p=1)\n self.conv2 = ConvBlock(1, 1, 1)\n\n def forward(self, x):\n # global cross-channel averaging\n x = x.mean(1, keepdim=True)\n # 3-by-3 conv\n x = self.conv1(x)\n # bilinear resizing\n x = F.upsample(x, (x.size(2)*2, x.size(3)*2), mode='bilinear', align_corners=True)\n # scaling conv\n x = self.conv2(x)\n return x\n\nclass ChannelAttn(nn.Module):\n \"\"\"Channel Attention (Sec. 
3.1.I.2)\"\"\"\n def __init__(self, in_channels, reduction_rate=16):\n super(ChannelAttn, self).__init__()\n assert in_channels%reduction_rate == 0\n self.conv1 = ConvBlock(in_channels, in_channels//reduction_rate, 1)\n self.conv2 = ConvBlock(in_channels//reduction_rate, in_channels, 1)\n\n def forward(self, x):\n # squeeze operation (global average pooling)\n x = F.avg_pool2d(x, x.size()[2:])\n # excitation operation (2 conv layers)\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\nclass SoftAttn(nn.Module):\n \"\"\"Soft Attention (Sec. 3.1.I)\n Aim: Spatial Attention + Channel Attention\n Output: attention maps with shape identical to input.\n \"\"\"\n def __init__(self, in_channels):\n super(SoftAttn, self).__init__()\n self.spatial_attn = SpatialAttn()\n self.channel_attn = ChannelAttn(in_channels)\n self.conv = ConvBlock(in_channels, in_channels, 1)\n\n def forward(self, x):\n y_spatial = self.spatial_attn(x)\n y_channel = self.channel_attn(x)\n y = y_spatial * y_channel\n y = F.sigmoid(self.conv(y))\n return y\n\nclass HardAttn(nn.Module):\n \"\"\"Hard Attention (Sec. 3.1.II)\"\"\"\n def __init__(self, in_channels):\n super(HardAttn, self).__init__()\n self.fc = nn.Linear(in_channels, 4*2)\n self.init_params()\n\n def init_params(self):\n self.fc.weight.data.zero_()\n self.fc.bias.data.copy_(torch.tensor([0, -0.75, 0, -0.25, 0, 0.25, 0, 0.75], dtype=torch.float))\n\n def forward(self, x):\n # squeeze operation (global average pooling)\n x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), x.size(1))\n # predict transformation parameters\n theta = F.tanh(self.fc(x))\n theta = theta.view(-1, 4, 2)\n return theta\n\nclass HarmAttn(nn.Module):\n \"\"\"Harmonious Attention (Sec. 3.1)\"\"\"\n def __init__(self, in_channels):\n super(HarmAttn, self).__init__()\n self.soft_attn = SoftAttn(in_channels)\n self.hard_attn = HardAttn(in_channels)\n\n def forward(self, x):\n y_soft_attn = self.soft_attn(x)\n theta = self.hard_attn(x)\n return y_soft_attn, theta\n\nclass HACNN(nn.Module):\n \"\"\"\n Harmonious Attention Convolutional Neural Network\n\n Reference:\n Li et al. Harmonious Attention Network for Person Re-identification. CVPR 2018.\n\n Args:\n num_classes (int): number of classes to predict\n nchannels (list): number of channels AFTER concatenation\n feat_dim (int): feature dimension for a single stream\n learn_region (bool): whether to learn region features (i.e. 
local branch)\n \"\"\"\n def __init__(self, num_classes, loss={'xent', 'htri'}, nchannels=[128, 256, 384], feat_dim=512, learn_region=True, use_gpu=True, **kwargs):\n super(HACNN, self).__init__()\n self.loss = loss\n self.learn_region = learn_region\n self.use_gpu = use_gpu\n\n self.conv = ConvBlock(3, 32, 3, s=2, p=1)\n\n # Construct Inception + HarmAttn blocks\n # ============== Block 1 ==============\n self.inception1 = nn.Sequential(\n InceptionA(32, nchannels[0]),\n InceptionB(nchannels[0], nchannels[0]),\n )\n self.ha1 = HarmAttn(nchannels[0])\n\n # ============== Block 2 ==============\n self.inception2 = nn.Sequential(\n InceptionA(nchannels[0], nchannels[1]),\n InceptionB(nchannels[1], nchannels[1]),\n )\n self.ha2 = HarmAttn(nchannels[1])\n\n # ============== Block 3 ==============\n self.inception3 = nn.Sequential(\n InceptionA(nchannels[1], nchannels[2]),\n InceptionB(nchannels[2], nchannels[2]),\n )\n self.ha3 = HarmAttn(nchannels[2])\n\n self.fc_global = nn.Sequential(\n nn.Linear(nchannels[2], feat_dim),\n nn.BatchNorm1d(feat_dim),\n nn.ReLU(),\n )\n self.classifier_global = nn.Linear(feat_dim, num_classes)\n\n if self.learn_region:\n self.init_scale_factors()\n self.local_conv1 = InceptionB(32, nchannels[0])\n self.local_conv2 = InceptionB(nchannels[0], nchannels[1])\n self.local_conv3 = InceptionB(nchannels[1], nchannels[2])\n self.fc_local = nn.Sequential(\n nn.Linear(nchannels[2]*4, feat_dim),\n nn.BatchNorm1d(feat_dim),\n nn.ReLU(),\n )\n self.classifier_local = nn.Linear(feat_dim, num_classes)\n self.feat_dim = feat_dim * 2\n else:\n self.feat_dim = feat_dim\n\n def init_scale_factors(self):\n # initialize scale factors (s_w, s_h) for four regions\n self.scale_factors = []\n self.scale_factors.append(torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float))\n self.scale_factors.append(torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float))\n self.scale_factors.append(torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float))\n self.scale_factors.append(torch.tensor([[1, 0], [0, 0.25]], dtype=torch.float))\n\n def stn(self, x, theta):\n \"\"\"Perform spatial transform\n x: (batch, channel, height, width)\n theta: (batch, 2, 3)\n \"\"\"\n grid = F.affine_grid(theta, x.size())\n x = F.grid_sample(x, grid)\n return x\n\n def transform_theta(self, theta_i, region_idx):\n \"\"\"Transform theta to include (s_w, s_h),\n resulting in (batch, 2, 3)\"\"\"\n scale_factors = self.scale_factors[region_idx]\n theta = torch.zeros(theta_i.size(0), 2, 3)\n theta[:,:,:2] = scale_factors\n theta[:,:,-1] = theta_i\n if self.use_gpu: theta = theta.cuda()\n return theta\n\n def forward(self, x, is_training):\n assert x.size(2) == 160 and x.size(3) == 64, \\\n \"Input size does not match, expected (160, 64) but got ({}, {})\".format(x.size(2), x.size(3))\n x = self.conv(x)\n\n # ============== Block 1 ==============\n # global branch\n x1 = self.inception1(x)\n x1_attn, x1_theta = self.ha1(x1)\n x1_out = x1 * x1_attn\n # local branch\n if self.learn_region:\n x1_local_list = []\n for region_idx in range(4):\n x1_theta_i = x1_theta[:,region_idx,:]\n x1_theta_i = self.transform_theta(x1_theta_i, region_idx)\n x1_trans_i = self.stn(x, x1_theta_i)\n x1_trans_i = F.upsample(x1_trans_i, (24, 28), mode='bilinear', align_corners=True)\n x1_local_i = self.local_conv1(x1_trans_i)\n x1_local_list.append(x1_local_i)\n\n # ============== Block 2 ==============\n # Block 2\n # global branch\n x2 = self.inception2(x1_out)\n x2_attn, x2_theta = self.ha2(x2)\n x2_out = x2 * x2_attn\n # local branch\n if 
self.learn_region:\n x2_local_list = []\n for region_idx in range(4):\n x2_theta_i = x2_theta[:,region_idx,:]\n x2_theta_i = self.transform_theta(x2_theta_i, region_idx)\n x2_trans_i = self.stn(x1_out, x2_theta_i)\n x2_trans_i = F.upsample(x2_trans_i, (12, 14), mode='bilinear', align_corners=True)\n x2_local_i = x2_trans_i + x1_local_list[region_idx]\n x2_local_i = self.local_conv2(x2_local_i)\n x2_local_list.append(x2_local_i)\n\n # ============== Block 3 ==============\n # Block 3\n # global branch\n x3 = self.inception3(x2_out)\n x3_attn, x3_theta = self.ha3(x3)\n x3_out = x3 * x3_attn\n # local branch\n if self.learn_region:\n x3_local_list = []\n for region_idx in range(4):\n x3_theta_i = x3_theta[:,region_idx,:]\n x3_theta_i = self.transform_theta(x3_theta_i, region_idx)\n x3_trans_i = self.stn(x2_out, x3_theta_i)\n x3_trans_i = F.upsample(x3_trans_i, (6, 7), mode='bilinear', align_corners=True)\n x3_local_i = x3_trans_i + x2_local_list[region_idx]\n x3_local_i = self.local_conv3(x3_local_i)\n x3_local_list.append(x3_local_i)\n\n # ============== Feature generation ==============\n # global branch\n x_global = F.avg_pool2d(x3_out, x3_out.size()[2:]).view(x3_out.size(0), x3_out.size(1))\n x_global = self.fc_global(x_global)\n # local branch\n if self.learn_region:\n x_local_list = []\n for region_idx in range(4):\n x_local_i = x3_local_list[region_idx]\n x_local_i = F.avg_pool2d(x_local_i, x_local_i.size()[2:]).view(x_local_i.size(0), -1)\n x_local_list.append(x_local_i)\n x_local = torch.cat(x_local_list, 1)\n x_local = self.fc_local(x_local)\n\n if not is_training:\n # l2 normalization before concatenation\n if self.learn_region:\n x_global = x_global / x_global.norm(p=2, dim=1, keepdim=True)\n x_local = x_local / x_local.norm(p=2, dim=1, keepdim=True)\n return [torch.cat([x_global, x_local], 1)]\n else:\n return [x_global]\n\n prelogits_global = self.classifier_global(x_global)\n if self.learn_region:\n prelogits_local = self.classifier_local(x_local)\n \n if self.loss == {'xent'}:\n if self.learn_region:\n return [prelogits_global, prelogits_local]\n else:\n return [prelogits_global]\n elif self.loss == {'xent', 'htri'}:\n if self.learn_region:\n return [(prelogits_global, prelogits_local), (x_global, x_local)]\n else:\n return [prelogits_global, x_global]\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))",
"import torch\n\ndef batch_euclidean_dist(x, y):\n \"\"\"\n Args:\n x: pytorch Variable, with shape [Batch size, Local part, Feature channel]\n y: pytorch Variable, with shape [Batch size, Local part, Feature channel]\n Returns:\n dist: pytorch Variable, with shape [Batch size, Local part, Local part]\n \"\"\"\n assert len(x.size()) == 3\n assert len(y.size()) == 3\n assert x.size(0) == y.size(0)\n assert x.size(-1) == y.size(-1)\n\n N, m, d = x.size()\n N, n, d = y.size()\n\n # shape [N, m, n]\n xx = torch.pow(x, 2).sum(-1, keepdim=True).expand(N, m, n)\n yy = torch.pow(y, 2).sum(-1, keepdim=True).expand(N, n, m).permute(0, 2, 1)\n dist = xx + yy\n dist.baddbmm_(1, -2, x, y.permute(0, 2, 1))\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n return dist\n\ndef shortest_dist(dist_mat):\n \"\"\"Parallel version.\n Args:\n dist_mat: pytorch Variable, available shape:\n 1) [m, n]\n 2) [m, n, N], N is batch size\n 3) [m, n, *], * can be arbitrary additional dimensions\n Returns:\n dist: three cases corresponding to `dist_mat`:\n 1) scalar\n 2) pytorch Variable, with shape [N]\n 3) pytorch Variable, with shape [*]\n \"\"\"\n m, n = dist_mat.size()[:2]\n # Just offering some reference for accessing intermediate distance.\n dist = [[0 for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n if (i == 0) and (j == 0):\n dist[i][j] = dist_mat[i, j]\n elif (i == 0) and (j > 0):\n dist[i][j] = dist[i][j - 1] + dist_mat[i, j]\n elif (i > 0) and (j == 0):\n dist[i][j] = dist[i - 1][j] + dist_mat[i, j]\n else:\n dist[i][j] = torch.min(dist[i - 1][j], dist[i][j - 1]) + dist_mat[i, j]\n dist = dist[-1][-1]\n return dist\n\ndef hard_example_mining(dist_mat, labels, return_inds=False):\n \"\"\"For each anchor, find the hardest positive and negative sample.\n Args:\n dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]\n labels: pytorch LongTensor, with shape [N]\n return_inds: whether to return the indices. 
Save time if `False`(?)\n    Returns:\n      dist_ap: pytorch Variable, distance(anchor, positive); shape [N]\n      dist_an: pytorch Variable, distance(anchor, negative); shape [N]\n      p_inds: pytorch LongTensor, with shape [N];\n        indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1\n      n_inds: pytorch LongTensor, with shape [N];\n        indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1\n    NOTE: Only consider the case in which all labels have same num of samples,\n      thus we can cope with all anchors in parallel.\n    \"\"\"\n\n    assert len(dist_mat.size()) == 2\n    assert dist_mat.size(0) == dist_mat.size(1)\n    N = dist_mat.size(0)\n\n    # shape [N, N]\n    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n\n    # `dist_ap` means distance(anchor, positive)\n    # both `dist_ap` and `relative_p_inds` with shape [N, 1]\n    dist_ap, relative_p_inds = torch.max(dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n    # `dist_an` means distance(anchor, negative)\n    # both `dist_an` and `relative_n_inds` with shape [N, 1]\n    dist_an, relative_n_inds = torch.min(dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n    # shape [N]\n    dist_ap = dist_ap.squeeze(1)\n    dist_an = dist_an.squeeze(1)\n\n    if return_inds:\n        # shape [N, N]\n        ind = (labels.new().resize_as_(labels).copy_(torch.arange(0, N).long()).unsqueeze( 0).expand(N, N))\n        # shape [N, 1]\n        p_inds = torch.gather(ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)\n        n_inds = torch.gather(ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)\n        # shape [N]\n        p_inds = p_inds.squeeze(1)\n        n_inds = n_inds.squeeze(1)\n        return dist_ap, dist_an, p_inds, n_inds\n\n    return dist_ap, dist_an\n\ndef euclidean_dist(x, y):\n    \"\"\"\n    Args:\n      x: pytorch Variable, with shape [m, d]\n      y: pytorch Variable, with shape [n, d]\n    Returns:\n      dist: pytorch Variable, with shape [m, n]\n    \"\"\"\n    m, n = x.size(0), y.size(0)\n    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()\n    dist = xx + yy\n    dist.addmm_(1, -2, x, y.t())\n    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability\n    return dist\n\ndef batch_local_dist(x, y):\n    \"\"\"\n    Args:\n      x: pytorch Variable, with shape [N, m, d]\n      y: pytorch Variable, with shape [N, n, d]\n    Returns:\n      dist: pytorch Variable, with shape [N]\n    \"\"\"\n    assert len(x.size()) == 3\n    assert len(y.size()) == 3\n    assert x.size(0) == y.size(0)\n    assert x.size(-1) == y.size(-1)\n\n    # shape [N, m, n]\n    dist_mat = batch_euclidean_dist(x, y)\n    dist_mat = (torch.exp(dist_mat) - 1.) / (torch.exp(dist_mat) + 1.)\n    # shape [N]\n    dist = shortest_dist(dist_mat.permute(1, 2, 0))\n    return dist\n\nif __name__ == '__main__':\n    x = torch.randn(32,2048)\n    y = torch.randn(32,2048)\n    # dummy labels: 8 ids x 4 samples each (hard mining assumes balanced ids)\n    labels = (torch.arange(32) % 8).long()\n    dist_mat = euclidean_dist(x,y)\n    dist_ap, dist_an, p_inds, n_inds = hard_example_mining(dist_mat, labels, return_inds=True)\n    from IPython import embed\n    embed()"
]
| [
[
"torch.nn.functional.upsample",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.grid_sample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.randn",
"torch.min",
"torch.exp",
"torch.arange",
"torch.pow"
]
]
|
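`HACNN.stn` above warps feature maps with an affine `theta` via `F.affine_grid` and `F.grid_sample`. A minimal sketch of that spatial-transformer step; the shapes and the example scale/translation values are illustrative only:

```python
# Spatial-transformer step as used by HACNN.stn: theta (batch, 2, 3) defines
# an affine crop of the feature map.
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 160, 64)  # (batch, channels, height, width)
theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 0.25, 0.5]]]).repeat(2, 1, 1)  # illustrative values
grid = F.affine_grid(theta, x.size())  # sampling location for each output pixel
out = F.grid_sample(x, grid)           # bilinear sampling -> warped region
print(out.shape)                       # torch.Size([2, 3, 160, 64])
```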
sdpython/ensae_teaching_cs | [
"ac978c4031afe6a5b846402a28628791e547a841",
"3bc80f29d93c30de812e34c314bc96e6a4f0d025"
]
| [
"src/ensae_teaching_cs/faq/faq_matplotlib.py",
"_unittests/ut_homeblog/test_tableformula2.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\n@file\n@brief Quelques problèmes récurrents avec `matplotlib <http://matplotlib.org/>`_.\n\"\"\"\nimport numpy\n\n\ndef graph_style(style='ggplot'):\n \"\"\"\n Changes :epkg:`matplotlib` style.\n\n @param style style\n\n .. faqref::\n :tag: matplotlib\n :title: Changer le style de graphique pour ggplot\n\n .. index:: ggplot\n\n Voir `Customizing plots with style sheets <http://matplotlib.org/users/style_sheets.html>`_\n\n ::\n\n import matplotlib.pyplot as plt\n plt.style.use('ggplot')\n \"\"\"\n import matplotlib.pyplot as plt\n plt.style.use(style)\n\n\ndef close_all():\n \"\"\"\n Closes every graph with :epkg:`matplotlib`.\n\n .. faqref::\n :tag: matplotlib\n :title: Plante après plusieurs graphes\n\n Il peut arriver que matplotlib fasse planter python sans qu'aucune exception ne soit générée.\n L'article `matplotlib crashing Python <http://stackoverflow.com/questions/26955017/matplotlib-crashing-python>`_\n suggère la solution suivante ::\n\n import matplotlib.pyplot as plt\n plt.close('all')\n\n Voir `close <http://matplotlib.org/api/pyplot_api.html?highlight=close#matplotlib.pyplot.close>`_.\n \"\"\"\n import matplotlib.pyplot as plt\n plt.close('all')\n\n\ndef graph_with_label(x, y, labels, barplot=True, title=None, figsize=(6, 4), style=None,\n ax=None, **kwargs):\n \"\"\"\n Creates a graph with :epkg:`matplotlib`.\n\n @param x x\n @param y y\n @param labels x labels\n @param barplot boolean, True, uses bar, plot otherwise\n @param title if not None, sets the title\n @param figsize only if ax is not None\n @param style style\n @param ax existing :epkg:`Axes` or None if it must be created\n @param kwargs others parameters\n @return :epkg:`Axes`\n\n .. faqref::\n :tag: matplotlib\n :title: Comment ajuster les labels non numériques d'un graphe ?\n\n .. index:: date, matplotlib\n\n Lorsqu'on trace un graphique et qu'on veut ajouter des labels non numériques\n sur l'axe des abscisses (en particulier des dates), *matplotlib*\n ne fait pas apparaître tous les labels. Ainsi, si on a 50 points,\n 50 abscisses et 50 labels, seuls les premiers labels apparaîtront\n comme ceci :\n\n .. plot::\n\n import matplotlib.pyplot as plt\n x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,\n 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43]\n y = [1, 3, 10, 6, 3, 5, 3, 6, 4, 2, 3, 2, 11, 10, 4, 5, 2, 5, 4, 1, 1, 1, 3, 15, 5, 2, 1, 5, 3, 1, 3,\n 2, 4, 5, 2, 12, 12, 5, 11, 2, 19, 21, 5, 2]\n xl = ['2014-w04', '2014-w05', '2014-w06', '2014-w07', '2014-w08', '2014-w09',\n '2014-w10', '2014-w11',\n '2014-w12', '2014-w13', '2014-w14', '2014-w15', '2014-w16',\n '2014-w17', '2014-w18', '2014-w19', '2014-w20', '2014-w21', '2014-w22', '2014-w23',\n '2014-w24', '2014-w25', '2014-w27',\n '2014-w29', '2014-w30', '2014-w31', '2014-w32', '2014-w34', '2014-w35', '2014-w36',\n '2014-w38', '2014-w39', '2014-w41',\n '2014-w42', '2014-w43', '2014-w44', '2014-w45', '2014-w46', '2014-w47', '2014-w48',\n '2014-w49', '2014-w50', '2014-w51', '2014-w52']\n plt.close('all')\n fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,4))\n ax.bar( x,y )\n ax.set_xticklabels( xl )\n ax.grid(True)\n ax.set_title(\"commits\")\n plt.show()\n\n Or c'est cela qu'on veut :\n\n .. 
plot::\n\n import matplotlib.pyplot as plt\n x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43]\n y = [1, 3, 10, 6, 3, 5, 3, 6, 4, 2, 3, 2, 11, 10, 4, 5, 2, 5, 4, 1, 1, 1, 3, 15, 5, 2, 1, 5,\n 3, 1, 3, 2, 4, 5, 2, 12, 12, 5, 11, 2, 19, 21, 5, 2]\n xl = ['2014-w04', '2014-w05', '2014-w06', '2014-w07', '2014-w08', '2014-w09',\n '2014-w10', '2014-w11', '2014-w12', '2014-w13', '2014-w14',\n '2014-w15', '2014-w16', '2014-w17', '2014-w18', '2014-w19',\n '2014-w20', '2014-w21', '2014-w22', '2014-w23', '2014-w24', '2014-w25',\n '2014-w27', '2014-w29', '2014-w30', '2014-w31', '2014-w32', '2014-w34',\n '2014-w35', '2014-w36', '2014-w38', '2014-w39', '2014-w41',\n '2014-w42', '2014-w43', '2014-w44', '2014-w45', '2014-w46', '2014-w47',\n '2014-w48', '2014-w49', '2014-w50', '2014-w51', '2014-w52']\n plt.close('all')\n fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,4))\n ax.bar( x,y )\n tig = ax.get_xticks()\n labs = [ ]\n for t in tig:\n if t in x: labs.append(xl[x.index(t)])\n else: labs.append(\"\")\n ax.set_xticklabels( labs )\n ax.grid(True)\n ax.set_title(\"commits\")\n plt.show()\n\n Pour cela il faut d'abord utiliser la méthode\n `get_xticks <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.get_xticks>`_\n pour récupérer d'abord les graduations et n'afficher les labels que\n pour celles-ci\n (voir aussi `Custom ticks autoscaled when using imshow?\n <http://stackoverflow.com/questions/13409006/custom-ticks-autoscaled-when-using-imshow>`_).\n Voici un exemple de code ::\n\n import matplotlib.pyplot as plt\n x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43]\n y = [1, 3, 10, 6, 3, 5, 3, 6, 4, 2, 3, 2, 11, 10, 4, 5, 2, 5, 4, 1, 1, 1, 3, 15, 5, 2, 1, 5, 3, 1, 3, 2,\n 4, 5, 2, 12, 12, 5, 11, 2, 19, 21, 5, 2]\n xl = ['2014-w04', '2014-w05', '2014-w06', '2014-w07', '2014-w08', '2014-w09', '2014-w10', '2014-w11', '2014-w12', '2014-w13',\n '2014-w14', '2014-w15', '2014-w16', '2014-w17', '2014-w18', '2014-w19', '2014-w20', '2014-w21',\n '2014-w22', '2014-w23', '2014-w24', '2014-w25',\n '2014-w27', '2014-w29', '2014-w30', '2014-w31', '2014-w32', '2014-w34', '2014-w35', '2014-w36',\n '2014-w38', '2014-w39', '2014-w41', '2014-w42',\n '2014-w43', '2014-w44', '2014-w45', '2014-w46', '2014-w47', '2014-w48', '2014-w49',\n '2014-w50', '2014-w51', '2014-w52']\n plt.close('all')\n fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,4))\n ax.bar( x,y )\n tig = ax.get_xticks()\n labs = [ ]\n for t in tig:\n if t in x:\n labs.append(xl[x.index(t)])\n else:\n # une graduation peut être en dehors des labels proposés\n labs.append(\"\")\n ax.set_xticklabels( labs )\n ax.grid(True)\n ax.set_title(\"commits\")\n plt.show()\n \"\"\"\n import matplotlib.pyplot as plt\n if ax is None:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 4))\n\n if barplot:\n if style is None:\n ax.bar(x, y, **kwargs)\n else:\n ax.bar(x, y, style=style, **kwargs)\n else:\n if style is None:\n ax.plot(x, y, **kwargs)\n else:\n ax.plot(x, y, style=style, **kwargs)\n tig = ax.get_xticks()\n xl = labels\n labs = []\n for t in tig:\n if t in x:\n labs.append(xl[x.index(t)])\n else:\n labs.append(\"\")\n ax.set_xticklabels(labs)\n ax.grid(True)\n if title is not None:\n ax.set_title(title)\n return ax\n\n\ndef change_legend_location(ax, new_location=\"lower center\"):\n \"\"\"\n Changes the 
location of the legend.\n\n @param ax :epkg:`Axes`\n @param new_location new_location, see method :epkg:`legend`\n @return ax\n\n .. faqref::\n :tag: matplotlib\n :title: Comment changer l'emplacement de la légende ?\n\n On cherche ici à changer l'emplacement de la légende alors que celle-ci a déjà été\n définie par ailleurs. C'est pratique lorsque celle-ci cache une partie du graphe\n qu'on veut absolument montrer.\n On ne dispose que de l'objet *ax* de type :epkg:`Axes`.\n On utilise pour cela la méthode :epkg:`legend`\n et le code suivant :\n\n ::\n\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles, labels, loc=\"lower center\")\n\n Les différentes options pour le nouvel emplacement sont énoncées\n dans l'aide associée à la méthode :epkg:`legend`.\n \"\"\"\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles, labels, loc=new_location)\n return ax\n\n\ndef avoid_overlapping_dates(fig, **options):\n \"\"\"\n Avoids overlapping dates by calling method\n :epkg:`autofmt_xdate`.\n\n .. faqref::\n :tag: matplotlib\n :title: Comment éviter les dates qui se superposent ?\n\n La méthode :epkg:`autofmt_xdate`\n permet d'éviter les problèmes de dates\n qui se superposent.\n\n ::\n\n fig, ax = plt.subplots(...)\n # ...\n fig.autofmt_xdate()\n \"\"\"\n fig.autofmt_xdate(**options)\n\n\ndef graph_cities_default_lands():\n \"\"\"\n Returns the default list of elements which can be added to a map.\n See `Features <https://scitools.org.uk/cartopy/docs/v0.15/matplotlib/feature_interface.html#cartopy.feature.GSHHSFeature>`_.\n\n .. runpython::\n :showcode:\n\n from ensae_teaching_cs.faq.faq_matplotlib import graph_cities_default_lands\n print(graph_cities_default_lands())\n \"\"\"\n return [\"BORDERS\", \"COASTLINE\", \"LAKES\", \"LAND\", \"OCEAN\", \"RIVERS\"]\n\n\ndef graph_cities(df, names=(\"Longitude\", \"Latitude\", \"City\"), ax=None, linked=False,\n fLOG=None, loop=False, many=False,\n draw_coastlines=True, draw_countries=True,\n fill_continents=True, draw_parallels=True,\n draw_meridians=True, draw_map_boundary=True,\n **params):\n \"\"\"\n Plots the cities on a map with :epkg:`cartopy`.\n Only not empty names are displayed on the graph.\n\n @param df dataframe\n @param names names of the column Latitude, Longitude, City\n @param ax existing ax\n @param linked draw lines between points\n @param loop add a final line to link the first point to the final one\n @param fLOG logging function\n @param params see below\n @param many change the return\n @param draw_coastlines draw coast lines\n @param draw_countries draw borders\n @param draw_map_boundary draw boundaries\n @param draw_meridians draw meridians\n @param draw_parallels draw parallels\n @param fill_continents fill continents\n @return *ax* or *fig, ax, m* if *many* is True\n\n Additional parameters:\n\n * projection: see `projections <https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html>`_,\n only used is *ax* is None\n * bounds: something like ``[lon1, lon2, lat1, lat2]``\n * landscape: a list of strings about what needs to be on the map,\n see @see fn graph_cities_default_lands.\n * style, markersize, fontname, fontcolor, fontsize, fontweight, fontvalign\n\n If the function returns the following error\n ``'AxesSubplot' object has no attribute 'add_feature'``,\n it means no projection was added to the axis.\n The function currently creates the following way:\n\n ::\n\n import cartopy.crs as ccrs\n import matplotlib.pyplot as plt\n projection = params.pop('projection', ccrs.PlateCarree())\n 
fig = plt.figure(**params)\n ax = fig.add_subplot(1, 1, 1, projection)\n \"\"\"\n bounds = params.pop(\"bounds\", None)\n landscape = params.pop(\"landscape\", graph_cities_default_lands())\n\n style = params.pop('style', 'ro')\n markersize = params.pop('markersize', 6)\n fontname = params.pop('fontname', 'Arial')\n fontsize = str(params.pop('fontsize', '16'))\n fontcolor = params.pop('fontcolor', 'black')\n fontweight = params.pop('fontweight', 'normal')\n fontvalign = params.pop('fontvalign', 'bottom')\n\n xx = list(df[names[0]])\n yy = list(df[names[1]])\n\n if ax is None:\n import cartopy.crs as ccrs\n import matplotlib.pyplot as plt\n projection = params.pop('projection', ccrs.PlateCarree())\n fig = plt.figure(**params)\n ax = fig.add_subplot(1, 1, 1, projection=projection)\n else:\n fig = None\n\n import cartopy.feature as cfeature\n for land in landscape:\n attr = getattr(cfeature, land)\n ax.add_feature(attr)\n\n if linked and \"-\" not in style:\n style += \"-\"\n ax.plot(df[names[0]], df[names[1]], style, markersize=markersize)\n ax.set_title('France')\n\n minx, maxx = min(xx), max(xx)\n miny, maxy = min(yy), max(yy)\n avex, avey = numpy.mean(xx), numpy.mean(yy)\n if fLOG:\n mes = \"[graph_cities] Lon:[{0}, {1}] x Lat:[{2}, {3}] - mean={4}, {5} - linked={6}\"\n fLOG(mes.format(minx, maxx, miny, maxy, avex, avey, linked))\n if bounds:\n dx = (maxx - minx) / 10\n dy = (maxy - miny) / 10\n minx -= dx\n maxx += dx\n miny -= dy\n maxy += dy\n ax.set_extent(bounds)\n else:\n ax.set_extent([minx, maxx, miny, maxy])\n if fLOG:\n fLOG(\"[graph_cities] \", [minx, maxx, miny, maxy])\n\n view = df[list(names)]\n for x, y, t in view.itertuples(index=False):\n if t is None or len(t) == 0:\n continue\n ax.text(x, y, t,\n fontname=fontname, size=fontsize,\n color=fontcolor, weight=fontweight,\n verticalalignment=fontvalign)\n return fig, ax\n",
"\"\"\"\n@brief test log(time=2s)\n\"\"\"\nimport os\nimport unittest\nimport random\nimport pandas\nfrom ensae_teaching_cs.homeblog.table_formula import TableFormula\n\n\nclass TestTableFormula2(unittest.TestCase):\n\n def test_TableFormulaCore_Excel(self):\n fold = os.path.split(__file__)[0]\n\n assert TableFormula.delta is not None\n\n file = os.path.join(fold, \"data\", \"BNP.PA.txt\")\n table = TableFormula(file, sep=\",\")\n table.sort(lambda v: v[\"Date\"])\n assert len(table) > 0\n\n tempfold = os.path.join(fold, \"temp_store\")\n if not os.path.exists(tempfold):\n os.mkdir(tempfold)\n\n tempexc = os.path.join(tempfold, \"temp_excel_table.xls\")\n if os.path.exists(tempexc):\n os.remove(tempexc)\n assert not os.path.exists(tempexc)\n\n table.save_as_excel(tempexc)\n assert os.path.exists(tempexc)\n\n tempexc = os.path.join(tempfold, \"temp_excel_table.xlsx\")\n if os.path.exists(tempexc):\n os.remove(tempexc)\n assert not os.path.exists(tempexc)\n\n table.save_as_excel(tempexc)\n assert os.path.exists(tempexc)\n\n def test_issubclass(self):\n fold = os.path.split(__file__)[0]\n\n tbl = TableFormula\n r = issubclass(tbl, TableFormula)\n assert r\n\n file = os.path.join(fold, \"data\", \"BNP.PA.txt\")\n table = tbl(file, sep=\",\")\n r = issubclass(table.__class__, TableFormula)\n assert r\n\n def test_addc(self):\n values = [random.random() for i in range(0, 100)]\n values = [[x, x + random.random() / 2] for x in values]\n tbl = TableFormula([\"x\", \"y\"], values)\n\n tbl.addc(\"a\", lambda v: 0, 0)\n assert tbl.header == [\"a\", \"x\", \"y\"]\n\n tbl.addc((\"aa\", \"bb\"), [lambda v: 4, lambda v: 5], 0)\n assert tbl.header == [\"aa\", \"bb\", \"a\", \"x\", \"y\"]\n\n tbl.addc((\"aaa\", \"bbb\"), lambda v: (7, 8), 0)\n assert tbl.header == [\"aaa\", \"bbb\", \"aa\", \"bb\", \"a\", \"x\", \"y\"]\n assert tbl[0, 0] == 7\n assert tbl[0, 1] == 8\n\n tbl.addc((\"aaaa\", \"bbba\"), lambda v: (8, 9))\n assert tbl.header == [\"aaa\", \"bbb\", \"aa\",\n \"bb\", \"a\", \"x\", \"y\", \"aaaa\", \"bbba\"]\n assert tbl[0, -2] == 8\n assert tbl[0, -1] == 9\n\n def test_pandas_matrix(self):\n fold = os.path.split(__file__)[0]\n file = os.path.join(fold, \"data\", \"BNP.PA.txt\")\n\n df = pandas.read_csv(file, sep=\",\")\n assert \"Date\" in df.columns # pylint: disable=E1101\n assert \"High\" in df.columns # pylint: disable=E1101\n assert len(df) == 2344\n mat = TableFormula(df)\n assert len(mat) == 2344\n if not isinstance(mat.header, list):\n raise Exception(\"expecting type: \" + str(type(mat.header)))\n assert mat.header == ['index', 'Date', 'Open',\n 'High', 'Low', 'Close', 'Volume', 'Adj Close']\n\n df = pandas.read_csv(file, sep=\",\")\n df.set_index(\"Date\")\n mat = TableFormula(df)\n assert len(mat) == 2344\n assert mat.header == ['index', 'Date', 'Open',\n 'High', 'Low', 'Close', 'Volume', 'Adj Close']\n df = mat.dataframe\n assert len(df) == 2344\n\n def test_pandas_matrix_index(self):\n fold = os.path.split(__file__)[0]\n file = os.path.join(fold, \"data\", \"BNP.PA.txt\")\n df = pandas.read_csv(file, sep=\",\", index_col=[\"Date\"])\n mat = TableFormula(df)\n assert len(mat) == 2344\n assert mat.header == ['index', 'Open', 'High',\n 'Low', 'Close', 'Volume', 'Adj Close']\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"matplotlib.pyplot.subplots",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv"
]
]
|
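The tick-label trick documented in `graph_with_label` above boils down to attaching a label only where matplotlib actually placed a tick. A condensed sketch of the same trick with made-up data:

```python
# Only label the tick positions matplotlib chose; leave the rest blank.
import matplotlib.pyplot as plt

x = list(range(10))
y = [1, 3, 2, 5, 4, 6, 2, 7, 3, 8]
xl = ["2014-w%02d" % i for i in x]  # made-up week labels

fig, ax = plt.subplots()
ax.bar(x, y)
ticks = ax.get_xticks()
ax.set_xticklabels([xl[int(t)] if t in x else "" for t in ticks])
ax.grid(True)
plt.show()
```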
Neo9061/amazon-sagemaker-examples | [
"da58c2950286a2e40bd53a5d5135b1e23fd79e63"
]
| [
"sagemaker-pipeline-multi-model/sagemaker-pipeline/pipelines/restate/dtree_evaluate.py"
]
| [
"# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Evaluation script for measuring model accuracy.\"\"\"\n\nimport json\nimport logging\nimport os\nimport pickle\nimport tarfile\n\nimport pandas as pd\nimport numpy\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler())\n\n# May need to import additional metrics depending on what you are measuring.\n# See https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html\nfrom sklearn.metrics import (\n    accuracy_score,\n    classification_report,\n    roc_auc_score,\n    mean_squared_error,\n    mean_absolute_error,\n    r2_score,\n)\n\nif __name__ == \"__main__\":\n\n    tar_model_path = \"/opt/ml/processing/model/model.tar.gz\"\n    model_path = \"/opt/ml/processing/model/decision-tree-model.pkl\"\n\n    with tarfile.open(tar_model_path) as tar:\n        tar.extractall(path=\"/opt/ml/processing/model/\")\n\n    logger.debug(\"Loading DTree model.\")\n\n    model = pickle.load(open(model_path, \"rb\"))\n\n    test_path = \"/opt/ml/processing/test/test.csv\"\n\n    logger.info(\"Loading test input data\")\n\n    df = pd.read_csv(test_path, header=None)\n\n    logger.debug(\"Reading test data.\")\n    y_test = df.iloc[:, 0].to_numpy()\n    df.drop(df.columns[0], axis=1, inplace=True)\n    X_test = numpy.array(df.values)\n    logger.info(X_test[0])\n\n    logger.info(\"Performing predictions against test data.\")\n    predictions = model.predict(X_test)\n\n    logger.info(\"Creating regression evaluation report\")\n\n    mse = mean_squared_error(y_test, predictions)\n    r2s = r2_score(y_test, predictions)\n\n    report_dict = {\n        \"regression_metrics\": {\n            \"mse\": {\"value\": mse},\n            \"r2s\": {\"value\": r2s},\n        },\n    }\n\n    logger.info(\"Regression report:\\n{}\".format(report_dict))\n\n    evaluation_output_path = os.path.join(\"/opt/ml/processing/evaluation\", \"dtree_evaluation.json\")\n    logger.info(\"Saving regression report to {}\".format(evaluation_output_path))\n\n    with open(evaluation_output_path, \"w\") as f:\n        f.write(json.dumps(report_dict))\n"
]
| [
[
"numpy.array",
"pandas.read_csv",
"sklearn.metrics.r2_score",
"sklearn.metrics.mean_squared_error"
]
]
|
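The evaluation step above serializes its regression metrics as a nested `report_dict`. The same structure, reduced to a self-contained example with toy predictions:

```python
# Shape of the evaluation report written above, on toy data.
import json
from sklearn.metrics import mean_squared_error, r2_score

y_test = [3.0, 2.5, 4.0, 5.5]
predictions = [2.8, 2.7, 3.9, 5.4]
report_dict = {
    "regression_metrics": {
        "mse": {"value": mean_squared_error(y_test, predictions)},
        "r2s": {"value": r2_score(y_test, predictions)},
    },
}
print(json.dumps(report_dict))  # numpy float64 subclasses float, so this serializes
```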
o-P-o/disagree | [
"4c7aecdbb6bc38f3a6e81338bc0bd7124f6d10c0"
]
| [
"disagree/metrics.py"
]
| [
"\"\"\"\nSee Jupyter notebooks for example usage\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport math\nimport sys\n\nfrom collections import Counter\nfrom tqdm import tqdm\nfrom utils import convert_dataframe\n\nfrom scipy.stats import pearsonr, kendalltau, spearmanr\n\n\nDATAFRAME_ERROR = \"Data input must be a pandas DataFrame\"\nANNOTATORS_ERROR = \"Invalid choice of annotators.\\n Possible options: \"\nKRIPP_DATA_TYPE_ERROR = \"\"\"Invalid 'data_type' input.\\n Possible options are\n(nominal, ordinal, interval, ratio)\"\"\"\nMATRIX_INPUT_ERROR = \"\"\"Error: The func argument must take two annotators as\narguments. You may choose joint_probability or cohens_kappa\"\"\"\n\n\ndef main_input_checks(df, labels):\n if not isinstance(df, pd.DataFrame):\n raise TypeError(DATAFRAME_ERROR)\n\n\nclass Metrics():\n def __init__(self, df):\n converted_data = convert_dataframe(df)\n self.df = converted_data[0]\n self.labels = converted_data[1]\n self.data_dict = converted_data[2]\n main_input_checks(self.df, self.labels)\n\n def joint_probability(self, ann1, ann2):\n \"\"\"\n The joint probability of agreement between two annotators.\n The most basic (and least useful) statistic to measure pairwise\n annotator agreement for non-continuous labelling.\n\n Parameters\n ----------\n ann1: string\n Name of one of the annotators\n ann2: string\n Name of another annotator\n\n Returns\n -------\n Probability of the two annotators agreeing across all instances\n \"\"\"\n all_anns = self.df.columns\n if (ann1 not in all_anns or ann2 not in all_anns):\n raise ValueError(ANNOTATORS_ERROR + str(list(all_anns)))\n\n df = self.df.dropna(subset=[ann1, ann2])\n ann1_labels = df[ann1].values.tolist()\n ann2_labels = df[ann2].values.tolist()\n zipped = zip(ann1_labels, ann2_labels)\n agree = [1 if label[0] == label[1] else 0 for label in zipped]\n\n return sum(agree) / len(agree)\n\n def cohens_kappa(self, ann1, ann2):\n \"\"\"\n A statistic to measure pairwise annotator agreement for non-continuous\n labelling.\n\n Parameters\n ----------\n ann1: string\n Name of one of the annotators\n ann2: string\n Name of another annotator\n\n Returns\n -------\n Cohen's kappa statistic between the two annotators\n \"\"\"\n all_anns = self.df.columns\n if (ann1 not in all_anns or ann2 not in all_anns):\n raise ValueError(ANNOTATORS_ERROR + str(list(all_anns)))\n\n df = self.df.dropna(subset=[ann1, ann2])\n ann1_labels = df[ann1].values.tolist()\n ann2_labels = df[ann2].values.tolist()\n num_instances = self.df.shape[0]\n num_categories = len(self.labels)\n\n ann1_num, ann2_num = [], []\n for label in self.labels:\n ann1_counter = Counter(ann1_labels)\n ann2_counter = Counter(ann2_labels)\n ann1_num.append(ann1_counter[label])\n ann2_num.append(ann2_counter[label])\n\n assert len(ann1_num) == len(self.labels)\n assert len(ann1_num) == len(self.labels)\n\n summation = 0\n for i in range(len(ann1_num)):\n summation += (ann1_num[i] * ann2_num[i])\n\n chance_agreement_prob = (1 / num_instances ** 2) * summation\n observed_agreement_prob = self.joint_probability(ann1, ann2)\n\n if chance_agreement_prob == 1:\n return 1.\n\n numerator = observed_agreement_prob - chance_agreement_prob\n denominator = 1. 
- chance_agreement_prob\n\n return numerator / denominator\n\n def df2table(self, df):\n # fleiss_kappa() helper function\n # Convert df(rows=instances, cols=annotators)\n # to df(rows=instances, cols=labels)\n n = len(self.labels)\n\n df_rows = []\n for idx, row in df.iterrows():\n labels = [0] * n\n for label in row:\n if not math.isnan(label):\n labels[int(label)] += 1\n df_rows.append(labels)\n\n return pd.DataFrame(df_rows, columns=self.labels)\n\n def proportion_label_per_category(self, df):\n # fleiss_kappa() helper function\n # Formula for calculating the proportion of all annotator\n # labels to the j-th category (list of all j)\n num_assignments = list(df.sum(axis=0))\n normaliser = 1. / sum(num_assignments)\n\n return [normaliser * i for i in num_assignments]\n\n def rater_agreement_extent(self, df):\n # fleiss_kappa() helper function\n # Formula for calculating the extent to which annotators\n # agree on instance j (list of all j)\n # Returns 1 for full agreement\n total_labels = list(df.sum(axis=1))\n df2 = df ** 2\n total_labels_squared = list(df2.sum(axis=1))\n v1 = np.array(total_labels_squared)\n v2 = np.array(total_labels)\n summations = list(v1 - v2)\n\n final = []\n for i in range(len(total_labels)):\n try:\n normalise = 1. / (total_labels[i] * (total_labels[i] - 1.))\n except ZeroDivisionError:\n normalise = 0\n final.append(normalise * summations[i])\n\n return final\n\n def fleiss_kappa(self):\n \"\"\"\n A statistic to measure agreement between any number of annotators\n for non-continuous labelling.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Fleiss' kappa statistic for all the annotators\n \"\"\"\n labels_per_instance = []\n for i, row in self.df.iterrows():\n num_nans = sum(math.isnan(k) for k in row)\n labels_per_instance.append(len(row) - num_nans)\n\n num_instances = self.df.shape[0]\n fleiss_df = self.df2table(self.df)\n prop_labels_per_cat = self.proportion_label_per_category(fleiss_df)\n rater_agreement_extent = self.rater_agreement_extent(fleiss_df)\n\n mean_P = (1 / num_instances) * sum(rater_agreement_extent)\n mean_p = sum([i ** 2 for i in prop_labels_per_cat])\n\n if mean_p == 1:\n return 1.\n\n return (mean_P - mean_p) / (1 - mean_p)\n\n def correlation(self, ann1, ann2, measure=\"pearson\"):\n \"\"\"\n Computes the correlation coefficient as a statistic for\n the agreement between two annotators. 
This\n        method uses the scipy.stats module.\n\n        Only appropriate for datasets larger than 500 or so (see scipy\n        documentation).\n\n        Parameters\n        ----------\n        ann1: string\n            Name of one of the annotators\n        ann2: string\n            Name of another annotator\n        measure: string, (\"kendall\", \"pearson\", \"spearman\")\n            Pearson r, or Kendall tau, or Spearman rho statistics\n            Pearson: assumes continuously labelled data\n            Kendall/Spearman: assumes ordinal data\n\n        Returns\n        -------\n        Tuple, (correlation, p-value)\n        \"\"\"\n        P = \"pearson\"\n        S = \"spearman\"\n        K = \"kendall\"\n        if not (measure == P or measure == S or measure == K):\n            raise ValueError(\"Input measure '\" + str(measure) + \"' is invalid.\\n Possible options: (pearson, kendall, spearman)\")\n\n        all_anns = self.df.columns\n        if (ann1 not in all_anns or ann2 not in all_anns):\n            raise ValueError(ANNOTATORS_ERROR + str(list(all_anns)))\n\n        ann1_labels = self.df[ann1].values.tolist()\n        ann2_labels = self.df[ann2].values.tolist()\n\n        ann1_, ann2_ = [], []\n        for i, label in enumerate(ann1_labels):\n            ann2_label = ann2_labels[i]\n            if (not math.isnan(label) and not math.isnan(ann2_label)):\n                ann1_.append(label)\n                ann2_.append(ann2_label)\n\n        if (len(ann1_) == 0 and len(ann2_) == 0):\n            raise ValueError(\"Annotators \" + str(ann1) + \" and \" + str(ann2) + \" have not labelled any of the same instances.\")\n\n        if measure == \"pearson\":\n            result = pearsonr(ann1_, ann2_)\n            return (abs(result[0]), result[1])\n        elif measure == \"kendall\":\n            result = kendalltau(ann1_, ann2_)\n            return (abs(result[0]), result[1])\n        elif measure == \"spearman\":\n            result = spearmanr(ann1_, ann2_)\n            return (abs(result[0]), result[1])\n\n    def metric_matrix(self, func):\n        all_anns = [ann for ann in self.df.columns]\n        matrix = np.zeros((len(all_anns), len(all_anns)))\n\n        for i, ann1 in enumerate(all_anns):\n            for j, ann2 in enumerate(all_anns):\n                try:\n                    val = func(ann1, ann2)\n                    matrix[i][j] = float(\"{:.3f}\".format(val))\n                except TypeError:\n                    print(MATRIX_INPUT_ERROR)\n                    sys.exit(1)\n\n        return matrix\n\n    def instance_degree(self, labels):\n        # bidisagreement_degree() helper function.\n        # Computes the degree for a given instance of data, input as a list of annotations\n        all_labels = list(set(labels))  # cast to a list: sets cannot be indexed\n\n        if len(all_labels) != 2:\n            return 0\n\n        label1 = all_labels[0]\n        label2 = all_labels[1]\n\n        if labels.count(label1) > labels.count(label2):\n            looper = label1\n        else:\n            looper = label2\n\n        new_labels = [1 if i == looper else 0 for i in labels]\n        count = sum(new_labels)\n        degree = (len(labels) - count) / count\n\n        return degree\n\n    def bidisagreement_degree(self):\n        \"\"\"\n        Computes the degree of bidisagreements throughout the dataset.\n        This is done by considering each bidisagreement, and assigning\n        a value to this based on how strong the bidisagreement is.\n\n        Example: For a given instance, if half of the values are different\n        then the degree is 1, and if all are the same except for one, then\n        the degree will be as close to zero as possible.\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        Float\n        \"\"\"\n        average_degree = 0\n        for instance in self.df.itertuples(index=False):  # index=False so the row index is not counted as a label\n            instance = list(instance)\n            degree = self.instance_degree(instance)\n            average_degree += degree\n\n        return average_degree / self.df.shape[0]\n\n\ndef main_count_(num_instances, labels_per_instance, num_anns, df_as_matrix, i, j):\n    main_count = 0\n    for k in range(num_instances):\n        count = 0\n        m = labels_per_instance[k]\n        if (m == 0 or m == 1):\n            continue\n        for perm in itertools.permutations(range(num_anns), 2):\n            b1 
= int((df_as_matrix[perm[0]][k] == i))\n b2 = int((df_as_matrix[perm[1]][k] == j))\n count += (b1 * b2)\n count /= (m - 1)\n main_count += count\n return main_count\n\ndef coincidence_mat(df_as_matrix, labels, num_anns, num_instances, labels_per_instance, use_tqdm):\n # Helper function for metrics.Krippendorff.\n # For technical details on the coincidence matrix, see the\n # Krippendorff's alpha Wikipedia page.\n coincidence_mat = np.zeros((len(labels), len(labels)))\n\n if use_tqdm:\n loop_object = tqdm(labels)\n else:\n loop_object = labels\n\n for i, label in enumerate(loop_object):\n for j in range(len(labels)):\n main_count = main_count_(num_instances, labels_per_instance, num_anns, df_as_matrix, i, j)\n coincidence_mat[i][j] = main_count\n\n return coincidence_mat\n\n\nclass Krippendorff():\n \"\"\"\n Class for computing Krippendorff's alpha statistic between annotations\n agreements.\n\n Parameters\n ----------\n df: pandas DataFrame\n rows are data instances, columns are annotator labels\n labels: list\n list of possible labels from 0 up\n\n Initialised\n -----------\n num_anns: float\n number of annotators in the data\n num_instances: float\n number of instances of labelled data\n A: numpy array\n matrix version of the dataframe transposed\n labels_per_instance: list\n list of len(num_instances)\n Each element is the number of times that instance was labelled\n coincidence_matrix: numpy array\n matrix computed in coincidence_mat()\n coincidence_matrix_sum: 1D numpy array\n sum of rows/columns in coincidence_matrix\n \"\"\"\n def __init__(self, df, use_tqdm=False):\n converted_data = convert_dataframe(df)\n self.df = converted_data[0]\n self.labels = converted_data[1]\n self.data_dict = converted_data[2]\n\n #main_input_checks(df, labels)\n\n #self.df = df\n #self.labels = labels\n self.num_anns = self.df.shape[1]\n self.num_instances = self.df.shape[0]\n self.A = self.df.values\n self.A = self.A.transpose()\n self.use_tqdm = use_tqdm\n\n self.labels_per_instance = []\n for i, row in self.df.iterrows():\n self.labels_per_instance.append(len(row) - sum(math.isnan(k) for k in row))\n\n self.coincidence_matrix = coincidence_mat(self.A, self.labels, self.num_anns, self.num_instances, self.labels_per_instance, self.use_tqdm)\n self.coincidence_matrix_sum = np.sum(self.coincidence_matrix, axis=0)\n\n def delta_nominal(self, v1, v2):\n if v1 == v2:\n return 0\n else:\n return 1\n\n def delta_ordinal(self, v1, v2):\n v1, v2 = int(v1), int(v2)\n\n val = 0\n for g in range(v1, v2 + 1):\n element1 = self.coincidence_matrix_sum[g]\n val += element1\n\n element2 = (self.coincidence_matrix_sum[v1] + self.coincidence_matrix_sum[v2]) / 2.\n val = val - element2\n\n return val ** 2\n\n def delta_interval(self, v1, v2):\n v1, v2 = float(v1), float(v2)\n return (v1 - v2) ** 2\n\n def delta_ratio(self, v1, v2):\n v1, v2 = float(v1), float(v2)\n return ((v1 - v2) / (v1 + v2)) ** 2\n\n def disagreement(self, obs_or_exp, data_type):\n if obs_or_exp == \"expected\":\n n = self.coincidence_matrix_sum\n n_total = sum(n)\n coeff = 1 / (n_total - 1)\n else:\n coeff = 1\n\n result = 0\n for v1 in range(1, len(self.labels)):\n for v2 in range(v1):\n if data_type == \"nominal\":\n delta = self.delta_nominal(str(v1), str(v2))\n elif data_type == \"ordinal\":\n delta = self.delta_ordinal(str(v1), str(v2))\n elif data_type == \"interval\":\n delta = self.delta_interval(str(v1), str(v2))\n elif data_type == \"ratio\":\n delta = self.delta_ratio(str(v1), str(v2))\n\n if obs_or_exp == \"observed\":\n result += 
(self.coincidence_matrix[v1][v2] * delta)\n else:\n result += (n[v1] * n[v2] * delta)\n\n return coeff * result\n\n def alpha(self, data_type=\"nominal\"):\n \"\"\"\n Attribute used to produce Krippendorff's alpha\n\n Parameters\n ----------\n data_type: str, (\"nominal\", \"ordinal\", \"interval\", \"ratio\")\n\n Returns\n -------\n Krippendorff's alpha: float\n \"\"\"\n if not (data_type == \"nominal\" or data_type == \"ordinal\" or data_type == \"interval\" or data_type == \"ratio\"):\n raise ValueError(KRIPP_DATA_TYPE_ERROR)\n\n observed_disagreement = self.disagreement(obs_or_exp=\"observed\",\n data_type=data_type)\n expected_disagreement = self.disagreement(obs_or_exp=\"expected\",\n data_type=data_type)\n\n if expected_disagreement == 0:\n return 1.\n\n return 1 - (observed_disagreement / expected_disagreement)\n"
]
| [
[
"scipy.stats.pearsonr",
"pandas.DataFrame",
"scipy.stats.kendalltau",
"scipy.stats.spearmanr",
"numpy.array",
"numpy.sum"
]
]
|
x6rulin/pywonderland | [
"19a8f4503584ba77d60e381e2d14bd3683d9693f"
]
| [
"src/mobius/mobius/utils.py"
]
| [
"import numpy as np\n\n\ninfty = 1e10\nepsilon = 1e-10\n\n\ndef greater_than(x, y):\n return x > y + epsilon\n\n\ndef less_than(x, y):\n return x < y - epsilon\n\n\ndef equal(x, y):\n return x - epsilon <= y <= x + epsilon\n\n\ndef iszero(x):\n return equal(x, 0)\n\n\ndef nonzero(x):\n return not iszero(x)\n\n\ndef isinf(x):\n return abs(x) > infty\n\n\ndef safe_div(x, y):\n if isinf(y):\n return 0j\n\n if iszero(y):\n return complex(infty)\n\n return x / y\n\n\ndef norm2(z):\n return z.real * z.real + z.imag * z.imag\n\n\ndef dist_poincare_to_euclidean(x):\n return np.tanh(0.5 * x)\n\n\ndef dist_uhs_to_euclidean(x):\n return np.exp(x)\n\n\ndef angle_twopi(z):\n arg = np.angle(z)\n if arg < 0:\n arg += 2 * np.pi\n return arg\n"
]
| [
[
"numpy.exp",
"numpy.angle",
"numpy.tanh"
]
]
|
myurasov/PCLSegmentation | [
"fef998fb067a10c77a5c233d05635f20c7510972"
]
| [
"pcl_segmentation/configs/SqueezeSegV2_256x256.py"
]
| [
"# ==============================================================================\n# MIT License\n#\n# Copyright 2021 Institute for Automotive Engineering of RWTH Aachen University.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\n\"\"\"Model configuration for SqueezeSeg\"\"\"\nimport numpy as np\nfrom easydict import EasyDict\n\n\ndef SqueezeSegV2Config():\n mc = EasyDict()\n\n mc.CLASSES = [\n \"Road\",\n \"Sidewalk\",\n \"Building\",\n \"Pole\",\n \"Vegetation\",\n \"Person\",\n \"TwoWheeler\",\n \"Car\",\n \"Truck\",\n \"Bus\",\n \"None\",\n ]\n mc.NUM_CLASS = len(mc.CLASSES)\n mc.CLS_2_ID = dict(zip(mc.CLASSES, range(len(mc.CLASSES))))\n mc.CLS_LOSS_WEIGHT = np.array(\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n )\n mc.CLS_COLOR_MAP = (\n np.array(\n [\n [128, 64, 128], # Road\n [244, 35, 232], # Sidewalk\n [70, 70, 70], # Building\n [153, 153, 153], # Pole\n [107, 142, 35], # Vegetation\n [220, 20, 60], # Person\n [255, 0, 0], # Two Wheeler\n [0, 0, 142], # Car\n [0, 0, 70], # Truck\n [0, 60, 100], # Bus\n [0, 0, 0], # None\n ]\n )\n / 255.0\n )\n\n # Input Shape\n mc.BATCH_SIZE = 8\n mc.AZIMUTH_LEVEL = 256\n mc.ZENITH_LEVEL = 256\n mc.NUM_FEATURES = 6\n\n # Loss\n mc.FOCAL_GAMMA = 2.0\n mc.CLS_LOSS_COEF = 15.0\n mc.DENOM_EPSILON = 1e-12 # small value used in denominator to prevent division by 0\n\n # Gradient Decent\n mc.LEARNING_RATE = 0.05\n mc.LR_DECAY_STEPS = 500\n mc.LR_DECAY_FACTOR = 0.9\n mc.MAX_GRAD_NORM = 100.0\n\n # Network\n mc.L2_WEIGHT_DECAY = 0.05\n mc.DROP_RATE = 0.1\n mc.BN_MOMENTUM = 0.9\n mc.REDUCTION = 16\n\n # Dataset\n mc.DATA_AUGMENTATION = True\n mc.RANDOM_FLIPPING = True\n mc.RANDOM_SHIFT = True\n\n # x, y, z, intensity, distance\n mc.INPUT_MEAN = np.array(\n [\n [\n [\n 0.04859793832012025,\n 0.38573448108628,\n 0.6072700274926605,\n 5.371398372516546,\n 9.203478223473573,\n ]\n ]\n ]\n )\n mc.INPUT_STD = np.array(\n [\n [\n [\n 16.077856279908826,\n 17.391639193541824,\n 2.021736786407958,\n 13.34776238781367,\n 21.928746295455827,\n ]\n ]\n ]\n )\n\n return mc\n"
]
| [
[
"numpy.array"
]
]
|
YuxiaoLiu/distribution_system_identification_theoretical_limit | [
"388db842a800b21fb75494f6547fa0940e094288"
]
| [
"plot_bound_test_external_case.py"
]
| [
"# This script do the boxplot\nimport matplotlib.pyplot as plt\nimport pandas\nfrom pandas import DataFrame\nimport numpy as np\n# Set the default colors\nimport brewer2mpl\nimport matplotlib as mpl\n#http://colorbrewer2.org/#type=qualitative&scheme=Set1&n=5\n# colors = ['empirical','model','sample','bound','test']\n# colors = ['#fdae61','#ffffbf','#abd9e9','#2c7bb6','#d7191c']\n# colors = ['#fdae61','#ffffbf','#abdda4','#2b83ba','#d7191c']\ncolors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00']\nlw = 1.5 # the line width of the dashed lines\ns = 20 # the size of dot\nwidthBox = 0.1 # the width of the box\n\nlenTotal = 25 # the length of total position\nlenOne = 0.15 # the length of one postion\n\naddress = './plot/case33box.xlsx'\n\n# err_gbCPS = pandas.read_excel(address, sheet_name='gbErrCPS', header=None)\n# err_gbFirst = pandas.read_excel(address, sheet_name='gbErrFirst', header=None)\n# err_gbSecond = pandas.read_excel(address, sheet_name='gbErrSecond', header=None)\n# bound_gb = pandas.read_excel(address, sheet_name='gbBound', header=None)\n\nerr_gCPS = pandas.read_excel(address, sheet_name='gErrCPS', header=None)\nerr_bCPS = pandas.read_excel(address, sheet_name='bErrCPS', header=None)\nerr_gFirst = pandas.read_excel(address, sheet_name='gErrFirst', header=None)\nerr_bFirst = pandas.read_excel(address, sheet_name='bErrFirst', header=None)\nerr_gSecond = pandas.read_excel(address, sheet_name='gErrSecond', header=None)\nerr_bSecond = pandas.read_excel(address, sheet_name='bErrSecond', header=None)\n\nbound_g = pandas.read_excel(address, sheet_name='gBound', header=None)\nbound_b = pandas.read_excel(address, sheet_name='bBound', header=None)\n\n# val_gEvalCPS = pandas.read_excel(address, sheet_name='gEvalCPS', header=None)\n# val_bEvalCPS = pandas.read_excel(address, sheet_name='bEvalCPS', header=None)\n# val_gEvalFirst = pandas.read_excel(address, sheet_name='gEvalFirst', header=None)\n# val_bEvalFirst = pandas.read_excel(address, sheet_name='bEvalFirst', header=None)\n# val_gEvalSecond = pandas.read_excel(address, sheet_name='gEvalSecond', header=None)\n# val_bEValSecond = pandas.read_excel(address, sheet_name='bEvalSecond', header=None)\n\n# val_gReal = pandas.read_excel(address, sheet_name='gReal', header=None)\n# val_bReal = pandas.read_excel(address, sheet_name='bReal', header=None)\n\nnumBus = err_gCPS.shape[0]\n\n# pos_err_gbCPS = np.linspace(1, lenTotal, numBus)\n# pos_err_gbFirst = np.linspace(1+lenOne, lenTotal+lenOne, numBus)\n# pos_err_gbSecond = np.linspace(1+2*lenOne, lenTotal+2*lenOne, numBus)\n# pos_bound_gb = np.linspace(1+lenOne*3,lenTotal+lenOne*3,numBus)\n\npos_err_gFirst = np.linspace(1, lenTotal, numBus)\npos_err_gSecond = np.linspace(1+lenOne, lenTotal+lenOne, numBus)\npos_err_gCPS = np.linspace(1+lenOne*2, lenTotal+lenOne*2, numBus)\npos_bound_g = np.linspace(1+lenOne*3, lenTotal+lenOne*3, numBus)\npos_labels = np.linspace(1+lenOne*1.5, lenTotal+lenOne*1.5, numBus)\n\nfig = plt.figure(figsize = (lenTotal,5))\nax1 = fig.add_subplot(111)\n\n# c = colors[0]\n# ax1.boxplot(err_gFirst,positions=pos_err_gFirst,widths=widthBox,\n# boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n# flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\n# c = colors[1]\n# ax1.boxplot(err_gSecond,positions=pos_err_gSecond,widths=widthBox,\n# boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n# flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\n# c = 
colors[2]\n# ax1.boxplot(err_gCPS,positions=pos_err_gCPS,widths=widthBox,\n# boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n# flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\n# for i in range(numBus):\n# plt.vlines(x=pos_bound_g[i], ymin=0, ymax=bound_g[0][i], lw=lw, colors=colors[3], linestyles = \"dashed\")\n# plt.scatter(x=pos_bound_g[i], y=bound_g[0][i], color=colors[3], s=s)\n\nc = colors[0]\nax1.boxplot(err_bFirst,positions=pos_err_gFirst,widths=widthBox,\n boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\nc = colors[1]\nax1.boxplot(err_bSecond,positions=pos_err_gSecond,widths=widthBox,\n boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\nc = colors[2]\nax1.boxplot(err_bCPS,positions=pos_err_gCPS,widths=widthBox,\n boxprops=dict(color=c),capprops=dict(color=c),whiskerprops=dict(color=c),\n flierprops=dict(color=c, markeredgecolor=c, markersize=s/6),medianprops=dict(color=c))\n\nfor i in range(numBus):\n plt.vlines(x=pos_bound_g[i], ymin=0, ymax=bound_b[0][i], lw=lw, colors=colors[3], linestyles = \"dashed\")\n plt.scatter(x=pos_bound_g[i], y=bound_b[0][i], color=colors[3], s=s)\n\n\n# draw temporary red and blue lines and use them to create a legend\nh0, = plt.plot([0.25,0.1],color=colors[0])\nh1, = plt.plot([0.25,0.1],color=colors[1])\nh2, = plt.plot([0.25,0.1],color=colors[2])\nh3, = plt.plot([0.25,0.1],color=colors[3],linestyle = \"dashed\")\nplt.legend((h0, h1, h2, h3),('First-order', 'Second-order', 'CPS', 'Bound'),loc='lower right')\nh0.set_visible(False)\nh1.set_visible(False)\nh2.set_visible(False)\nh3.set_visible(False)\n\n# plt.ylabel('Estimation Error of Line Conductance(p.u.)')\nplt.ylabel('Estimation Error of Line Susceptance(p.u.)')\n# plt.xlabel('Line Numbers')\n\nname_labels = [str(i+1) for i in range(numBus)]\nax1.set_xticklabels(name_labels)\nax1.set_xticks(pos_labels)\n\nplt.yscale('log')\nplt.show()"
]
| [
[
"matplotlib.pyplot.legend",
"pandas.read_excel",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
markomanninen/YatzyPy | [
"a6904b22473ae909f588e3b82a67b8b4f2dce0f2"
]
| [
"YatzyPy/AlanStrategy.py"
]
| [
"# AlanStrategy.py\nfrom . data import file as csvfile, categories, targets, functions, scoring, order\nimport pandas as pd\nfrom . main import Strategy\n\nprobabilities = None\n\ndef process_category(x):\n\treturn int(x)\n\ndef process_hand(x):\n\treturn tuple(map(int, x.replace('[', '').replace(']', '').replace(' ', '').split(',')))\n\ndef process_hold(x):\n\tx = x.replace('(', '').replace(',)', '').replace(')', '').replace(' ', '')\n\tif x:\n\t\treturn tuple(map(int, x.split(',')))\n\treturn ()\n\ndef process_probability(x):\n\t# upper categories are 1,6,36 because at least 3 same numbers is required to get the upper bonus\n\t# yet probability is calculated by getting 5 same numbers!\n if x['category'] in [6,7,8,9,10,11] and len(x['hold']) > 2:\n return x['probability'] * 36\n # yatzies are 36 to make it sure it competes with upper categories maximum ie 36\n if x['category'] == 1 and x['probability'] == 1:\n return x['probability'] * 36\n return x['probability']\n\ndef read_probabilities(file=None):\n\tglobal probabilities\n\tfile = file if file else csvfile\n\ttry:\n\t\tdf = pd.read_csv(file, sep='\\t')\n\t\ttry:\n\t\t\tdel df['Unnamed: 0']\n\t\texcept:\n\t\t\tpass\n\t\tdf['category'] = df['category'].apply(process_category)\n\t\tdf['hand'] = df['hand'].apply(process_hand)\n\t\tdf['hold'] = df['hold'].apply(process_hold)\n\t\tdf['probability'] = df.apply(process_probability, axis=1)\n\t\tprobabilities = df\n\texcept:\n\t\tprint('Cannot read probabilities file: %s. Make sure you have created it by running setup.save_probabilities() function.' % file)\n\t\tprobabilities = pd.DataFrame([], columns=['hand', 'hold', 'probability', 'category'])\n\ndef get_probability_and_hold(hand, category):\n\tresult = probabilities[probabilities['hand'] == tuple(hand)]\n\tresult = result[result['category'] == category]\n\tif len(result):\n\t\treturn float(result['probability']), tuple(result['hold'])[0]\n\treturn 0, ()\n\ndef get_probabilities(hand, probability_functions):\n\t# numbers in descending order, important to get the smallest biggest probability\n\thand.sort(reverse=True)\n\t# result set\n\tresult = {}\n\t# dictionary of categories that we should find out the probability\n\tfor key, function in probability_functions.items():\n\t\tp, hold = get_probability_and_hold(hand, key)\n\t\tresult[key] = [hand, hold, p]\n\t# return final results\n\treturn result\n\ndef dataframe_probabilities(P):\n\tdf = pd.DataFrame(P).T\n\tdf.rename(columns={\n\t\t\t\t0: 'Hand',\n\t\t\t\t1: 'Hold', \n\t\t\t\t2: 'Probability'}, inplace=True)\n\tdf['Category'] = df.apply(lambda row: categories[row.name], axis=1)\n\tdf['Aim_Score'] = df.apply(lambda row: targets[row.name], axis=1)\n\tdf['Cur_Score'] = df.apply(lambda row: scoring[row.name](row['Hand']), axis=1)\n\tdf['Threshold'] = df['Cur_Score'] >= df['Aim_Score']\n\tdf['Order'] = [order[k] for k,v in P.items()]\n\treturn df.sort_values(by=['Threshold', 'Probability', 'Order'], ascending=[0, 0, 1])\n\n\n#dice = {1: '⚀', 2: '⚁', 3: '⚂', 4: '⚃', 5: '⚄', 6: '⚅'}\n\ndef toDices(row):\n\treturn '<div class=\"cats\">%s</div>' % (''.join(['<div class=\"cat%s%s\"></div>' % \\\n\t\t\t(k, ' hold' if i in row['Hold'] else '') for i, k in enumerate(row['Hand'])]))\n\nclass AlanStrategy(Strategy):\n\t\"\"\" use probability table to maximize scores \"\"\"\n\t\n\tdef hold(self):\n\t\t\"\"\" ... 
\"\"\"\n\t\ty = self.yatzy\n\t\t# select only categories, that are not used yet\n\t\tp = {k:v for k,v in functions.items() if k not in y.scores}\n\t\t#hand = [i for i in y.hand]\n\t\thand = list(y.hand)\n\t\tdf = dataframe_probabilities(get_probabilities(hand, p))\n\t\tif 'dfs' not in self.__dict__:\n\t\t\tself.dfs = []\n\t\tself.dfs.append(df)\n\t\ty.hand = df['Hand'].iloc[0]\n\t\tif y.debug:\n\t\t\tprint ('#%s' % y.throws, 'hand', hand, 'hold', df['Hold'].iloc[0], 'Target category:', categories[df.iloc[0].name].upper())\n\t\treturn df['Hold'].iloc[0]\n\t\n\tdef select(self):\n\t\ty = self.yatzy\n\t\tp = {k:v for k,v in functions.items() if k not in y.scores}\n\t\thand = list(y.hand)\n\t\tdf = dataframe_probabilities(get_probabilities(hand, p))\n\t\tself.dfs.append(df)\n\t\tcategory = df.iloc[0].name\n\t\tif y.debug:\n\t\t\tprint ('#%s' % y.throws, 'hand', hand, 'Selected category:', categories[category].upper(), 'Score:', y.getScoreTable()[category])\n\t\t\tprint()\n\t\tif 'dfs' not in y.__dict__:\n\t\t\ty.dfs = {}\n\t\ty.dfs[category] = {'order': len(y.dfs), 'dfs': self.dfs}\n\t\tself.dfs = []\n\t\t# if upper is not ready, then do not select one with upper key!\n\t\treturn category\n\n#res=yatzy.dfs\ndef get_score_table(res):\n\td = pd.concat([x[:1] for x in res])\n\td[''] = d.apply(toDices, axis=1)\n\tdel d['Aim_Score']\n\tdel d['Threshold']\n\tdel d['Order']\n\treturn d.to_html(escape=False)\n\nread_probabilities()\n"
]
| [
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
]
|
fricklerhandwerk/diffpriv | [
"82e364816d9801aeea71ceadd286471f52238e82"
]
| [
"gui_svt.py"
]
| [
"import os\nimport random\nimport wx\nimport wx.lib.agw.floatspin as fs\nfrom wx.lib.intctrl import IntCtrl\n\nimport matplotlib\nmatplotlib.use('WXAgg')\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nfrom collections import Counter\nfrom math import log\nimport numpy as np\nfrom numpy import product\nfrom scipy.integrate import quad\n\nfrom algorithms import *\nfrom accuracy import accuracy_overestimate\nfrom accuracy import probability_overestimate\nfrom accuracy import probability_baseline\nfrom accuracy import probability_optimized\nfrom accuracy import probability_precise\nfrom experiments import precise as probability_data\nfrom experiments import compute_alphas\n\n\nclass Model(object):\n def __init__(\n self, threshold, e1, e2, sensitivity=1, monotonic=True, compute=False,\n length=5, shift=1):\n\n self.threshold = threshold\n self.epsilon1 = e1\n self.epsilon2 = e2\n self.sensitivity = sensitivity\n self.monotonic = monotonic\n self.compute = compute\n\n self.length = length\n self.shift = shift\n self.maxint = 2*threshold\n\n self.response = self.random_response()\n self.queries = self.random_queries()\n self.shift_vector = self.new_shift_vector()\n self.count = self.get_count()\n\n \"\"\"probability of getting `response`, given `queries` and `threshold`\"\"\"\n self.pr_response = 1\n \"\"\"probability of getting `response`, given `queries` + `shift_vector` and `threshold\"\"\"\n self.pr_shifted = 1\n \"\"\"probability of getting a correct response,\n given `queries` and `threshold`\"\"\"\n self.pr_correct = 1\n \"\"\"probability of getting an alpha-accurate response,\n given `queries` and `threshold`\"\"\"\n self.pr_accurate = 1\n \"\"\"probabilities of each response item with respect to queries and threshold\"\"\"\n self.pr_items = []\n\n\n def random_response(self):\n # prevent responses with zero count\n while True:\n response = [self.randbool() for _ in range(self.length)]\n if any(response):\n break\n return response\n\n def random_queries(self):\n return [self.randint() for _ in range(self.length)]\n\n def new_shift_vector(self):\n return [self.shift] * self.length\n\n def set_random_response(self):\n self.response = self.random_response()\n\n def set_random_queries(self):\n self.queries = self.random_queries()\n\n def set_shift_vector(self, value):\n self.shift = value\n self.shift_vector = self.new_shift_vector()\n\n def randbool(self):\n return random.choice([True, False])\n\n def randint(self):\n return random.randint(0, self.maxint)\n\n def push(self):\n self.response.append(self.randbool())\n self.queries.append(self.randint())\n self.shift_vector.append(self.shift)\n\n def pop(self):\n if self.length > 1:\n self.response.pop()\n self.queries.pop()\n self.shift_vector.pop()\n return True\n else:\n return False\n\n def update(self):\n # this does long computation once\n self.update_length()\n self.pr_response = self.get_probability(self.response, self.queries)\n self.pr_shifted = self.get_probability(self.response, self.shifted_queries)\n self.pr_correct = self.get_probability(self.correct_response, self.queries)\n self.pr_items = self.get_pr_items(self.response, self.queries)\n self.pr_shifted_items = self.get_pr_items(self.response, self.shifted_queries)\n\n def update_length(self):\n self.length = len(self.response)\n assert len(self.queries) == self.length\n assert len(self.shift_vector) == self.length\n\n def get_count(self):\n return len([x for x in 
self.response if x])\n\n def get_probability(self, response, queries):\n\n def pred(x):\n return product([self.pr_single_response(r, q, x)\n for (r, q) in zip(response, queries)])\n\n def state(x):\n return self.threshold_dist(x) * pred(x)\n\n error = 1/1e12\n T_bound = self.threshold_scale * log(1/error)\n\n return quad(state, self.threshold-T_bound, self.threshold+T_bound, points=[self.threshold])[0]\n\n def pr_single_response(self, is_above, query, threshold):\n \"\"\"Pr(query => is_above | threshold_value )\"\"\"\n pr_above = 1 - self.query_dist(query).cdf(threshold)\n if is_above:\n return pr_above\n else:\n return 1 - pr_above\n\n @property\n def pr_diff(self):\n \"\"\"differential probability of original and shifted query vector\"\"\"\n return abs(log(self.pr_response/self.pr_shifted))\n\n\n @property\n def alphas(self):\n c = self.count\n T = self. threshold\n k = self.length\n counts = self.counts\n return compute_alphas(c, T, k, counts)\n\n @property\n def counts(self):\n return dict(Counter(self.queries))\n\n @property\n def correct_response(self):\n return [q >= self.threshold for q in self.queries]\n\n @property\n def shifted_queries(self):\n return [a + b for (a, b) in zip(self.queries, self.shift_vector)]\n\n @property\n def threshold_dist(self):\n return Laplace(self.threshold_scale, loc=self.threshold)\n\n\n def query_dist(self, value):\n return Laplace(self.query_scale, loc=value)\n\n @property\n def threshold_scale(self):\n return self.sensitivity / self.epsilon1\n\n @property\n def query_scale(self):\n return (self.factor*self.count*self.sensitivity) / self.epsilon2\n\n @property\n def factor(self):\n return 1 if self.monotonic else 2\n\n def get_pr_items(self, response, queries):\n items = zip(response, queries)\n return [self.pr_single_item(r, q) for (r, q) in items]\n\n def pr_single_item(self, is_above, query):\n pr_above = self.query_dist(query).larger(self.threshold_dist)\n if is_above:\n return pr_above\n else:\n return 1 - pr_above\n\n\nclass StaticBox(wx.StaticBox):\n def SetSizer(self, sizer):\n super().SetSizer(sizer)\n # the label's height is always included in the total size, so compensate\n _, label_height = self.GetSize()\n self.SetMinSize(sizer.GetMinSize() + (0, label_height))\n\n\nclass LineGraph(wx.Panel):\n def __init__(self, parent, model, lower=0, upper=100, step=1):\n super().__init__(parent)\n self.figure = Figure(figsize=(5,3))\n self.canvas = FigCanvas(self, wx.ID_ANY, self.figure)\n self.axes = self.figure.add_subplot(1, 1, 1)\n self.model = model\n\n self.lower = wx.SpinCtrl(\n self, style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=(60, -1),\n min=-1000, max=1000, initial=lower)\n self.upper = wx.SpinCtrl(\n self, style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=(60, -1),\n min=-1000, max=1000, initial=upper)\n self.step = wx.SpinCtrl(\n self, style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=(60, -1),\n min=1, max=2048, initial=step)\n\n self.sizer = self.create_sizer()\n\n def plot(self):\n raise NotImplementedError\n\n @property\n def abscissa(self):\n return np.arange(self.lower.GetValue(), self.upper.GetValue(), self.step.GetValue())\n\n def create_sizer(self):\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add(self.canvas, proportion=1, flag=wx.LEFT | wx.TOP | wx.EXPAND)\n bounds = wx.BoxSizer(wx.HORIZONTAL)\n bounds.Add(wx.StaticText(self, label=\"Lower bound\"))\n bounds.Add(self.lower)\n bounds.AddStretchSpacer()\n bounds.Add(wx.StaticText(self, label=\"Step\"))\n bounds.Add(self.step)\n bounds.AddStretchSpacer()\n 
bounds.Add(wx.StaticText(self, label=\"Upper bound\"))\n bounds.Add(self.upper)\n vbox.Add(bounds, proportion=0, flag=wx.ALL | wx.EXPAND, border=10)\n\n for widget in (self.lower, self.upper, self.step):\n self.Bind(wx.EVT_SPINCTRL, self.plot, widget)\n self.Bind(wx.EVT_TEXT_ENTER, on_spin_enter, widget)\n\n self.SetSizer(vbox)\n return vbox\n\n\nclass BarGraph(wx.Panel):\n def __init__(self, parent, model):\n super().__init__(parent)\n self.figure = Figure(figsize=(5,2))\n self.axes = self.figure.add_subplot(1, 1, 1)\n self.canvas = FigCanvas(self, wx.ID_ANY, self.figure)\n self.model = model\n\n def plot(self):\n raise NotImplementedError\n\n\nclass Probabilities(BarGraph):\n def plot(self, event):\n ax = self.axes\n ax.clear()\n\n xs = np.arange(self.model.length)\n ys = self.model.pr_items\n zs = self.model.pr_shifted_items\n for x, y, z in zip(xs, ys, zs):\n if y > z:\n original = ax.bar(x, y, color=\"blue\")\n shifted = ax.bar(x, z, color=\"red\")\n else:\n shifted = ax.bar(x, z, color=\"red\")\n original = ax.bar(x, y, color=\"blue\")\n\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylim(0,1)\n ax.legend((original[0], shifted[0]), (\"original\", \"shifted\"), loc='upper right')\n self.figure.suptitle(\"Probabilities of individual responses\")\n self.canvas.draw()\n\n\nclass Accuracy(LineGraph):\n @property\n def abscissa(self):\n s1 = self.model.threshold_scale\n s2 = self.model.query_scale\n k = self.model.length\n\n MAX = int(accuracy_overestimate(0.01, k, s1, s2))\n self.upper.SetValue(MAX)\n\n return super().abscissa\n\n def plot(self, event):\n ax = self.axes\n ax.clear()\n\n T = self.model.threshold\n k = self.model.length\n s1 = self.model.threshold_scale\n s2 = self.model.query_scale\n\n xs = self.abscissa\n ax.plot(xs, [probability_overestimate(x, k, s1, s2) for x in xs], color=\"red\", linewidth=2.0, label=\"overestimate\")\n ax.plot(xs, [probability_baseline(x, k, s1, s2) for x in xs], color=\"green\", linewidth=2.0, label=\"baseline\")\n ax.plot(xs, [probability_optimized(x, k, s1, s2) for x in xs], color=\"blue\", linewidth=2.0, label=\"optimized\")\n if self.model.compute:\n ax.plot(xs, [probability_precise(x, k, s1, s2) for x in xs], color=\"black\", linewidth=2.0, label=\"precise\")\n queries = self.model.queries\n alphas = self.model.alphas\n xs_ = [0] + list(alphas.keys())\n ys_ = [probability_data(x, k, s1, s2, queries, alphas, T) for x in alphas.keys()] + [0]\n ax.step(xs_, ys_, where='post',\n color=\"magenta\", linewidth=2.0, label=\"data-bound\")\n ax.legend(loc='upper right')\n ax.set_ylim(0, 1)\n ax.set_xlim(min(xs), max(xs))\n self.figure.suptitle(\"Accuracy estimation\")\n self.canvas.draw()\n\n\nclass Frame(wx.Frame):\n title = 'Differential Privacy of the Above Threshold Mechanism'\n\n head_size = (80, -1)\n element_size = (30, -1)\n spinctrl_size = (80, -1)\n\n def __init__(self):\n wx.Frame.__init__(self, None, title=self.title)\n\n self.menubar = self.create_menu()\n self.model = Model(100, e1=0.1, e2=0.2)\n self.create_view()\n self.model.update()\n self.draw()\n\n def create_menu(self):\n menubar = wx.MenuBar()\n\n menu_file = wx.Menu()\n menu_file.AppendSeparator()\n m_exit = menu_file.Append(wx.ID_ANY, \"E&xit\\tCtrl-X\", \"Exit\")\n self.Bind(wx.EVT_MENU, self.on_exit, m_exit)\n\n menu_help = wx.Menu()\n m_about = menu_help.Append(wx.ID_ANY, \"&About\\tF1\", \"About the demo\")\n self.Bind(wx.EVT_MENU, self.on_about, m_about)\n\n menubar.Append(menu_file, \"&File\")\n menubar.Append(menu_help, \"&Help\")\n 
self.SetMenuBar(menubar)\n\n return menubar\n\n def create_view(self):\n self.main_panel = wx.Panel(self)\n\n self.vector_control = self.create_vector_control(self.main_panel)\n self.parameter_control = self.create_parameter_control(self.main_panel)\n self.graphs = self.create_graphs(self.main_panel)\n self.stats = self.create_stats(self.main_panel)\n\n main = wx.BoxSizer(wx.VERTICAL)\n lower = wx.BoxSizer(wx.HORIZONTAL)\n left = wx.BoxSizer(wx.VERTICAL)\n\n left.Add(self.parameter_control, flag=wx.BOTTOM | wx.EXPAND, border=10)\n left.Add(self.stats, flag=wx.BOTTOM | wx.EXPAND, border=10)\n\n lower.Add(left, flag=wx.RIGHT | wx.LEFT, border=10)\n lower.Add(self.graphs, proportion=1)\n\n main.Add(self.vector_control, flag=wx.ALL | wx.EXPAND, border=10)\n main.Add(lower, flag=wx.EXPAND)\n\n self.main_panel.SetSizer(main)\n\n # set the first column of independent boxes to the same width\n # and accomodate the panel if it got wider in the process\n left_panels = [self.parameter_control, self.stats]\n label_width = max(i.Sizer.GetChildren()[0].Size[0] for i in left_panels)\n for panel in left_panels:\n sizer = panel.Sizer\n sizer.SetItemMinSize(0, label_width, -1)\n min_size = sizer.GetMinSize()\n sizer.SetMinSize(min_size)\n sizer.Layout()\n min_width, _ = min_size\n left.SetMinSize((min_width, -1))\n\n main.Fit(self)\n\n def create_vector_control(self, parent):\n panel = wx.Panel(parent)\n\n response_label = wx.StaticText(\n panel, label=\"Response\", style=wx.ALIGN_RIGHT)\n response_button = wx.Button(panel, label=\"Random\", size=self.head_size)\n self.response_vector = wx.BoxSizer(wx.HORIZONTAL)\n for i in self.model.response:\n self.create_response_element(panel, i)\n\n queries_label = wx.StaticText(\n panel, label=\"Queries\", style=wx.ALIGN_RIGHT)\n queries_button = wx.Button(panel, label=\"Random\", size=self.head_size)\n self.queries_vector = wx.BoxSizer(wx.HORIZONTAL)\n for i in self.model.queries:\n self.create_queries_element(panel, i)\n\n shift_label = wx.StaticText(\n panel, label=\"Shift\", style=wx.ALIGN_RIGHT)\n shift_control = wx.SpinCtrl(\n panel, style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT,\n min=-1000, max=1000, initial=1, size=self.head_size)\n self.shift_vector = wx.BoxSizer(wx.HORIZONTAL)\n for i in self.model.shift_vector:\n self.create_shift_element(panel, i)\n\n self.plus = wx.Button(panel, label=\"+\", size=self.element_size)\n self.minus = wx.Button(panel, label=\"-\", size=self.element_size)\n\n self.Bind(wx.EVT_BUTTON, self.on_random_response, response_button)\n self.Bind(wx.EVT_BUTTON, self.on_random_queries, queries_button)\n self.Bind(wx.EVT_SPINCTRL, self.on_set_shift_vector, shift_control)\n self.Bind(wx.EVT_TEXT_ENTER, on_spin_enter, shift_control)\n self.Bind(wx.EVT_BUTTON, self.on_plus, self.plus)\n self.Bind(wx.EVT_BUTTON, self.on_minus, self.minus)\n\n sizer = wx.FlexGridSizer(rows=3, cols=4, gap=(5, 5))\n sizer.AddGrowableCol(2)\n sizer.Add(response_label, flag=wx.EXPAND)\n sizer.Add(response_button)\n sizer.Add(self.response_vector, flag=wx.EXPAND)\n sizer.Add(self.plus)\n\n sizer.Add(queries_label, flag=wx.EXPAND)\n sizer.Add(queries_button)\n sizer.Add(self.queries_vector, flag=wx.EXPAND)\n sizer.Add(self.minus)\n\n sizer.Add(shift_label, flag=wx.EXPAND)\n sizer.Add(shift_control)\n sizer.Add(self.shift_vector, flag=wx.EXPAND)\n\n panel.SetSizer(sizer)\n sizer.Fit(panel)\n return panel\n\n def create_parameter_control(self, parent):\n panel = StaticBox(parent, label=\"Algorithm parameters\")\n\n threshold_label = wx.StaticText(\n panel, label=\"T\", 
style=wx.ALIGN_RIGHT)\n self.threshold = wx.SpinCtrl(\n panel,\n style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=self.spinctrl_size,\n min=0, max=1000, initial=self.model.threshold)\n\n epsilon1_label = wx.StaticText(\n panel, label=\"ε₁\", style=wx.ALIGN_RIGHT)\n self.epsilon1 = fs.FloatSpin(\n panel, agwStyle=fs.FS_RIGHT,\n min_val=0.001, max_val=1, value=self.model.epsilon1,\n increment=0.01, digits=3, size=self.spinctrl_size)\n\n epsilon2_label = wx.StaticText(\n panel, label=\"ε₂\", style=wx.ALIGN_RIGHT)\n self.epsilon2 = fs.FloatSpin(\n panel, agwStyle=fs.FS_RIGHT,\n min_val=0.001, max_val=1, value=self.model.epsilon2,\n increment=0.01, digits=3, size=self.spinctrl_size)\n\n sensitivity_label = wx.StaticText(\n panel, label=\"Δ\", style=wx.ALIGN_RIGHT)\n self.sensitivity = wx.SpinCtrl(\n panel,\n style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=self.spinctrl_size,\n min=0, max=100, initial=self.model.sensitivity)\n\n count_label = wx.StaticText(\n panel, label=\"c\", style=wx.ALIGN_RIGHT)\n self.count = wx.SpinCtrl(\n panel,\n style=wx.TE_PROCESS_ENTER | wx.ALIGN_RIGHT, size=self.spinctrl_size,\n min=1, max=100, initial=self.model.count)\n\n monotonic_label = wx.StaticText(\n panel, label=\"Monotonic\", style=wx.ALIGN_RIGHT)\n self.monotonic = wx.CheckBox(panel)\n self.monotonic.SetValue(self.model.monotonic)\n compute_label = wx.StaticText(\n panel, label=\"Slow graphs\", style=wx.ALIGN_RIGHT)\n self.compute = wx.CheckBox(panel)\n self.compute.SetValue(self.model.compute)\n\n grid = [\n [threshold_label, self.threshold],\n [epsilon1_label, self.epsilon1],\n [epsilon2_label, self.epsilon2],\n [sensitivity_label, self.sensitivity],\n [count_label, self.count],\n [monotonic_label, self.monotonic],\n [compute_label, self.compute],\n ]\n sizer = wx.FlexGridSizer(rows=len(grid), cols=len(grid[0]), gap=(5, 5))\n for line in grid:\n for item in line:\n sizer.Add(item, flag=wx.EXPAND)\n\n self.Bind(wx.EVT_SPINCTRL, self.on_threshold, self.threshold)\n self.Bind(wx.EVT_TEXT_ENTER, on_spin_enter, self.threshold)\n self.Bind(fs.EVT_FLOATSPIN, self.on_epsilon1, self.epsilon1)\n self.Bind(fs.EVT_FLOATSPIN, self.on_epsilon2, self.epsilon2)\n self.Bind(wx.EVT_SPINCTRL, self.on_sensitivity, self.sensitivity)\n self.Bind(wx.EVT_TEXT_ENTER, on_spin_enter, self.sensitivity)\n self.Bind(wx.EVT_SPINCTRL, self.on_count, self.count)\n self.Bind(wx.EVT_TEXT_ENTER, on_spin_enter, self.count)\n self.Bind(wx.EVT_CHECKBOX, self.on_monotonic, self.monotonic)\n self.Bind(wx.EVT_CHECKBOX, self.on_compute, self.compute)\n\n panel.SetSizer(sizer)\n return panel\n\n def create_response_element(self, parent, value):\n button = wx.Button(\n parent, label=(\"T\" if value else \"F\"),\n size=self.element_size)\n button.index = self.response_vector.GetItemCount()\n self.response_vector.Add(button, flag=wx.EXPAND | wx.RIGHT, border=5)\n self.Bind(wx.EVT_BUTTON, self.on_response_button, button)\n\n def create_queries_element(self, parent, value):\n field = IntCtrl(\n parent, value=value, min=0,\n style=wx.TE_PROCESS_ENTER | wx.TE_RIGHT,\n size=self.element_size)\n field.index = self.queries_vector.GetItemCount()\n self.queries_vector.Add(field, flag=wx.EXPAND | wx.RIGHT, border=5)\n self.Bind(wx.EVT_TEXT_ENTER, self.on_query_field, field)\n\n def create_shift_element(self, parent, value):\n field = IntCtrl(\n parent, value=value,\n style=wx.TE_PROCESS_ENTER | wx.TE_RIGHT,\n size=self.element_size)\n field.index = self.shift_vector.GetItemCount()\n self.shift_vector.Add(field, flag=wx.EXPAND | wx.RIGHT, border=5)\n 
self.Bind(wx.EVT_TEXT_ENTER, self.on_shift_field, field)\n\n def create_graphs(self, parent):\n graphs = wx.Panel(parent)\n\n bars_original = Probabilities(graphs, self.model)\n accuracy = Accuracy(graphs, self.model)\n\n box = wx.BoxSizer(wx.VERTICAL)\n box.Add(bars_original, proportion=0, flag=wx.EXPAND)\n box.Add(accuracy, proportion=0, flag=wx.EXPAND)\n\n graphs.SetSizer(box)\n return graphs\n\n def create_stats(self, parent):\n panel = StaticBox(parent, label=\"Vector properties\")\n\n pr_response_label = wx.StaticText(\n panel, label=\"ℙ(response)\", style=wx.ALIGN_RIGHT)\n pr_shifted_label = wx.StaticText(\n panel, label=\"ℙ(response')\", style=wx.ALIGN_RIGHT)\n pr_diff_label = wx.StaticText(\n panel, label=\"privacy loss\", style=wx.ALIGN_RIGHT)\n pr_correct_label = wx.StaticText(\n panel, label=\"ℙ(correct)\", style=wx.ALIGN_RIGHT)\n\n self.pr_response = wx.StaticText(panel)\n self.pr_shifted = wx.StaticText(panel)\n self.pr_diff = wx.StaticText(panel)\n self.pr_correct = wx.StaticText(panel)\n\n grid = [\n [pr_response_label, self.pr_response],\n [pr_shifted_label, self.pr_shifted],\n [pr_correct_label, self.pr_correct],\n [pr_diff_label, self.pr_diff],\n ]\n sizer = wx.FlexGridSizer(rows=len(grid), cols=len(grid[0]), gap=(5, 5))\n for line in grid:\n for item in line:\n sizer.Add(item, flag=wx.EXPAND)\n\n panel.SetSizer(sizer)\n return panel\n\n def update_stats(self):\n self.pr_response.SetLabel(\"{:.3f}\".format(self.model.pr_response))\n self.pr_shifted.SetLabel(\"{:.3f}\".format(self.model.pr_shifted))\n self.pr_diff.SetLabel(\"{:.3f}\".format(self.model.pr_diff))\n self.pr_correct.SetLabel(\"{:.3f}\".format(self.model.pr_correct))\n\n\n def draw(self):\n self.update_stats()\n self.main_panel.Layout()\n for g in [x for x in self.graphs.Children if type(x) != wx._core.SpinCtrl and type(x) != wx._core.StaticText]:\n g.plot(None)\n\n def on_threshold(self, event):\n self.model.threshold = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_epsilon1(self, event):\n self.model.epsilon1 = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_epsilon2(self, event):\n self.model.epsilon2 = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_sensitivity(self, event):\n self.model.sensitivity = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_count(self, event):\n self.model.count = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_monotonic(self, event):\n self.model.monotonic = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_compute(self, event):\n self.model.compute = event.GetEventObject().GetValue()\n self.on_parameter_change()\n\n def on_plus(self, event):\n self.model.push()\n parent = self.vector_control\n self.create_response_element(parent, self.model.response[-1])\n self.create_queries_element(parent, self.model.queries[-1])\n self.create_shift_element(parent, self.model.shift_vector[-1])\n\n self.on_parameter_change()\n\n def on_minus(self, event):\n if self.model.pop():\n vectors = [self.response_vector,self.queries_vector, self.shift_vector]\n for v in vectors:\n idx = len(v.GetChildren()) - 1\n v.GetChildren()[idx].DeleteWindows()\n v.Remove(idx)\n\n self.on_parameter_change()\n\n def on_random_response(self, event):\n self.model.set_random_response()\n for i, v in enumerate(self.response_vector.GetChildren()):\n v.Window.SetLabel(\"T\" if self.model.response[i] else \"F\")\n self.on_parameter_change()\n\n def on_random_queries(self, 
event):\n self.model.set_random_queries()\n for i, v in enumerate(self.queries_vector.GetChildren()):\n v.Window.SetValue(self.model.queries[i])\n self.on_parameter_change()\n\n def on_set_shift_vector(self, event):\n shift = event.GetEventObject().GetValue()\n self.model.set_shift_vector(shift)\n for i, v in enumerate(self.shift_vector.GetChildren()):\n v.Window.SetValue(self.model.shift_vector[i])\n self.on_parameter_change()\n\n def on_response_button(self, event):\n button = event.GetEventObject()\n idx = button.index\n self.model.response[idx] = not self.model.response[idx]\n button.SetLabel(\"T\" if self.model.response[idx] else \"F\")\n self.on_parameter_change()\n\n def on_query_field(self, event):\n field = event.GetEventObject()\n idx = field.index\n self.model.queries[idx] = field.GetValue()\n self.on_parameter_change()\n\n def on_shift_field(self, event):\n field = event.GetEventObject()\n idx = field.index\n self.model.shift_vector[idx] = field.GetValue()\n self.on_parameter_change()\n\n def on_parameter_change(self):\n self.model.update()\n self.draw()\n\n def on_exit(self, event):\n self.Destroy()\n\n def on_about(self, event):\n msg = \"\"\"Dynamically parametrize the Above Threshold Algorithm\n\n * Set a response vector\n * Set a query vector\n * Set a query vector for a neighboring database\n * Adjust the algorithm parameters T, e1, e2, sensitivity, count\n\n The program displays the queries' individual probabilities to produce\n the given response vector entries, the probability of the whole\n query vector producing the given response vector, and the probability\n of the query vector to produce a correct response.\n\n In addition multiple methods of accuracy estimation of the algorithm\n with set parameters are displayed.\n \"\"\"\n dlg = wx.MessageDialog(self, msg, \"About\", wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n\n\ndef on_spin_enter(event):\n # workaround for annoying behavior of wxPython.\n # > if the user modifies the text in the edit part of the spin control directly,\n # the EVT_TEXT is generated, like for the wx.TextCtrl. When the use enters text\n # into the text area, the text is not validated until the control loses focus\n # (e.g. by using the TAB key).\n # <https://wxpython.org/Phoenix/docs/html/wx.SpinCtrl.html#styles-window-styles>\n # solution: cycle focus\n spinctrl = event.GetEventObject()\n textctrl, spinbutton = spinctrl.GetChildren()\n spinbutton.SetFocus()\n spinctrl.SetFocus()\n\n\ndef main():\n app = wx.App()\n app.frame = Frame()\n app.frame.Show()\n app.MainLoop()\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.figure.Figure",
"matplotlib.use",
"numpy.arange",
"matplotlib.ticker.MaxNLocator",
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"scipy.integrate.quad"
]
]
|
BoLin/2018-Projects | [
"c0727a883c8b291380aa850f588b9239b6ded30d"
]
| [
"Data Visualization/scatter_plot.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef scatterplot(x_data, y_data, x_label=\"\", y_label=\"\", title=\"\", color = \"r\", yscale_log=False):\n\n # Create the plot object\n _, ax = plt.subplots()\n\n # Plot the data, set the size (s), color and transparency (alpha)\n # of the points\n ax.scatter(x_data, y_data, s = 10, color = color, alpha = 0.75)\n\n if yscale_log == True:\n ax.set_yscale('log')\n\n # Label the axes and provide a title\n ax.set_title(title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)"
]
| [
[
"matplotlib.pyplot.subplots"
]
]
|
Manpreet1377/analyzing-weather-dataset | [
"6f8efd18871cb41f4ac167eff46f2875cd366462"
]
| [
"code.py"
]
| [
"# --------------\r\n#Importing the modules\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy.stats import mode \r\n\r\n#Code for categorical variable\r\ndef categorical(df):\r\n categorical_var = df.select_dtypes(include = 'object').columns.tolist()\r\n return categorical_var\r\n \"\"\"\r\n This function accepts a dataframe and returns categorical list,\r\n containing the names of categorical columns(categorical_var).\r\n \r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n RETURNS:\r\n categorical_var - List of categorical features\r\n \"\"\"\r\n \r\n#Code for numerical variable\r\ndef numerical(df):\r\n numericals = df.select_dtypes(include = 'number').columns.tolist()\r\n return numericals \r\n \"\"\" \r\n This function accepts a dataframe and returns numerical list,\r\n containing the names of numerical columns(numerical_var).\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe from which the columns name will be extracted\r\n RETURNS:\r\n numerical_var - List of numerical features\r\n \"\"\"\r\n \r\n#code to check distribution of variable\r\ndef clear(df,col,val): \r\n value_counts = df[col].value_counts()[val]\r\n return value_counts\r\n \"\"\" \r\n This function accepts a dataframe,column(feature) and value which returns count of the value, containing the value counts of a variable(value_counts)\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe\r\n col - Feature of the dataframe\r\n val - value of the feature\r\n RETURNS:\r\n value_counts - Value count of the feature \r\n \"\"\"\r\n \r\n#Code to check instances based on the condition\r\ndef instances_based_condition(df,col1,val1,col2,val2):\r\n instance = df[(df[col1]>val1) & (df[col2]==val2)]\r\n return instance\r\n \"\"\" \r\n This function accepts a dataframe, 2 columns(feature) and 2 values which returns the dataframe\r\n based on the condition.\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe which has the data.\r\n col1 - First feature of the dataframe on which you want to apply the filter\r\n val1 - Value to be filtered on the first feature\r\n col2 - Second feature of the dataframe on which you want to apply the filter\r\n val2 - Value to be filtered on second feature\r\n RETURNS:\r\n instance - Generated dataframe\r\n \"\"\"\r\n\r\n# Code to calculate different aggreagted values according to month\r\n\r\ndef agg_values_ina_month(df,date_col,agg_col, agg):\r\n df[date_col] = pd.to_datetime(df[date_col])\r\n aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}\r\n aggregated_value = df.pivot_table(values=[agg_col], index=df[date_col].dt.month,aggfunc={agg_col:aggregate[agg]})\r\n return aggregated_value\r\n \"\"\"\r\n This function accepts a dataframe, 2 columns(feature) and aggregated funcion(agg) which returns the Pivot \r\n table with different aggregated value of the feature with an index of the month.\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe which has the data.\r\n date_col - Date feature of the dataframe on which you want to apply to_datetime conversion\r\n agg_col - Feature of the dataframe on which values will be aggregated.\r\n agg - The function to be used for aggregating the df (eg. 
'mean', 'min', 'max').\r\n RETURNS:\r\n aggregated_value - Generated pivot table\r\n \"\"\"\r\n\r\n# Code to group values based on the feature\r\ndef group_values(df,col1,agg1):\r\n aggregate = {'mean':np.mean,'max':np.max,'min':np.min,'sum':np.sum,'len':len}\r\n grouping=df.groupby(col1).agg(aggregate[agg1])\r\n return grouping\r\n \"\"\" This function accepts a dataframe, 1 column(feature) and aggregated function(agg1) which groupby the datframe based on the column.\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe which has the data.\r\n col1 - Feature of the dataframe on which values will be aggregated.\r\n agg1 - The function to be used for aggregating the df (eg. 'mean', 'min', 'max').\r\n RETURNS:\r\n grouping - Dataframe with all columns on which it is grouped on.\r\n \"\"\"\r\n\r\n# function for conversion \r\ndef convert(df,celsius):\r\n centigrade_temps = df[celsius]\r\n converted_temp = 1.8*centigrade_temps + 32\r\n return converted_temp\r\n \"\"\" \r\n This function accepts a dataframe, 1 column(feature) which returns the dataframe with converted values from celsius to fahrenhheit.\r\n KEYWORD ARGUMENTS:\r\n df - Pandas dataframe which has the data.\r\n celsius - Temperature feature of the dataframe which you want to convert to fahrenhheit\r\n RETURNS:\r\n converted_temp - Generated dataframe with Fahrenhheit temp.\"\"\"\r\n \r\n# Load the weather_2012 data csv file and store it in weather variable. The path of the dataset has been stored in the variable `path` for you.\r\nweather = pd.read_csv(path)\r\ndf = pd.DataFrame(weather)\r\n\r\n\r\n# As you have now loaded the weather data you might want to check the categorical and numerical variables. You can check it by calling categorical and numerical function. \r\ncategorical_val = categorical(df)\r\nprint(categorical_val)\r\nnumerical_val = numerical(df)\r\nprint(numerical_val)\r\n\r\n\r\n\r\n#You might be interested in checking the distribution of a specific value like the number of times the weather was exactly Cloudy in the given column. Feel free to check on other values.\r\n#You can check it by calling the function clear with respective parameters.\r\n#By using index of the value or name of the value you can check the number of count\r\nprint(clear(weather,\"Weather\",'Cloudy'))\r\n\r\n# Now suppose you want to check some instances based on a specific condition like when the wind speed was above 35 and visibility was 25. You can dicretly check it by calling the function instances_based_condition with respective parameters.\r\nwind_speed_35_vis_25 = instances_based_condition(weather,'Wind Spd (km/h)',35,'Visibility (km)',25)\r\n\r\n#You have temperature data and want to calculate the mean temperature recorded by month.You can generate a pivot table which contains the aggregated values(like mean, max ,min, sum, len) recoreded by month. \r\n#You can call the function agg_values_ina_month with respective parameters. \r\nagg_values_ina_month(df,'Date/Time','Dew Point Temp (C)','mean')\r\n\r\n# To groupby based on a column like you want to groupby on Weather column and then aggregate the mean values of each column for different types of weather using mean. 
You can call the function group_values.\r\n# Feel free to try on diffrent aggregated functions like max, min, sum, len\r\nmean_weather = group_values(weather,\"Weather\",'mean')\r\n\r\n# You have a temperature data and wanted to convert celsius temperature into fahrehheit temperatures you can call the function convert.\r\nweather_fahrehheit = convert(weather,\"Temp (C)\")\r\n\r\n\r\n"
]
| [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
]
|
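A hedged sketch of the pivot-by-month pattern used in agg_values_ina_month above; the tiny dataframe is invented:

import numpy as np
import pandas as pd

df = pd.DataFrame({"Date/Time": ["2012-01-01", "2012-01-15", "2012-02-01"],
                   "Temp (C)": [-5.0, -2.0, 1.5]})
df["Date/Time"] = pd.to_datetime(df["Date/Time"])         # parse dates before grouping
monthly = df.pivot_table(values=["Temp (C)"],
                         index=df["Date/Time"].dt.month,  # month number as the index
                         aggfunc=np.mean)
print(monthly)                                            # mean temperature per month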
UplinkCoder/incubator-mxnet | [
"b68f18c97d08193cded0630a2e0a04673e4a0410"
]
| [
"tests/python/gpu/test_gluon_gpu.py"
]
| [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport tempfile\nimport time\nimport multiprocessing as mp\nimport unittest\nimport random\nimport mxnet as mx\nimport numpy as np\nimport unittest\nimport math\nfrom nose.tools import assert_raises\nfrom mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal\nfrom mxnet.base import MXNetError\nfrom mxnet import autograd\nfrom numpy.testing import assert_allclose\nfrom mxnet.test_utils import rand_ndarray\n\n\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '../unittest'))\nfrom common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied\nfrom common import run_in_spawned_process\nfrom test_gluon import *\nfrom test_loss import *\nfrom test_gluon_rnn import *\n\nset_default_context(mx.gpu(0))\n\n\ndef check_rnn_layer(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n with mx.gpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n # atol of 1e-6 required, as exposed by seed 2124685726\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\n\n@with_seed()\ndef check_rnn_layer_w_rand_inputs(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n x = mx.nd.uniform(shape=(10, 16, 30))\n with mx.gpu(0):\n x = x.copyto(mx.gpu(0))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = x.copyto(mx.cpu(0))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_lstmp():\n hidden_size, projection_size = 3, 2\n rtol, atol = 1e-2, 1e-2\n batch_size, seq_len = 7, 11\n input_size = 5\n ctx = mx.gpu(0)\n lstm_input = mx.nd.uniform(\n shape=(seq_len, batch_size, input_size), ctx=ctx)\n shapes = {'i2h_weight': (hidden_size * 4, input_size),\n 'h2h_weight': (hidden_size * 4, projection_size),\n 'i2h_bias': (hidden_size * 4,),\n 'h2h_bias': (hidden_size * 4,),\n 'h2r_weight': (projection_size, hidden_size)}\n weights = {k: rand_ndarray(v) for k, v in shapes.items()}\n lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,\n input_size=input_size, prefix='lstm0_')\n lstm_cell = 
gluon.contrib.rnn.LSTMPCell(hidden_size=hidden_size,\n projection_size=projection_size,\n input_size=input_size,\n prefix='lstm0_l0_')\n lstm_layer.initialize(ctx=ctx)\n lstm_cell.initialize(ctx=ctx)\n layer_params = lstm_layer.collect_params()\n cell_params = lstm_cell.collect_params()\n for k, v in weights.items():\n layer_params['lstm0_l0_' + k].set_data(v.copy())\n cell_params['lstm0_l0_' + k].set_data(v.copy())\n with autograd.record():\n layer_output = lstm_layer(lstm_input.copy())\n cell_output = lstm_cell.unroll(seq_len, lstm_input.copy(), layout='TNC',\n merge_outputs=True)[0]\n assert_almost_equal(layer_output.asnumpy(),\n cell_output.asnumpy(), rtol=rtol, atol=atol)\n layer_output.backward()\n cell_output.backward()\n for k, v in weights.items():\n layer_grad = layer_params['lstm0_l0_' + k].grad()\n cell_grad = cell_params['lstm0_l0_' + k].grad()\n print('checking gradient for {}'.format('lstm0_l0_' + k))\n assert_almost_equal(layer_grad.asnumpy(), cell_grad.asnumpy(),\n rtol=rtol, atol=atol)\n check_rnn_layer_forward(gluon.rnn.LSTM(\n 10, 2, projection_size=5), mx.nd.ones((8, 3, 20)), ctx=ctx)\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, projection_size=5, bidirectional=True), mx.nd.ones(\n (8, 3, 20)), [mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))], ctx=ctx)\n\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, dropout=0.5, projection_size=5), mx.nd.ones((8, 3, 20)),\n run_only=True, ctx=ctx)\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, bidirectional=True, dropout=0.5, projection_size=5),\n mx.nd.ones((8, 3, 20)),\n [mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))], run_only=True, ctx=ctx)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_lstm_clip():\n hidden_size, projection_size = 4096, 2048\n batch_size, seq_len = 32, 80\n input_size = 50\n clip_min, clip_max, clip_nan = -5, 5, True\n lstm_input = mx.nd.uniform(\n shape=(seq_len, batch_size, input_size), ctx=mx.gpu(0))\n lstm_states = [mx.nd.uniform(shape=(2, batch_size, projection_size), ctx=mx.gpu(0)),\n mx.nd.uniform(shape=(2, batch_size, hidden_size), ctx=mx.gpu(0))]\n lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,\n input_size=input_size, prefix='lstm0_',\n bidirectional=True,\n state_clip_min=clip_min,\n state_clip_max=clip_max,\n state_clip_nan=clip_nan)\n lstm_layer.initialize(ctx=mx.gpu(0))\n with autograd.record():\n _, layer_output_states = lstm_layer(lstm_input, lstm_states)\n cell_states = layer_output_states[0].asnumpy()\n assert (cell_states >= clip_min).all() and (cell_states <= clip_max).all()\n assert not np.isnan(cell_states).any()\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_rnn_layer():\n check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))\n check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))\n check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))\n\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))\n check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(\n 100, num_layers=3, bidirectional=True))\n\n\ndef check_layer_bidirectional(size, in_size, proj_size):\n class RefBiLSTM(gluon.Block):\n def __init__(self, size, proj_size, **kwargs):\n super(RefBiLSTM, self).__init__(**kwargs)\n with self.name_scope():\n self._lstm_fwd = gluon.rnn.LSTM(\n size, projection_size=proj_size, bidirectional=False, prefix='l0')\n self._lstm_bwd = gluon.rnn.LSTM(\n size, projection_size=proj_size, bidirectional=False, prefix='r0')\n\n def 
forward(self, inpt):\n fwd = self._lstm_fwd(inpt)\n bwd_inpt = nd.flip(inpt, 0)\n bwd = self._lstm_bwd(bwd_inpt)\n bwd = nd.flip(bwd, 0)\n return nd.concat(fwd, bwd, dim=2)\n weights = {}\n for d in ['l', 'r']:\n weights['lstm_{}0_i2h_weight'.format(d)] = mx.random.uniform(\n shape=(size * 4, in_size))\n if proj_size:\n weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(\n shape=(size * 4, proj_size))\n weights['lstm_{}0_h2r_weight'.format(d)] = mx.random.uniform(\n shape=(proj_size, size))\n else:\n weights['lstm_{}0_h2h_weight'.format(\n d)] = mx.random.uniform(shape=(size * 4, size))\n weights['lstm_{}0_i2h_bias'.format(\n d)] = mx.random.uniform(shape=(size * 4,))\n weights['lstm_{}0_h2h_bias'.format(\n d)] = mx.random.uniform(shape=(size * 4,))\n\n net = gluon.rnn.LSTM(size, projection_size=proj_size,\n bidirectional=True, prefix='lstm_')\n ref_net = RefBiLSTM(size, proj_size, prefix='lstm_')\n net.initialize()\n ref_net.initialize()\n net_params = net.collect_params()\n ref_net_params = ref_net.collect_params()\n for k in weights:\n net_params[k].set_data(weights[k])\n ref_net_params[k.replace('l0', 'l0l0').replace(\n 'r0', 'r0l0')].set_data(weights[k])\n\n data = mx.random.uniform(shape=(11, 10, in_size))\n assert_allclose(net(data).asnumpy(), ref_net(data).asnumpy())\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_layer_bidirectional():\n check_layer_bidirectional(7, 5, 0)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_layer_bidirectional_proj():\n check_layer_bidirectional(7, 5, 3)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_rnn_layer_begin_state_type():\n fake_data = nd.random.uniform(shape=(3, 5, 7), dtype='float16')\n modeling_layer = gluon.rnn.LSTM(\n hidden_size=11, num_layers=2, dropout=0.2, bidirectional=True)\n modeling_layer.cast('float16')\n modeling_layer.initialize()\n modeling_layer(fake_data)\n\n\ndef test_gluon_ctc_consistency():\n loss = mx.gluon.loss.CTCLoss()\n data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)\n ).reshape((2, 20, 4)).flip(axis=0)\n cpu_label = mx.nd.array([[2, 1, -1, -1], [3, 2, 2, -1]], ctx=mx.cpu(0))\n gpu_label = mx.nd.array([[2, 1, -1, -1], [3, 2, 2, -1]], ctx=mx.gpu(0))\n\n cpu_data = data.copy().as_in_context(mx.cpu(0))\n cpu_data.attach_grad()\n with mx.autograd.record():\n l_cpu = loss(cpu_data, cpu_label)\n l_cpu.backward()\n\n gpu_data = data.copyto(mx.gpu(0))\n gpu_data.attach_grad()\n with mx.autograd.record():\n l_gpu = loss(gpu_data, gpu_label)\n l_gpu.backward()\n\n assert_almost_equal(cpu_data.grad.asnumpy(),\n gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)\n\n\n@with_seed()\ndef test_global_norm_clip_multi_device():\n for check_isfinite in [True, False]:\n x1 = mx.nd.ones((3, 3), ctx=mx.gpu(0))\n x2 = mx.nd.ones((4, 4), ctx=mx.cpu(0))\n norm = gluon.utils.clip_global_norm(\n [x1, x2], 1.0, check_isfinite=check_isfinite)\n if check_isfinite:\n assert norm == 5.0\n else:\n assert norm.asscalar() == 5.0\n assert_almost_equal(x1.asnumpy(), np.ones((3, 3)) / 5)\n assert_almost_equal(x2.asnumpy(), np.ones((4, 4)) / 5)\n\n\n@with_seed()\ndef test_symbol_block_fp16():\n # Test case to verify if initializing the SymbolBlock from a model with params\n # other than fp32 param dtype.\n\n # 1. 
Load a resnet model, cast it to fp16 and export\n tmp = tempfile.mkdtemp()\n tmpfile = os.path.join(tmp, 'resnet34_fp16')\n ctx = mx.gpu(0)\n\n net_fp32 = mx.gluon.model_zoo.vision.resnet34_v2(\n pretrained=True, ctx=ctx, root=tmp)\n net_fp32.cast('float16')\n net_fp32.hybridize()\n data = mx.nd.zeros((1, 3, 224, 224), dtype='float16', ctx=ctx)\n net_fp32.forward(data)\n net_fp32.export(tmpfile, 0)\n\n # 2. Load the saved model and verify if all the params are loaded correctly.\n # and choose one of the param to verify the type if fp16.\n sm = mx.sym.load(tmpfile + '-symbol.json')\n inputs = mx.sym.var('data', dtype='float16')\n net_fp16 = mx.gluon.SymbolBlock(sm, inputs)\n net_fp16.collect_params().load(tmpfile + '-0000.params', ctx=ctx)\n # 3. Get a conv layer's weight parameter name. Conv layer's weight param is\n # expected to be of dtype casted, fp16.\n for param_name in net_fp16.params.keys():\n if 'conv' in param_name and 'weight' in param_name:\n break\n assert np.dtype(net_fp16.params[param_name].dtype) == np.dtype(np.float16)\n\n\n@with_seed()\ndef test_large_models():\n ctx = default_context()\n # Create model\n net = gluon.nn.HybridSequential()\n\n largest_num_features = 256\n with net.name_scope():\n net.add(nn.Conv2D(largest_num_features, 3))\n\n net.hybridize()\n net.initialize(mx.init.Normal(sigma=0.01), ctx=ctx)\n\n # Compute the height (=width) of the square tensor of the given size in bytes\n def tensor_size(big_tensor_bytes):\n bytes_per_float = 4\n sz = int(math.sqrt(big_tensor_bytes /\n largest_num_features / bytes_per_float))\n return (sz // 100) * 100\n\n # The idea is to create models with large tensors of (say) 20% of the total memory.\n # This in the past has given cudnnFind() trouble when it needed to allocate similar I/O's\n # from the area carved out by the MXNET_GPU_MEM_POOL_RESERVE setting (by default 5%).\n (free_mem_bytes, total_mem_bytes) = mx.context.gpu_memory_info(ctx.device_id)\n start_size = tensor_size(0.20 * total_mem_bytes)\n num_trials = 10\n sys.stderr.write(\n ' testing global memory of size {} ... 
'.format(total_mem_bytes))\n sys.stderr.flush()\n for i in range(num_trials):\n sz = start_size - 10 * i\n (height, width) = (sz, sz)\n sys.stderr.write(\" {}x{} \".format(height, width))\n sys.stderr.flush()\n data_in = nd.random_uniform(low=0, high=255, shape=(1, 3, height, width),\n ctx=ctx, dtype=\"float32\")\n # Evaluate model\n net(data_in).asnumpy()\n\n# isolated execution bulking test function to be invoked with different env var settings\n\n\ndef _test_bulking_in_process(seed, time_per_iteration):\n # Use flip since it's a simple function with same-sized I/O unlikely to ever be fused.\n class Flip(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Flip, self).__init__(**kwargs)\n\n def hybrid_forward(self, F, x):\n return F.flip(x, axis=0)\n\n def get_net(num_ops):\n net = nn.HybridSequential()\n with net.name_scope():\n for _ in range(num_ops):\n net.add(Flip())\n return net\n\n data_shape = (10,)\n num_ops = 1000\n num_iterations = 20\n\n # build model\n x = mx.ndarray.zeros(data_shape)\n x.attach_grad()\n dy = mx.ndarray.ones(data_shape)\n net = get_net(num_ops)\n net.hybridize(static_alloc=True, static_shape=True)\n\n # time a number of forward() and backward() executions after some warm-up iterations\n warmups = 1\n for i in range(num_iterations + warmups):\n with autograd.record():\n if i == warmups:\n start = time.time()\n y = net(x)\n y.backward(dy)\n x.grad.wait_to_read()\n\n time_per_iteration.value = (time.time() - start) / num_iterations\n\n\n@with_seed()\ndef test_bulking():\n # test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)\n test_cases = [(0, 0, True), (1, 1, True), (15, 15, False),\n (15, 0, True), (0, 15, True), (15, 15, True)]\n times = {}\n times_str = ''\n for seg_sizes in test_cases:\n # Create shared variable to return measured time from test process\n time_per_iteration = mp.Manager().Value('d', 0.0)\n if not run_in_spawned_process(_test_bulking_in_process,\n {'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD': seg_sizes[0],\n 'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD': seg_sizes[1],\n 'MXNET_EXEC_BULK_EXEC_TRAIN': seg_sizes[2]},\n time_per_iteration):\n # skip test since the python version can't run it properly. Warning msg was logged.\n return\n times[seg_sizes] = time_per_iteration.value\n times_str += \\\n '\\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\\t{:.1f} msec'.format(\n seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])\n\n fastest_non_bulked_time = min(\n times[(0, 0, True)], times[(1, 1, True)], times[(15, 15, False)])\n slowest_half_bulked_time = max(times[(0, 15, True)], times[(15, 0, True)])\n fastest_half_bulked_time = min(times[(0, 15, True)], times[(15, 0, True)])\n fully_bulked_time = times[(15, 15, True)]\n\n print(times_str)\n # Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,\n # slower than both half-bulked times[0,15,True] and times[15,0,True]\n assert slowest_half_bulked_time < fastest_non_bulked_time, \\\n 'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \\\n .format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)\n # The fully bulked times[15,15,True] should be faster than both half-bulked runs\n assert fully_bulked_time < fastest_half_bulked_time, \\\n 'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \\\n .format(fully_bulked_time - fastest_half_bulked_time, times_str)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n"
]
| [
[
"numpy.isnan",
"numpy.dtype",
"numpy.ones"
]
]
|
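The numpy calls extracted for this test file (numpy.isnan, numpy.dtype, numpy.ones) boil down to assertions like the following sketch; the values are made up and no MXNet install is required:

import numpy as np

cell_states = np.ones((2, 4), dtype=np.float16) * 3.0
assert not np.isnan(cell_states).any()                         # the LSTM state-clip NaN check
assert (cell_states >= -5).all() and (cell_states <= 5).all()  # within clip_min / clip_max
assert np.dtype(cell_states.dtype) == np.dtype(np.float16)     # the fp16 dtype check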
ktechhub/resume-ats | [
"c1748db2c37c35aed6172c8468db72c2cc539670"
]
| [
"main.py"
]
| [
"from collections import Counter\nimport requests\nimport spacy\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\n\nurl = input('Enter job link: ')\npage = requests.get(url)\nhtml = page.text\n\nsoup = BeautifulSoup(html, 'html.parser')\ntitle = soup.find('h1').string\ncompany = soup.find('a', class_='topcard__org-name-link topcard__flavor--black-link').string.replace(\"\\\\n\", \"\").strip()\ncontent = soup.find('div', class_='show-more-less-html__markup show-more-less-html__markup--clamp-after-5')\nbody_string = ''\nfor x in iter(content.stripped_strings):\n body_string = body_string + x.lower()\n \nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(body_string)\nnouns = [token.lemma_ for token in doc if token.pos_ == \"NOUN\"]\nnoun_freq = Counter(nouns)\ncommon_nouns = noun_freq.most_common(25)\n\n\nnoun_list, noun_occurance = zip(*common_nouns)\nplt.figure(0) # Specify differnt figures\nplt.barh(noun_list, noun_occurance)\nplt.title(f'{title} @ {company}')\nplt.ylabel('Word')\nplt.xlabel('Occurance')\nplt.tight_layout() # add padding\nplt.savefig(f'{title}.png')"
]
| [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
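A runnable sketch of the plotting half of the script above; the token list is invented so no LinkedIn scraping or spaCy model is needed:

from collections import Counter
import matplotlib.pyplot as plt

noun_freq = Counter(["python", "experience", "python", "team", "python"])
words, counts = zip(*noun_freq.most_common(3))
plt.figure(0)                        # select a dedicated figure
plt.barh(words, counts)              # horizontal bar chart of word frequencies
plt.title("Example Job @ Example Co")
plt.ylabel("Word")
plt.xlabel("Occurrence")
plt.tight_layout()                   # add padding
plt.savefig("example.png")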
Nohossat/youtube_sentiment_analysis | [
"4d8e15111ea448d46b787e7902b7523c8d041d92"
]
| [
"tests/test_preprocessing.py"
]
| [
"import os\n\nimport pandas as pd\n\nimport nohossat_cas_pratique\nfrom nohossat_cas_pratique.preprocessing import split_data, NLPCleaner\n\nmodule_path = os.path.dirname(os.path.dirname(os.path.dirname(nohossat_cas_pratique.__file__)))\ndata_path = os.path.join(module_path, \"data\", \"comments.csv\")\n\n\ndef test_split_data():\n data = pd.read_csv(data_path)\n X, y = split_data(data)\n assert isinstance(X, list), \"X should be a list\"\n assert isinstance(y, list), \"y should be a list\"\n\n\ndef test_nlp_transform():\n data = pd.read_csv(data_path)\n X, y = split_data(data)\n cleaner = NLPCleaner()\n\n clean_text = cleaner.transform(X)\n\n assert clean_text[0] == ('réserv tabl quelqu mois avanc le servic impecc pend processus nous arriv '\n 'temp rapid assis un personnel accueil attent détail nous verr champagn plus')\n"
]
| [
[
"pandas.read_csv"
]
]
|
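A self-contained sketch of the contract the test above asserts; the CSV content and column names are invented stand-ins for the repo's data/comments.csv and its split_data helper:

import pandas as pd

pd.DataFrame({"comment": ["super restaurant", "service lent"],
              "label": [1, 0]}).to_csv("comments.csv", index=False)
data = pd.read_csv("comments.csv")
X = data["comment"].tolist()         # split_data is expected to return plain lists
y = data["label"].tolist()
assert isinstance(X, list) and isinstance(y, list)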
firasl/inverse_attention | [
"8c9a08c90cacf4699bb829a234a2e3c7997c6f0c"
]
| [
"main_imagenet.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 20 15:26:58 2021\n\n@author: laakom\n\"\"\"\n\n\nfrom __future__ import print_function\nimport tensorflow.keras as ks\nfrom tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom tensorflow.keras.layers import AveragePooling2D, Input, Flatten\nfrom tensorflow.keras.optimizers import Adam,SGD\nfrom tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau,CSVLogger\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model\nfrom models import resnet_model \n\nimport numpy as np\nimport os\n\nimport tensorflow as tf \nimport imagenet_input\n\n# Create a MirroredStrategy.\nstrategy = tf.distribute.MirroredStrategy()\nprint('Number of devices: {}'.format(strategy.num_replicas_in_sync))\n\n# Training parameters\nbatch_size = 256\nepochs = 100\nalpha = 0.0001 # weight decay coefficient\nnum_classes = 1000\nsubtract_pixel_mean = True # Subtracting pixel mean improves accuracy\n\n\n#specify the diroctory of the TF records of Imagenet.\ndataset = 'imagenet'\ndataset_dir = '/scratch/zhangh/imagenet_tdfs/tfrecords/'\nbase_model = 'resnet50_'\n# Choose what attention_module to use from the following options: \n# None: standard approach without attention\n#'se_block' or 'cbam_block': original SE and CBAM appraoches.\n#'se_block_ours' or 'se_block_ours_05' or 'se_block_ours_08': First variant of our approach using SE attention with alpha=1, alpha=0.5, and alpha=0.8 respectively. \n#'cbam_block_ours' or 'cbam_block_ours_05' or 'cbam_block_ours_08': First variant of our approach using CBAM attention with alpha=1, alpha=0.5, and alpha=0.8 respectively. 
\n# 'se_block_ours_invsigmoid' or 'cbam_block_ours_invsigmoid': Second variant of our approach using SE or CBAM approach.\n# 'se_block_ours_msigmoid' or 'cbam_block_ours_msigmoid': Third variant of our approach using SE or CBAM approach.\n\nattention_module =None # 'se_block' 'se_block_ours' 'se_block_ours_05' 'se_block_ours_invsigmoid' 'se_block_ours_msigmoid' 'cbam_block_ours' 'cbam_block_ours_msigmoid' 'cbam_block_ours_invsigmoid'\n\nmodel_type = base_model if attention_module==None else base_model+'_'+attention_module + '_'\nmodel_name = dataset + '_' + model_type + '.hdf5'\n\n\nUSE_BFLOAT16 = False\n\n\n\ndef lr_schedule(epoch):\n lr = 0.1\n if epoch > 80:\n lr = 0.0001\n elif epoch > 60:\n lr = 0.001\n elif epoch > 30:\n lr = 0.01\n print('Learning rate: ', lr)\n return lr\n\n\n\n\n\n\n\nimagenet_train = imagenet_input.ImageNetInput(\n is_training=True, data_dir=dataset_dir, batch_size=batch_size,\n use_bfloat16=USE_BFLOAT16)\nimagenet_eval = imagenet_input.ImageNetInput(\n is_training=False, data_dir=dataset_dir, batch_size=batch_size,\n use_bfloat16=USE_BFLOAT16)\n\n\n\n\n\n\n# Open a strategy scope.\nwith strategy.scope():\n input_shape = (224,224,3)\n model = resnet_model.ResNet50(num_classes=num_classes,attention_module=attention_module)\n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=SGD(lr=lr_schedule(0),momentum=0.9,nesterov=True), \n metrics=['accuracy', 'sparse_top_k_categorical_accuracy'])\n\n\nmodel.summary()\n# Prepare model model saving directory.\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nfilepath = os.path.join(save_dir, model_name)\nif os.path.exists(filepath):\n print('warning loading existing module')\n model.load_weights(filepath)\n# Prepare callbacks for model saving and for learning rate adjustment.\ncheckpoint = ModelCheckpoint(filepath=filepath,\n monitor='val_accuracy',\n save_best_only=False,\n verbose=1,\n save_weights_only=True)\n\nlr_scheduler = LearningRateScheduler(lr_schedule) #lr_schedule_warmup(1281167 // batch_size)\n\n\nsavecsvlog = CSVLogger(filepath[:-len('.hdf5')] + '_log.csv', separator=',', append=True ) \n\ncallbacks = [checkpoint, lr_scheduler,savecsvlog]\n\n\n\nmodel.fit(\n imagenet_train.input_fn(),\n callbacks=callbacks,\n steps_per_epoch=1281167 // batch_size,\n validation_data=imagenet_eval.input_fn(),\n validation_steps=50000 // batch_size,\n initial_epoch=0,\n workers=64,\n epochs=epochs)\n\n#model.load_weights(filepath[:-len('.hdf5')] + '_final.hdf5')\t\n# Score trained model.\nscores = model.evaluate(x = imagenet_eval.input_fn(), workers=64 , epochs=epochs ,steps=50000 // batch_size, verbose=1)\nprint('Model: ', model_name)\nprint('Final Test loss:', scores[0])\nprint('Final Test accuracy:', scores[1])\nprint('Final top-5 accuracy:', scores[2])\n\n"
]
| [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.distribute.MirroredStrategy"
]
]
|
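A minimal sketch of the callback wiring shown above (checkpointing plus a step learning-rate schedule); the filepath is hypothetical and the callbacks would be passed to model.fit(..., callbacks=callbacks):

import tensorflow as tf

def lr_schedule(epoch):
    return 0.1 if epoch <= 30 else 0.01          # simplified two-step schedule

checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath="model.hdf5",
                                                monitor="val_accuracy",
                                                save_weights_only=True,
                                                verbose=1)
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule)
callbacks = [checkpoint, lr_scheduler]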
khamrranirr/multi-party-data-differential-privacy | [
"40bd6aa86ad27e82e1534f60d1a9ce7586c4185a"
]
| [
"d_SVM.py"
]
| [
"from sklearn import svm\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.naive_bayes import BernoulliNB\r\n\r\n\r\n# 数据数字化\r\ndef dataDigitize(path):\r\n # 获得原始数据\r\n adult_raw = pd.read_csv(path)\r\n\r\n # 清理数据,删除缺失值\r\n adult_cleaned = adult_raw.dropna()\r\n\r\n # 将bool变量数字化\r\n if 'Windy' in adult_cleaned.columns:\r\n\r\n for i in range(adult_cleaned['Windy'].shape[0]):\r\n adult_cleaned.loc[i, 'Windy'] = adult_cleaned.loc[i, 'Windy'] + 0\r\n\r\n # 其他属性数字化\r\n adult_digitization = pd.DataFrame()\r\n\r\n target_columns = adult_cleaned.columns\r\n for column in adult_cleaned.columns:\r\n if column in target_columns:\r\n\r\n unique_value = list(enumerate(np.unique(adult_cleaned[column])))\r\n dict_data = {key: value for value, key in unique_value}\r\n adult_digitization[column] = adult_cleaned[column].map(dict_data)\r\n else:\r\n adult_digitization[column] = adult_cleaned[column]\r\n\r\n\r\n return adult_digitization\r\n\r\n# 参数:data——处理后的数据;adult_clf——选用的训练模型;name——指定的模型名称\r\ndef main(data,labels,adult_clf, name=\"\"):\r\n adult_digitization = data\r\n\r\n\r\n # 构造输入和输出\r\n X = adult_digitization\r\n Y = labels\r\n\r\n # 交叉验证\r\n preaccsvm = []\r\n num = 1\r\n kf = KFold(n_splits=10)\r\n\r\n for train, test in kf.split(X):\r\n X_train, X_test = X.loc[train], X.loc[test]\r\n Y_train, Y_test = Y.loc[train], Y.loc[test]\r\n\r\n adult_clf.fit(X_train, Y_train.values.ravel())\r\n\r\n # test_score = clf.score(X_test, Y_test)\r\n # print(\"test_score:\" + str(test_score))\r\n test_predictions = adult_clf.predict(X_test)\r\n accuracy = accuracy_score(Y_test.values.ravel(), test_predictions)\r\n preaccsvm.append(accuracy)\r\n # print(name + str(num) + \"测试集准确率: %s \" % accuracy)\r\n num = num + 1\r\n\r\n # print(name + \"十折交叉平均准确率: %s \" % np.mean(np.array(preaccsvm)))\r\n\r\n # 返回十折交叉平均准确率\r\n return np.mean(np.array(preaccsvm))\r\n\r\ndef runSvm(data,labels):\r\n svmclf = svm.SVC(kernel='rbf', C=1)\r\n return main(data,labels,svmclf,'svm')\r\n\r\n\r\nMNBclf = MultinomialNB()\r\n# main(MNBclf,'MNB')\r\n\r\nGNBclf = GaussianNB()\r\n# main(GNBclf,'GNB')\r\n\r\nBNBclf = BernoulliNB()\r\n# main(BNBclf,'BNB')\r\n\r\n# dataPartition(3)\r\n\r\n\r\n"
]
| [
[
"pandas.read_csv",
"sklearn.naive_bayes.GaussianNB",
"numpy.unique",
"sklearn.naive_bayes.MultinomialNB",
"pandas.DataFrame",
"sklearn.model_selection.KFold",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.svm.SVC",
"numpy.array"
]
]
|
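A self-contained sketch of the 10-fold evaluation loop in main() above, with invented random data in place of the cleaned adult dataset:

import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.random((50, 3)))
Y = pd.Series(rng.integers(0, 2, 50))
clf = svm.SVC(kernel='rbf', C=1)

accs = []
for train, test in KFold(n_splits=10).split(X):
    clf.fit(X.loc[train], Y.loc[train].values.ravel())
    accs.append(accuracy_score(Y.loc[test], clf.predict(X.loc[test])))
print(np.mean(np.array(accs)))       # 10-fold mean accuracy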
heshanpadmasiri/alpr-simple | [
"61272c56b1ba4b6aec825e48d0eefced902a06b6"
]
| [
"models/bbox_25.py"
]
| [
"import tensorflow as tf\nfrom tensorflow.keras import datasets, layers, models, Input\nfrom models.nn_blocks import rpnet_block_1, rpnet_block_3, mobilenet_block_1\nfrom metrices.iou import IoUMetric\n\n\ndef __get_model__(feature_extractor, input_shape, extractor_layer=None):\n global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\n prediction_layer = tf.keras.layers.Dense(4)\n model = tf.keras.Sequential(\n [feature_extractor, global_average_layer, prediction_layer])\n\n model.compile(optimizer='adam',\n loss='mean_squared_error',\n metrics=[IoUMetric(input_shape)])\n return model\n\n\ndef __get_backborn__(input_size):\n image = Input(shape=input_size, name='img')\n x = mobilenet_block_1(image)\n x = mobilenet_block_1(image)\n x = rpnet_block_3(x, 5)\n x = rpnet_block_3(x, 5)\n x = rpnet_block_3(x, 3)\n x = rpnet_block_3(x, 3)\n x = rpnet_block_1(x)\n x = rpnet_block_1(x)\n backborn = tf.keras.Model(inputs=image, outputs=x)\n return backborn\n\n\ndef create_model(input_shape):\n backborn = __get_backborn__(input_shape)\n model = __get_model__(backborn, input_shape)\n return model\n"
]
| [
[
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"tensorflow.keras.Model"
]
]
|
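A hedged sketch of the __get_model__ pattern above with a stand-in backbone (a single Conv2D instead of the repo's rpnet/mobilenet blocks, and without the custom IoUMetric):

import tensorflow as tf

image = tf.keras.Input(shape=(64, 64, 3), name="img")
x = tf.keras.layers.Conv2D(8, 3, activation="relu")(image)
backbone = tf.keras.Model(inputs=image, outputs=x)

model = tf.keras.Sequential([backbone,
                             tf.keras.layers.GlobalAveragePooling2D(),
                             tf.keras.layers.Dense(4)])   # 4 bounding-box values
model.compile(optimizer="adam", loss="mean_squared_error")
model.summary()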
guybuk/ANCOR | [
"229f8a52b81fa94cd584cbc6fc715697f98e43e2"
]
| [
"localization.py"
]
| [
"from __future__ import print_function\n\nimport argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom datasets.breeds import BREEDSFactory\nfrom models.util import create_model, load_model\n\n\ndef parse_option():\n parser = argparse.ArgumentParser('argument for training')\n\n # load pretrained model\n parser.add_argument('--model', type=str, default='resnet12')\n parser.add_argument('--model_path', type=str, default=None, help='absolute path to .pth model')\n\n # dataset\n parser.add_argument('--dataset', type=str, default='miniImageNet'\n )\n # parser.add_argument('--transform', type=str, default='A', choices=transforms_list)\n\n # specify data_root\n parser.add_argument('--data_root', type=str, default='', help='path to data root')\n\n # meta setting\n parser.add_argument('--n_test_runs', type=int, default=1000, metavar='N',\n help='Number of test runs')\n parser.add_argument('--n_ways', type=int, default=5, metavar='N',\n help='Number of classes for doing each classification run')\n parser.add_argument('--n_shots', type=int, default=1, metavar='N',\n help='Number of shots in test')\n parser.add_argument('--n_queries', type=int, default=15, metavar='N',\n help='Number of query in test')\n parser.add_argument('--n_aug_support_samples', default=5, type=int,\n help='The number of augmented samples for each meta test sample')\n parser.add_argument('--num_workers', type=int, default=3, metavar='N',\n help='Number of workers for dataloader')\n parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size',\n help='Size of test batch)')\n parser.add_argument('-b', dest='batch_size', type=int)\n parser.add_argument('--mode', type=str, required=True, choices=['coarse', 'fine'])\n parser.add_argument('--only-base', action='store_true')\n parser.add_argument('--partition', type=str, required=True, choices=['train', 'test', 'validation'])\n parser.add_argument('--gpu', default=0, type=int,\n help='GPU id to use.')\n # ===========IRRELEVANT===============\n parser.add_argument('--dim', type=int, default=128)\n parser.add_argument('--head', default=None)\n parser.add_argument('--fg', action='store_true')\n parser.add_argument('--simclr', action='store_true')\n parser.add_argument('--cascade', action='store_true')\n\n opt = parser.parse_args()\n\n if 'trainval' in opt.model_path:\n opt.use_trainval = True\n else:\n opt.use_trainval = False\n\n opt.data_aug = True\n\n return opt\n\n\ndef main():\n args = parse_option()\n\n train_dataset, n_cls = get_datasets(args)\n\n train_loader = DataLoader(train_dataset,\n batch_size=args.batch_size, shuffle=True, drop_last=False,\n num_workers=args.num_workers)\n\n model = create_model(args.model, n_cls, args.only_base, args.head, args.dim)\n load_model(model, args.model_path, not args.only_base)\n if torch.cuda.is_available():\n torch.cuda.set_device(args.gpu)\n model = model.cuda()\n cudnn.benchmark = True\n\n for i, (images, labels) in enumerate(train_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu)\n\n def attention_forward(encoder, imgs):\n # hard-coded forward because we need the feature-map and not the finalized feature\n x = encoder.conv1(imgs)\n x = encoder.bn1(x)\n x = encoder.relu(x)\n x = encoder.maxpool(x)\n x = encoder.layer1(x)\n x = encoder.layer2(x)\n x = encoder.layer3(x)\n feats = encoder.layer4(x)\n feats_as_batch = 
feats.permute((0, 2, 3, 1)).contiguous().view((-1, feats.shape[1]))\n # reminder: \"fc\" layer outputs: (feature, class logits)\n feats_as_batch = encoder.fc(feats_as_batch)[0]\n feats_as_batch = feats_as_batch.view(\n (feats.shape[0], feats.shape[2], feats.shape[3], feats_as_batch.shape[1]))\n feats_as_batch = feats_as_batch.permute((0, 3, 1, 2))\n return feats_as_batch\n\n f_q = attention_forward(model, images)\n localization(images, f_q, args.batch_size, batch_id=i, img_size=448)\n if i == 10:\n break\n\n\ndef get_datasets(args):\n augs = [\n transforms.RandomResizedCrop(448, scale=(0.2, 1.)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.4717, 0.4499, 0.3837], std=[0.2600, 0.2516, 0.2575])\n ]\n if args.dataset in ['living17', 'nonliving26', 'entity30', 'entity13']:\n breeds_factory = BREEDSFactory(info_dir=os.path.join(args.data_root, \"BREEDS\"),\n data_dir=os.path.join(args.data_root, \"Data\", \"CLS-LOC\"))\n train_dataset = breeds_factory.get_breeds(ds_name=args.dataset, partition=args.partition, mode=args.mode,\n transforms=transforms.Compose(augs))\n n_cls = int(args.dataset[-2:])\n else:\n raise NotImplementedError(args.dataset)\n return train_dataset, n_cls\n\n\ndef localization(im_q, f_q, batch_size, batch_id, img_size):\n os.makedirs('imgs', exist_ok=True)\n for idd in range(batch_size):\n aa = torch.norm(f_q, dim=1)\n imgg = im_q[idd] * torch.Tensor([[[0.229, 0.224, 0.225]]]).view(\n (1, 3, 1, 1)).cuda() + torch.Tensor(\n [[[0.485, 0.456, 0.406]]]).view((1, 3, 1, 1)).cuda()\n heatmap = F.interpolate((aa[idd] / aa[0].max()).detach().unsqueeze(0).unsqueeze(0).repeat((1, 3, 1, 1)),\n [img_size, img_size])\n thresh = 0\n heatmap[heatmap < thresh] = 0\n plt.imsave(f'imgs/bImg_{idd}_batch_{batch_id}.png',\n torch.cat((imgg, heatmap * imgg), dim=3).squeeze(0).cpu().permute(\n (1, 2, 0)).clamp(0, 1).numpy().astype(float))\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.norm",
"torch.cuda.set_device",
"torch.Tensor",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
]
|
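A CPU-only sketch of the attention-heatmap arithmetic in localization() above; the feature map and image are random tensors rather than model outputs:

import torch

f_q = torch.rand(1, 16, 7, 7)                    # fake (N, C, H, W) feature map
img = torch.rand(1, 3, 7, 7)                     # fake image at the same spatial size
aa = torch.norm(f_q, dim=1)                      # per-location activation norm, (N, H, W)
heat = (aa / aa.max()).unsqueeze(1)              # normalise and add a channel dim
side_by_side = torch.cat((img, heat * img), dim=3)
print(side_by_side.shape)                        # torch.Size([1, 3, 7, 14])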
QuantFinEcon/py-learn | [
"7151f01df9f7f096312e43434fe8026d1d7d7828"
]
| [
"wordpress-py-scripts/scipy timeseries test 1.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\ntsa = sm.tsa # as shorthand\n\n\nmdata = sm.datasets.macrodata.load().data\ntype(mdata)\n\nendog = np.log(mdata['m1'])\nexog = np.column_stack([np.log(mdata['realgdp']), np.log(mdata['cpi'])])\nexog = sm.add_constant(exog, prepend=True)\nexog\nres1 = sm.OLS(endog, exog).fit()\n\nacf, ci, Q, pvalue = tsa.acf(res1.resid, nlags=4,alpha=.05, qstat=True,unbiased=True)\nacf\npvalue\n\ntsa.pacf(res1.resid, nlags=4)\n\n\n\n\n\n\n\n\n#==============================================================================\n# FILTER\n#==============================================================================\n\nfrom scipy.signal import lfilter\ndata = sm.datasets.macrodata.load()\ninfl = data.data.infl[1:]\ndata.data.shape\n\n# get 4 qtr moving average\ninfl = lfilter(np.ones(4)/4, 1, infl)[4:]\nunemp = data.data.unemp[1:]\n\n#To apply the Hodrick-Prescott filter to the data 3, we can do\ninfl_c, infl_t = tsa.filters.hpfilter(infl)\nunemp_c, unemp_t = tsa.filters.hpfilter(unemp)\n\n#The Baxter-King filter 4 is applied as\ninfl_c = tsa.filters.bkfilter(infl)\nunemp_c = tsa.filters.bkfilter(unemp)\n\n#The Christiano-Fitzgerald filter is similarly applied 5\ninfl_c, infl_t = tsa.filters.cfilter(infl)\nunemp_c, unemp_t = tsa.filters.cfilter(unemp)\n\n\n#plot\nINFLA=pd.DataFrame(infl_c,columns=['INFLA'])\nUNEMP=pd.DataFrame(unemp_c[4:],columns=['UNEMP'])\npd.concat([INFLA,UNEMP],axis=1).plot()\n\nINFLA=pd.DataFrame(infl_t,columns=['INFLA'])\nUNEMP=pd.DataFrame(unemp_t[4:],columns=['UNEMP'])\npd.concat([INFLA,UNEMP],axis=1).plot()\n\n\n#==============================================================================\n# BENCHMARKING TO STANDARDISE LOWER FREQ TO HIGHER FREQ\n#==============================================================================\niprod_m = np.array([ 87.4510, 86.9878, 85.5359, #INDUSTRIAL PRODUCTION INDEX\n 84.7761, 83.8658, 83.5261, 84.4347,\n 85.2174, 85.7983, 86.0163, 86.2137,\n 86.7197, 87.7492, 87.9129, 88.3915,\n 88.7051, 89.9025, 89.9970, 90.7919,\n 90.9898, 91.2427, 91.1385, 91.4039,\n 92.5646])\ngdp_q = np.array([14049.7, 14034.5, 14114.7,14277.3, 14446.4, 14578.7, 14745.1,14871.4])\ngdp_m = tsa.interp.dentonm(iprod_m, gdp_q,freq=\"qm\")\n\n\na=[]\n[a.extend([i]*4) for i in gdp_q]\n\nx=pd.DataFrame([iprod_m,gdp_m],index=['IPROD','GDP MONTHLY']).T\nx.plot(secondary_y='IPROD')\npd.DataFrame([gdp_m,a],index=['monthly','quarterly']).T.plot(secondary_y='quarterly')\n\n\nmdata = sm.datasets.macrodata.load().data\nmdata = mdata[['realgdp','realcons','realinv']]\nnames = mdata.dtype.names\ndata = mdata.view((float,3))\ndata = np.diff(np.log(data), axis=0)\n#statsmodels.tsa.vector_ar.var_model.VAR\nimport statsmodels\ndata\nmodel = statsmodels.tsa.vector_ar.var_model.VAR(data)\nmodel.endog_names = names\nres = model.fit(maxlags=2)\nres.plot_forecast(5)\nres.fevd().plot()\n\n#autocorrelation\nres.plot_sample_acorr()\n# impulse response\nirf = res.irf(10) # 10 periods\nirf.plot()\n\n# granger causality with VAR.fit\nres.test_causality(equation='y1',variables=['y2'])\nres.test_causality(equation='y1',variables=['y2','y3'])\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"numpy.log",
"pandas.concat",
"pandas.DataFrame",
"numpy.ones",
"numpy.array"
]
]
|
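A runnable sketch of the 4-quarter moving-average filter used above, with an invented inflation series in place of the statsmodels macrodata:

import numpy as np
import pandas as pd
from scipy.signal import lfilter

infl = np.array([2.0, 2.5, 3.0, 2.8, 2.6, 2.9, 3.1, 2.7])
ma4 = lfilter(np.ones(4) / 4, 1, infl)[4:]   # drop the 4-sample warm-up
aligned = pd.concat([pd.DataFrame(infl[4:], columns=['raw']),
                     pd.DataFrame(ma4, columns=['ma4'])], axis=1)
print(aligned)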
cltl/a-proof-zonmw | [
"f6d1a83fc77223bf8b58c9d465aae301269bb679"
]
| [
"clf_domains/wandb_sweep.py"
]
| [
"\"\"\"\nPerform a sweep for hyperparameters optimization, using Simple Transformers and W&B Sweeps.\nThe sweep is configured in a dictionary in a config file, which should specify the search strategy, the metric to be optimized, and the hyperparameters (and their possible values).\n\nThe script can be customized with the following parameters:\n --datapath: data dir\n --train_pkl: the file with the train data\n --eval_pkl: the file with the eval data\n --config: json file containing the model args\n --sweep_config: the name of the sweep config dict from `config`\n --model_args: the name of the model args dict from `config`\n --model_type: type of the pre-trained model, e.g. bert, roberta, electra\n --modelpath: models dir\n --model_name: the pre-trained model, either from Hugging Face or locally stored\n --hf: pass this parameter if a model from Hugging Face is used\n\nTo change the default values of a parameter, pass it in the command line, e.g.:\n\n$ python wandb_sweep.py --model_name pdelobelle/robbert-v2-dutch-base --hf\n\"\"\"\n\n\nimport argparse\nimport warnings\nimport json\nimport torch\nimport wandb\nimport pandas as pd\nfrom simpletransformers.classification import MultiLabelClassificationModel\n\nimport sys\nsys.path.insert(0, '..')\nfrom utils.config import PATHS\n\n\ndef main(\n train_pkl,\n eval_pkl,\n config_json,\n sweep_config,\n model_args,\n model_type,\n model_name,\n labels=['ADM', 'ATT', 'BER', 'ENR', 'ETN', 'FAC', 'INS', 'MBW', 'STM'],\n):\n \"\"\"\n Perform a sweep for hyperparameters optimization, using Simple Transformers and W&B Sweeps.\n The sweep is configured in a dictionary in `config_json`, which should specify the search strategy, the metric to be optimized, and the hyperparameters (and their possible values).\n\n Parameters\n ----------\n train_pkl: str\n path to pickled df with the training data, which must contain the columns 'text' and 'labels'; the labels are multi-hot lists (see column indices in `labels`), e.g. [1, 0, 0, 1, 0, 0, 0, 0, 1]\n eval_pkl: str\n path to pickled df for evaluation during training\n config_json: str\n path to a json file containing the sweep config\n sweep_config: str\n the name of the sweep config dict from `config_json` to use\n model_args: str\n the name of the model args dict from `config_json` to use\n model_type: str\n type of the pre-trained model, e.g. 
bert, roberta, electra\n model_name: str\n the exact architecture and trained weights to use; this can be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model file\n labels: list\n list of column indices for the multi-hot labels\n\n Returns\n -------\n None\n \"\"\"\n\n # check CUDA\n cuda_available = torch.cuda.is_available()\n if not cuda_available:\n def custom_formatwarning(msg, *args, **kwargs):\n return str(msg) + '\\n'\n warnings.formatwarning = custom_formatwarning\n warnings.warn('CUDA device not available; running on a CPU!')\n\n # load data\n train_data = pd.read_pickle(train_pkl)\n eval_data = pd.read_pickle(eval_pkl)\n\n # sweep config & model args\n with open(config_json, 'r') as f:\n config = json.load(f)\n sweep_config = config[sweep_config]\n model_args = config[model_args]\n\n sweep_id = wandb.sweep(sweep_config, project=model_args['wandb_project'])\n\n def train():\n wandb.init()\n\n model = MultiLabelClassificationModel(\n model_type,\n model_name,\n num_labels=len(labels),\n args=model_args,\n use_cuda=cuda_available,\n )\n\n model.train_model(train_data, eval_df=eval_data)\n\n wandb.join()\n\n wandb.agent(sweep_id, train)\n\n\nif __name__ == '__main__':\n\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--datapath', default='data_expr_july', help='must be listed as a key in /config.ini')\n argparser.add_argument('--train_pkl', default='clf_domains/train.pkl')\n argparser.add_argument('--eval_pkl', default='clf_domains/dev.pkl')\n argparser.add_argument('--config', default='config.json')\n argparser.add_argument('--sweep_config', default='sweep_config')\n argparser.add_argument('--model_args', default='sweep_args')\n argparser.add_argument('--model_type', default='roberta')\n argparser.add_argument('--modelpath', default='models')\n argparser.add_argument('--model_name', default='clin_nl_from_scratch')\n argparser.add_argument('--hf', dest='hugging_face', action='store_true')\n argparser.set_defaults(hugging_face=False)\n args = argparser.parse_args()\n\n train_pkl = PATHS.getpath(args.datapath) / args.train_pkl\n eval_pkl = PATHS.getpath(args.datapath) / args.eval_pkl\n\n # model stored locally (default) or on HuggingFace (--hf)\n model_name = str(PATHS.getpath(args.modelpath) / args.model_name)\n if args.hugging_face:\n model_name = args.model_name\n\n main(\n train_pkl,\n eval_pkl,\n args.config,\n args.sweep_config,\n args.model_args,\n args.model_type,\n model_name,\n )\n"
]
| [
[
"pandas.read_pickle",
"torch.cuda.is_available"
]
]
|
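A hedged sketch of the W&B sweep wiring above; it assumes a logged-in wandb session, and the project name, metric, and parameter range are invented:

import wandb

sweep_config = {
    "method": "bayes",
    "metric": {"name": "eval_loss", "goal": "minimize"},
    "parameters": {"learning_rate": {"min": 1e-5, "max": 1e-3}},
}
sweep_id = wandb.sweep(sweep_config, project="demo-project")

def train():
    wandb.init()
    # a real run would train the model here and log the metric named above
    wandb.log({"eval_loss": 0.5})

wandb.agent(sweep_id, train, count=1)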
theXYZT/codejam-2020 | [
"7d8705725c13ef3a5cb309b4c1ac53bd7e2e7579"
]
| [
"Qualification Round/vestigium.py"
]
| [
"# Codejam 2020, Qualification Round: Vestigium\n\nimport numpy as np\n\n# I/O Code\nnum_cases = int(input())\n\nfor case in range(1, num_cases + 1):\n N = int(input())\n M = np.array([list(map(int, input().split())) for _ in range(N)])\n\n K = np.trace(M)\n R = sum(len(set(r)) < N for r in M)\n C = sum(len(set(c)) < N for c in M.T)\n print('Case #{}: {} {} {}'.format(case, K, R, C))\n"
]
| [
[
"numpy.trace"
]
]
|
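A worked example of the trace/row/column checks above, on a hand-picked 3x3 matrix:

import numpy as np

M = np.array([[1, 2, 3],
              [2, 1, 3],
              [3, 3, 3]])
K = np.trace(M)                                   # diagonal sum: 1 + 1 + 3 = 5
R = sum(len(set(r)) < M.shape[0] for r in M)      # rows containing repeats: 1
C = sum(len(set(c)) < M.shape[0] for c in M.T)    # columns containing repeats: 1
print('Case #1: {} {} {}'.format(K, R, C))        # Case #1: 5 1 1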
dwiel/streamlit | [
"929942f755ca40f859a03d905ffcbf743f45dffa"
]
| [
"lib/streamlit/elements/data_frame_proto.py"
]
| [
"# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions to marshall a pandas.DataFrame into a proto.Dataframe.\"\"\"\n\nimport re\nimport tzlocal\n\nfrom collections import namedtuple\n\nfrom streamlit import type_util\nfrom streamlit.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\nCSSStyle = namedtuple(\"CSSStyle\", [\"property\", \"value\"])\n\n\ndef marshall_data_frame(data, proto_df):\n \"\"\"Convert a pandas.DataFrame into a proto.DataFrame.\n\n Parameters\n ----------\n data : pandas.DataFrame, numpy.ndarray, Iterable, dict, DataFrame, Styler, or None\n Something that is or can be converted to a dataframe.\n\n proto_df : proto.DataFrame\n Output. The protobuf for a Streamlit DataFrame proto.\n \"\"\"\n df = type_util.convert_anything_to_df(data)\n\n # Convert df into an iterable of columns (each of type Series).\n df_data = (df.iloc[:, col] for col in range(len(df.columns)))\n\n import numpy as np\n import pandas as pd\n\n _marshall_table(df_data, proto_df.data)\n _marshall_index(df.columns, proto_df.columns)\n _marshall_index(df.index, proto_df.index)\n\n styler = data if type_util.is_pandas_styler(data) else None\n _marshall_styles(proto_df.style, df, styler)\n\n\ndef _marshall_styles(proto_table_style, df, styler=None):\n \"\"\"Adds pandas.Styler styling data to a proto.DataFrame\n\n Parameters\n ----------\n proto_table_style : proto.TableStyle\n df : pandas.DataFrame\n styler : pandas.Styler holding styling data for the data frame, or\n None if there's no style data to marshall\n \"\"\"\n\n # NB: we're using protected members of Styler to get this data,\n # which is non-ideal and could break if Styler's interface changes.\n\n if styler is not None:\n styler._compute()\n translated_style = styler._translate()\n css_styles = _get_css_styles(translated_style)\n display_values = _get_custom_display_values(df, translated_style)\n else:\n # If we have no Styler, we just make an empty CellStyle for each cell\n css_styles = {}\n display_values = {}\n\n nrows, ncols = df.shape\n for col in range(ncols):\n proto_col = proto_table_style.cols.add()\n for row in range(nrows):\n proto_cell_style = proto_col.styles.add()\n\n for css in css_styles.get((row, col), []):\n proto_css = proto_cell_style.css.add()\n proto_css.property = css.property\n proto_css.value = css.value\n\n display_value = display_values.get((row, col), None)\n if display_value is not None:\n proto_cell_style.display_value = display_value\n proto_cell_style.has_display_value = True\n\n\ndef _get_css_styles(translated_style):\n \"\"\"Parses pandas.Styler style dictionary into a\n {(row, col): [CSSStyle]} dictionary\n \"\"\"\n # Create {(row, col): [CSSStyle]} from translated_style['cellstyle']\n # translated_style['cellstyle'] has the shape:\n # [\n # {\n # 'props': [['color', ' black'], ['background-color', 'orange'], ['', '']],\n # 'selector': 'row0_col0'\n # }\n # ...\n # ]\n\n cell_selector_regex = 
re.compile(r\"row(\\d+)_col(\\d+)\")\n\n css_styles = {}\n for cell_style in translated_style[\"cellstyle\"]:\n cell_selector = cell_style[\"selector\"] # a string of the form 'row0_col0'\n match = cell_selector_regex.match(cell_selector)\n if not match:\n raise RuntimeError(\n 'Failed to parse cellstyle selector \"%s\"' % cell_selector\n )\n row = int(match.group(1))\n col = int(match.group(2))\n css_declarations = []\n props = cell_style[\"props\"]\n for prop in props:\n if not isinstance(prop, list) or len(prop) != 2:\n raise RuntimeError('Unexpected cellstyle props \"%s\"' % prop)\n name = str(prop[0]).strip()\n value = str(prop[1]).strip()\n if name and value:\n css_declarations.append(CSSStyle(property=name, value=value))\n\n css_styles[(row, col)] = css_declarations\n\n return css_styles\n\n\ndef _get_custom_display_values(df, translated_style):\n \"\"\"Parses pandas.Styler style dictionary into a\n {(row, col): display_value} dictionary for cells whose display format\n has been customized.\n \"\"\"\n # Create {(row, col): display_value} from translated_style['body']\n # translated_style['body'] has the shape:\n # [\n # [ // row\n # { // cell or header\n # 'id': 'level0_row0' (for row header) | 'row0_col0' (for cells)\n # 'value': 1.329212\n # 'display_value': '132.92%'\n # ...\n # }\n # ]\n # ]\n\n default_formatter = df.style._display_funcs[(0, 0)]\n\n def has_custom_display_value(cell):\n value = str(cell[\"value\"])\n display_value = str(cell[\"display_value\"])\n if value == display_value:\n return False\n\n # Pandas applies a default style to all float values, regardless\n # of whether they have a user-specified display format. We test\n # for that here.\n return default_formatter(value) != display_value\n\n cell_selector_regex = re.compile(r\"row(\\d+)_col(\\d+)\")\n header_selector_regex = re.compile(r\"level(\\d+)_row(\\d+)\")\n\n display_values = {}\n for row in translated_style[\"body\"]:\n # row is a List[Dict], containing format data for each cell in the row,\n # plus an extra first entry for the row header, which we skip\n found_row_header = False\n for cell in row:\n cell_id = cell[\"id\"] # a string in the form 'row0_col0'\n if header_selector_regex.match(cell_id):\n if not found_row_header:\n # We don't care about processing row headers, but as\n # a sanity check, ensure we only see one per row\n found_row_header = True\n continue\n else:\n raise RuntimeError('Found unexpected row header \"%s\"' % cell)\n match = cell_selector_regex.match(cell_id)\n if not match:\n raise RuntimeError('Failed to parse cell selector \"%s\"' % cell_id)\n\n # Only store display values that differ from the cell's default\n if has_custom_display_value(cell):\n row = int(match.group(1))\n col = int(match.group(2))\n display_values[(row, col)] = str(cell[\"display_value\"])\n\n return display_values\n\n\ndef _marshall_index(pandas_index, proto_index):\n \"\"\"Convert an pandas.Index into a proto.Index.\n\n pandas_index - Panda.Index or related (input)\n proto_index - proto.Index (output)\n \"\"\"\n import pandas as pd\n import numpy as np\n\n if type(pandas_index) == pd.Index:\n _marshall_any_array(np.array(pandas_index), proto_index.plain_index.data)\n elif type(pandas_index) == pd.RangeIndex:\n min = pandas_index.min()\n max = pandas_index.max()\n if pd.isna(min) or pd.isna(max):\n proto_index.range_index.start = 0\n proto_index.range_index.stop = 0\n else:\n proto_index.range_index.start = min\n proto_index.range_index.stop = max + 1\n elif type(pandas_index) == pd.MultiIndex:\n for 
level in pandas_index.levels:\n _marshall_index(level, proto_index.multi_index.levels.add())\n if hasattr(pandas_index, \"codes\"):\n index_codes = pandas_index.codes\n else:\n # Deprecated in Pandas 0.24, do don't bother covering.\n index_codes = pandas_index.labels # pragma: no cover\n for label in index_codes:\n proto_index.multi_index.labels.add().data.extend(label)\n elif type(pandas_index) == pd.DatetimeIndex:\n if pandas_index.tz is None:\n current_zone = tzlocal.get_localzone()\n pandas_index = pandas_index.tz_localize(current_zone)\n proto_index.datetime_index.data.data.extend(pandas_index.astype(np.int64))\n elif type(pandas_index) == pd.TimedeltaIndex:\n proto_index.timedelta_index.data.data.extend(pandas_index.astype(np.int64))\n elif type(pandas_index) == pd.Int64Index:\n proto_index.int_64_index.data.data.extend(pandas_index)\n elif type(pandas_index) == pd.Float64Index:\n proto_index.float_64_index.data.data.extend(pandas_index)\n else:\n raise NotImplementedError(\"Can't handle %s yet.\" % type(pandas_index))\n\n\ndef _marshall_table(pandas_table, proto_table):\n \"\"\"Convert a sequence of 1D arrays into proto.Table.\n\n pandas_table - Sequence of 1D arrays which are AnyArray compatible (input).\n proto_table - proto.Table (output)\n \"\"\"\n for pandas_array in pandas_table:\n if len(pandas_array) == 0:\n continue\n _marshall_any_array(pandas_array, proto_table.cols.add())\n\n\ndef _marshall_any_array(pandas_array, proto_array):\n \"\"\"Convert a 1D numpy.Array into a proto.AnyArray.\n\n pandas_array - 1D arrays which is AnyArray compatible (input).\n proto_array - proto.AnyArray (output)\n \"\"\"\n import numpy as np\n\n # Convert to np.array as necessary.\n if not hasattr(pandas_array, \"dtype\"):\n pandas_array = np.array(pandas_array)\n\n # Only works on 1D arrays.\n if len(pandas_array.shape) != 1:\n raise ValueError(\"Array must be 1D.\")\n\n # Perform type-conversion based on the array dtype.\n if issubclass(pandas_array.dtype.type, np.floating):\n proto_array.doubles.data.extend(pandas_array)\n elif issubclass(pandas_array.dtype.type, np.timedelta64):\n proto_array.timedeltas.data.extend(pandas_array.astype(np.int64))\n elif issubclass(pandas_array.dtype.type, np.integer):\n proto_array.int64s.data.extend(pandas_array)\n elif pandas_array.dtype == np.bool:\n proto_array.int64s.data.extend(pandas_array)\n elif pandas_array.dtype == np.object:\n proto_array.strings.data.extend(map(str, pandas_array))\n # Setting a timezone changes (dtype, dtype.type) from\n # 'datetime64[ns]', <class 'numpy.datetime64'>\n # to\n # datetime64[ns, UTC], <class 'pandas._libs.tslibs.timestamps.Timestamp'>\n elif pandas_array.dtype.name.startswith(\"datetime64\"):\n # TODO(armando): Convert eveything to UTC not local timezone.\n if pandas_array.dt.tz is None:\n current_zone = tzlocal.get_localzone()\n pandas_array = pandas_array.dt.tz_localize(current_zone)\n proto_array.datetimes.data.extend(pandas_array.astype(np.int64))\n else:\n raise NotImplementedError(\"Dtype %s not understood.\" % pandas_array.dtype)\n\n\ndef add_rows(delta1, delta2, name=None):\n \"\"\"Concat the DataFrame in delta2 to the DataFrame in delta1.\n\n Parameters\n ----------\n delta1 : Delta\n delta2 : Delta\n name : str or None\n\n \"\"\"\n df1 = _get_data_frame(delta1, name)\n df2 = _get_data_frame(delta2, name)\n\n if len(df1.data.cols) == 0:\n if len(df2.data.cols) == 0:\n return\n df1.CopyFrom(df2)\n return\n\n # Copy Data\n if len(df1.data.cols) != len(df2.data.cols):\n raise ValueError(\"Dataframes have 
incompatible shapes\")\n for (col1, col2) in zip(df1.data.cols, df2.data.cols):\n _concat_any_array(col1, col2)\n\n # Copy index\n _concat_index(df1.index, df2.index)\n\n # Don't concat columns! add_rows should leave the dataframe with the same\n # number of columns as it had before.\n # DON'T DO: _concat_index(df1.columns, df2.columns)\n\n # Copy styles\n for (style_col1, style_col2) in zip(df1.style.cols, df2.style.cols):\n _concat_cell_style_array(style_col1, style_col2)\n\n\ndef _concat_index(index1, index2):\n \"\"\"Contact index2 into index1.\"\"\"\n # Special case if index1 is empty.\n if _index_len(index1) == 0:\n index1.Clear()\n index1.CopyFrom(index2)\n return\n\n # Otherwise, dispatch based on type.\n type1 = index1.WhichOneof(\"type\")\n type2 = index2.WhichOneof(\"type\")\n # This branch is covered with tests but pytest doesnt seem to realize it.\n if type1 != type2: # pragma: no cover\n raise ValueError(\n \"Cannot concatenate %(type1)s with %(type2)s.\"\n % {\"type1\": type1, \"type2\": type2}\n )\n\n if type1 == \"plain_index\":\n _concat_any_array(index1.plain_index.data, index2.plain_index.data)\n elif type1 == \"range_index\":\n index1.range_index.stop += index2.range_index.stop - index2.range_index.start\n elif type1 == \"multi_index\":\n raise NotImplementedError(\"Cannot yet concatenate MultiIndices.\")\n elif type1 == \"int_64_index\":\n index1.int_64_index.data.data.extend(index2.int_64_index.data.data)\n elif type1 == \"datetime_index\":\n index1.datetime_index.data.data.extend(index2.datetime_index.data.data)\n elif type1 == \"timedelta_index\":\n index1.timedelta_index.data.data.extend(index2.timedelta_index.data.data)\n else:\n raise NotImplementedError('Cannot concatenate \"%s\" indices.' % type1)\n\n\ndef _concat_any_array(any_array_1, any_array_2):\n \"\"\"Concat elements from any_array_2 into any_array_1.\"\"\"\n # Special case if any_array_1 is empty\n if _any_array_len(any_array_1) == 0:\n any_array_1.CopyFrom(any_array_2)\n return\n\n type1 = any_array_1.WhichOneof(\"type\")\n type2 = any_array_2.WhichOneof(\"type\")\n if type1 != type2:\n raise ValueError(\n \"Cannot concatenate %(type1)s with %(type2)s.\"\n % {\"type1\": type1, \"type2\": type2}\n )\n getattr(any_array_1, type1).data.extend(getattr(any_array_2, type2).data)\n\n\ndef _concat_cell_style_array(style_array1, style_array2):\n \"\"\"Concat elements from any_array_2 into any_array_1.\"\"\"\n # Special case if array1 is empty\n if len(style_array1.styles) == 0:\n style_array1.CopyFrom(style_array2)\n return\n\n style_array1.styles.extend(style_array2.styles)\n\n\ndef _get_data_frame(delta, name=None):\n \"\"\"Extract the dataframe from a delta.\"\"\"\n delta_type = delta.WhichOneof(\"type\")\n\n if delta_type == \"new_element\":\n element_type = delta.new_element.WhichOneof(\"type\")\n\n # Some element types don't support named datasets.\n if name and element_type in (\"data_frame\", \"table\", \"chart\"):\n raise ValueError(\"Dataset names not supported for st.%s\" % element_type)\n\n if element_type in \"data_frame\":\n return delta.new_element.data_frame\n elif element_type in \"table\":\n return delta.new_element.table\n elif element_type == \"chart\":\n return delta.new_element.chart.data\n elif element_type == \"vega_lite_chart\":\n chart_proto = delta.new_element.vega_lite_chart\n if name:\n return _get_or_create_dataset(chart_proto.datasets, name)\n elif len(chart_proto.datasets) == 1:\n # Support the case where the dataset name was randomly given by\n # the charting library (e.g. 
Altair) and the user has no\n # knowledge of it.\n return chart_proto.datasets[0].data\n else:\n return chart_proto.data\n # TODO: Support DeckGL. Need to figure out how to handle layer indices\n # first.\n\n elif delta_type == \"add_rows\":\n if delta.add_rows.has_name and name != delta.add_rows.name:\n raise ValueError('No dataset found with name \"%s\".' % name)\n return delta.add_rows.data\n else:\n raise ValueError(\"Cannot extract DataFrame from %s.\" % delta_type)\n\n\ndef _get_or_create_dataset(datasets_proto, name):\n for dataset in datasets_proto:\n if dataset.has_name and dataset.name == name:\n return dataset.data\n\n dataset = datasets_proto.add()\n dataset.name = name\n dataset.has_name = True\n return dataset.data\n\n\ndef _index_len(index):\n \"\"\"Return the number of elements in an index.\"\"\"\n index_type = index.WhichOneof(\"type\")\n if index_type == \"plain_index\":\n return _any_array_len(index.plain_index.data)\n elif index_type == \"range_index\":\n return index.range_index.stop - index.range_index.start\n elif index_type == \"multi_index\":\n if len(index.multi_index.labels) == 0:\n return 0\n else:\n return len(index.multi_index.labels[0].data)\n elif index_type == \"int_64_index\":\n return len(index.int_64_index.data.data)\n elif index_type == \"float_64_index\":\n return len(index.float_64_index.data.data)\n elif index_type == \"datetime_index\":\n return len(index.datetime_index.data.data)\n elif index_type == \"timedelta_index\":\n return len(index.timedelta_index.data.data)\n\n\ndef _any_array_len(any_array):\n \"\"\"Return the length of an any_array.\"\"\"\n array_type = any_array.WhichOneof(\"type\")\n the_array = getattr(any_array, array_type).data\n return len(the_array)\n"
]
| [
[
"pandas.isna",
"numpy.array"
]
]
|
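Note on the marshalling record above: the dtype dispatch in _marshall_any_array is order-sensitive, because numpy's np.timedelta64 is a subclass of np.signedinteger and so must be tested before the integer branch. A minimal, self-contained sketch of that dispatch (classify_dtype is an illustrative name of mine, not part of the original module):

import numpy as np

def classify_dtype(arr):
    # Mirrors the branch order of _marshall_any_array above (sketch only).
    arr = np.asarray(arr)
    if issubclass(arr.dtype.type, np.floating):
        return "doubles"
    if issubclass(arr.dtype.type, np.timedelta64):
        return "timedeltas"  # must precede the integer check: timedelta64 subclasses signedinteger
    if issubclass(arr.dtype.type, np.integer):
        return "int64s"
    if arr.dtype == bool:
        return "int64s"  # booleans are widened to int64
    if arr.dtype == object:
        return "strings"  # object columns are stringified
    raise NotImplementedError("Dtype %s not understood." % arr.dtype)

assert classify_dtype([1.5, 2.5]) == "doubles"
assert classify_dtype([True, False]) == "int64s"
assert classify_dtype(np.array(["a", None], dtype=object)) == "strings"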
JeremyBYU/UnrealRooftopLanding | [
"d7f17547b5daff8490cb6fe1582ed2e256faff16"
]
| [
"airsimcollect/helper/helper_transforms.py"
]
| [
"from airsim.types import Quaternionr, Vector3r\nimport ipdb\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport airsim\nimport quaternion\nimport numpy as np\nimport logging\nfrom os import path\nimport json\nimport warnings\nimport time\nfrom shapely.geometry import Polygon\nfrom airsimcollect.helper.helper_logging import logger\n\n\n# ignore quaternions warning about numba not being installed\n# ignore vispy warning about matplotlib 2.2+ issues\nwarnings.simplefilter(\"ignore\")\n\n\nLOGGER = logging.getLogger('AirSimVis')\n\nBASE_DIR = path.dirname(path.dirname(path.dirname(__file__)))\nDATA_DIR = path.join(BASE_DIR, 'assets', 'data')\nSEG_RGB_FILE = path.join(DATA_DIR, 'segmentation_colors.txt')\nTOLERANCE = 0.01\n\n\nREGEX_CATCH_ALL = \"[\\w*. ]*\"\n\nlidar_to_camera_quat = np.quaternion(0.5, -0.5, -0.5, -0.5)\n# Default no offset\nlidar_to_camera_pos = Vector3r(x_val=0.0, y_val=0.0, z_val=0.0)\n\n\ndef transform_to_cam(points, cam_pos=lidar_to_camera_pos, cam_quat=lidar_to_camera_quat, invert=False, points_in_unreal=False):\n temp = points.copy()\n points = np.ones(shape=(4, points.shape[0]))\n points[:3, :] = temp.transpose()\n\n if points_in_unreal:\n # Need to scale down to meters\n points[:3, :] = points[:3, :] / 100.0\n # Need to convert to NED coordinate for homogeneous transformation matrix\n temp = points.copy()\n points[0, :], points[1, :], points[2,\n :] = temp[0, :], temp[1, :], -temp[2, :]\n\n # Points are in NED, wide form\n # Now transform them\n hom_transform = create_homogenous_transform(\n cam_pos, cam_quat, invert=invert)\n\n point_cam_ned = hom_transform.dot(points)\n point_cam_hom = point_cam_ned.copy()\n # Ha, so that was where I was fixing the camera coordinates, not needed anymore\n # point_cam_hom[0, :], point_cam_hom[1, :], point_cam_hom[2,\n # :] = point_cam_ned[1, :], point_cam_ned[2, :], point_cam_ned[0, :]\n return point_cam_hom\n\n\ndef create_homogenous_transform(cam_pos=lidar_to_camera_pos, cam_quat=lidar_to_camera_quat, invert=False):\n cam_pos = np.array([cam_pos.x_val, cam_pos.y_val, cam_pos.z_val])\n rot_mat = quaternion.as_rotation_matrix(cam_quat)\n\n hom_tran = np.zeros(shape=(4, 4))\n hom_tran[:3, :3] = rot_mat\n hom_tran[:3, 3] = -1 * rot_mat.dot(cam_pos) if invert else cam_pos\n hom_tran[3, 3] = 1\n\n return hom_tran\n\n\ndef project_points_img(points, proj_mat, width, height, filter_pixels=True):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose()\n pixels = np.rint(pixels).astype(np.int)\n\n if filter_pixels:\n # Remove pixels that are outside the image\n mask_x = (pixels[:, 0] < width) & (pixels[:, 0] >= 0)\n mask_y = (pixels[:, 1] < height) & (pixels[:, 1] >= 0)\n mask = mask_x & mask_y\n # Return the pixels and points that are inside the image\n pixels = pixels[mask]\n else:\n mask = None\n pixels[:, 0] = np.clip(pixels[:, 0], 3, width - 3)\n pixels[:, 1] = np.clip(pixels[:, 1], 3, height - 3)\n return pixels, mask\n\n\ndef affine_points_pixels(points, affine, width, height):\n pixels = points[:,:2] - [affine[0, 0], affine[0, 1]]\n pixels = pixels / affine[0,2]\n pixels = np.floor(pixels).astype(np.int)\n\n # Remove pixels that are outside the image\n mask_x = (pixels[:, 0] < width) & (pixels[:, 0] >= 0)\n mask_y = (pixels[:, 1] < height) & (pixels[:, 1] >= 0)\n mask = mask_x & mask_y\n # Return the pixels and points that are inside the image\n pixels = pixels[mask]\n return pixels, mask\n\ndef get_transforms(img_meta, airsim_settings):\n cam_ori = img_meta['rotation']\n cam_quat 
= cam_ori if isinstance(cam_ori, np.quaternion) else np.quaternion(cam_ori.w_val, cam_ori.x_val, cam_ori.y_val, cam_ori.z_val)\n # cam_quat = np.quaternion(cam_ori.w_val, cam_ori.x_val,\n # cam_ori.y_val, cam_ori.z_val)\n cam_pos = img_meta['position']\n # lidar_to_camera_quat starts off as\n # * the rotation from LIDAR frame, to a CAMERA frame (x=forward -> x=right, z=down(ned) -> z=forward, y=right->y=down), \n # * If the point cloud data is in the LIDAR frame, then lidar_to_camera_quat will ALSO be rotated such that roll,pitch,yaw offsets between sensors will be taken into account.\n # * See helper.py Line 131\n\n if airsim_settings['lidar_local_frame']:\n # Point cloud is in lidar local frame, transforms already account for the sensor offsets\n transform_pos = airsim_settings['lidar_to_camera_pos']\n transform_rot = airsim_settings['lidar_to_camera_quat']\n invert = False\n else:\n # The data is already in the NED frame\n # Note that the NED frame axis corresponds to the Lidar Frame axes, so we can reuse `lidar_to_camera_quat`\n transform_pos = cam_pos\n transform_rot = airsim_settings['lidar_to_camera_quat'] * \\\n cam_quat.conjugate()\n invert = True\n return transform_pos, transform_rot, invert\n\ndef get_pixels_from_points(points, img_meta, airsim_settings):\n height = img_meta['height']\n width = img_meta['width']\n transform_pos, transform_rot, invert = get_transforms(\n img_meta, airsim_settings)\n\n proj_mat = create_projection_matrix(height, width)\n # Transform NED points to camera coordinate system (not NED)\n points_transformed = transform_to_cam(\n points, cam_pos=transform_pos, cam_quat=transform_rot, invert=invert, points_in_unreal=False)\n # Project Points into image, filter points outside of image\n pixels, mask = project_points_img(\n points_transformed, proj_mat, width, height)\n return pixels, mask\n\ndef classify_points(img, points, img_meta, airsim_settings):\n height = img_meta['height']\n width = img_meta['width']\n transform_pos, transform_rot, invert = get_transforms(\n img_meta, airsim_settings)\n\n proj_mat = create_projection_matrix(height, width)\n # Transform NED points to camera coordinate system (not NED)\n points_transformed = transform_to_cam(\n points, cam_pos=transform_pos, cam_quat=transform_rot, invert=invert, points_in_unreal=False)\n # Project Points into image, filter points outside of image\n pixels, mask = project_points_img(\n points_transformed, proj_mat, width, height)\n\n colors = get_colors_from_image(pixels, img)\n if len(img.shape) > 2:\n # converts colors to numbered class\n colors = colors2class(colors, airsim_settings['seg2rgb_map'])\n\n # 255 means nan point or point outside of image\n all_colors = np.ones((points.shape[0], ), dtype=np.uint8) * 255\n all_colors[mask] = colors\n\n return all_colors, mask, pixels\n\ndef points_to_pixels(points, img_meta, airsim_settings):\n height = img_meta['height']\n width = img_meta['width']\n transform_pos, transform_rot, invert = get_transforms(\n img_meta, airsim_settings)\n\n proj_mat = create_projection_matrix(height, width)\n # Transform NED points to camera coordinate system (not NED)\n points_transformed = transform_to_cam(\n points, cam_pos=transform_pos, cam_quat=transform_rot, invert=invert, points_in_unreal=False)\n # Project Points into image, filter points outside of image\n pixels, mask = project_points_img(\n points_transformed, proj_mat, width, height, filter_pixels=False)\n return pixels, mask\n\ndef polygon_to_pixel_coords(polygon, img_meta, airsim_settings):\n exterior_pixels, _ = 
points_to_pixels(np.array(polygon.exterior.coords), img_meta, airsim_settings)\n holes_pixels = []\n for hole in list(polygon.interiors):\n hole_pixel, _ = points_to_pixels(np.array(hole), img_meta, airsim_settings)\n holes_pixels.append(hole_pixel)\n\n poly_pixel = Polygon(shell=exterior_pixels, holes=holes_pixels)\n return poly_pixel\n\n\ndef get_image_data(client: airsim.MultirotorClient, compress=True):\n responses = client.simGetImages([airsim.ImageRequest(\n \"0\", airsim.ImageType.Segmentation, False, compress)])\n response: ImageResponse = responses[0]\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n channels = 4 if compress else 3\n img_rgba = img1d.reshape((response.height, response.width, channels))\n # airsim is actually bgr!!\n img_rgba[:, :, [0, 2]] = img_rgba[:, :, [2, 0]]\n\n img_meta = dict()\n img_meta['rotation'] = response.camera_orientation\n img_meta['position'] = response.camera_position\n img_meta['width'] = response.width\n img_meta['height'] = response.height\n\n return img_rgba, img_meta\n\ndef parse_lidarData(data):\n # reshape array of floats to array of [X,Y,Z]\n points = np.array(data.point_cloud, dtype=np.dtype('f4'))\n points = np.reshape(points, (int(points.shape[0]/3), 3))\n\n return points\n\n\ndef tol(x, y):\n return (abs(x - y) < TOLERANCE)\n\n\ndef same_pose(op, np):\n if op is None:\n return False\n return (tol(op.x_val, np.x_val) and tol(op.y_val, np.y_val) and tol(op.z_val, np.z_val))\n\n\ndef seg2rgb(number_of_classes=None, cmap_file=SEG_RGB_FILE):\n cmap_list, _ = get_seg2rgb_map(number_of_classes, cmap_file)\n cmap, norm = create_cmap(cmap_list)\n\n def colors(values):\n return cmap(norm(values))\n return colors\n\n\ndef get_seg2rgb_map(number_of_classes=None, cmap_file=SEG_RGB_FILE, normalized=True):\n with open(cmap_file, 'r') as f:\n all_rows = f.read().splitlines()\n seg2rgb_map = {}\n alpha = 1 if normalized else 255\n for row in all_rows:\n seg = row.split('\\t')[0]\n if normalized:\n rgb = list(map(lambda x: int(x)/255,\n row.split('\\t')[1][1:-1].split(',')))\n else:\n rgb = list(\n map(lambda x: int(x), row.split('\\t')[1][1:-1].split(',')))\n rgb.append(alpha)\n seg2rgb_map[int(seg)] = rgb\n\n cmap_list = list(seg2rgb_map.values())\n if number_of_classes is not None:\n cmap_list = cmap_list[:number_of_classes]\n return cmap_list, seg2rgb_map\n\n\ndef colors2class(colors, seg2rgb_map):\n if colors.ndim == 2:\n n_points = colors.shape[0]\n classes = np.zeros((n_points,), dtype=np.uint8)\n columns = colors.shape[1]\n for code, color in seg2rgb_map.items():\n mask = (colors == color[:columns])[:, 0]\n classes[mask] = int(code)\n return classes\n elif colors.ndim == 3:\n classes = np.zeros((*colors.shape[:2],), dtype=np.uint8)\n channels = colors.shape[2]\n for code, color in seg2rgb_map.items():\n mask = np.all(colors == color[:channels], axis=-1)\n classes[mask] = int(code)\n return classes\n\n\ndef project_ned_points(points, img_meta):\n # Transform and project point cloud into segmentation image\n cam_ori = img_meta['rotation']\n cam_pos = img_meta['position']\n height = img_meta['height']\n width = img_meta['width']\n proj_mat = create_projection_matrix(height, width)\n # Transform NED points to camera coordinate system (not NED)\n points_transformed = transform_to_cam(\n points, cam_pos, cam_ori, points_in_unreal=False)\n # Project Points into image, filter points outside of image\n pixels, points = project_points_img(\n points_transformed, proj_mat, width, height, points)\n return pixels\n\n\n# def 
classify_points(points, img_meta, img, seg2rgb_map):\n# # Transform and project point cloud into segmentation image\n# cam_ori = img_meta['rotation']\n# cam_pos = img_meta['position']\n# height = img_meta['height']\n# width = img_meta['width']\n# proj_mat = create_projection_matrix(height, width)\n# # Transform NED points to camera coordinate system (not NED)\n# points_transformed = transform_to_cam(\n# points, cam_pos, cam_ori, points_in_unreal=False)\n# # Project Points into image, filter points outside of image\n# pixels, points = project_points_img(\n# points_transformed, proj_mat, width, height, points)\n\n# # Ensure we have valid points\n# if points.shape[0] < 1:\n# print(\"No points for lidar in segmented image\")\n# return None\n# # Check if we have an RGBA image or just a 2D numpy array of classes\n# remove_time = 0\n# color = get_colors_from_image(pixels, img, normalize=False)\n# if len(img.shape) > 2:\n# # converts colors to numbered class\n# t1 = time.time()\n# color = colors2class(color, seg2rgb_map)\n# remove_time = (time.time() - t1) * 1000\n\n# points = np.column_stack((points, color))\n# return points, remove_time\n\n\ndef create_cmap(cmap_list):\n number_of_classes = len(cmap_list)\n cmap = mpl.colors.LinearSegmentedColormap.from_list(\n 'Unreal', cmap_list, number_of_classes)\n # define the bins and normalize\n bounds = np.linspace(0, number_of_classes, number_of_classes+1)\n norm = mpl.colors.BoundaryNorm(bounds, number_of_classes)\n return cmap, norm\n\n\ndef height2rgb(height_range=[-25, 25], cmap_name='viridis'):\n cmap = plt.get_cmap(cmap_name)\n norm = mpl.colors.Normalize(height_range[0], height_range[1])\n\n def colors(values):\n return cmap(norm(values))\n\n return colors\n\n\ndef map_colors(values, cmap, norm):\n return cmap(norm(values))\n\n\n# def transform_to_cam(points, cam_pos, cam_ori, points_in_unreal=False):\n# temp = points.copy()\n# points = np.ones(shape=(4, points.shape[0]))\n# points[:3, :] = temp.transpose()\n\n# if points_in_unreal:\n# # Need to scale down to meters\n# points[:3, :] = points[:3, :] / 100.0\n# # Need to convert to NED coordinate for homogoneous transformation matrix\n# temp = points.copy()\n# points[0, :], points[1, :], points[2,\n# :] = temp[0, :], temp[1, :], -temp[2, :]\n\n# # Points are in NED, wide form\n# # Now transform them\n# hom_transform = create_homogenous_transform(cam_pos, cam_ori)\n\n# point_cam_ned = hom_transform.dot(points)\n# # print(point_cam_ned)\n# point_cam_hom = point_cam_ned.copy()\n# point_cam_hom[0, :], point_cam_hom[1, :], point_cam_hom[2,\n# :] = point_cam_ned[1, :], point_cam_ned[2, :], point_cam_ned[0, :]\n# # print(point_cam_hom)\n# return point_cam_hom\n\n\n# def project_points_img(points, proj_mat, width, height, points_orig):\n# pixels = proj_mat.dot(points)\n# pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n# # Remove pixels that are outside the image\n# mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n# mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n# # Return the pixels and points that are inside the image\n# pixels = pixels[mask_x & mask_y]\n# points_orig = points_orig[mask_x & mask_y, :]\n# return pixels, points_orig\n\n\n# def create_homogenous_transform(cam_pos, rot, invert=True):\n\n# i_ = -1.0 if invert else 1.0\n# inv_rot_q = np.quaternion(\n# rot.w_val, i_ * rot.x_val, i_ * rot.y_val, i_ * rot.z_val)\n# cam_pos = np.array([cam_pos.x_val, cam_pos.y_val, cam_pos.z_val])\n\n# inv_rot_mat = quaternion.as_rotation_matrix(inv_rot_q)\n\n# hom_tran 
= np.zeros(shape=(4, 4))\n# hom_tran[:3, :3] = inv_rot_mat\n# hom_tran[:3, 3] = -1 * inv_rot_mat.dot(cam_pos) if invert else cam_pos\n# hom_tran[3, 3] = 1\n\n# return hom_tran\n\n\ndef get_colors_from_image(pixels, img, normalize=True):\n \"\"\"Extract pixel values from img\n\n Arguments:\n pixels {ndarray (N,2)} -- Pixel (N,2) array, (x,y)\n img {ndarray (M,N,4)} -- Img, Y,X,RGBA\n\n Returns:\n [type] -- [description]\n \"\"\"\n # Notice the flip in axes as well as dividing by 255.0 to give floats\n if normalize and len(img.shape) > 2:\n colors = np.squeeze(img[pixels[:, 1], pixels[:, 0], :]) / 255.0\n else:\n if len(img.shape) > 2:\n colors = np.squeeze(img[pixels[:, 1], pixels[:, 0], :])\n else:\n colors = np.squeeze(img[pixels[:, 1], pixels[:, 0]])\n return colors\n\n\ndef create_projection_matrix(height, width):\n f = width / 2.0\n cx = width / 2.0\n cy = height / 2.0\n proj_mat = np.array([[f, 0, cx, 0], [0, f, cy, 0], [0, 0, 1, 0]])\n return proj_mat\n\n\ndef set_all_to_zero(client, code=0):\n found = client.simSetSegmentationObjectID(REGEX_CATCH_ALL, code, True)\n if not found:\n LOGGER.warning(\n \"Segmentation - Could not find %s in Unreal Environment to set to code %r\", REGEX_CATCH_ALL, code)\n\n\ndef set_segmentation_ids(client, regex_codes):\n for regex_str, code in regex_codes:\n found = client.simSetSegmentationObjectID(regex_str, code, True)\n if not found:\n LOGGER.warning(\n \"Segmentation - Could not find %s in Unreal Environment to set to code %r\", regex_str, code)\n\n\ndef get_segmentation_codes(file_name):\n with open(file_name) as f:\n data = json.load(f)\n seg_codes = data.get('segmentation_codes', [])\n return seg_codes\n\n# def unreal_to_ned(x, y, z, pitch, roll, yaw):\n# pos = Vector3r(x / 100, y / 100, -z / 100)\n# rot = to_quaternion(pitch, roll, yaw)\n# return pos, rot\n"
]
| [
[
"matplotlib.colors.BoundaryNorm",
"numpy.linspace",
"numpy.clip",
"numpy.squeeze",
"numpy.rint",
"matplotlib.pyplot.get_cmap",
"numpy.quaternion",
"numpy.ones",
"matplotlib.colors.Normalize",
"numpy.dtype",
"numpy.all",
"numpy.fromstring",
"numpy.floor",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"numpy.zeros",
"numpy.divide"
]
]
|
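As a quick sanity check on the camera model used in the record above: create_projection_matrix builds a pinhole matrix with focal length width/2 and the principal point at the image centre, and project_points_img then applies the perspective divide. A hedged, self-contained sketch (pinhole_project is an illustrative name; it assumes homogeneous camera-frame input of shape (4, N), as produced by transform_to_cam):

import numpy as np

def pinhole_project(points_cam, width=640, height=480):
    # Same intrinsics convention as create_projection_matrix above.
    f, cx, cy = width / 2.0, width / 2.0, height / 2.0
    proj = np.array([[f, 0, cx, 0],
                     [0, f, cy, 0],
                     [0, 0, 1, 0]])
    pix = proj @ points_cam
    pix = (pix[:2, :] / pix[2, :]).T  # perspective divide
    return np.rint(pix).astype(int)

# A point 2 m ahead of the camera, 1 m right, 0.5 m down projects
# right of and below the image centre (320, 240):
pt = np.array([[1.0], [0.5], [2.0], [1.0]])
print(pinhole_project(pt))  # [[480 320]]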
ailnicka/molecular_design_with_beam_search | [
"fbe47bfd2717d898cd96ca80a12111987c3e4db4"
]
| [
"src/python/scaffold_hopping_whales/code/mol_properties.py"
]
| [
"# ======================================================================================================================\n# * Weighted Holistic Atom Localization and Entity Shape (WHALES) descriptors *\n# v. 1, May 2018\n# ----------------------------------------------------------------------------------------------------------------------\n# This file contains all the necessary files to handle molecular properties and coordinates.\n#\n# Francesca Grisoni, May 2018, ETH Zurich & University of Milano-Bicocca, [email protected]\n# please cite as xxxx\n# ======================================================================================================================\n\nimport numpy as np\n\n\ndef get_coordinates_and_prop(mol, property_name='partial_charges', do_charge=True):\n \"\"\"\n Extracts all of the useful chemical information, i.e., the partial charge and the coordinates and formats it\n for atomic centred covariance matrix calculation.\n ====================================================================================================================\n :param\n mol: rdkit molecule\n do_charge: if True, the charges are computed\n do_geom: if True, it calculates MMF 3D coordinates\n :returns\n coords (n_atoms x 3): geometrical matrix (x-y-z coords)\n w (n_atoms x 1): partial charge array\n ====================================================================================================================\n Francesca Grisoni, 05/2018, v. beta\n ETH Zurich\n \"\"\"\n\n # molecule preparation\n mol, property_name, err = prepare_mol(mol, property_name, do_charge)\n\n if err == 0:\n # pre-allocation\n n_at = mol.GetNumAtoms() # num atoms\n coords = np.zeros((n_at, 3)) # init coords\n w = np.zeros((n_at, 1)) # init weights\n\n # coordinates and property\n for atom in range(n_at): # loops over atoms, gets 3D coordinate matrix\n\n # gets atomic positions\n pos = mol.GetConformer().GetAtomPosition(atom)\n coords[atom, ] = [pos.x, pos.y, pos.z]\n\n # gets atomic properties\n w[atom] = mol.GetAtomWithIdx(atom).GetProp(property_name)\n \n # checks the weight values computed and throws and error if they are all 0\n if all(v == 0 for v in w):\n err = 1\n else:\n coords = []\n w = []\n\n return coords, w, err\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\ndef prepare_mol(mol, property_name, do_charge):\n \"\"\"\n Sets atomic properties if they are specified in the sdf, otherwise computes them. If specified, computes 3D coordinates\n using MMF. The default number of iterations is 200, but it is progressively increased to 5000 (with a step of 500)\n in case convergence is not reached.\n ====================================================================================================================\n :param\n mol: molecule to be analyzed (from rdkit supplier)\n property_name: name of the property to be used\n do_charge: if True, partial charge is computed\n do_geom: if True, molecular geometry is optimized\n :return:\n mol: molecule with property and 3D coordinates (H depleted)\n property_name: updated on the basis of the settings\n ====================================================================================================================\n Francesca Grisoni, 12/2016, v. 
alpha\n ETH Zurich\n \"\"\"\n\n from rdkit.Chem import AllChem as Chem\n err = 0\n\n # partial charges\n if do_charge is False:\n if property_name is not '':\n err = check_mol(mol, property_name, do_charge)\n if err == 0:\n # prepares molecule\n # mol = Chem.AddHs(mol)\n mol = Chem.RemoveHs(mol)\n n_at = mol.GetNumAtoms()\n # takes properties\n list_prop = mol.GetPropsAsDict()\n string_values = list_prop[property_name] # extracts the property according to the set name\n string_values = string_values.split(\"\\n\")\n w = np.asarray(map(float, string_values))\n else:\n mol = Chem.AddHs(mol)\n n_at = mol.GetNumAtoms()\n w = np.ones((n_at, 1))/n_at\n w = np.asarray(map(float, w))\n property_name = 'equal_w'\n err = 0\n # extract properties\n for atom in range(n_at):\n mol.GetAtomWithIdx(atom).SetDoubleProp(property_name, w[atom])\n\n mol = Chem.RemoveHs(mol)\n\n # Gasteiger-Marsili Charges\n elif (do_charge is True) and (err is 0):\n Chem.ComputeGasteigerCharges(mol)\n property_name = '_GasteigerCharge'\n err = check_mol(mol, property_name, do_charge)\n\n return mol, property_name, err\n\n\n# ----------------------------------------------------------------------------------------------------------------------\ndef check_mol(mol, property_name, do_charge):\n \"\"\"\n checks if the property is annotated and gives 0 if it is\n \"\"\"\n n_at = mol.GetNumAtoms()\n if do_charge is False:\n list_prop = mol.GetPropsAsDict()\n string_values = list_prop[property_name] # extracts the property according to the set name\n if string_values == '' or string_values == ['']:\n err = 1\n else:\n err = 0\n else:\n from rdkit.Chem import AllChem as Chem\n err = 0\n atom = 0\n while atom < n_at:\n value = mol.GetAtomWithIdx(atom).GetProp(property_name)\n # checks for error (-nan, inf, nan)\n if value == '-nan' or value == 'nan' or value == 'inf':\n err = 1\n break\n\n atom += 1\n\n # checks for the number of atoms\n if n_at < 4:\n err = 1\n\n return err\n\n"
]
| [
[
"numpy.zeros",
"numpy.ones"
]
]
|
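For context on the do_charge=True path in prepare_mol above: RDKit's Gasteiger-Marsili charges are written onto each atom as the string property '_GasteigerCharge', which is why the WHALES code later reads them back with GetProp. A minimal usage sketch (standard RDKit API, independent of the WHALES code):

from rdkit import Chem
from rdkit.Chem import AllChem

mol = Chem.MolFromSmiles("CCO")       # ethanol, heavy atoms only
AllChem.ComputeGasteigerCharges(mol)  # annotates '_GasteigerCharge' on every atom
charges = [float(a.GetProp("_GasteigerCharge")) for a in mol.GetAtoms()]
print(charges)  # heavy-atom partial charges; implicit-H contributions are stored
                # separately on each atom as '_GasteigerHCharge'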
Rlamboll/silicone | [
"bceb8a438dfe53fba22b6ad387a10fca78eeb2a7"
]
| [
"tests/unit/test_stats.py"
]
| [
"import os\n\nimport numpy as np\nimport pandas as pd\nimport pyam\nimport pytest\nimport scipy.interpolate\n\nimport silicone.stats as stats\n\n_mc = \"model_c\"\n_sa = \"scen_a\"\n_sb = \"scen_b\"\n_sc = \"scen_c\"\n_eco2 = \"Emissions|CO2\"\n_gtc = \"Gt C/yr\"\n_ech4 = \"Emissions|CH4\"\n_mtch4 = \"Mt CH4/yr\"\n_msrvu = [\"model\", \"scenario\", \"region\", \"variable\", \"unit\"]\nsimple_df = pd.DataFrame(\n [\n [_mc, _sa, \"World\", _eco2, _gtc, 0, 200, 1],\n [_mc, _sb, \"World\", _eco2, _gtc, 2, 100, -1],\n [_mc, _sa, \"World\", _ech4, _mtch4, 0, 300, 1],\n [_mc, _sb, \"World\", _ech4, _mtch4, 2, 600, -1],\n [_mc, _sc, \"World\", _eco2, _gtc, np.nan, np.nan, 0.5],\n [_mc, _sc, \"World\", _ech4, _mtch4, np.nan, np.nan, 0.5],\n ],\n columns=_msrvu + [2010, 2030, 2050],\n)\nsimple_df = pyam.IamDataFrame(simple_df)\n\n\[email protected](\n \"xs,ys\",\n (\n (np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1])),\n (np.array([0, 0, 1, 1]), np.array([0, 1, 1, 0])),\n (np.array([0, 1, 0, 1]), np.array([0, 1, 1, 0])),\n ),\n)\ndef test_rolling_window_find_quantiles(xs, ys):\n desired_quantiles = [0.4, 0.5, 0.6]\n # Firstly take the window centre at a lead value of 0. With a\n # decay_length_factor=20 and nwindows=10, the points at a lead value\n # of 0 are 10 window centres away hence receive a weight of 1/2 relative\n # to the points at a lead value of 0.\n # with the points in order of follow values then ordered by lead\n # values where lead values are the same we have i.e. the points are:\n # points: [(0, 0), (1, 0), (0, 1), (1, 1)]\n # we have\n # unnormalised weights: [2, 1, 2, 1]\n # normalised weights are: [1/3, 1/6, 1/3, 1/6]\n # cumulative weights are hence: [2/6, 3/6, 5/6, 1]\n # subtracting half the weights we get: [1/6, 5/12, 4/6, 11/12]\n # Hence above quantiles of quantiles of 5/12, we have a gradient (in\n # follower value - quantile space) = (1 - 0) / (4/6 - 5/12)\n # thus our relationship is (quant - 5/12) * grad\n quantiles = stats.rolling_window_find_quantiles(xs, ys, desired_quantiles, 11, 20)\n assert np.allclose(\n quantiles.iloc[0].tolist(),\n np.array([0, (0.5 - 5 / 12), (0.6 - 5 / 12)]) * 1 / (4 / 6 - 5 / 12),\n )\n # At the far side, we have switched the weights around, so that cumulative weights\n # are 1/12 and 1/3 for y = 0 and 7 / 12 and 5 / 12 for y = 1.\n assert np.allclose(\n quantiles.iloc[-1].tolist(), [(0.4 - 1 / 3) * 4, (0.5 - 1 / 3) * 4, 1]\n )\n\n xs = np.array([0, 0, 1, 1])\n ys = np.array([0, 0, 1, 1])\n quantiles = stats.rolling_window_find_quantiles(xs, ys, desired_quantiles, 11, 20)\n # And x = 0, a gradient of 4 starting from 1/2 at q > 0.5\n assert np.allclose(quantiles.iloc[0].tolist(), [0, 0, 0.1 * 4],)\n # at x = 1 we have the exact opposite\n assert np.allclose(quantiles.iloc[-1, :].tolist(), [(0.4 - 1 / 4) * 4, 1, 1],)\n\n desired_quantiles = [0, 0.5, 1]\n quantiles = stats.rolling_window_find_quantiles(\n np.array([1]), np.array([1]), desired_quantiles, 11, 20\n )\n assert all(quantiles.iloc[0, :] == [1, 1, 1])\n\n desired_quantiles = [0, 0.5, 1]\n quantiles = stats.rolling_window_find_quantiles(\n np.array([1, 1]), np.array([1, 1]), desired_quantiles, 11, 20\n )\n assert all(quantiles.iloc[0, :] == [1, 1, 1])\n\n\ndef test_rolling_window_find_quantiles_same_points():\n # If all the x-values are the same, this should just be our interpretation of\n # quantiles at all points\n xs = np.array([1] * 11)\n ys = np.array(range(11))\n desired_quantiles = [0, 0.4, 0.5, 0.6, 0.85, 1]\n quantiles = stats.rolling_window_find_quantiles(xs, ys, 
desired_quantiles, 11, 20)\n\n cumsum_weights = (0.5 + np.arange(11)) / 11\n calculated_quantiles = []\n for quant in desired_quantiles:\n calculated_quantiles.append(\n scipy.interpolate.interp1d(\n cumsum_weights,\n ys,\n bounds_error=False,\n fill_value=(ys[0], ys[-1]),\n assume_sorted=True,\n )(quant)\n )\n\n assert np.allclose(quantiles.squeeze().tolist(), calculated_quantiles)\n\n\ndef test_rolling_window_find_quantiles_one():\n # If all the x-values are the same, this should just be our interpretation of\n # quantiles at all points\n xs = np.array([1])\n ys = np.array([2])\n desired_quantiles = [0, 0.4, 0.5, 0.6, 0.85, 1]\n quantiles = stats.rolling_window_find_quantiles(\n xs, ys, desired_quantiles, 11, 2 * 9\n )\n\n assert np.allclose(quantiles.values.squeeze(), 2)\n\n\ndef test_calc_all_emissions_correlations_works(tmpdir):\n # We test that this saves a file in the correct place, with the correct results\n test_folder = os.path.join(tmpdir, \"output\")\n if not os.path.isdir(test_folder):\n os.makedirs(test_folder)\n stats.calc_all_emissions_correlations(\n simple_df, list(set(simple_df[\"year\"])), test_folder\n )\n expected = {2010: 1, 2030: -1, 2050: 1}\n for year in list(set(simple_df[\"year\"])):\n for file_string in [\"gases_correlation\", \"gases_rank_correlation\"]:\n test_file = os.path.join(test_folder, file_string + \"_{}.csv\".format(year))\n assert os.path.isfile(test_file)\n test_results = pd.read_csv(test_file)\n assert np.isnan(test_results.iloc[0].iloc[1])\n assert test_results.iloc[1].iloc[1] == expected.get(year)\n assert test_results.iloc[0].iloc[2] == expected.get(year)\n os.remove(test_file)\n assert not os.path.isfile(test_file)\n for file_string in [\n \"time_av_absolute_correlation\",\n \"time_av_absolute_rank_correlation\",\n \"time_variance_rank_correlation\",\n ]:\n test_file = os.path.join(\n test_folder,\n file_string\n + \"_{}_to_{}.csv\".format(\n min(set(simple_df[\"year\"])), max(set(simple_df[\"year\"]))\n ),\n )\n assert os.path.isfile(test_file)\n test_results = pd.read_csv(test_file)\n if file_string == \"time_variance_rank_correlation\":\n # All values are zeros since the abs value is 1 in all cases (+/-1)\n assert np.allclose(test_results.iloc[0].iloc[1], 0)\n assert np.allclose(test_results.iloc[1].iloc[1], 0)\n assert np.allclose(test_results.iloc[0].iloc[2], 0)\n else:\n assert np.isnan(test_results.iloc[0].iloc[1])\n assert np.allclose(test_results.iloc[1].iloc[1], 1)\n assert np.allclose(test_results.iloc[0].iloc[2], 1)\n os.remove(test_file)\n assert not os.path.isfile(test_file)\n # Check that the variable counts are correct too.\n test_file = os.path.join(test_folder, \"variable_counts.csv\")\n assert os.path.isfile(test_file)\n test_results = pd.read_csv(test_file)\n assert np.allclose(test_results[\"0\"].iloc[0], 3)\n assert np.allclose(test_results[\"0\"].iloc[1], 3)\n os.remove(test_file)\n assert not os.path.isfile(test_file)\n\n\ndef test_calc_all_emissions_numerical(tmpdir):\n # We construct a specific situation and check that the numerical answers are correct\n test_folder = os.path.join(tmpdir, \"output\")\n if not os.path.isdir(test_folder):\n os.makedirs(test_folder)\n # We establish a more complicated set of values\n numerical_df = simple_df.copy()\n numerical_df.data[\"model\"] = numerical_df.data[\"model\"] + numerical_df.data[\n \"year\"\n ].map(lambda x: str(x))\n numerical_df.data[\"year\"] = 2010\n numerical_df = pyam.IamDataFrame(numerical_df.data)\n # Perform the calculations\n 
stats.calc_all_emissions_correlations(numerical_df, [2010], test_folder)\n # The order of the elements is identical for the different cases, no sorting needed\n xs = numerical_df.filter(variable=_eco2).data[\"value\"].values\n ys = numerical_df.filter(variable=_ech4).data[\"value\"].values\n\n def calc_correl(x, y):\n xmean = sum(x) / len(x)\n ymean = sum(y) / len(y)\n return (\n sum((x - xmean) * (y - ymean))\n / (sum((x - xmean) ** 2) * sum((y - ymean) ** 2)) ** 0.5\n )\n\n correl = calc_correl(xs, ys)\n test_file = os.path.join(test_folder, \"gases_correlation\" + \"_{}.csv\".format(2010))\n test_results = pd.read_csv(test_file)\n assert np.isclose(test_results.iloc[1].iloc[1], correl)\n os.remove(test_file)\n x_ord = np.argsort(xs)\n y_ord = np.argsort(ys)\n rank_correl = calc_correl(x_ord, y_ord)\n test_file = os.path.join(\n test_folder, \"gases_rank_correlation\" + \"_{}.csv\".format(2010)\n )\n test_results = pd.read_csv(test_file)\n assert np.isclose(test_results.iloc[1].iloc[1], rank_correl, rtol=1e-4)\n os.remove(test_file)\n for file_string in [\n \"time_av_absolute_correlation\",\n \"time_av_absolute_rank_correlation\",\n \"time_variance_rank_correlation\",\n ]:\n test_file = os.path.join(\n test_folder,\n file_string\n + \"_{}_to_{}.csv\".format(\n min(set(numerical_df[\"year\"])), max(set(numerical_df[\"year\"]))\n ),\n )\n test_results = pd.read_csv(test_file)\n some_cor = rank_correl if file_string.__contains__(\"rank\") else correl\n if file_string == \"time_variance_rank_correlation\":\n assert np.isnan(test_results.iloc[1].iloc[1])\n else:\n assert np.isclose(test_results.iloc[1].iloc[1], some_cor, rtol=1e-4)\n os.remove(test_file)\n test_file = os.path.join(test_folder, \"variable_counts.csv\")\n assert os.path.isfile(test_file)\n test_results = pd.read_csv(test_file)\n assert np.allclose(test_results[\"0\"].iloc[0], 7)\n assert np.allclose(test_results[\"0\"].iloc[1], 7)\n os.remove(test_file)\n assert not os.path.isfile(test_file)\n # Now do a test for just the variance. This requires multiple years\n numerical_df[\"value\"] = numerical_df[\"value\"] + 10\n numerical_df.append(simple_df, inplace=True)\n numerical_df[\"year\"] = numerical_df[\"year\"].map(lambda x: int(x))\n rank_cors = []\n years = [2010, 2030, 2050]\n for year in years:\n xs = numerical_df.filter(variable=_eco2, year=year).data[\"value\"].values\n ys = numerical_df.filter(variable=_ech4, year=year).data[\"value\"].values\n x_ord = np.argsort(xs)\n y_ord = np.argsort(ys)\n rank_cors.append(abs(calc_correl(x_ord, y_ord)))\n expect_var = np.var(rank_cors, ddof=1)\n stats.calc_all_emissions_correlations(numerical_df, years, test_folder)\n for file_string in [\n \"time_av_absolute_correlation\",\n \"time_av_absolute_rank_correlation\",\n \"time_variance_rank_correlation\",\n ]:\n test_file = os.path.join(\n test_folder,\n file_string\n + \"_{}_to_{}.csv\".format(\n min(set(simple_df[\"year\"])), max(set(simple_df[\"year\"]))\n ),\n )\n test_results = pd.read_csv(test_file)\n if file_string == \"time_variance_rank_correlation\":\n assert np.isclose(expect_var, test_results.iloc[1].iloc[1])\n os.remove(test_file)\n"
]
| [
[
"pandas.read_csv",
"numpy.allclose",
"numpy.isnan",
"numpy.arange",
"pandas.DataFrame",
"numpy.var",
"numpy.argsort",
"numpy.array",
"numpy.isclose"
]
]
|
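The calc_correl helper defined inside the tests above is a plain Pearson correlation. A self-contained restatement with two sanity checks (illustrative only, not part of the silicone package):

import numpy as np

def calc_correl(x, y):
    # Pearson correlation, written out as in the test helper above.
    xmean, ymean = x.mean(), y.mean()
    num = ((x - xmean) * (y - ymean)).sum()
    den = (((x - xmean) ** 2).sum() * ((y - ymean) ** 2).sum()) ** 0.5
    return num / den

x = np.array([0.0, 1.0, 2.0, 3.0])
assert np.isclose(calc_correl(x, 2 * x + 1), 1.0)  # perfect positive linear relation
assert np.isclose(calc_correl(x, -x), -1.0)        # perfect negative linear relation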
LLLjun/learn-to-cluster | [
"3b834589923baf72523e288cc462e0df591b99c1"
]
| [
"utils/misc.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport json\nimport pickle\nimport random\nimport numpy as np\n\n\nclass TextColors:\n HEADER = '\\033[35m'\n OKBLUE = '\\033[34m'\n OKGREEN = '\\033[32m'\n WARNING = '\\033[33m'\n FATAL = '\\033[31m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nclass Timer():\n def __init__(self, name='task', verbose=True):\n self.name = name\n self.verbose = verbose\n\n def __enter__(self):\n self.start = time.time()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.verbose:\n print('[Time] {} consumes {:.4f} s'.format(\n self.name,\n time.time() - self.start))\n return exc_type is None\n\n\ndef set_random_seed(seed, cuda=False):\n import torch\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if cuda:\n torch.cuda.manual_seed_all(seed)\n\n\ndef l2norm(vec):\n vec /= np.linalg.norm(vec, axis=1).reshape(-1, 1)\n return vec\n\n\ndef is_l2norm(features, size):\n rand_i = random.choice(range(size))\n norm_ = np.dot(features[rand_i, :], features[rand_i, :])\n return abs(norm_ - 1) < 1e-6\n\n\ndef is_spmat_eq(a, b):\n return (a != b).nnz == 0\n\n\ndef aggregate(features, adj, times):\n dtype = features.dtype\n for i in range(times):\n features = adj * features\n return features.astype(dtype)\n\n\ndef read_probs(path, inst_num, feat_dim, dtype=np.float32, verbose=False):\n assert (inst_num > 0 or inst_num == -1) and feat_dim > 0\n count = -1\n if inst_num > 0:\n count = inst_num * feat_dim\n probs = np.fromfile(path, dtype=dtype, count=count)\n if feat_dim > 1:\n probs = probs.reshape(inst_num, feat_dim)\n if verbose:\n print('[{}] shape: {}'.format(path, probs.shape))\n return probs\n\n\ndef read_meta(fn_meta, start_pos=0, verbose=True):\n lb2idxs = {}\n idx2lb = {}\n with open(fn_meta) as f:\n for idx, x in enumerate(f.readlines()[start_pos:]):\n lb = int(x.strip())\n if lb not in lb2idxs:\n lb2idxs[lb] = []\n lb2idxs[lb] += [idx]\n idx2lb[idx] = lb\n\n inst_num = len(idx2lb)\n cls_num = len(lb2idxs)\n if verbose:\n print('[{}] #cls: {}, #inst: {}'.format(fn_meta, cls_num, inst_num))\n return lb2idxs, idx2lb\n\n\ndef write_meta(ofn, idx2lb, inst_num=None):\n if len(idx2lb) == 0:\n print('[warn] idx2lb is empty! 
skip write idx2lb to {}'.format(ofn))\n return\n\n if inst_num is None:\n inst_num = max(idx2lb.keys()) + 1\n cls_num = len(set(idx2lb.values()))\n\n idx2newlb = {}\n current_lb = 0\n discard_lb = 0\n map2newlb = {}\n for idx in range(inst_num):\n if idx in idx2lb:\n lb = idx2lb[idx]\n if lb in map2newlb:\n newlb = map2newlb[lb]\n else:\n newlb = current_lb\n map2newlb[lb] = newlb\n current_lb += 1\n else:\n newlb = cls_num + discard_lb\n discard_lb += 1\n idx2newlb[idx] = newlb\n assert current_lb == cls_num, '{} vs {}'.format(current_lb, cls_num)\n\n print('#discard: {}, #lbs: {}'.format(discard_lb, current_lb))\n print('#inst: {}, #class: {}'.format(inst_num, cls_num))\n if ofn is not None:\n print('save label to', ofn)\n with open(ofn, 'w') as of:\n for idx in range(inst_num):\n of.write(str(idx2newlb[idx]) + '\\n')\n\n pred_labels = intdict2ndarray(idx2newlb)\n return pred_labels\n\n\ndef write_feat(ofn, features):\n print('save features to', ofn)\n features.tofile(ofn)\n\n\ndef dump2npz(ofn, data, force=False):\n if os.path.exists(ofn) and not force:\n return\n np.savez_compressed(ofn, data=data)\n\n\ndef dump2json(ofn, data, force=False):\n if os.path.exists(ofn) and not force:\n return\n\n def default(obj):\n if isinstance(obj, np.int32):\n return int(obj)\n elif isinstance(obj, np.int64):\n return int(obj)\n elif isinstance(obj, np.float32):\n return float(obj)\n elif isinstance(obj, set) or isinstance(obj, np.ndarray):\n return list(obj)\n else:\n raise TypeError(\"Unserializable object {} of type {}\".format(\n obj, type(obj)))\n\n with open(ofn, 'w') as of:\n json.dump(data, of, default=default)\n\n\ndef dump2pkl(ofn, data, force=False):\n if os.path.exists(ofn) and not force:\n return\n with open(ofn, 'wb') as of:\n pickle.dump(data, of)\n\n\ndef dump_data(ofn, data, force=False, verbose=False):\n if os.path.exists(ofn) and not force:\n if verbose:\n print(\n '{} already exists. 
Set force=True to overwrite.'.format(ofn))\n return\n mkdir_if_no_exists(ofn)\n if ofn.endswith('.json'):\n dump2json(ofn, data, force=force)\n elif ofn.endswith('.pkl'):\n dump2pkl(ofn, data, force=force)\n else:\n dump2npz(ofn, data, force=force)\n\n\ndef load_npz(fn):\n return np.load(fn, allow_pickle=True)['data']\n\n\ndef load_pkl(fn):\n return pickle.load(open(fn, 'rb'))\n\n\ndef load_json(fn):\n return json.load(open(fn, 'r'))\n\n\ndef load_data(ofn):\n if ofn.endswith('.json'):\n return load_json(ofn)\n elif ofn.endswith('.pkl'):\n return load_pkl(ofn)\n else:\n return load_npz(ofn)\n\n\ndef labels2clusters(lb2idxs):\n clusters = [idxs for _, idxs in lb2idxs.items()]\n return clusters\n\n\ndef clusters2labels(clusters):\n idx2lb = {}\n for lb, cluster in enumerate(clusters):\n for v in cluster:\n idx2lb[v] = lb\n return idx2lb\n\n\ndef intdict2ndarray(d, default_val=-1):\n arr = np.zeros(len(d)) + default_val\n for k, v in d.items():\n arr[k] = v\n return arr\n\n\ndef list2dict(labels, ignore_value=-1):\n idx2lb = {}\n for idx, lb in enumerate(labels):\n if lb == ignore_value:\n continue\n idx2lb[idx] = lb\n return idx2lb\n\n\ndef mkdir_if_no_exists(path, subdirs=[''], is_folder=False):\n if path == '':\n return\n for sd in subdirs:\n if sd != '' or is_folder:\n d = os.path.dirname(os.path.join(path, sd))\n else:\n d = os.path.dirname(path)\n if not os.path.exists(d):\n os.makedirs(d)\n\n\ndef rm_suffix(s, suffix=None):\n if suffix is None:\n return s[:s.rfind('.')]\n else:\n return s[:s.rfind(suffix)]\n\n\ndef rand_argmax(v):\n assert len(v.squeeze().shape) == 1\n return np.random.choice(np.flatnonzero(v == v.max()))\n\n\ndef create_temp_file_if_exist(path, suffix=''):\n path_with_suffix = path + suffix\n if not os.path.exists(path_with_suffix):\n return path_with_suffix\n else:\n i = 0\n while i < 1000:\n temp_path = '{}_{}'.format(path, i) + suffix\n i += 1\n if not os.path.exists(temp_path):\n return temp_path\n"
]
| [
[
"numpy.dot",
"numpy.fromfile",
"numpy.random.seed",
"torch.manual_seed",
"numpy.linalg.norm",
"numpy.savez_compressed",
"torch.cuda.manual_seed_all",
"numpy.load"
]
]
|
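The clusters2labels / intdict2ndarray pair in the record above round-trips between a list-of-clusters representation and a dense per-index label array. A small usage sketch (function bodies restated from the file, trimmed to essentials):

import numpy as np

def clusters2labels(clusters):
    # idx -> cluster id, as in the loop version above
    return {v: lb for lb, cluster in enumerate(clusters) for v in cluster}

def intdict2ndarray(d, default_val=-1):
    arr = np.zeros(len(d)) + default_val
    for k, v in d.items():
        arr[k] = v
    return arr

clusters = [[0, 2], [1, 3, 4]]
idx2lb = clusters2labels(clusters)  # {0: 0, 2: 0, 1: 1, 3: 1, 4: 1}
print(intdict2ndarray(idx2lb))      # [0. 1. 0. 1. 1.]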
eisenjulian/bert | [
"9070c136e5a1d716472fd723880a8e8f15d74bbc"
]
| [
"run_classifier.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. 
This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n\n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. 
The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n\n We use this class instead of `None` because treating `None` as padding\n batches could cause silent errors.\n \"\"\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n\nclass MLDocProcessor(DataProcessor):\n \"\"\"Processor for the MLDoc data set.\"\"\"\n\n def __init__(self):\n self.trn_size = 1000 \n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, f\"train.{self.trn_size}.tsv\"), quotechar='\"'), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n 
self._read_tsv(os.path.join(data_dir, \"test.tsv\"), quotechar='\"'), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\"), quotechar='\"'), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\", \"3\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass CLSProcessor(DataProcessor):\n \"\"\"Processor for the Cross-Lingual Sentiment data set.\"\"\" \n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, f\"train.tsv\"), quotechar='\"'), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\"), quotechar='\"'), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\"), quotechar='\"'), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n text_b = tokenization.convert_to_unicode(line[2])\n\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def 
get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()\n\n\ndef file_based_input_fn_builder(input_file, seq_length, 
is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n 
all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"xnli\": XnliProcessor,\n \"mldoc\": MLDocProcessor,\n \"cls\": CLSProcessor,\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n 
train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. 
These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(PaddingInputExample())\n\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples), num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder)\n\n result = estimator.predict(input_fn=predict_input_fn)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, prediction) in enumerate(result):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = \"\\t\".join(\n str(class_probability)\n for class_probability in probabilities) + \"\\n\"\n writer.write(output_line)\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
]
| [
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.metrics.accuracy",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.reduce_sum",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.nn.dropout",
"tensorflow.metrics.mean",
"tensorflow.matmul",
"tensorflow.gfile.Open",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.train.Scaffold",
"tensorflow.reduce_mean",
"tensorflow.flags.DEFINE_string",
"tensorflow.variable_scope"
]
]
|
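Note on the entry above: `_truncate_seq_pair` in run_classifier.py trims whichever token list is currently longer, one token at a time, so a sequence pair always fits the `[CLS]`/`[SEP]` budget. A minimal standalone sketch of that behavior (the token lists are invented for illustration):

def truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Pop from the longer list until the pair fits, mirroring run_classifier.py.
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
tokens_b = ["no", "it", "is", "not", "."]
truncate_seq_pair(tokens_a, tokens_b, 8)
print(tokens_a, tokens_b)  # 4 tokens remain on each side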
ACTCollaboration/mnms | [
"4374f0efef5e1f5d36af472d633861a4dc52bde6"
]
| [
"mnms/inpaint.py"
]
| [
"import numpy as np\nimport warnings\n\nfrom optweight import mat_utils\nfrom pixell import enmap, utils\n\nfrom mnms import utils as m_utils\n\ndef catalog_to_mask(catalog, shape, wcs, radius=np.radians(4/60)):\n \"\"\"\n Convert catalog with DEC, RA values to binary point source mask.\n\n Parameters\n ----------\n catalog : (2, N) array\n DEC and RA values (in radians) for each point source.\n shape : tuple\n Shape of output map.\n wcs : astropy.wcs.wcs object\n WCS of output map.\n radius : float, optional\n Radius of holes in radians.\n\n Returns\n -------\n mask : (Ny, Nx) enmap\n Binary mask. False in circles around point sources.\n \"\"\"\n\n pix = enmap.sky2pix(shape[-2:], wcs, catalog.T).astype(int)\n mask = enmap.zeros(shape[-2:], wcs=wcs, dtype=bool)\n mask[pix[0],pix[1]] = True\n\n return ~enmap.grow_mask(mask, radius)\n\ndef inpaint_ivar(ivar, mask, thumb_width=40):\n \"\"\"\n Inpaint small unobserved patches in inverse variance map with mean \n of surrounding pixels. Done inplace!\n\n Parameters\n ----------\n ivar : (..., Ny, Nx) enmap\n Inverse variance maps to be inpainted.\n mask : (Ny, Nx) bool array\n Only inpaint where mask is True.\n thumb_width : float, optional\n Width in arcmin of thumbnail around each cut pixel.\n\n Notes\n -----\n Meant for small amount (~1000s) of clustered unobserved pixels, i.e. \n erroneously cut point sources, do not use to inpaint large patches.\n \"\"\"\n\n mask = mask.astype(bool)\n\n for idxs_pre in np.ndindex(ivar.shape[:-2]):\n\n ivar_view = ivar[idxs_pre]\n indices = np.argwhere((ivar_view == 0) & mask)\n\n for idxs in indices:\n\n # Could be done already.\n if ivar_view[idxs[0],idxs[1]] != 0:\n continue\n\n ivar_thumb = extract_thumbnail(ivar_view, idxs[0], idxs[1], 40)\n mask_thumb = extract_thumbnail(mask, idxs[0], idxs[1], 40)\n \n mask_inpaint = (ivar_thumb == 0) & mask_thumb\n mask_est = (ivar_thumb != 0) & mask_thumb\n ivar_thumb[mask_inpaint] = np.mean(ivar_thumb[mask_est])\n\n insert_thumbnail(ivar_thumb, ivar_view, idxs[0], idxs[1])\n \ndef inpaint_ivar_catalog(ivar, mask, catalog, thumb_width=120, ivar_threshold=4,\n inplace=False):\n \"\"\"\n Inpaint a noise map at locations specified by a source catalog.\n\n Parameters\n ----------\n ivar : (Ny, Nx) or (..., 1 Ny, Nx) enmap\n Inverse variance map.\n mask : (Ny, Nx) bool array\n Mask, True in observed regions. If not bool, will be converted to bool.\n catalog : (2, N) array\n DEC and RA values (in radians) for each point source.\n thumb_width : float, optional\n Width in arcmin of thumbnail around each source.\n ivar_threshold : float, optional\n Inpaint ivar at pixels where the ivar map is below this \n number of median absolute deviations below the median ivar in the \n thumbnail. To inpaint erroneously cut regions around point sources\n inplace : bool, optional\n Modify input ivar map.\n\n Returns\n -------\n ivar : (..., 3, Ny, Nx) enmap\n Inpainted map. 
Copy depending on `inplace`.\n \"\"\"\n\n if not inplace:\n ivar = ivar.copy()\n\n if mask.dtype != bool:\n mask = m_utils.get_mask_bool(mask)\n\n # Convert to pixel units, this ignores the curvature of the sky.\n nthumb = utils.nint((thumb_width / 60) / np.min(np.abs(ivar.wcs.wcs.cdelt)))\n nedge = 3 * nthumb // 7\n\n # Convert dec, ra to pix y, x and loop over catalog.\n pix = enmap.sky2pix(ivar.shape[-2:], ivar.wcs, catalog).astype(int)\n\n for pix_y, pix_x in zip(*pix):\n\n if not mask[pix_y,pix_x]:\n # Do not inpaint sources outside mask.\n continue\n\n ivarslice = extract_thumbnail(ivar, pix_y, pix_x, nthumb)\n maskslice = extract_thumbnail(mask, pix_y, pix_x, nthumb)\n\n # Set pixels below threshold to False.\n mask_ivar = mask_threshold(ivarslice, ivar_threshold, mask=maskslice)\n # We do not want to inpaint ivar outside the global mask.\n mask_ivar[...,~maskslice] = True \n\n # Skip inpainting ivar if no bad ivar pixels were found in middle part.\n if np.all(mask_ivar[...,nedge:-nedge,nedge:-nedge]):\n continue\n\n # Grow the False part by 1 arcmin to get slighly more uniform mask.\n mask_ivar = enmap.enmap(mask_ivar, ivarslice.wcs, copy=False)\n for idxs in np.ndindex(ivarslice.shape[:-3]):\n if np.any(mask_ivar[idxs]):\n continue\n mask_ivar[idxs] = enmap.shrink_mask(mask_ivar[idxs], np.radians(1 / 60))\n\n # Invert such that to-be-inpainted parts become True.\n mask_ivar_inpaint = ~mask_ivar\n mask_ivar_inpaint[...,~maskslice] = False\n\n # Inpaint too small ivar pixels with average value in thumbnail.\n # Loop over outer dims (splits)\n for idxs in np.ndindex(ivarslice.shape[:-3]):\n\n ivarslice[idxs][mask_ivar_inpaint[idxs]] = np.mean(\n ivarslice[idxs][mask_ivar[idxs]])\n\n insert_thumbnail(ivarslice, ivar, pix_y, pix_x)\n \n return ivar\n \ndef inpaint_noise_catalog(imap, ivar, mask, catalog, radius=6, thumb_width=120,\n ivar_threshold=None, seed=None, inplace=False):\n \"\"\"\n Inpaint a noise map at locations specified by a source catalog.\n\n Parameters\n ----------\n imap : (..., 3, Ny, Nx) enmap\n Maps to be inpainted.\n ivar : (Ny, Nx) or (..., 1 Ny, Nx) enmap\n Inverse variance map. If not 2d, shape[:-3] must match imap.\n mask : (Ny, Nx) bool array\n Mask, True in observed regions.\n catalog : (2, N) array\n DEC and RA values (in radians) for each point source.\n radius : float, optional\n Radius in arcmin of inpainted region around each source.\n thumb_width : float, optional\n Width in arcmin of thumbnail around each source.\n ivar_threshold : float, optional\n Also inpaint ivar and maps at pixels where the ivar map is below this \n number of median absolute deviations below the median ivar in the \n thumbnail. To inpaint erroneously cut regions around point sources\n seed : int or np.random._generator.Generator object, optional\n Seed or generator for random numbers.\n inplace : bool, optional\n Modify input map.\n\n Returns\n -------\n omap : (..., 3, Ny, Nx) enmap\n Inpainted map.\n\n Raises\n ------\n ValueError\n If radius exceeds thumb_width / 2.\n\n Notes\n -----\n Inpainting is not done using formal contrained realization, but using the\n approximation that the 1/f noise is approximated by smoothed version of \n surrounding pixels. White noise is drawn from ivar map. 
\n Main point is that it is much faster than constrained realizations.\n \"\"\"\n if radius > thumb_width // 2:\n raise ValueError(f'Radius exceeds thumbnail radius : '\n f'{radius} > {thumb_width // 2}')\n\n if not inplace:\n imap = imap.copy()\n shape_in = imap.shape\n imap = mat_utils.atleast_nd(imap, 3)\n mask = mask.astype(bool)\n\n # Convert to pixel units, this ignores the curvature of the sky.\n nthumb = utils.nint((thumb_width / 60) / np.min(np.abs(imap.wcs.wcs.cdelt)))\n nradius = utils.nint((radius / 60) / np.min(np.abs(imap.wcs.wcs.cdelt)))\n\n # Determine apod mask. Use 1/10th of width.\n mask_apod = enmap.apod(np.ones((nthumb, nthumb), dtype=imap.dtype), \n utils.nint(nthumb / 10))\n\n # Create circular mask in center and second mask around first mask.\n xx, yy = np.mgrid[-nthumb//2:nthumb//2,-nthumb//2:nthumb//2]\n rr = np.sqrt(xx ** 2 + yy ** 2)\n mask_src = rr <= nradius\n mask_est = (rr > nradius) & (rr < int(nradius * 1.5))\n\n # Determine smoothing scale.\n fwhm = np.radians(radius / 60)\n\n # Convert dec, ra to pix y, x and loop over catalog.\n pix = enmap.sky2pix(imap.shape[-2:], imap.wcs, catalog).astype(int)\n\n for cat_idx, (pix_y, pix_x) in enumerate(zip(*pix)):\n\n if not (pix_y >= 0 and pix_y < mask.shape[-2]) or not (pix_x >= 0 and pix_x < mask.shape[-1]):\n # Do not inpaint sources that are outside the footprint.\n continue\n\n if not mask[pix_y,pix_x]:\n # Do not inpaint sources outside mask.\n continue\n\n mslice = extract_thumbnail(imap, pix_y, pix_x, nthumb)\n ivarslice = extract_thumbnail(ivar, pix_y, pix_x, nthumb)\n\n if not (ivarslice[..., mask_src] != 0).sum() > 0:\n # Do not inpaint sources for which there are no observed pixels around the source\n warnings.warn(f'No good ivars in mask_src at cat_idx {cat_idx}, pix {(pix_y, pix_x)}', RuntimeWarning)\n continue\n\n if ivar_threshold:\n\n maskslice = extract_thumbnail(mask, pix_y, pix_x, nthumb)\n\n # Inpaint too small ivar pixels with average value in thumbnail.\n mask_ivar = mask_threshold(ivarslice, ivar_threshold, mask=maskslice)\n # We do not want to inpaint ivar outside the global mask.\n mask_ivar[...,~maskslice] = True \n\n # Skip inpainting ivar if no bad ivar pixels were found.\n if ivar_threshold and not np.all(mask_ivar):\n\n # Grow mask by 1 arcmin to get slighly more uniform mask.\n mask_ivar = enmap.enmap(mask_ivar, mslice.wcs, copy=False)\n for idxs in np.ndindex(ivarslice.shape[:-3]):\n if not np.any(~mask_ivar[idxs]):\n continue\n mask_ivar[idxs] = ~enmap.grow_mask(~mask_ivar[idxs], np.radians(1 / 60))\n \n mask_ivar_inpaint = ~mask_ivar\n mask_ivar_inpaint[...,~maskslice] = False\n mask_ivar_est = mask_ivar_inpaint.copy()\n\n # Loop over outer dims (splits)\n for idxs in np.ndindex(ivarslice.shape[:-3]):\n \n ivarslice[idxs][mask_ivar_inpaint[idxs]] = np.mean(\n ivarslice[idxs][mask_ivar[idxs]])\n\n # Also inpaint bad ivar pixels in imap, in addition to the src.\n mask_ivar_inpaint[idxs] |= mask_src\n\n if np.any(mask_ivar_inpaint[idxs]):\n mask_ivar_est[idxs] = enmap.grow_mask(\n mask_ivar_inpaint[idxs], np.radians(1.5 * radius / 60))\n mask_ivar_est[idxs] ^= mask_ivar_inpaint[idxs]\n mask_ivar_est[idxs] |= mask_est\n \n inpaint(mslice, ivarslice, mask_apod, mask_ivar_inpaint,\n mask_ivar_est, fwhm, seed=seed) \n else:\n inpaint(mslice, ivarslice, mask_apod, mask_src, mask_est,\n fwhm, seed=seed) \n\n insert_thumbnail(mslice, imap, pix_y, pix_x)\n \n return imap.reshape(shape_in)\n\ndef inpaint(imap, ivar, mask_apod, mask_src, mask_est, fwhm, seed=None):\n \"\"\"\n Inpaint a region 
in the map (inplace). Uses smoothing to approximate\n 1/f noise correlations between inpainted region and rest of map.\n \n Parameters\n ----------\n imap : (..., 3, Ny, Nx) enmap\n Input map(s)\n ivar : (..., 1, Ny, Nx) enmap\n Inverse variance maps.\n mask_apod : (Ny, Nx) array\n Apodized edges of mask.\n mask_src : (Ny, Nx) or (..., 1, Ny, Nx) bool array\n Mask that is True for region to be inpainted. Either 2D or \n matching shape of ivar array.\n mask_est : (Ny, Nx) or (..., 1, Ny, Nx) bool array\n Mask that is True for region whose average value is used for \n filling the source region. Either 2D or matching shape of ivar \n array. Should match shape of mask_src\n fwhm : float\n FWHM in radians of smoothing scale.\n seed : int or np.random._generator.Generator object, optional\n Seed or generator for random numbers.\n\n Raises\n ------\n ValueError\n If mask_src or mask_est are not 2D and not match shape of ivar.\n If mask_src and mask_est have different shapes.\n If leading dimensions of imap and ivar are not the same.\n \"\"\"\n\n if mask_src.shape != mask_est.shape:\n raise ValueError('Mismatch shapes mask_src and mask_est : '\n f'{mask_src.shape} != {mask_est.shape}')\n\n if mask_src.ndim != 2:\n if mask_src.shape != ivar.shape:\n raise ValueError('mask_src should be 2D or match shape ivar, '\n f'got {mask_src.shape}, while ivar.shape = {ivar.shape}')\n\n imap = mat_utils.atleast_nd(imap, 4)\n ivar = mat_utils.atleast_nd(ivar, 4)\n\n if imap.shape[:-3] != ivar.shape[:-3]:\n raise ValueError(\n f'Shape imap {imap.shape} inconsistent with ivar {ivar.shape}')\n\n mask_map_src = np.ones(ivar.shape, dtype=bool) * mask_src\n mask_map_est = np.ones(ivar.shape, dtype=bool) * mask_est\n\n # Loop over outer dimensions of imap (splits).\n for idxs in np.ndindex(imap.shape[:-3]):\n \n mask_src = mask_map_src[idxs][0]\n mask_est = mask_map_est[idxs][0]\n\n # Set average value to that of surrounding pixels.\n imap[idxs][:,mask_src] = np.mean(imap[idxs][:,mask_est], axis=-1, keepdims=True)\n\n # Smooth to get some large scale correlations into the mask.\n imap_sm = enmap.smooth_gauss(imap[idxs], fwhm / np.sqrt(8 * np.log(2)))\n imap[idxs][:,mask_src] = imap_sm[:,mask_src]\n\n # If ivar pixels are zero (i.e. cut) inside circle, inpaint with mean.\n ivar_src = ivar[idxs][:,mask_src]\n bad_ivar = ivar_src == 0\n good_ivar = ivar_src != 0\n assert np.sum(good_ivar) > 0, 'No good ivars in mask_src'\n ivar_src[bad_ivar] = np.mean(ivar_src[good_ivar])\n\n # Add white noise to inpainted region. Q and U get sqrt(2) higher noise.\n sqrtvar = ivar_src ** -0.5\n rng = np.random.default_rng(seed)\n noise = rng.normal(size=((3,) + (np.sum(mask_src),)))\n noise_amps = np.asarray([1, np.sqrt(2), np.sqrt(2)])\n sqrtvar = sqrtvar * noise_amps[:,np.newaxis]\n noise *= sqrtvar\n imap[idxs][:,mask_src] += noise\n\ndef mask_threshold(imap, threshold, mask=None):\n \"\"\"\n Mask pixels that are below a given number of median absolute\n deviations below the median value in the map. \n\n Parameters\n ----------\n imap : (..., Ny, Nx) enmap\n Input map(s)\n threshold : float\n Number of median absolute deviations\n mask : (Ny, Nx) bool array, optional\n True for observed pixels. 
Used to avoid biasing median\n when map has unobserved pixels.\n\n Returns\n -------\n mask_threshold : (..., Ny, Nx) bool array\n False for pixels below threshold.\n \"\"\"\n \n mask_threshold = np.zeros(imap.shape, dtype=bool)\n\n imap_good = imap[...,mask]\n\n median = np.median(imap_good, axis=-1, keepdims=True)\n absdev = np.abs(imap_good - median)\n mdev = np.median(absdev, axis=-1, keepdims=True)\n \n mask_threshold[...,mask] = imap_good > (median - threshold * mdev)\n\n return mask_threshold\n\ndef extract_thumbnail(imap, pix_y, pix_x, nthumb):\n \"\"\"\n Extract square thumbnail from map.\n\n Parameters\n ----------\n imap : (..., Ny, Nx) enmap\n Input map.\n pix_y : int\n Y pixel index of center.\n pix_x : int\n X pixel index of center.\n nthumb : int\n Width of square in pixels.\n\n Returns\n -------\n thumbnail : (..., nthumb, nthumb) enmap\n Copy of input in thumbnail.\n\n Notes\n -----\n If center is too close to edge, missing pixels are set to zero.\n \"\"\"\n\n ymin = pix_y - nthumb // 2\n ymax = ymin + nthumb\n xmin = pix_x - nthumb // 2\n xmax = xmin + nthumb\n\n box = np.asarray([[ymin, xmin], [ymax, xmax]], dtype=int)\n\n return enmap.padslice(imap, box, default=0.)\n\ndef insert_thumbnail(thumbnail, imap, pix_y, pix_x):\n \"\"\"\n Insert square thumbnail into map.\n\n Parameters\n ----------\n thumbnail : (..., thumb, nthumb) enmap\n Thumbnail.\n imap : (..., Ny, Nx) enmap\n Map in which thumbnail will be inserted.\n pix_y : int\n Y pixel index of center.\n pix_x : int\n X pixel index of center.\n \"\"\"\n\n if thumbnail.shape[:-2] != imap.shape[:-2]:\n raise ValueError('Leading dimensions of thumbnail and map do not match '\n f'got : {thumbnail.shape} and {imap.shape}')\n\n if thumbnail.shape[-2] != thumbnail.shape[-1]:\n raise ValueError('Only square thumbnails supported, got shape : '\n f'{thumbnail.shape}')\n nthumb = thumbnail.shape[-1]\n\n ymin = pix_y - nthumb // 2\n ymax = ymin + nthumb\n xmin = pix_x - nthumb // 2\n xmax = xmin + nthumb\n\n # Place thumbnail back into imap. Do not exceed bounds of imap.\n ymin_safe = max(ymin, 0)\n ymax_safe = min(ymax, imap.shape[-2])\n xmin_safe = max(xmin, 0)\n xmax_safe = min(xmax, imap.shape[-1])\n slice_safe = np.s_[...,ymin_safe:ymax_safe,xmin_safe:xmax_safe]\n\n # Slice into thumbnail also needs to be updated with safe bounds.\n shift_y = pix_y - nthumb // 2\n shift_x = pix_x - nthumb // 2\n slice_safe_thumb = np.s_[...,ymin_safe-shift_y:ymax_safe-shift_y,\n xmin_safe-shift_x:xmax_safe-shift_x]\n imap[slice_safe] = thumbnail[slice_safe_thumb]\n"
]
| [
[
"numpy.log",
"numpy.radians",
"numpy.sqrt",
"numpy.abs",
"numpy.asarray",
"numpy.median",
"numpy.argwhere",
"numpy.all",
"numpy.ones",
"numpy.mean",
"numpy.any",
"numpy.ndindex",
"numpy.zeros",
"numpy.sum",
"numpy.random.default_rng"
]
]
|
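Note on the entry above: `mask_threshold` flags pixels that fall more than `threshold` median absolute deviations below the median of the observed values. A 1D illustration of that cut (the values are made up; threshold = 4 matches the `inpaint_ivar_catalog` default):

import numpy as np

ivar = np.array([1.0, 1.1, 0.9, 1.05, 0.01, 0.95])
median = np.median(ivar)
mad = np.median(np.abs(ivar - median))   # median absolute deviation
keep = ivar > (median - 4 * mad)         # False marks pixels to inpaint
print(keep)                              # only the 0.01 pixel is flagged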
kmpaul/PyReshaper | [
"75a81213a8d2c36db7fc6bc51604491a6c902ab9"
]
| [
"tests/checks.py"
]
| [
"\"\"\"\nCopyright 2020, University Corporation for Atmospheric Research\nSee LICENSE.txt for details\n\"\"\"\n\nimport os\n\nimport numpy as np\n\nfrom pyreshaper import iobackend\n\nfrom .data import config\n\n\ndef check_outfile(infiles, prefix, tsvar, suffix, metadata, once, **kwds):\n \"\"\"\n Check that a PyReshaper generated output file is correct\n \"\"\"\n\n assertions = {}\n\n def _assert(key, value):\n assertions[key] = value\n\n outfile = '{0}{1}{2}'.format(prefix, tsvar, suffix)\n _assert('{0!r} exists'.format(outfile), os.path.exists(outfile))\n if not os.path.exists(outfile):\n return assertions\n ncout = iobackend.NCFile(outfile)\n\n if 'meta1d' in kwds and kwds['meta1d'] is True:\n metadata.append('time')\n\n if 'metafile' in kwds and kwds['metafile']:\n metafile = iobackend.NCFile('metafile.nc')\n _assert(\n '{0}: Extra time-invariant metadata found'.format(outfile),\n set(config.xtimvars).issubset(set(ncout.variables.keys())),\n )\n for v in config.xtimvars:\n _assert(\n '{0}: Extra time-invariant metadata dimensions'.format(outfile),\n ncout.variables[v].dimensions == ('lat', 'lon'),\n )\n else:\n metafile = None\n\n series_step = 0\n for infile in infiles:\n _assert('{0!r} exists'.format(infile), os.path.exists(infile))\n if not os.path.exists(infile):\n return assertions\n\n ncinp = iobackend.NCFile(infile)\n nsteps = ncinp.dimensions['time']\n if infile == infiles[0]:\n scvars = [v for v in ncinp.variables if ncinp.variables[v].dimensions == ()]\n tivars = [v for v in ncinp.variables if 'time' not in ncinp.variables[v].dimensions]\n tsvars = [\n v\n for v in ncinp.variables\n if 'time' in ncinp.variables[v].dimensions and v not in metadata\n ]\n if once:\n tsvars.append('once')\n\n outdims = {\n 'lat': ncinp.dimensions['lat'],\n 'lon': ncinp.dimensions['lon'],\n 'strlen': ncinp.dimensions['strlen'],\n }\n\n outmeta = [v for v in ncinp.variables if v not in tsvars]\n\n _assert('{0}: variable {1!r} found in input'.format(outfile, tsvar), tsvar in tsvars)\n _assert(\n '{0}: global attribute names equal'.format(outfile), ncout.ncattrs == ncinp.ncattrs\n )\n for a in set(ncout.ncattrs).intersection(set(ncinp.ncattrs)):\n _assert(\n '{0}: global attribute {1} values equal'.format(outfile, a),\n ncout.getncattr(a) == ncinp.getncattr(a),\n )\n for d, v in outdims.items():\n _assert('{0}: {1!r} in dimensions'.format(outfile, d), d in ncout.dimensions)\n _assert('{0}: dimensions[{1!r}]'.format(outfile, d), ncout.dimensions[d] == v)\n _assert(\"{0}: 'time' in dimensions\".format(outfile), 'time' in ncout.dimensions)\n _assert(\"{0}: 'time' unlimited\".format(outfile), ncout.unlimited('time'))\n if once:\n all_vars = outmeta if tsvar == 'once' else [tsvar]\n else:\n all_vars = [tsvar] + outmeta\n if metafile:\n all_vars += config.xtimvars\n _assert(\n '{0}: variable names same'.format(outfile),\n set(ncout.variables.keys()) == set(all_vars),\n )\n for v in all_vars:\n if v in scvars:\n expected = ()\n elif v in ncinp.dimensions:\n expected = (v,)\n elif v in tivars + config.xtimvars:\n expected = ('lat', 'lon')\n elif v in config.chvars:\n expected = ('time', 'strlen')\n else:\n expected = ('time', 'lat', 'lon')\n _assert(\n '{0}: {1}.dimemsions equal'.format(outfile, v),\n ncout.variables[v].dimensions == expected,\n )\n\n for v in all_vars:\n if v in config.xtimvars:\n expected = metafile.variables[v].get_value()\n else:\n expected = ncinp.variables[v].get_value()\n if v == 'time':\n oslice = slice(series_step, series_step + nsteps)\n actual = ncout.variables[v][oslice]\n elif 
'time' in ncout.variables[v].dimensions:\n oslice = [slice(None)] * (2 if v in config.chvars else 3)\n oslice[0] = slice(series_step, series_step + nsteps)\n actual = ncout.variables[v][tuple(oslice)]\n else:\n actual = ncout.variables[v].get_value()\n _assert(('{0}: {1!r} values equal').format(outfile, v), np.all(actual == expected))\n\n series_step += nsteps\n ncinp.close()\n if metafile:\n metafile.close()\n ncout.close()\n\n return assertions\n\n\ndef check_var_in(var, fname):\n ncf = iobackend.NCFile(fname)\n value = var in ncf.variables\n ncf.close()\n return value\n"
]
| [
[
"numpy.all"
]
]
|
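Note on the entry above: `check_outfile` records every named check in a dict instead of raising, so one run can report all failures at once. A minimal sketch of that pattern with invented check names:

assertions = {}

def _assert(key, value):
    # Store the outcome under a descriptive key rather than asserting inline.
    assertions[key] = value

_assert("'out.nc' exists", False)
_assert("dimensions equal", True)
failed = [key for key, ok in assertions.items() if not ok]
print(failed)  # ["'out.nc' exists"]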
wqrray/nerf_baseline | [
"eb0777530a09baf4451da3e3688ba5d7820d6e8d"
]
| [
"dtu_dataset.py"
]
| [
"# This file is borrowed from NeuS project\nimport torch\nimport torch.nn.functional as F\nimport cv2 as cv\nimport numpy as np\nimport os\nfrom glob import glob\nfrom scipy.spatial.transform import Rotation as Rot\nfrom scipy.spatial.transform import Slerp\n\n\n# This function is borrowed from IDR: https://github.com/lioryariv/idr\ndef load_K_Rt_from_P(filename, P=None):\n if P is None:\n lines = open(filename).read().splitlines()\n if len(lines) == 4:\n lines = lines[1:]\n lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(\" \") for x in lines)]\n P = np.asarray(lines).astype(np.float32).squeeze()\n\n out = cv.decomposeProjectionMatrix(P)\n K = out[0]\n R = out[1]\n t = out[2]\n\n K = K / K[2, 2]\n intrinsics = np.eye(4)\n intrinsics[:3, :3] = K\n\n pose = np.eye(4, dtype=np.float32)\n pose[:3, :3] = R.transpose()\n pose[:3, 3] = (t[:3] / t[3])[:, 0]\n\n return intrinsics, pose\n\n\nclass Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = 
np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n\n print('Load data: End')\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])\n pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 
3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)"
]
| [
[
"numpy.diag",
"torch.linspace",
"torch.randint",
"scipy.spatial.transform.Slerp",
"numpy.linalg.inv",
"numpy.asarray",
"numpy.eye",
"torch.sum",
"torch.stack",
"numpy.stack",
"torch.inverse",
"torch.linalg.norm",
"torch.matmul",
"torch.from_numpy",
"torch.device",
"numpy.array",
"torch.ones_like",
"torch.meshgrid"
]
]
|
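Note on the entry above: `gen_rays_at` back-projects pixel coordinates through the inverse intrinsics, normalizes, and rotates the result into world space with the camera-to-world pose. A single-pixel NumPy sketch of the same math (the intrinsics and identity pose are hypothetical; the real code does this batched in torch):

import numpy as np

K = np.array([[500.0,   0.0, 320.0],
              [  0.0, 500.0, 240.0],
              [  0.0,   0.0,   1.0]])
pose = np.eye(4)                                  # camera-to-world
p = np.linalg.inv(K) @ np.array([320.0, 240.0, 1.0])
ray_v = pose[:3, :3] @ (p / np.linalg.norm(p))    # unit direction in world
ray_o = pose[:3, 3]                               # camera center
print(ray_o, ray_v)                               # [0 0 0] [0 0 1]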
mattmecoli/ethereum-forecasting | [
"1c54d42e77d75f96af40591312bce63e36cbcae6"
]
| [
"code_snippets/random_forest_example.py"
]
| [
"# Creating dict to hold data with and without trend data for streamlined testing\n\ntraining_sets = {'train': [target_train, features_train, target_val, features_val],\n 'train_with_trend' : [target_with_trends_train, features_with_trends_train,\n target_with_trends_val, features_with_trends_val]}\n\n# Building function to gridsearch multiple datasets and return relevent results\n\ndef test_classifiers(data, grid):\n results_dict = {}\n\n for data_set, splits in data.items():\n grid.fit(splits[1], splits[0])\n results_dict[data_set + ' results'] = [grid.best_score_, grid.best_params_, grid.score(splits[1], splits[0]), grid.score(splits[3], splits[2])]\n\n return results_dict\n\n# Creating and Running GridSearch on Random Forest Model\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nrft_param_grid = {'randomforestclassifier__n_estimators': [250, 350, 450],\n 'randomforestclassifier__max_depth': [2, 6, 10, 14, 18, 22]}\n\nrft_pipe = make_pipeline(RandomForestClassifier())\n\nrft_grid = GridSearchCV(rft_pipe, rft_param_grid, scoring = \"accuracy\")\n\nrft_results = test_classifiers(training_sets, rft_grid)\n\nrft_results\n"
]
| [
[
"sklearn.ensemble.RandomForestClassifier"
]
]
|
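Note on the entry above: with the missing `make_pipeline` and `GridSearchCV` imports restored, the search runs end to end. A self-contained sketch on synthetic splits (shapes and parameter values are invented; the `fit`/`score` pair is what `test_classifiers` does per dataset):

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline

rng = np.random.default_rng(0)
X_tr, y_tr = rng.normal(size=(80, 4)), np.arange(80) % 2
X_va, y_va = rng.normal(size=(20, 4)), np.arange(20) % 2

grid = GridSearchCV(make_pipeline(RandomForestClassifier(random_state=0)),
                    {'randomforestclassifier__n_estimators': [50, 100]},
                    scoring='accuracy')
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.score(X_va, y_va))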
guillaumehuet/SolidsPy | [
"42887540f579ba5c4f735edd3438421d39ad1694"
]
| [
"solidspy/preprocesor.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nPreprocessor subroutines\n-------------------------\n\nThis module contains functions to preprocess the input files to compute\na Finite Element Analysis.\n\n\"\"\"\nimport sys\nimport numpy as np\n\n\ndef readin(folder=\"\"):\n \"\"\"Read the input files\"\"\"\n nodes = np.loadtxt(folder + 'nodes.txt', ndmin=2)\n mats = np.loadtxt(folder + 'mater.txt', ndmin=2)\n elements = np.loadtxt(folder + 'eles.txt', ndmin=2, dtype=np.int)\n loads = np.loadtxt(folder + 'loads.txt', ndmin=2)\n\n return nodes, mats, elements, loads\n\n\ndef echomod(nodes, mats, elements, loads, folder=\"\"):\n \"\"\"Create echoes of the model input files\"\"\"\n np.savetxt(folder + \"KNODES.txt\", nodes, fmt='%5.2f', delimiter=' ')\n np.savetxt(folder + \"KMATES.txt\", mats, fmt='%5.2f', delimiter=' ')\n np.savetxt(folder + \"KELEMS.txt\", elements, fmt='%d', delimiter=' ')\n np.savetxt(folder + \"KLOADS.txt\", loads, fmt='%5.2f', delimiter=' ')\n\n\ndef initial_params():\n \"\"\"Read initial parameters for the simulation\n\n The parameters to be read are:\n\n - folder: location of the input files.\n - name: name for the output files (if echo is True).\n - echo: echo output files.\n \"\"\"\n # Check Python version\n version = sys.version_info.major\n if version == 3:\n global raw_input\n raw_input = input\n elif version == 2:\n pass\n else:\n raise ValueError(\"You should use Python 2.x at least!\")\n\n # Try to run with easygui\n try:\n import easygui\n folder = easygui.diropenbox(title=\"Folder for the job\") + \"/\"\n except:\n folder = raw_input('Enter folder (empty for the current one): ')\n\n return folder\n\n\ndef ele_writer(cells, cell_data, ele_tag, phy_sur, ele_type, mat_tag, nini):\n \"\"\"\n Extracts a subset of elements from a complete mesh according to the\n physical surface phy_sur and writes down the proper fields into an\n elements array.\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n ele_tag : string\n Element type according to meshio convention,\n e.g., quad9 or line3.\n phy_sur : int\n Physical surface for the subset.\n ele_type: int\n Element type.\n mat_tag : int\n Material profile for the subset.\n ndof : int\n Number of degrees of freedom for the elements.\n nnode : int\n Number of nodes for the element.\n nini : int\n Element id for the first element in the set.\n\n Returns\n -------\n nf : int\n Element id for the last element in the set\n els_array : int\n Elemental data.\n\n \"\"\"\n eles = cells[ele_tag]\n dict_nnode = {'triangle': 3,\n 'triangle6': 6,\n 'quad': 4}\n nnode = dict_nnode[ele_tag]\n phy_surface = cell_data[ele_tag]['gmsh:physical']\n ele_id = [cont for cont, _ in enumerate(phy_surface[:])\n if phy_surface[cont] == phy_sur]\n els_array = np.zeros([len(ele_id) , 3 + nnode], dtype=int)\n els_array[: , 0] = range(nini , len(ele_id) + nini )\n els_array[: , 1] = ele_type\n els_array[: , 2] = mat_tag\n els_array[: , 3::] = eles[ele_id, :]\n nf = nini + len(ele_id)\n return nf , els_array\n\n\ndef node_writer(points , point_data):\n \"\"\"Write nodal data as required by SolidsPy\n\n Parameters\n ----------\n points : dictionary\n Nodal points\n point_data : dictionary\n Physical data associatted to the nodes.\n\n Returns\n -------\n nodes_array : ndarray (int)\n Array with the nodal data according to SolidsPy.\n\n \"\"\"\n nodes_array = np.zeros([points.shape[0], 5])\n nodes_array[:, 0] = 
range(points.shape[0])\n nodes_array[:, 1:3] = points[:, :2]\n return nodes_array\n\n\ndef boundary_conditions(cells, cell_data, phy_lin, nodes_array, bc_x, bc_y):\n \"\"\"Impose nodal point boundary conditions as required by SolidsPy\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n phy_lin : int\n Physical line where BCs are to be imposed.\n nodes_array : int\n Array with the nodal data and to be modified by BCs.\n bc_x, bc_y : int\n Boundary condition flag along the x and y direction:\n * -1: restrained\n * 0: free\n\n Returns\n -------\n nodes_array : int\n Array with the nodal data after imposing BCs according\n to SolidsPy.\n\n \"\"\"\n lines = cells[\"line\"]\n # Bounds contains data corresponding to the physical line.\n phy_line = cell_data[\"line\"][\"gmsh:physical\"]\n id_frontera = [cont for cont in range(len(phy_line))\n if phy_line[cont] == phy_lin]\n nodes_frontera = lines[id_frontera]\n nodes_frontera = nodes_frontera.flatten()\n nodes_frontera = list(set(nodes_frontera))\n nodes_array[nodes_frontera, 3] = bc_x\n nodes_array[nodes_frontera, 4] = bc_y\n return nodes_array\n\n\ndef loading(cells, cell_data, phy_lin, P_x, P_y):\n \"\"\"Impose nodal boundary conditions as required by SolidsPy\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n phy_lin : int\n Physical line where BCs are to be imposed.\n nodes_array : int\n Array with the nodal data and to be modified by BCs.\n P_x, P_y : float\n Load components in x and y directions.\n\n Returns\n -------\n nodes_array : int\n Array with the nodal data after imposing BCs according\n to SolidsPy.\n\n \"\"\"\n lines = cells[\"line\"]\n # Bounds contains data corresponding to the physical line.\n phy_line = cell_data[\"line\"][\"gmsh:physical\"]\n id_carga = [cont for cont in range(len(phy_line))\n if phy_line[cont] == phy_lin]\n nodes_carga = lines[id_carga]\n nodes_carga = nodes_carga.flatten()\n nodes_carga = list(set(nodes_carga))\n ncargas = len(nodes_carga)\n cargas = np.zeros((ncargas, 3))\n cargas[:, 0] = nodes_carga\n cargas[:, 1] = P_x/ncargas\n cargas[:, 2] = P_y/ncargas\n return cargas\n\n\ndef rect_grid(length, height, nx, ny, eletype=None):\n \"\"\"Generate a structured mesh for a rectangle\n\n The coordinates of the nodes will be defined in the\n domain [-length/2, length/2] x [-height/2, height/2].\n\n Parameters\n ----------\n length : float\n Length of the domain.\n height : gloat\n Height of the domain.\n nx : int\n Number of elements in the x direction.\n ny : int\n Number of elements in the y direction.\n eletype : None\n It does nothing right now.\n\n Returns\n -------\n x : ndarray (float)\n x-coordinates for the nodes.\n y : ndarray (float)\n y-coordinates for the nodes.\n els : ndarray\n Array with element data.\n\n Examples\n --------\n\n >>> x, y, els = rect_grid(2, 2, 2, 2)\n >>> x\n array([-1., 0., 1., -1., 0., 1., -1., 0., 1.])\n >>> y\n array([-1., -1., -1., 0., 0., 0., 1., 1., 1.])\n >>> els\n array([[0, 1, 0, 0, 1, 4, 3],\n [1, 1, 0, 1, 2, 5, 4],\n [2, 1, 0, 3, 4, 7, 6],\n [3, 1, 0, 4, 5, 8, 7]])\n\n \"\"\"\n y, x = np.mgrid[-height/2:height/2:(ny + 1)*1j,\n -length/2:length/2:(nx + 1)*1j]\n els = np.zeros((nx*ny, 7), dtype=int)\n els[:, 1] = 1\n for row in range(ny):\n for col in range(nx):\n cont = row*nx + col\n els[cont, 0] 
= cont\n els[cont, 3:7] = [cont + row, cont + row + 1,\n cont + row + nx + 2, cont + row + nx + 1]\n return x.flatten(), y.flatten(), els\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"
]
| [
[
"numpy.savetxt",
"numpy.zeros",
"numpy.loadtxt"
]
]
|
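Note on the entry above: `loading()` splits a total load equally across the unique nodes of a physical line. A fragment showing just the node selection (the mesh arrays and physical tag 3 are invented for illustration):

import numpy as np

cells = {"line": np.array([[0, 1], [1, 2], [5, 6]])}
cell_data = {"line": {"gmsh:physical": np.array([3, 3, 7])}}

phy = cell_data["line"]["gmsh:physical"]
nodes = sorted(set(cells["line"][phy == 3].flatten()))  # nodes on tag 3
print(nodes, -9.0 / len(nodes))  # [0, 1, 2] -3.0, i.e. P_y split per node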
afkfurby/stable-baselines | [
"e5adbc4c52f41ea315f05e6a6ca9ffc81793a35c"
]
| [
"stable_baselines/common/distributions.py"
]
| [
"import numpy as np\nimport tensorflow as tf\n\n\nfrom tensorflow.python.ops import math_ops\nfrom gym import spaces\n\nfrom stable_baselines.common.tf_layers import linear\n\n\nclass ProbabilityDistribution(object):\n \"\"\"\n Base class for describing a probability distribution.\n \"\"\"\n def __init__(self):\n super(ProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n \"\"\"\n Return the direct probabilities\n\n :return: ([float]) the probabilities\n \"\"\"\n raise NotImplementedError\n\n def mode(self):\n \"\"\"\n Returns the probability\n\n :return: (Tensorflow Tensor) the deterministic action\n \"\"\"\n raise NotImplementedError\n\n def neglogp(self, x):\n \"\"\"\n returns the of the negative log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The negative log likelihood of the distribution\n \"\"\"\n # Usually it's easier to define the negative logprob\n raise NotImplementedError\n\n def kl(self, other):\n \"\"\"\n Calculates the Kullback-Leibler divergence from the given probability distribution\n\n :param other: ([float]) the distribution to compare with\n :return: (float) the KL divergence of the two distributions\n \"\"\"\n raise NotImplementedError\n\n def entropy(self):\n \"\"\"\n Returns Shannon's entropy of the probability\n\n :return: (float) the entropy\n \"\"\"\n raise NotImplementedError\n\n def sample(self):\n \"\"\"\n returns a sample from the probability distribution\n\n :return: (Tensorflow Tensor) the stochastic action\n \"\"\"\n raise NotImplementedError\n\n def logp(self, x):\n \"\"\"\n returns the of the log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The log likelihood of the distribution\n \"\"\"\n return - self.neglogp(x)\n\n\nclass ProbabilityDistributionType(object):\n \"\"\"\n Parametrized family of probability distributions\n \"\"\"\n\n def probability_distribution_class(self):\n \"\"\"\n returns the ProbabilityDistribution class of this type\n\n :return: (Type ProbabilityDistribution) the probability distribution class associated\n \"\"\"\n raise NotImplementedError\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n Returns the probability distribution from flat probabilities\n flat: flattened vector of parameters of probability distribution\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n return self.probability_distribution_class()(flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n \"\"\"\n returns the probability distribution from latent values\n\n :param pi_latent_vector: ([float]) the latent pi values\n :param vf_latent_vector: ([float]) the latent vf values\n :param init_scale: (float) the initial scale of the distribution\n :param init_bias: (float) the initial bias of the distribution\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n raise NotImplementedError\n\n def param_shape(self):\n \"\"\"\n returns the shape of the input parameters\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_shape(self):\n \"\"\"\n returns the shape of the sampling\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_dtype(self):\n \"\"\"\n returns the type of the sampling\n\n :return: (type) the type\n \"\"\"\n raise NotImplementedError\n\n def param_placeholder(self, prepend_shape, name=None):\n 
\"\"\"\n returns the TensorFlow placeholder for the input parameters\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.compat.v1.placeholder(dtype=tf.float32, shape=prepend_shape + self.param_shape(), name=name)\n\n def sample_placeholder(self, prepend_shape, name=None):\n \"\"\"\n returns the TensorFlow placeholder for the sampling\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.compat.v1.placeholder(dtype=self.sample_dtype(), shape=prepend_shape + self.sample_shape(), name=name)\n\n\nclass CategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_cat):\n \"\"\"\n The probability distribution type for categorical input\n\n :param n_cat: (int) the number of categories\n \"\"\"\n self.n_cat = n_cat\n\n def probability_distribution_class(self):\n return CategoricalProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.n_cat]\n\n def sample_shape(self):\n return []\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass MultiCategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_vec):\n \"\"\"\n The probability distribution type for multiple categorical input\n\n :param n_vec: ([int]) the vectors\n \"\"\"\n # Cast the variable because tf does not allow uint32\n self.n_vec = n_vec.astype(np.int32)\n # Check that the cast was valid\n assert (self.n_vec > 0).all(), \"Casting uint32 to int32 was invalid\"\n\n def probability_distribution_class(self):\n return MultiCategoricalProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n return MultiCategoricalProbabilityDistribution(self.n_vec, flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [sum(self.n_vec)]\n\n def sample_shape(self):\n return [len(self.n_vec)]\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass DiagGaussianProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for multivariate Gaussian input\n\n :param size: (int) the number of dimensions of the multivariate gaussian\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return DiagGaussianProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n returns the probability distribution from flat probabilities\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n return self.probability_distribution_class()(flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, 
vf_latent_vector, init_scale=1.0, init_bias=0.0):\n mean = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n logstd = tf.compat.v1.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.compat.v1.zeros_initializer())\n pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), mean, q_values\n\n def param_shape(self):\n return [2 * self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.float32\n\n\nclass BernoulliProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for Bernoulli input\n\n :param size: (int) the number of dimensions of the Bernoulli distribution\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return BernoulliProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.int32\n\n\nclass CategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n \"\"\"\n Probability distributions from categorical input\n\n :param logits: ([float]) the categorical logits input\n \"\"\"\n self.logits = logits\n super(CategoricalProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return tf.argmax(input=self.logits, axis=-1)\n\n def neglogp(self, x):\n # Note: we can't use sparse_softmax_cross_entropy_with_logits because\n # the implementation does not allow second-order derivatives...\n one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])\n return tf.nn.softmax_cross_entropy_with_logits(\n logits=self.logits,\n labels=tf.stop_gradient(one_hot_actions))\n\n def kl(self, other):\n a_0 = self.logits - tf.reduce_max(input_tensor=self.logits, axis=-1, keepdims=True)\n a_1 = other.logits - tf.reduce_max(input_tensor=other.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n exp_a_1 = tf.exp(a_1)\n z_0 = tf.reduce_sum(input_tensor=exp_a_0, axis=-1, keepdims=True)\n z_1 = tf.reduce_sum(input_tensor=exp_a_1, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(input_tensor=p_0 * (a_0 - tf.math.log(z_0) - a_1 + tf.math.log(z_1)), axis=-1)\n\n def entropy(self):\n a_0 = self.logits - tf.reduce_max(input_tensor=self.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n z_0 = tf.reduce_sum(input_tensor=exp_a_0, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(input_tensor=p_0 * (tf.math.log(z_0) - a_0), axis=-1)\n\n def sample(self):\n # Gumbel-max trick to sample\n # a categorical distribution (see http://amid.fish/humble-gumbel)\n uniform = tf.random.uniform(tf.shape(input=self.logits), dtype=self.logits.dtype)\n return tf.argmax(input=self.logits - tf.math.log(-tf.math.log(uniform)), axis=-1)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new logits values\n\n :param flat: ([float]) the categorical 
logits input\n        :return: (ProbabilityDistribution) the instance from the given categorical input\n        \"\"\"\n        return cls(flat)\n\n\nclass MultiCategoricalProbabilityDistribution(ProbabilityDistribution):\n    def __init__(self, nvec, flat):\n        \"\"\"\n        Probability distributions from multicategorical input\n\n        :param nvec: ([int]) the sizes of the different categorical inputs\n        :param flat: ([float]) the categorical logits input\n        \"\"\"\n        self.flat = flat\n        self.categoricals = list(map(CategoricalProbabilityDistribution, tf.split(flat, nvec, axis=-1)))\n        super(MultiCategoricalProbabilityDistribution, self).__init__()\n\n    def flatparam(self):\n        return self.flat\n\n    def mode(self):\n        return tf.stack([p.mode() for p in self.categoricals], axis=-1)\n\n    def neglogp(self, x):\n        return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])\n\n    def kl(self, other):\n        return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])\n\n    def entropy(self):\n        return tf.add_n([p.entropy() for p in self.categoricals])\n\n    def sample(self):\n        return tf.stack([p.sample() for p in self.categoricals], axis=-1)\n\n    @classmethod\n    def fromflat(cls, flat):\n        \"\"\"\n        Create an instance of this from new logits values\n\n        :param flat: ([float]) the multi categorical logits input\n        :return: (ProbabilityDistribution) the instance from the given multi categorical input\n        \"\"\"\n        raise NotImplementedError\n\n\nclass DiagGaussianProbabilityDistribution(ProbabilityDistribution):\n    def __init__(self, flat):\n        \"\"\"\n        Probability distributions from multivariate Gaussian input\n\n        :param flat: ([float]) the multivariate Gaussian input data\n        \"\"\"\n        self.flat = flat\n        mean, logstd = tf.split(axis=len(flat.shape) - 1, num_or_size_splits=2, value=flat)\n        self.mean = mean\n        self.logstd = logstd\n        self.std = tf.exp(logstd)\n        super(DiagGaussianProbabilityDistribution, self).__init__()\n\n    def flatparam(self):\n        return self.flat\n\n    def mode(self):\n        # Bounds are taken into account outside this class (during training only)\n        return self.mean\n\n    def neglogp(self, x):\n        return 0.5 * tf.reduce_sum(input_tensor=tf.square((x - self.mean) / self.std), axis=-1) \\\n               + 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(input=x)[-1], tf.float32) \\\n               + tf.reduce_sum(input_tensor=self.logstd, axis=-1)\n\n    def kl(self, other):\n        assert isinstance(other, DiagGaussianProbabilityDistribution)\n        return tf.reduce_sum(input_tensor=other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) /\n                             (2.0 * tf.square(other.std)) - 0.5, axis=-1)\n\n    def entropy(self):\n        return tf.reduce_sum(input_tensor=self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)\n\n    def sample(self):\n        # Bounds are taken into account outside this class (during training only)\n        # Otherwise, it changes the distribution and breaks PPO2 for instance\n        return self.mean + self.std * tf.random.normal(tf.shape(input=self.mean),\n                                                       dtype=self.mean.dtype)\n\n    @classmethod\n    def fromflat(cls, flat):\n        \"\"\"\n        Create an instance of this from new multivariate Gaussian input\n\n        :param flat: ([float]) the multivariate Gaussian input data\n        :return: (ProbabilityDistribution) the instance from the given multivariate Gaussian input data\n        \"\"\"\n        return cls(flat)\n\n\nclass BernoulliProbabilityDistribution(ProbabilityDistribution):\n    def __init__(self, logits):\n        \"\"\"\n        Probability distributions from Bernoulli input\n\n        :param logits: ([float]) the Bernoulli input data\n        \"\"\"\n        self.logits = logits\n        self.probabilities 
= tf.sigmoid(logits)\n super(BernoulliProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return tf.round(self.probabilities)\n\n def neglogp(self, x):\n return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=tf.cast(x, tf.float32)),\n axis=-1)\n\n def kl(self, other):\n return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits,\n labels=self.probabilities), axis=-1) - \\\n tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def entropy(self):\n return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def sample(self):\n samples_from_uniform = tf.random.uniform(tf.shape(input=self.probabilities))\n return tf.cast(math_ops.less(samples_from_uniform, self.probabilities), tf.float32)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new Bernoulli input\n\n :param flat: ([float]) the Bernoulli input data\n :return: (ProbabilityDistribution) the instance from the given Bernoulli input data\n \"\"\"\n return cls(flat)\n\n\ndef make_proba_dist_type(ac_space):\n \"\"\"\n return an instance of ProbabilityDistributionType for the correct type of action space\n\n :param ac_space: (Gym Space) the input action space\n :return: (ProbabilityDistributionType) the appropriate instance of a ProbabilityDistributionType\n \"\"\"\n if isinstance(ac_space, spaces.Box):\n assert len(ac_space.shape) == 1, \"Error: the action space must be a vector\"\n return DiagGaussianProbabilityDistributionType(ac_space.shape[0])\n elif isinstance(ac_space, spaces.Discrete):\n return CategoricalProbabilityDistributionType(ac_space.n)\n elif isinstance(ac_space, spaces.MultiDiscrete):\n return MultiCategoricalProbabilityDistributionType(ac_space.nvec)\n elif isinstance(ac_space, spaces.MultiBinary):\n return BernoulliProbabilityDistributionType(ac_space.n)\n else:\n raise NotImplementedError(\"Error: probability distribution, not implemented for action space of type {}.\"\n .format(type(ac_space)) +\n \" Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary.\")\n\n\ndef shape_el(tensor, index):\n \"\"\"\n get the shape of a TensorFlow Tensor element\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param index: (int) the element\n :return: ([int]) the shape\n \"\"\"\n maybe = tensor.get_shape()[index]\n if maybe is not None:\n return maybe\n else:\n return tf.shape(input=tensor)[index]\n"
]
| [
[
"tensorflow.reduce_max",
"numpy.log",
"tensorflow.concat",
"tensorflow.split",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.python.ops.math_ops.less",
"tensorflow.cast",
"tensorflow.sigmoid",
"tensorflow.exp",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.stop_gradient",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.math.log",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.round"
]
]
|
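Note on the distributions entry above: `make_proba_dist_type` picks a distribution family from the Gym action space, and the type object then builds a concrete distribution from flat parameters. A minimal usage sketch, assuming the TF1-compat graph mode that the placeholders imply (the feed values are illustrative, not from the file):

    import numpy as np
    import tensorflow as tf
    from gym import spaces

    tf.compat.v1.disable_eager_execution()

    # make_proba_dist_type and the distribution classes come from the file above
    dist_type = make_proba_dist_type(spaces.Discrete(4))    # CategoricalProbabilityDistributionType
    logits_ph = dist_type.param_placeholder([None])         # float32 placeholder, shape (?, 4)
    pd = dist_type.proba_distribution_from_flat(logits_ph)  # CategoricalProbabilityDistribution
    mode_op, entropy_op = pd.mode(), pd.entropy()

    with tf.compat.v1.Session() as sess:
        feed = {logits_ph: np.array([[0.1, 2.0, 0.3, -1.0]], dtype=np.float32)}
        print(sess.run([mode_op, entropy_op], feed_dict=feed))  # mode is argmax -> 1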
RajatKGupta/fMRI_BrainDancer | [
"e0e5b04a6dde2990134b60288096dcd2886e9334"
]
| [
"core/analysis.py"
]
| [
"# %%\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated/Last Edited: October 21, 2019\n\n@author: Rajat Kumar\n@maintainer: Rajat Kumar\nNotes:\nScript for executing the analysis routine for denoising.\n\nTo do:\nInclude the option to generate outer mask and corresponding time_series.\n\n\"\"\"\n\n# %% All imports\nimport nibabel as nib, pandas as pd, numpy as np, matplotlib.pyplot as plt\nfrom display import display_all_slices\nfrom cartridge import findcartridge,inner_mask, outer_mask, cen_rotation\nfrom quadrants import quadrant_mask_T2\nfrom t2 import T2Star\nfrom epi import phantom_motion, create_mean_slices, simulate_inner, scanner_output\n\n\n# %%\n# =============================================================================\n# Main Routine\n# =============================================================================\n\n\ndef create_denoising_dataset(epi_path,log_path,acqtimes_path,rot_dir=-1):\n data_read = nib.load(epi_path)\n \n \n display_all_slices(data_read,0)\n plt.show()\n \n start = int(input('Enter the first good slice: '))\n end = int(input('Enter the last good slice: '))\n \n log = pd.read_csv(log_path)\n motion_time = np.max(log['Tmot'].values)\n acq_times = pd.read_csv(acqtimes_path)\n motionfree = acq_times[acq_times['Time']>motion_time]['Slice'].values\n total_slices = [] \n for i in list(motionfree):\n if start<= i <= end:\n total_slices.append(i)\n print('Selected Slices for Analysis are: ', total_slices)\n \n imask = []\n cen = []\n imask_metrics = []\n center_rotation_all = []\n omask = []\n detect_remove = []\n updated_total_slices = []\n for i in range(len(total_slices)): \n \n \n img_complete,cy_complete,cx_complete, radii_complete = inner_mask(epi_path,total_slices[i],volume_num=0,lvl=0.004,rad1=7,rad2=50,step=1)\n \n \n center_rotation = cen_rotation(epi_path,total_slices[i],img_complete,cy_complete,cx_complete,radii_complete, canny_sgm=1)\n \n \n detect = int(input('Enter 1 if this slice is good'))\n \n if detect ==1:\n \n center_rotation_all.append(center_rotation)\n imask.append(img_complete)\n updated_total_slices.append(total_slices[i])\n # TO DO - Include the option to generate outer mask and corresponding time_series, with something like below:\n #out_mask = outer_mask(data_read,findcartridge(data_read,total_slices[i],0),total_slices[i],0)\n #omask.append(out_mask)\n \n \n positions = phantom_motion(log_path)\n synth =create_mean_slices(data_read,updated_total_slices,imask,200)\n simulated_data = simulate_inner(synth,positions,updated_total_slices,imask,center_rotation_all,rot_dir)\n scanner_inner = scanner_output(data_read,positions,updated_total_slices,imask,200) # add omask in future for outer cylinder\n \n return simulated_data, scanner_inner, imask, center_rotation_all, updated_total_slices\n\n\n# %%\n"
]
| [
[
"numpy.max",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
]
|
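Note on the `core/analysis.py` entry above: the slice selection keeps only slices acquired after the phantom stopped moving. A self-contained sketch of that filtering step (the column names `Tmot`, `Time`, and `Slice` come from the code; the numbers are made up):

    import numpy as np
    import pandas as pd

    log = pd.DataFrame({'Tmot': [0.0, 4.2, 9.8]})               # motion event times
    acq_times = pd.DataFrame({'Slice': [0, 1, 2, 3],
                              'Time': [5.0, 8.0, 11.0, 14.0]})  # slice acquisition times
    motion_time = np.max(log['Tmot'].values)                    # last recorded motion
    motionfree = acq_times[acq_times['Time'] > motion_time]['Slice'].values
    print(motionfree)                                           # -> [2 3]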
Benedicte/quantum-systems | [
"31e6f78dceb03f5d01092f4008fbab38516a0623"
]
| [
"tests/test_two_dim_ho.py"
]
| [
"import os\nimport sys\nimport pytest\nimport numpy as np\n\nfrom quantum_systems.quantum_dots.two_dim.two_dim_helper import (\n get_index_p,\n get_indices_nm,\n get_one_body_elements,\n get_coulomb_elements,\n)\n\nfrom quantum_systems import (\n GeneralOrbitalSystem,\n TwoDimensionalHarmonicOscillator,\n BasisSet,\n)\n\n\ndef test_two_body_symmetry():\n l = 12\n\n u = get_coulomb_elements(l)\n\n for p in range(l):\n for q in range(l):\n for r in range(l):\n for s in range(l):\n assert abs(u[p, q, r, s] - u[q, p, s, r]) < 1e-8\n\n\ndef test_tdho_caching():\n l = 12\n\n pre_cache = os.listdir()\n\n os.environ[\"QS_CACHE_TDHO\"] = \"1\"\n u = get_coulomb_elements(l)\n\n post_cache = os.listdir()\n\n assert len(set(post_cache) - set(pre_cache)) == 1\n\n u_2 = get_coulomb_elements(l)\n np.testing.assert_allclose(u, u_2)\n\n filename = (set(post_cache) - set(pre_cache)).pop()\n os.remove(filename)\n\n assert len(set(os.listdir()) - set(pre_cache)) == 0\n\n\ndef test_p_index(index_map):\n for p, (n, m) in enumerate(index_map):\n assert p == get_index_p(n, m)\n\n\ndef test_nm_indices(index_map):\n for p, (n, m) in enumerate(index_map):\n assert get_indices_nm(p) == (n, m)\n\n\ndef test_one_body_elements(hi):\n l = len(hi)\n _hi = get_one_body_elements(l)\n\n np.testing.assert_allclose(hi, _hi, atol=1e-6, rtol=1e-6)\n\n\ndef test_antisymmetric_one_body_elements(h):\n l = len(h)\n _h = BasisSet.add_spin_one_body(get_one_body_elements(l // 2), np=np)\n\n np.testing.assert_allclose(h, _h, atol=1e-6, rtol=1e-6)\n\n\ndef test_two_body_elements(orbital_integrals):\n l = len(orbital_integrals)\n oi = get_coulomb_elements(l)\n\n np.testing.assert_allclose(orbital_integrals, oi, atol=1e-6, rtol=1e-6)\n\n\ndef test_antisymmetric_two_body_elements(u):\n l = len(u)\n _u = BasisSet.anti_symmetrize_u(\n BasisSet.add_spin_two_body(get_coulomb_elements(l // 2), np=np)\n )\n\n np.testing.assert_allclose(u, _u, atol=1e-6, rtol=1e-6)\n\n\ndef test_spf(spf_2dho):\n n, l, radius, num_grid_points, spf_test = spf_2dho\n\n tdho = TwoDimensionalHarmonicOscillator(l, radius, num_grid_points)\n\n for p in range(l // 2):\n np.testing.assert_allclose(spf_test[p], tdho.spf[p])\n"
]
| [
[
"numpy.testing.assert_allclose"
]
]
|
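Note on the `tests/test_two_dim_ho.py` entry above: `test_two_body_symmetry` checks the particle-exchange symmetry u[p, q, r, s] == u[q, p, s, r] with four nested loops. A vectorized equivalent, assuming `u` is the dense l x l x l x l array returned by `get_coulomb_elements`:

    import numpy as np

    def check_exchange_symmetry(u, tol=1e-8):
        # u.transpose(1, 0, 3, 2)[p, q, r, s] equals u[q, p, s, r]
        return np.max(np.abs(u - u.transpose(1, 0, 3, 2))) < tol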
FanKuan44/TENAS | [
"eb70174626649ad2852924ca28c4cabe25fe03d4"
]
| [
"lib/models/cell_infers/cells.py"
]
| [
"import torch\nimport torch.nn as nn\nfrom copy import deepcopy\nfrom ..cell_operations import OPS\n\n\n# Cell for NAS-Bench-201\nclass InferCell(nn.Module):\n\n def __init__(self, genotype, C_in, C_out, stride):\n super(InferCell, self).__init__()\n\n self.layers = nn.ModuleList()\n self.node_IN = []\n self.node_IX = []\n self.genotype = deepcopy(genotype)\n for i in range(1, len(genotype)):\n node_info = genotype[i-1]\n cur_index = []\n cur_innod = []\n for (op_name, op_in) in node_info:\n if op_in == 0:\n layer = OPS[op_name](C_in , C_out, stride, True, True)\n else:\n layer = OPS[op_name](C_out, C_out, 1, True, True)\n cur_index.append( len(self.layers) )\n cur_innod.append( op_in )\n self.layers.append( layer )\n self.node_IX.append( cur_index )\n self.node_IN.append( cur_innod )\n self.nodes = len(genotype)\n self.in_dim = C_in\n self.out_dim = C_out\n\n def extra_repr(self):\n string = 'info :: nodes={nodes}, inC={in_dim}, outC={out_dim}'.format(**self.__dict__)\n laystr = []\n for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)):\n y = ['I{:}-L{:}'.format(_ii, _il) for _il, _ii in zip(node_layers, node_innods)]\n x = '{:}<-({:})'.format(i+1, ','.join(y))\n laystr.append( x )\n return string + ', [{:}]'.format( ' | '.join(laystr) ) + ', {:}'.format(self.genotype.tostr())\n\n def forward(self, inputs):\n nodes = [inputs]\n for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)):\n node_feature = sum( self.layers[_il](nodes[_ii]) for _il, _ii in zip(node_layers, node_innods) )\n nodes.append( node_feature )\n return nodes[-1]\n\n\n\n# Learning Transferable Architectures for Scalable Image Recognition, CVPR 2018\nclass NASNetInferCell(nn.Module):\n\n def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, affine, track_running_stats):\n super(NASNetInferCell, self).__init__()\n self.reduction = reduction\n if reduction_prev: self.preprocess0 = OPS['skip_connect'](C_prev_prev, C, 2, affine, track_running_stats)\n else : self.preprocess0 = OPS['nor_conv_1x1'](C_prev_prev, C, 1, affine, track_running_stats)\n self.preprocess1 = OPS['nor_conv_1x1'](C_prev, C, 1, affine, track_running_stats)\n\n if not reduction:\n nodes, concats = genotype['normal'], genotype['normal_concat']\n else:\n nodes, concats = genotype['reduce'], genotype['reduce_concat']\n self._multiplier = len(concats)\n self._concats = concats\n self._steps = len(nodes)\n self._nodes = nodes\n self.edges = nn.ModuleDict()\n for i, node in enumerate(nodes):\n for in_node in node:\n name, j = in_node[0], in_node[1]\n stride = 2 if reduction and j < 2 else 1\n node_str = '{:}<-{:}'.format(i+2, j)\n self.edges[node_str] = OPS[name](C, C, stride, affine, track_running_stats)\n\n # [TODO] to support drop_prob in this function..\n def forward(self, s0, s1, unused_drop_prob):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n for i, node in enumerate(self._nodes):\n clist = []\n for in_node in node:\n name, j = in_node[0], in_node[1]\n node_str = '{:}<-{:}'.format(i+2, j)\n op = self.edges[ node_str ]\n clist.append( op(states[j]) )\n states.append( sum(clist) )\n return torch.cat([states[x] for x in self._concats], dim=1)\n\n\nclass AuxiliaryHeadCIFAR(nn.Module):\n\n def __init__(self, C, num_classes):\n \"\"\"assuming input size 8x8\"\"\"\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n 
nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)\n\n def forward(self, x):\n x = self.features(x)\n x = self.classifier(x.view(x.size(0),-1))\n return x\n"
]
| [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.ModuleDict",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
]
|
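Note on the `cells.py` entry above: `InferCell.forward` computes each node as the sum of its incoming edges, using `node_IX` (layer indices) and `node_IN` (source-node indices). A toy sketch of that bookkeeping, with plain callables standing in for `OPS` (the lambdas and numbers are made up):

    layers = [lambda x: x + 1, lambda x: x * 2, lambda x: x - 3]  # stand-ins for OPS layers
    node_IX = [[0], [1, 2]]   # which layers feed node 1 and node 2
    node_IN = [[0], [0, 1]]   # which earlier node each layer reads
    nodes = [10]              # nodes[0] is the cell input
    for layer_idxs, in_nodes in zip(node_IX, node_IN):
        nodes.append(sum(layers[l](nodes[n]) for l, n in zip(layer_idxs, in_nodes)))
    print(nodes[-1])          # (10*2) + ((10+1)-3) = 28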
AlexanderSlav/Automatic-Soccer-Highlights-Generation | [
"073e95776052034a327a102e5291234983965ad2"
]
| [
"Celebration_Classification/trainer.py"
]
| [
"import torch\nfrom utils import load_split_train_test, accuracy, Logger, AverageMeter\nimport os\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass Trainer:\n def __init__(self, args, model, criterion, optimizer, wandb, scheduler=None):\n self.args = args\n self.model = model\n self.criterion = criterion\n self.scheduler = scheduler\n self.optimizer = optimizer\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.train_loader, self.test_loader = load_split_train_test(args)\n self.loss = {'train': AverageMeter(), 'test': AverageMeter()}\n self.accuracy = {'train': AverageMeter(), 'test': AverageMeter()}\n self.logger = Logger(wandb, args, len(self.train_loader.dataset))\n self.epoch = 0\n self.min_accuracy = 0\n self.predlist = torch.zeros(0, dtype=torch.long, device='cpu')\n self.lbllist = torch.zeros(0, dtype=torch.long, device='cpu')\n\n def before_training_step(self):\n self.model.train()\n self.epoch += 1\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n def after_training_step(self):\n self.logger.epoch_log(self.accuracy, self.loss, 'train')\n\n def training_step(self):\n self.before_training_step()\n\n for batch_idx, (data, target) in enumerate(self.train_loader):\n data, target = data.to(self.device, dtype=torch.float), target.to(self.device)\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.criterion(output, target)\n self.loss['train'].update(loss.item())\n self.accuracy['train'].update(accuracy(output, target)[0])\n loss.backward()\n self.optimizer.step()\n self.logger.batch_log(self.epoch, batch_idx, loss.item())\n\n self.after_training_step()\n\n def before_validation_step(self):\n self.model.eval()\n\n def after_validation_step(self):\n if self.accuracy['test'].avg > self.min_accuracy:\n torch.save(self.model.state_dict(), os.path.join(self.logger.wandb.run.dir,\n f\"best_model.pth\"))\n self.min_accuracy = self.accuracy['test'].avg\n self.conf_mat = confusion_matrix(self.lbllist.numpy(), self.predlist.numpy())\n self.predlist = torch.zeros(0, dtype=torch.long, device='cpu')\n self.lbllist = torch.zeros(0, dtype=torch.long, device='cpu')\n else:\n self.predlist = torch.zeros(0, dtype=torch.long, device='cpu')\n self.lbllist = torch.zeros(0, dtype=torch.long, device='cpu')\n self.logger.epoch_log(self.accuracy, self.loss, 'test')\n self.loss = {stage: meter.reset() for stage, meter in self.loss.items()}\n self.accuracy = {stage: meter.reset() for stage, meter in self.accuracy.items()}\n\n def validation_step(self):\n self.before_validation_step()\n with torch.no_grad():\n for i, (data, target) in enumerate(self.test_loader):\n data, target = data.to(self.device, dtype=torch.float), target.to(self.device)\n output = self.model(data)\n _, preds = torch.max(output, 1)\n # Append batch prediction results\n self.predlist = torch.cat([self.predlist, preds.view(-1).cpu()])\n self.lbllist = torch.cat([self.lbllist, target.view(-1).cpu()])\n self.loss['test'].update(self.criterion(output, target).item())\n self.accuracy['test'].update(accuracy(output, target)[0])\n self.after_validation_step()\n\n def run_training(self):\n self.model.to(self.device)\n for _ in range(1, self.args.epochs + 1):\n self.training_step()\n self.validation_step()\n # Confusion matrix\n # conf_mat = confusion_matrix(self.lbllist.numpy(), self.predlist.numpy())\n\n # Per-class accuracy\n class_accuracy = 100 * self.conf_mat.diagonal() / self.conf_mat.sum(1)\n 
self.logger.final_accuracy(class_accuracy)\n classes = self.train_loader.dataset.dataset.classes\n conf_mat = sns.heatmap(self.conf_mat, annot=True, fmt='g',\n xticklabels=classes,\n yticklabels=classes)\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n figure = conf_mat.get_figure()\n dataset_name = self.args.datapath.split('/')[-1]\n figure.savefig(f'confusion_matrix_{self.args.model_name}_{dataset_name}.png')\n\n"
]
| [
[
"torch.max",
"torch.zeros",
"torch.no_grad",
"torch.cuda.is_available",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
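Note on the `trainer.py` entry above: `run_training` derives per-class accuracy from the confusion matrix accumulated during validation. The same formula on a toy matrix:

    import numpy as np

    conf_mat = np.array([[40, 10],
                         [ 5, 45]])   # rows: true class, columns: predicted class
    class_accuracy = 100 * conf_mat.diagonal() / conf_mat.sum(1)
    print(class_accuracy)             # -> [80. 90.] percent per class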
SirRob1997/TextAttack | [
"674a34cb966e7f2f2d9e1e4f51fc4e44cfd6821e"
]
| [
"textattack/shared/attacked_text.py"
]
| [
"from collections import OrderedDict\nimport math\n\nimport numpy as np\nimport torch\n\nimport textattack\n\nfrom .utils import words_from_text\n\n\nclass AttackedText:\n\n \"\"\" \n A helper class that represents a string that can be attacked.\n \n Models that take multiple sentences as input separate them by ``SPLIT_TOKEN``. \n Attacks \"see\" the entire input, joined into one string, without the split token. \n \n ``AttackedText`` instances that were perturbed from other ``AttackedText``\n objects contain a pointer to the previous text \n (``attack_attrs[\"previous_attacked_text\"]``), so that the full chain of \n perturbations might be reconstructed by using this key to form a linked\n list.\n\n Args:\n text (string): The string that this AttackedText represents\n attack_attrs (dict): Dictionary of various attributes stored\n during the course of an attack.\n \n \"\"\"\n\n SPLIT_TOKEN = \">>>>\"\n\n def __init__(self, text_input, attack_attrs=None):\n # Read in ``text_input`` as a string or OrderedDict.\n if isinstance(text_input, str):\n self._text_input = OrderedDict([(\"text\", text_input)])\n elif isinstance(text_input, OrderedDict):\n self._text_input = text_input\n else:\n raise TypeError(\n f\"Invalid text_input type {type(text_input)} (required str or OrderedDict)\"\n )\n # Format text inputs.\n self._text_input = OrderedDict([(k, v) for k, v in self._text_input.items()])\n self.words = words_from_text(self.text)\n if attack_attrs is None:\n self.attack_attrs = dict()\n elif isinstance(attack_attrs, dict):\n self.attack_attrs = attack_attrs\n else:\n raise TypeError(f\"Invalid type for attack_attrs: {type(attack_attrs)}\")\n # Indices of words from the *original* text. Allows us to map\n # indices between original text and this text, and vice-versa.\n self.attack_attrs.setdefault(\"original_index_map\", np.arange(len(self.words)))\n # A list of all indices in *this* text that have been modified.\n self.attack_attrs.setdefault(\"modified_indices\", set())\n\n def __eq__(self, other):\n \"\"\" Compares two text instances to make sure they have the same attack\n attributes.\n \n Since some elements stored in ``self.attack_attrs`` may be numpy\n arrays, we have to take special care when comparing them.\n \"\"\"\n if not (self.text == other.text):\n return False\n for key in self.attack_attrs:\n if key not in other.attack_attrs:\n return False\n elif isinstance(self.attack_attrs[key], np.ndarray):\n if not (self.attack_attrs[key].shape == other.attack_attrs[key].shape):\n return False\n elif not (self.attack_attrs[key] == other.attack_attrs[key]).all():\n return False\n else:\n if not self.attack_attrs[key] == other.attack_attrs[key]:\n return False\n return True\n\n def __hash__(self):\n return hash(self.text)\n\n def free_memory(self):\n \"\"\" Delete items that take up memory.\n \n Can be called once the AttackedText is only needed to display.\n \"\"\"\n if \"previous_attacked_text\" in self.attack_attrs:\n self.attack_attrs[\"previous_attacked_text\"].free_memory()\n if \"last_transformation\" in self.attack_attrs:\n del self.attack_attrs[\"last_transformation\"]\n for key in self.attack_attrs:\n if isinstance(self.attack_attrs[key], torch.Tensor):\n del self.attack_attrs[key]\n\n def text_window_around_index(self, index, window_size):\n \"\"\" The text window of ``window_size`` words centered around ``index``. 
\"\"\"\n length = len(self.words)\n half_size = (window_size - 1) / 2.0\n if index - half_size < 0:\n start = 0\n end = min(window_size - 1, length - 1)\n elif index + half_size >= length:\n start = max(0, length - window_size)\n end = length - 1\n else:\n start = index - math.ceil(half_size)\n end = index + math.floor(half_size)\n text_idx_start = self._text_index_of_word_index(start)\n text_idx_end = self._text_index_of_word_index(end) + len(self.words[end])\n return self.text[text_idx_start:text_idx_end]\n\n def _text_index_of_word_index(self, i):\n \"\"\" Returns the index of word ``i`` in self.text. \"\"\"\n pre_words = self.words[: i + 1]\n lower_text = self.text.lower()\n # Find all words until `i` in string.\n look_after_index = 0\n for word in pre_words:\n look_after_index = lower_text.find(word.lower(), look_after_index)\n return look_after_index\n\n def text_until_word_index(self, i):\n \"\"\" Returns the text before the beginning of word at index ``i``. \"\"\"\n look_after_index = self._text_index_of_word_index(i)\n return self.text[:look_after_index]\n\n def text_after_word_index(self, i):\n \"\"\" Returns the text after the end of word at index ``i``. \"\"\"\n # Get index of beginning of word then jump to end of word.\n look_after_index = self._text_index_of_word_index(i) + len(self.words[i])\n return self.text[look_after_index:]\n\n def first_word_diff(self, other_attacked_text):\n \"\"\" Returns the first word in self.words that differs from \n other_attacked_text. Useful for word swap strategies. \"\"\"\n w1 = self.words\n w2 = other_attacked_text.words\n for i in range(min(len(w1), len(w2))):\n if w1[i] != w2[i]:\n return w1\n return None\n\n def first_word_diff_index(self, other_attacked_text):\n \"\"\" Returns the index of the first word in self.words that differs\n from other_attacked_text. Useful for word swap strategies. \"\"\"\n w1 = self.words\n w2 = other_attacked_text.words\n for i in range(min(len(w1), len(w2))):\n if w1[i] != w2[i]:\n return i\n return None\n\n def all_words_diff(self, other_attacked_text):\n \"\"\" Returns the set of indices for which this and other_attacked_text\n have different words. 
\"\"\"\n indices = set()\n w1 = self.words\n w2 = other_attacked_text.words\n for i in range(min(len(w1), len(w2))):\n if w1[i] != w2[i]:\n indices.add(i)\n return indices\n\n def ith_word_diff(self, other_attacked_text, i):\n \"\"\" Returns whether the word at index i differs from other_attacked_text\n \"\"\"\n w1 = self.words\n w2 = other_attacked_text.words\n if len(w1) - 1 < i or len(w2) - 1 < i:\n return True\n return w1[i] != w2[i]\n\n def convert_from_original_idxs(self, idxs):\n \"\"\" Takes indices of words from original string and converts them to \n indices of the same words in the current string.\n \n Uses information from ``self.attack_attrs['original_index_map'], \n which maps word indices from the original to perturbed text.\n \"\"\"\n if len(self.attack_attrs[\"original_index_map\"]) == 0:\n return idxs\n elif isinstance(idxs, set):\n idxs = list(idxs)\n if isinstance(idxs, list) or isinstance(idxs, np.ndarray):\n idxs = torch.tensor(idxs)\n elif not isinstance(idxs, torch.Tensor):\n raise TypeError(\n f\"convert_from_original_idxs got invalid idxs type {type(idxs)}\"\n )\n return [self.attack_attrs[\"original_index_map\"][i] for i in idxs]\n\n def replace_words_at_indices(self, indices, new_words):\n \"\"\" This code returns a new AttackedText object where the word at \n ``index`` is replaced with a new word.\"\"\"\n if len(indices) != len(new_words):\n raise ValueError(\n f\"Cannot replace {len(new_words)} words at {len(indices)} indices.\"\n )\n words = self.words[:]\n for i, new_word in zip(indices, new_words):\n if not isinstance(new_word, str):\n raise TypeError(\n f\"replace_words_at_indices requires ``str`` words, got {type(new_word)}\"\n )\n if (i < 0) or (i > len(words)):\n raise ValueError(f\"Cannot assign word at index {i}\")\n words[i] = new_word\n return self.generate_new_attacked_text(words)\n\n def replace_word_at_index(self, index, new_word):\n \"\"\" This code returns a new AttackedText object where the word at \n ``index`` is replaced with a new word.\n \"\"\"\n if not isinstance(new_word, str):\n raise TypeError(\n f\"replace_word_at_index requires ``str`` new_word, got {type(new_word)}\"\n )\n return self.replace_words_at_indices([index], [new_word])\n\n def delete_word_at_index(self, index):\n \"\"\" This code returns a new AttackedText object where the word at \n ``index`` is removed.\n \"\"\"\n return self.replace_word_at_index(index, \"\")\n\n def insert_text_after_word_index(self, index, text):\n \"\"\" Inserts a string before word at index ``index`` and attempts to add\n appropriate spacing.\n \"\"\"\n if not isinstance(text, str):\n raise TypeError(f\"text must be an str, got type {type(text)}\")\n word_at_index = self.words[index]\n new_text = \" \".join((word_at_index, text))\n return self.replace_word_at_index(index, new_text)\n\n def insert_text_before_word_index(self, index, text):\n \"\"\" Inserts a string before word at index ``index`` and attempts to add\n appropriate spacing.\n \"\"\"\n if not isinstance(text, str):\n raise TypeError(f\"text must be an str, got type {type(text)}\")\n word_at_index = self.words[index]\n # TODO if ``word_at_index`` is at the beginning of a sentence, we should\n # optionally capitalize ``text``.\n new_text = \" \".join((text, word_at_index))\n return self.replace_word_at_index(index, new_text)\n\n def get_deletion_indices(self):\n return self.attack_attrs[\"original_index_map\"][\n self.attack_attrs[\"original_index_map\"] == -1\n ]\n\n def generate_new_attacked_text(self, new_words):\n \"\"\" Returns a new 
AttackedText object and replaces old list of words \n        with a new list of words, but preserves the punctuation and spacing \n        of the original message.\n        \n        ``self.words`` is a list of the words in the current text with \n        punctuation removed. However, each \"word\" in ``new_words``\n        could be an empty string, representing a word deletion, or a string\n        with multiple space-separated words, representing an insertion\n        of one or more words.\n        \"\"\"\n        perturbed_text = \"\"\n        original_text = AttackedText.SPLIT_TOKEN.join(self._text_input.values())\n        new_attack_attrs = dict()\n        if \"label_names\" in self.attack_attrs:\n            new_attack_attrs[\"label_names\"] = self.attack_attrs[\"label_names\"]\n        new_attack_attrs[\"newly_modified_indices\"] = set()\n        # Point to previously monitored text.\n        new_attack_attrs[\"previous_attacked_text\"] = self\n        # Use `new_attack_attrs` to track indices with respect to the original\n        # text.\n        new_attack_attrs[\"modified_indices\"] = self.attack_attrs[\n            \"modified_indices\"\n        ].copy()\n        new_attack_attrs[\"original_index_map\"] = self.attack_attrs[\n            \"original_index_map\"\n        ].copy()\n        new_i = 0\n        # Create the new attacked text by swapping out words from the original\n        # text with a sequence of 0+ words in the new text.\n        for i, (input_word, adv_word_seq) in enumerate(zip(self.words, new_words)):\n            word_start = original_text.index(input_word)\n            word_end = word_start + len(input_word)\n            perturbed_text += original_text[:word_start]\n            original_text = original_text[word_end:]\n            adv_num_words = len(words_from_text(adv_word_seq))\n            num_words_diff = adv_num_words - len(words_from_text(input_word))\n            # Track indices on insertions and deletions.\n            if num_words_diff != 0:\n                # Re-calculate modified indices. If words are inserted or deleted,\n                # they could change.\n                shifted_modified_indices = set()\n                for modified_idx in new_attack_attrs[\"modified_indices\"]:\n                    if modified_idx < i:\n                        shifted_modified_indices.add(modified_idx)\n                    elif modified_idx > i:\n                        shifted_modified_indices.add(modified_idx + num_words_diff)\n                    else:\n                        pass\n                new_attack_attrs[\"modified_indices\"] = shifted_modified_indices\n                # Track insertions and deletions wrt original text.\n                original_modification_idx = i\n                new_idx_map = new_attack_attrs[\"original_index_map\"].copy()\n                if num_words_diff == -1:\n                    new_idx_map[new_idx_map == i] = -1\n                new_idx_map[new_idx_map > i] += num_words_diff\n                new_attack_attrs[\"original_index_map\"] = new_idx_map\n            # Move pointer and save indices of new modified words.\n            for j in range(i, i + adv_num_words):\n                if input_word != adv_word_seq:\n                    new_attack_attrs[\"modified_indices\"].add(new_i)\n                    new_attack_attrs[\"newly_modified_indices\"].add(new_i)\n                new_i += 1\n            # Check spaces for deleted text.\n            if adv_num_words == 0:\n                # Remove extra space (or else there would be two spaces for each\n                # deleted word).\n                # @TODO What to do with punctuation in this case? 
This behavior is undefined.\n                if i == 0:\n                    # If the first word was deleted, take a subsequent space.\n                    if original_text[0] == \" \":\n                        original_text = original_text[1:]\n                else:\n                    # If a word other than the first was deleted, take a preceding space.\n                    if perturbed_text[-1] == \" \":\n                        perturbed_text = perturbed_text[:-1]\n            # Add substitute word(s) to new sentence.\n            perturbed_text += adv_word_seq\n        perturbed_text += original_text # Add all of the ending punctuation.\n        # Reform perturbed_text into an OrderedDict.\n        perturbed_input_texts = perturbed_text.split(AttackedText.SPLIT_TOKEN)\n        perturbed_input = OrderedDict(\n            zip(self._text_input.keys(), perturbed_input_texts)\n        )\n        return AttackedText(perturbed_input, attack_attrs=new_attack_attrs)\n\n    @property\n    def tokenizer_input(self):\n        \"\"\" The tuple of inputs to be passed to the tokenizer. \"\"\"\n        return tuple(self._text_input.values())\n\n    @property\n    def text(self):\n        \"\"\" Represents full text input. Multiple inputs are joined with a line \n        break.\n        \"\"\"\n        return \"\\n\".join(self._text_input.values())\n\n    def printable_text(self, key_color=\"bold\", key_color_method=None):\n        \"\"\" Represents full text input. Adds field descriptions.\n        \n        For example, entailment inputs look like:\n        ```\n        premise: ...\n        hypothesis: ...\n        ```\n        \"\"\"\n        # For single-sequence inputs, don't show a prefix.\n        if len(self._text_input) == 1:\n            return next(iter(self._text_input.values()))\n        # For multiple-sequence inputs, show a prefix and a colon. Optionally,\n        # color the key.\n        else:\n            if key_color_method:\n                ck = lambda k: textattack.shared.utils.color_text(\n                    k, key_color, key_color_method\n                )\n            else:\n                ck = lambda k: k\n            return \"\\n\".join(\n                f\"{ck(key.capitalize())}: {value}\"\n                for key, value in self._text_input.items()\n            )\n\n    def __repr__(self):\n        return f'<AttackedText \"{self.text}\">'\n"
]
| [
[
"torch.tensor"
]
]
|
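Note on the `attacked_text.py` entry above: `generate_new_attacked_text` keeps `original_index_map` in sync on deletions by mapping the deleted word to -1 and shifting later indices. The deletion branch in isolation (toy numbers):

    import numpy as np

    original_index_map = np.arange(4)               # words 0..3 of the original text
    i, num_words_diff = 1, -1                       # word 1 was deleted
    new_idx_map = original_index_map.copy()
    new_idx_map[new_idx_map == i] = -1              # deleted word maps to -1
    new_idx_map[new_idx_map > i] += num_words_diff  # later words shift left
    print(new_idx_map)                              # -> [ 0 -1  1  2]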
rafael-junio/JustAChip8PythonEmulator | [
"ff9c2d67aeaf4f87ff3b5fd6f0231702587455a7"
]
| [
"core/cpu/config/memory_config.py"
]
| [
"import numpy as np\n\n\nclass Config:\n MEMORY_START_ADDRESS = 0x200\n FONT_SET_START_ADDRESS = 0x50\n\n FONT_SET = np.array([\n 0xF0, 0x90, 0x90, 0x90, 0xF0,\n 0x20, 0x60, 0x20, 0x20, 0x70,\n 0xF0, 0x10, 0xF0, 0x80, 0xF0,\n 0xF0, 0x10, 0xF0, 0x10, 0xF0,\n 0x90, 0x90, 0xF0, 0x10, 0x10,\n 0xF0, 0x80, 0xF0, 0x10, 0xF0,\n 0xF0, 0x80, 0xF0, 0x90, 0xF0,\n 0xF0, 0x10, 0x20, 0x40, 0x40,\n 0xF0, 0x90, 0xF0, 0x90, 0xF0,\n 0xF0, 0x90, 0xF0, 0x10, 0xF0,\n 0xF0, 0x90, 0xF0, 0x90, 0x90,\n 0xE0, 0x90, 0xE0, 0x90, 0xE0,\n 0xF0, 0x80, 0x80, 0x80, 0xF0,\n 0xE0, 0x90, 0x90, 0x90, 0xE0,\n 0xF0, 0x80, 0xF0, 0x80, 0xF0,\n 0xF0, 0x80, 0xF0, 0x80, 0x80\n ], dtype=np.uint8)\n"
]
| [
[
"numpy.array"
]
]
|
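Note on the `memory_config.py` entry above: `FONT_SET` stores the sixteen CHIP-8 glyphs (0-F) as 5 bytes each, one byte per pixel row, with the four pixel columns in the high nibble. Rendering the first glyph (the digit 0) makes the layout visible:

    FONT_ZERO = [0xF0, 0x90, 0x90, 0x90, 0xF0]  # first 5 bytes of FONT_SET
    for row_byte in FONT_ZERO:
        print(''.join('#' if row_byte & (0x80 >> bit) else '.' for bit in range(4)))
    # ####
    # #..#
    # #..#
    # #..#
    # ####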
Kazuhito00/M-LSD-warpPerspective-Example | [
"3045a14ee6c9e7d1d25ca0bcdcb67160b8ad18c9"
]
| [
"example.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport copy\nimport argparse\n\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils import CvFpsCalc\nfrom mlsd.utils import pred_lines, pred_squares\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--file\", type=str, default=None)\n parser.add_argument(\"--width\", type=int, default=640)\n parser.add_argument(\"--height\", type=int, default=480)\n parser.add_argument(\"--crop_width\", type=int, default=224)\n parser.add_argument(\"--crop_height\", type=int, default=224)\n\n parser.add_argument(\n \"--model\",\n type=str,\n default='mlsd/tflite_models/M-LSD_320_tiny_fp32.tflite')\n parser.add_argument(\"--model_shape\", type=int, default=320)\n parser.add_argument(\"--top_n\", type=int, default=1)\n\n parser.add_argument(\"--score\", type=float, default=0.1)\n parser.add_argument(\"--outside_ratio\", type=float, default=0.1)\n parser.add_argument(\"--inside_ratio\", type=float, default=0.5)\n parser.add_argument(\"--w_overlap\", type=float, default=0.0)\n parser.add_argument(\"--w_degree\", type=float, default=1.14)\n parser.add_argument(\"--w_length\", type=float, default=0.03)\n parser.add_argument(\"--w_area\", type=float, default=1.84)\n parser.add_argument(\"--w_center\", type=float, default=1.46)\n\n args = parser.parse_args()\n\n return args\n\n\ndef get_params(args):\n params = {\n 'score': args.score,\n 'outside_ratio': args.outside_ratio,\n 'inside_ratio': args.inside_ratio,\n 'w_overlap': args.w_overlap,\n 'w_degree': args.w_degree,\n 'w_length': args.w_length,\n 'w_area': args.w_area,\n 'w_center': args.w_center,\n }\n return params\n\n\ndef extract_square_image(image, square, crop_width, crop_height):\n extract_image = None\n\n # 射影変換\n pts1 = np.float32([\n square[0],\n square[1],\n square[2],\n square[3],\n ])\n pts2 = np.float32([\n [0, 0],\n [crop_width, 0],\n [crop_width, crop_height],\n [0, crop_height],\n ])\n M = cv.getPerspectiveTransform(pts1, pts2)\n extract_image = cv.warpPerspective(image, M, (crop_width, crop_height))\n\n return extract_image\n\n\ndef main():\n # コマンドライン引数\n args = get_args()\n cap_device = args.device\n cap_width = args.width\n cap_height = args.height\n filepath = args.file\n\n crop_width = args.crop_width\n crop_height = args.crop_height\n\n model = args.model\n model_shape = args.model_shape\n model_shapes = [model_shape, model_shape]\n\n top_n = args.top_n\n\n # カメラ準備\n cap = None\n if filepath is None:\n cap = cv.VideoCapture(cap_device)\n cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)\n cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)\n else:\n cap = cv.VideoCapture(filepath)\n\n # M-LSDモデルロード\n interpreter = tf.lite.Interpreter(model_path=model, num_threads=2)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n params = get_params(args)\n\n # FPS計測モジュール\n cvFpsCalc = CvFpsCalc(buffer_len=10)\n\n square_count = 0\n prev_square_count = 0\n while True:\n # キー入力(ESC:プログラム終了)\n key = cv.waitKey(1)\n if key == 27: # ESC\n break\n\n # FPS計測\n display_fps = cvFpsCalc.get()\n\n # カメラキャプチャ\n ret, frame = cap.read()\n if not ret:\n print('Error : cap.read()')\n if filepath is None:\n resize_frame = cv.resize(frame, (int(cap_width), int(cap_height)))\n else:\n resize_frame = copy.deepcopy(frame)\n\n # M-LSD推論\n lines, squares, score_array, inter_points = pred_squares(\n resize_frame,\n interpreter,\n 
input_details,\n            output_details,\n            model_shapes,\n            params,\n        )\n\n        # Sort square indices by descending score\n        # sorted_score_array = []\n        sorted_squares = []\n        if (len(score_array) > 0):\n            score_sort_index = np.argsort(score_array)[::-1]\n            # sorted_score_array = score_array[score_sort_index]\n            sorted_squares = squares[score_sort_index]\n\n        # Perspective transform\n        extract_images = []\n        for index, square in enumerate(sorted_squares):\n            if (index < top_n):\n                extract_image = extract_square_image(resize_frame, square,\n                                                     crop_width, crop_height)\n                extract_images.append(extract_image)\n\n                square_count = index + 1\n            else:\n                break\n\n        # Draw debug information\n        for index, square in enumerate(sorted_squares):\n            if (index < top_n):\n                cv.polylines(resize_frame, [square.reshape([-1, 1, 2])], True,\n                             [255, 0, 0], 2)\n                for pt in square:\n                    cv.circle(resize_frame, (int(pt[0]), int(pt[1])), 4,\n                              [255, 0, 0], -1)\n            else:\n                break\n\n        cv.putText(resize_frame, \"FPS:\" + str(display_fps), (10, 30),\n                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 1, cv.LINE_AA)\n\n        # Update display\n        cv.imshow('M-LSD-warpPerspective', resize_frame)\n        for index, extract_image in enumerate(extract_images):\n            cv.imshow('SQUARE:' + str(index), extract_image)\n\n        # Destroy windows that are no longer needed\n        if prev_square_count > len(extract_images):\n            for index in range(len(extract_images), prev_square_count):\n                cv.destroyWindow('SQUARE:' + str(index))\n        prev_square_count = square_count\n\n    cap.release()\n    cv.destroyAllWindows()\n\n\nif __name__ == '__main__':\n    main()\n"
]
| [
[
"numpy.argsort",
"tensorflow.lite.Interpreter",
"numpy.float32"
]
]
|
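Note on the `example.py` entry above: `extract_square_image` rectifies a detected quadrilateral with a homography. The same two OpenCV calls in isolation (the corner coordinates are made up; corners are ordered TL, TR, BR, BL as in the file):

    import cv2 as cv
    import numpy as np

    image = np.zeros((480, 640, 3), dtype=np.uint8)
    pts1 = np.float32([[100, 80], [500, 90], [520, 400], [90, 390]])  # detected square
    pts2 = np.float32([[0, 0], [224, 0], [224, 224], [0, 224]])       # target crop corners
    M = cv.getPerspectiveTransform(pts1, pts2)                        # 3x3 homography
    crop = cv.warpPerspective(image, M, (224, 224))                   # rectified patch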
shuklasid19/mlflow-cnn-deep-learning | [
"5dc98b867f1a9f70e226a61d716d87f8b333fe67"
]
| [
"src/stage_02_base_model_creation.py"
]
| [
"import argparse\nimport os\nimport shutil\nfrom tqdm import tqdm\nimport logging\nfrom src.utils.common import read_yaml, create_directories\nfrom src.utils.model import log_model_summary\nimport random\nimport tensorflow as tf\n\nSTAGE = \"BASE MODEL CREATION\" ## <<< change stage name \n\nlogging.basicConfig(\n filename=os.path.join(\"logs\", 'running_logs.log'), \n level=logging.INFO, \n format=\"[%(asctime)s: %(levelname)s: %(module)s]: %(message)s\",\n filemode=\"a\"\n )\n\n\ndef main(config_path):\n ## read config files\n config = read_yaml(config_path)\n\n params = config[\"params\"]\n\n logging.info(\"layers defined\")\n LAYERS = [\n tf.keras.layers.Input(shape=tuple(params[\"img_shape\"])),\n tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation=\"relu\"),\n tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n tf.keras.layers.Conv2D(32, (3,3), activation=\"relu\"),\n tf.keras.layers.MaxPool2D(pool_size=(2,2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(8, activation=\"relu\"),\n tf.keras.layers.Dense(2, activation=\"softmax\")\n ]\n\n classifier = tf.keras.Sequential(LAYERS)\n\n logging.info(f\"base model summary:\\n{log_model_summary(classifier)}\")\n\n classifier.compile(\n optimizer=tf.keras.optimizers.Adam(params[\"lr\"]),\n loss=params[\"loss\"],\n metrics=params[\"metrics\"]\n )\n\n path_to_model_dir = os.path.join(\n config[\"data\"][\"local_dir\"],\n config[\"data\"][\"model_dir\"])\n create_directories([path_to_model_dir])\n path_to_model = os.path.join(\n path_to_model_dir, \n config[\"data\"][\"init_model_file\"])\n classifier.save(path_to_model)\n logging.info(f\"model is saved at : {path_to_model}\")\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", \"-c\", default=\"configs/config.yaml\")\n parsed_args = args.parse_args()\n\n try:\n logging.info(\"\\n********************\")\n logging.info(f\">>>>> stage {STAGE} started <<<<<\")\n main(config_path=parsed_args.config)\n logging.info(f\">>>>> stage {STAGE} completed!<<<<<\\n\")\n except Exception as e:\n logging.exception(e)\n raise e"
]
| [
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Flatten"
]
]
|
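Note on the `stage_02_base_model_creation.py` entry above: `main` reads only a handful of keys from `configs/config.yaml`. A sketch of the structure `read_yaml` would have to return (the values here are illustrative, not taken from the repo):

    config = {
        "params": {
            "img_shape": [224, 224, 3],  # tuple(...) is applied in the code
            "lr": 1e-3,
            "loss": "categorical_crossentropy",
            "metrics": ["accuracy"],
        },
        "data": {
            "local_dir": "artifacts",
            "model_dir": "model",
            "init_model_file": "base_model.h5",
        },
    }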
chauvu/AmpliGraph | [
"783d885b38e82c991a308f26d26eeb5d3513fcc3"
]
| [
"tests/ampligraph/datasets/test_datasets.py"
]
| [
"# Copyright 2019-2021 The AmpliGraph Authors. All Rights Reserved.\n#\n# This file is Licensed under the Apache License, Version 2.0.\n# A copy of the Licence is available in LICENCE, or at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\nfrom ampligraph.datasets import load_wn18, load_fb15k, load_fb15k_237, load_yago3_10, load_wn18rr, load_wn11, \\\n load_fb13, load_onet20k, load_ppi5k, load_nl27k, load_cn15k, OneToNDatasetAdapter, load_from_ntriples\nfrom ampligraph.datasets.datasets import _clean_data\nimport os\nimport numpy as np\nimport pytest\n\n\ndef test_clean_data():\n X = {\n 'train': np.array([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j', 'k', 'l']]),\n 'valid': np.array([['a', 'b', 'c'], ['x', 'e', 'f'], ['g', 'a', 'i'], ['j', 'k', 'y']]),\n 'test': np.array([['a', 'b', 'c'], ['d', 'e', 'x'], ['g', 'b', 'i'], ['y', 'k', 'l']]),\n }\n\n clean_X, valid_idx, test_idx = _clean_data(X, return_idx=True)\n\n np.testing.assert_array_equal(clean_X['train'], X['train'])\n np.testing.assert_array_equal(clean_X['valid'], np.array([['a', 'b', 'c']]))\n np.testing.assert_array_equal(clean_X['test'], np.array([['a', 'b', 'c'], ['g', 'b', 'i']]))\n np.testing.assert_array_equal(valid_idx, np.array([True, False, False, False]))\n np.testing.assert_array_equal(test_idx, np.array([True, False, True, False]))\n\n\ndef test_load_wn18():\n wn18 = load_wn18()\n assert len(wn18['train']) == 141442\n assert len(wn18['valid']) == 5000\n assert len(wn18['test']) == 5000\n\n ent_train = np.union1d(np.unique(wn18[\"train\"][:, 0]), np.unique(wn18[\"train\"][:, 2]))\n ent_valid = np.union1d(np.unique(wn18[\"valid\"][:, 0]), np.unique(wn18[\"valid\"][:, 2]))\n ent_test = np.union1d(np.unique(wn18[\"test\"][:, 0]), np.unique(wn18[\"test\"][:, 2]))\n distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)\n distinct_rel = np.union1d(np.union1d(np.unique(wn18[\"train\"][:, 1]), np.unique(wn18[\"train\"][:, 1])),\n np.unique(wn18[\"train\"][:, 1]))\n\n assert len(distinct_ent) == 40943\n assert len(distinct_rel) == 18\n\n\ndef test_reciprocals():\n \"\"\"Test for reciprocal relations\n \"\"\"\n # Create dataset with reciprocal relations and test if the are added\n fb15k = load_fb15k(add_reciprocal_rels=True)\n train_reciprocal = fb15k['train']\n triple = train_reciprocal[0]\n reciprocal_triple = train_reciprocal[train_reciprocal.shape[0]//2]\n assert(triple[0]==reciprocal_triple[2])\n assert(triple[2]==reciprocal_triple[0])\n assert(triple[1]+'_reciprocal'==reciprocal_triple[1])\n \n # create the same dataset without reciprocals. 
Now the number of triples should be half of prev\n fb15k = load_fb15k(add_reciprocal_rels=False)\n assert(fb15k['train'].shape[0]==train_reciprocal.shape[0]//2)\n\n\ndef test_load_fb15k():\n fb15k = load_fb15k()\n assert len(fb15k['train']) == 483142\n assert len(fb15k['valid']) == 50000\n assert len(fb15k['test']) == 59071\n\n # ent_train = np.union1d(np.unique(fb15k[\"train\"][:,0]), np.unique(fb15k[\"train\"][:,2]))\n # ent_valid = np.union1d(np.unique(fb15k[\"valid\"][:,0]), np.unique(fb15k[\"valid\"][:,2]))\n # ent_test = np.union1d(np.unique(fb15k[\"test\"][:,0]), np.unique(fb15k[\"test\"][:,2]))\n # distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)\n # distinct_rel = np.union1d(np.union1d(np.unique(fb15k[\"train\"][:,1]), np.unique(fb15k[\"train\"][:,1])),\n # np.unique(fb15k[\"train\"][:,1]))\n\n # assert len(distinct_ent) == 14951 \n # assert len(distinct_rel) == 1345 \n\n\ndef test_load_fb15k_237():\n fb15k_237 = load_fb15k_237()\n assert len(fb15k_237['train']) == 272115 \n \n # - 9 because 9 triples containing unseen entities are removed\n assert len(fb15k_237['valid']) == 17535 - 9\n\n # - 28 because 28 triples containing unseen entities are removed\n assert len(fb15k_237['test']) == 20466 - 28\n\n\ndef test_yago_3_10():\n yago_3_10 = load_yago3_10()\n assert len(yago_3_10['train']) == 1079040 \n assert len(yago_3_10['valid']) == 5000 - 22\n assert len(yago_3_10['test']) == 5000 - 18\n\n # ent_train = np.union1d(np.unique(yago_3_10[\"train\"][:,0]), np.unique(yago_3_10[\"train\"][:,2]))\n # ent_valid = np.union1d(np.unique(yago_3_10[\"valid\"][:,0]), np.unique(yago_3_10[\"valid\"][:,2]))\n # ent_test = np.union1d(np.unique(yago_3_10[\"test\"][:,0]), np.unique(yago_3_10[\"test\"][:,2]))\n\n # assert len(set(ent_valid) - set(ent_train)) == 22\n # assert len (set(ent_test) - ((set(ent_valid) & set(ent_train)) | set(ent_train))) == 18\n\n # distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)\n # distinct_rel = np.union1d(np.union1d(np.unique(yago_3_10[\"train\"][:,1]), np.unique(yago_3_10[\"train\"][:,1])),\n # np.unique(yago_3_10[\"train\"][:,1]))\n\n # assert len(distinct_ent) == 123182 \n # assert len(distinct_rel) == 37 \n\n\ndef test_wn18rr():\n wn18rr = load_wn18rr()\n\n ent_train = np.union1d(np.unique(wn18rr[\"train\"][:, 0]), np.unique(wn18rr[\"train\"][:, 2]))\n ent_valid = np.union1d(np.unique(wn18rr[\"valid\"][:, 0]), np.unique(wn18rr[\"valid\"][:, 2]))\n ent_test = np.union1d(np.unique(wn18rr[\"test\"][:, 0]), np.unique(wn18rr[\"test\"][:, 2]))\n distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)\n distinct_rel = np.union1d(np.union1d(np.unique(wn18rr[\"train\"][:, 1]), np.unique(wn18rr[\"train\"][:, 1])),\n np.unique(wn18rr[\"train\"][:, 1]))\n\n assert len(wn18rr['train']) == 86835\n\n # - 210 because 210 triples containing unseen entities are removed\n assert len(wn18rr['valid']) == 3034 - 210\n\n # - 210 because 210 triples containing unseen entities are removed\n assert len(wn18rr['test']) == 3134 - 210\n\n\ndef test_wn11():\n wn11 = load_wn11(clean_unseen=False)\n assert len(wn11['train']) == 110361\n assert len(wn11['valid']) == 5215\n assert len(wn11['test']) == 21035\n assert len(wn11['valid_labels']) == 5215\n assert len(wn11['test_labels']) == 21035\n assert sum(wn11['valid_labels']) == 2606\n assert sum(wn11['test_labels']) == 10493\n\n wn11 = load_wn11(clean_unseen=True)\n assert len(wn11['train']) == 110361\n assert len(wn11['valid']) == 5215 - 338\n assert len(wn11['test']) == 21035 - 1329\n 
assert len(wn11['valid_labels']) == 5215 - 338\n assert len(wn11['test_labels']) == 21035 - 1329\n assert sum(wn11['valid_labels']) == 2409\n assert sum(wn11['test_labels']) == 9706\n\n\ndef test_fb13():\n fb13 = load_fb13(clean_unseen=False)\n assert len(fb13['train']) == 316232\n assert len(fb13['valid']) == 5908 + 5908\n assert len(fb13['test']) == 23733 + 23731\n assert len(fb13['valid_labels']) == 5908 + 5908\n assert len(fb13['test_labels']) == 23733 + 23731\n assert sum(fb13['valid_labels']) == 5908\n assert sum(fb13['test_labels']) == 23733\n\n fb13 = load_fb13(clean_unseen=True)\n assert len(fb13['train']) == 316232\n assert len(fb13['valid']) == 5908 + 5908\n assert len(fb13['test']) == 23733 + 23731\n assert len(fb13['valid_labels']) == 5908 + 5908\n assert len(fb13['test_labels']) == 23733 + 23731\n assert sum(fb13['valid_labels']) == 5908\n assert sum(fb13['test_labels']) == 23733\n \n \ndef test_onet20k():\n onet = load_onet20k()\n assert len(onet['train']) == 461932\n assert len(onet['valid']) == 850\n assert len(onet['test']) == 2000\n assert len(onet['train_numeric_values']) == 461932\n assert len(onet['valid_numeric_values']) == 850\n assert len(onet['test_numeric_values']) == 2000\n \n\ndef test_nl27k():\n nl27k = load_nl27k()\n assert len(nl27k['train']) == 149100\n assert len(nl27k['valid']) == 12274\n assert len(nl27k['test']) == 14026\n assert len(nl27k['train_numeric_values']) == 149100\n assert len(nl27k['valid_numeric_values']) == 12274\n assert len(nl27k['test_numeric_values']) == 14026\n \n nl27k = load_nl27k(clean_unseen=False)\n assert len(nl27k['train']) == 149100\n assert len(nl27k['valid']) == 12274 + 4\n assert len(nl27k['test']) == 14026 + 8\n assert len(nl27k['train_numeric_values']) == 149100\n assert len(nl27k['valid_numeric_values']) == 12274 + 4\n assert len(nl27k['test_numeric_values']) == 14026 + 8\n \n \ndef test_ppi5k():\n ppi5k = load_ppi5k()\n assert len(ppi5k['train']) == 230929\n assert len(ppi5k['valid']) == 19017\n assert len(ppi5k['test']) == 21720\n assert len(ppi5k['train_numeric_values']) == 230929\n assert len(ppi5k['valid_numeric_values']) == 19017\n assert len(ppi5k['test_numeric_values']) == 21720\n \n \ndef test_cn15k():\n cn15k = load_cn15k()\n assert len(cn15k['train']) == 199417\n assert len(cn15k['valid']) == 16829\n assert len(cn15k['test']) == 19224\n assert len(cn15k['train_numeric_values']) == 199417\n assert len(cn15k['valid_numeric_values']) == 16829\n assert len(cn15k['test_numeric_values']) == 19224\n\n\ndef test_load_from_ntriples(request):\n rootdir = request.config.rootdir\n path = os.path.join(rootdir, 'tests', 'ampligraph', 'datasets')\n X = load_from_ntriples('', 'test_triples.nt', data_home=path)\n assert X.shape == (3, 3)\n assert len(np.unique(X.flatten())) == 6\n\n\ndef test_oneton_adapter():\n\n from ampligraph.evaluation.protocol import create_mappings, to_idx\n\n # Train set\n X = np.array([['a', 'p', 'b'],\n ['a', 'p', 'd'],\n ['c', 'p', 'd'],\n ['c', 'p', 'e'],\n ['c', 'p', 'f']])\n\n # a, b, c, d, e, f\n O = np.array([[0, 1, 0, 1, 0, 0], # (a, p)\n [0, 0, 0, 1, 1, 1]]) # (c, p)\n\n # Test\n T = np.array([['a', 'p', 'c'],\n ['c', 'p', 'b']])\n\n # a, b, c, d, e, f\n OT1 = np.array([[0, 1, 0, 1, 0, 0], # (a, p) # test set onehots when output mapping is from train set\n [0, 0, 0, 1, 1, 1]]), # (c, p)\n OT2 = np.array([[0, 0, 1, 0, 0, 0], # (a, p) # test set onehots when output mapping is from test set\n [0, 1, 0, 0, 0, 0]]), # (c, p)\n\n # Filter\n filter = np.concatenate((X, T))\n # a, b, c, d, e, f\n OF = 
np.array([[0, 1, 1, 1, 0, 0], # (a, p) # train set onehots when output mapping is from filter\n [0, 1, 0, 1, 1, 1]]) # (c, p)\n\n # Expected input tuple to filtered outputs\n OF_map = {(0, 0): [0, 1, 1, 1, 0, 0],\n (2, 0): [0, 1, 0, 1, 1, 1]}\n\n rel_to_idx, ent_to_idx = create_mappings(X)\n X = to_idx(X, ent_to_idx, rel_to_idx)\n\n adapter = OneToNDatasetAdapter()\n adapter.use_mappings(rel_to_idx, ent_to_idx)\n adapter.set_data(X, 'train', mapped_status=True)\n\n adapter.set_data(T, 'test', mapped_status=False)\n\n # Adapter internally maps test set\n assert (adapter.mapped_status['test'] == True)\n\n # Generate output map\n train_output_map = adapter.generate_output_mapping('train')\n\n # Assert all unique sp pairs are in the output_map keys\n unique_sp = set([(s, p) for s, p in X[:, [0, 1]]])\n for sp in train_output_map.keys():\n assert(sp in unique_sp)\n\n # ValueError if generating onehot outputs before output_mapping is set\n with pytest.raises(ValueError):\n adapter.generate_outputs('train')\n\n adapter.set_output_mapping(train_output_map)\n adapter.generate_outputs('train')\n train_iter = adapter.get_next_batch(batches_count=1, dataset_type='train', use_filter=False)\n triples, onehot = next(train_iter)\n assert np.all(np.unique(X[:, [0, 1]], axis=0) == triples[:, [0, 1]])\n assert np.all(O == onehot)\n\n test_iter = adapter.get_next_batch(batches_count=1, dataset_type='test', use_filter=False)\n\n triples, onehot = next(test_iter)\n assert np.all(np.unique(X[:, [0, 1]], axis=0) == triples[:, [0, 1]])\n assert np.all(OT1 == onehot)\n\n # Generate test output map\n test_output_map = adapter.generate_output_mapping('test')\n adapter.set_output_mapping(test_output_map)\n\n test_iter = adapter.get_next_batch(batches_count=1, dataset_type='test', use_filter=False)\n\n triples, onehot = next(test_iter)\n assert np.all(np.unique(X[:, [0, 1]], axis=0) == triples[:, [0, 1]])\n assert np.all(OT2 == onehot)\n\n # Train onehot outputs with filter=True\n adapter.set_filter(filter_triples=filter)\n train_iter = adapter.get_next_batch(batches_count=1, dataset_type='train', use_filter=True)\n triples, onehot = next(train_iter)\n assert np.all(np.unique(X[:, [0, 1]], axis=0) == triples[:, [0, 1]])\n assert np.all(OF == onehot)\n\n ## Test adapter clear_outputs\n assert(len(adapter.filtered_status) > 0)\n adapter.clear_outputs()\n assert(len(adapter.filtered_status) == 0)\n\n # Test verify_outputs\n adapter.clear_outputs()\n adapter.generate_outputs('train', use_filter=False, unique_pairs=False)\n assert adapter.verify_outputs('train', use_filter=False, unique_pairs=False) == True\n assert adapter.verify_outputs('train', use_filter=True, unique_pairs=True) == False\n assert adapter.verify_outputs('train', use_filter=True, unique_pairs=False) == False\n assert adapter.verify_outputs('train', use_filter=False, unique_pairs=True) == False\n\n adapter.clear_outputs()\n adapter.generate_outputs('train', use_filter=True, unique_pairs=True)\n assert adapter.verify_outputs('train', use_filter=False, unique_pairs=False) == False\n assert adapter.verify_outputs('train', use_filter=True, unique_pairs=True) == True\n assert adapter.verify_outputs('train', use_filter=True, unique_pairs=False) == False\n assert adapter.verify_outputs('train', use_filter=False, unique_pairs=True) == False\n\n # Test batch output shapes\n adapter.clear_outputs()\n train_iter = adapter.get_next_batch(batches_count=1, dataset_type='train', use_filter=True, unique_pairs=True)\n out, triples = next(train_iter)\n assert out.shape[0] 
== 2\n assert triples.shape[0] == 2\n\n adapter.clear_outputs()\n train_iter = adapter.get_next_batch(batches_count=1, dataset_type='train', use_filter=True, unique_pairs=False)\n out, triples = next(train_iter)\n assert out.shape[0] == 5\n assert triples.shape[0] == 5\n\n # Test batch subject corruptions\n batch_size = 3\n batch_iter = adapter.get_next_batch_subject_corruptions(batch_size=batch_size, dataset_type='train', use_filter=True)\n triples, out, out_onehot = next(batch_iter)\n\n assert np.all(X == triples) # only 1 relation in X, so triples should be the same (ignores batch_size)\n assert triples.shape[1] == 3 # triples should be triples!\n assert out.shape[1] == 3 # batch out should also be triples!\n assert out.shape[0] == batch_size # batch dimension of out should equal batch_size\n assert out_onehot.shape[0] == batch_size # Onehot batch_dimension should equal batch size\n assert out_onehot.shape[0] == out.shape[0] # .. and should be same size as number of unique entities\n assert out_onehot.shape[1] == len(adapter.ent_to_idx)\n\n adapter.clear_outputs()\n batch_iter = adapter.get_next_batch_subject_corruptions(batch_size=-1, dataset_type='train', use_filter=True)\n triples, out, out_onehot = next(batch_iter)\n\n assert np.all(X == triples) # only 1 relation in X, so triples should be the same\n assert triples.shape[1] == 3 # triples should be triples!\n assert out.shape[1] == 3 # batch out should also be triples!\n assert out.shape[0] == len(adapter.ent_to_idx) # batch_size=-1 (entire set), so the batch_dim should equal ents\n assert out_onehot.shape[0] == out_onehot.shape[1] # Onehot should be a square matrix\n assert out_onehot.shape[0] == out.shape[0] # .. and should be same size as number of unique entities\n assert out_onehot.shape[1] == len(adapter.ent_to_idx)\n\n # Verify that onehot outputs are as expected in the out array (or if not present in OF_Map then is a zero vector)\n for idx, (s, p, o) in enumerate(out):\n if (s, p) in OF_map.keys():\n onehot = OF_map[(s, p)]\n assert np.all(onehot == out_onehot[idx])\n else:\n assert np.all(out_onehot[idx] == 0)\n\n\nif __name__ == '__main__':\n test_oneton_adapter()"
]
| [
[
"numpy.unique",
"numpy.union1d",
"numpy.testing.assert_array_equal",
"numpy.all",
"numpy.concatenate",
"numpy.array"
]
]
|
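The OneToNDatasetAdapter test in the row above revolves around the 1-N output layout: every unique (subject, predicate) pair maps to a one-hot vector over all entities that marks its true objects, and filtering just rebuilds that map from a larger triple set. A minimal numpy sketch of the mapping the test verifies (illustrative only; it does not use the AmpliGraph API):

```python
import numpy as np

# toy training triples, as in the test above
X = np.array([['a', 'p', 'b'],
              ['a', 'p', 'd'],
              ['c', 'p', 'd'],
              ['c', 'p', 'e'],
              ['c', 'p', 'f']])

# index entities and relations
ent_to_idx = {e: i for i, e in enumerate(sorted(set(X[:, 0]) | set(X[:, 2])))}
rel_to_idx = {r: i for i, r in enumerate(sorted(set(X[:, 1])))}

# build the (subject, predicate) -> one-hot-objects mapping
output_map = {}
for s, p, o in X:
    key = (ent_to_idx[s], rel_to_idx[p])
    onehot = output_map.setdefault(key, np.zeros(len(ent_to_idx), dtype=int))
    onehot[ent_to_idx[o]] = 1

print(output_map[(ent_to_idx['a'], rel_to_idx['p'])])  # [0 1 0 1 0 0]
print(output_map[(ent_to_idx['c'], rel_to_idx['p'])])  # [0 0 0 1 1 1]
```

Running the same loop over np.concatenate((X, T)) with the test's T reproduces the filtered map OF that the test checks against.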
mweiss17/SEVN-model | [
"14bb5b416886cd2aa0d21a3b6ec07411ec671dcb"
]
| [
"scripts/parse_comet_noisy.py"
]
| [
"from _warnings import warn\n\nfrom comet_ml import API\nimport numpy as np\nimport matplotlib.pyplot as plt\n\napi = API()\n\nexp_ids = {\n \"SEVN-Train-AllObs-Shaped-v1-s0-10p\": \"mweiss17/navi-corl-2019/008004e9c9a940e088437e4ddeab9eb4\",\n \"SEVN-Train-AllObs-Shaped-v1-s1-10p\": \"mweiss17/navi-corl-2019/dbd36b752d6a4703904161d95ee09868\",\n \"SEVN-Train-AllObs-Shaped-v1-s2-10p\": \"mweiss17/navi-corl-2019/84dbd53a36db4b39a7afc9acc66609a0\",\n \"SEVN-Train-AllObs-Shaped-v1-s3-10p\": \"mweiss17/navi-corl-2019/12f4aec90e284d1188bbe6307bdc33bd\",\n \"SEVN-Train-AllObs-Shaped-v1-s4-10p\": \"mweiss17/navi-corl-2019/bb6af29d7336411b92e31f750b5087bb\",\n}\n\n# oracle_random_ids = {\n# \"Hyrule-Mini-Random-v1\": \"mweiss17/navi-corl-2019/80b8b611c84242ffa61d08cc3364ba4b\",\n# \"Hyrule-Mini-Oracle-v1\": \"mweiss17/navi-corl-2019/c212813764de4a66994912dae21a8628\",\n# }\n\nplot_info = {\n \"SEVN-Train-AllObs-Shaped-v1\": {'color': '#22a784', 'plot_name': 'AllObs'},\n # \"Hyrule-Mini-NoImg-Shaped-v1\": {'color': '#fde724', 'plot_name': 'NoImg'},\n # \"Hyrule-Mini-NoGPS-Shaped-v1\": {'color': '#440154', 'plot_name': 'NoGPS'},\n # \"Hyrule-Mini-ImgOnly-Shaped-v1\": {'color': '#29788e', 'plot_name': 'ImgOnly'},\n # \"Hyrule-Mini-Random-v1\": {'color': '#79d151', 'plot_name': 'Random'},\n # \"Hyrule-Mini-Oracle-v1\": {'color': '#404387', 'plot_name': 'Oracle'},\n}\n\nreported_metrics = [\"Reward Mean\", \"Episodic Success Rate\" , \"Episode Length Mean \",]\n\n\ndef build_plot_dict(orig_env_name, raw_tuples, final_data, log_ts):\n for i in range(len(raw_tuples)):\n\n env_name = orig_env_name + ' - ' + reported_metrics[i]\n temp_data = np.array([list(x) for x in raw_tuples[i]]).transpose()\n\n # Preprocess for equal log intervals\n data = np.zeros((temp_data.shape[0], len(log_ts)))\n data[0, :] = log_ts\n for j in range(len(log_ts)):\n index = np.where(temp_data[0] == log_ts[j])[0]\n if index.size == 0:\n continue\n data[:, j] = temp_data[:, index[0]]\n\n # If same experiment with different seed\n if env_name in final_data:\n final_data[env_name]['n'] += 1\n data_concat = np.vstack((final_data[env_name]['data0'], data[1, :]))\n final_data[env_name]['data0'] = data_concat\n\n # If first experiment with these hyperparams.\n else:\n final_data[env_name] = {'metric': reported_metrics[i], 'data0': data, 'n': 1}\n\n return final_data\n\n\ndef running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)\n\n\n# Training Data\nfinal_data = {}\nfor name, exp_id in exp_ids.items():\n experiment = api.get(exp_id)\n for d in experiment.parameters:\n if d[\"name\"] == 'env-name':\n assert d['valueMax'] in name\n env_name = d['valueMax']\n if d[\"name\"] == 'log_interval':\n log_int = d['valueMax']\n\n reward_mean = experiment.metrics_raw[reported_metrics[0]]\n episodic_success_rate = experiment.metrics_raw[reported_metrics[1]]\n episode_length_mean = experiment.metrics_raw[reported_metrics[2]]\n if name == 'SEVN-Train-AllObs-Shaped-v1-s0-10p':\n logged_timesteps = np.array([list(x) for x in reward_mean]).transpose()[0]\n\n\n final_data = build_plot_dict(orig_env_name=env_name,\n raw_tuples=[reward_mean, episodic_success_rate, episode_length_mean],\n final_data=final_data,\n log_ts=logged_timesteps)\n\n# Random-Oracle data0\n# random_oracle_data = {}\n# for name, exp_id in oracle_random_ids.items():\n# random_oracle_data[name] = {}\n# experiment = api.get(exp_id)\n#\n# reward_mean = experiment.metrics_raw[reported_metrics[0]]\n# reward_arr = 
np.array(reward_mean).transpose()\n# random_oracle_data[name][reported_metrics[0]] = np.mean(reward_arr[1])\n#\n# if \"Oracle\" in name:\n# random_oracle_data[name][reported_metrics[1]] = 1.0\n# else:\n# episodic_success_rate = experiment.metrics_raw[reported_metrics[1]]\n# ep_succes_rate_arr = np.array(episodic_success_rate).transpose()\n# random_oracle_data[name][reported_metrics[1]] = np.mean(ep_succes_rate_arr[1])\n#\n# episode_length_mean = experiment.metrics_raw[reported_metrics[2]]\n# ep_length_mean_arr = np.array(episode_length_mean).transpose()\n# random_oracle_data[name][reported_metrics[2]] = np.mean(ep_length_mean_arr[1])\n\n\n# Plotting Statistics\nfor metric in reported_metrics:\n fig = plt.figure()\n plt.title(metric, fontsize=18)\n plt.xlabel('Timesteps', fontsize=14)\n plt.ylabel(metric, fontsize=14)\n\n # Add random and oracle here.\n # for name, _ in oracle_random_ids.items():\n # color = plot_info[name]['color']\n # label = plot_info[name]['plot_name']\n # plt.axhline(y=random_oracle_data[name][metric], color=color, label=label, linestyle='--')\n for key, val in final_data.items():\n if metric in key:\n key = key.replace(' - ' + metric, \"\")\n color = plot_info[key]['color']\n label = plot_info[key]['plot_name']\n\n met_mean = np.mean(val['data0'][1:], axis=0)\n met_std = np.std(val['data0'][1:], axis=0)\n\n plt.fill_between(running_mean(val['data0'][0], 10), running_mean(met_mean - met_std, 10),\n running_mean(met_mean + met_std, 10), alpha=0.1, facecolor=color)\n plt.plot(running_mean(val['data0'][0], 10), running_mean(met_mean, 10), color, label=label)\n\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n plt.legend(fontsize=14)\n plt.savefig('plots/'+metric + \"123.png\")\n\n\n# Summary Statistics\nfor key, val in final_data.items():\n print(key)\n if val['metric'] == reported_metrics[0]:\n print(\"Reward Mean. Mean of the last 10 log intervals:\", np.mean(np.mean(val['data0'][1:], axis=0)[-10:]))\n elif val['metric'] == reported_metrics[1]:\n print(\"Success Rate. Mean of max success rates:\", np.mean(np.max(val['data0'][1:, -100:], axis=1)))\n elif val['metric'] == reported_metrics[2]:\n print(\"Episode Length Mean. 
Mean of the last 10 log intervals:\", np.mean(np.mean(val['data0'][1:], axis=0)[-10:]))\n print(\"------------------------------------------------------------------------------------------------\")\n\n\n# GPS data0\n# 4 procs only\ngps_exp_ids = {\n \"SEVN-Test-NoisyGPS-1-v1-s0-10p\": \"mweiss17/navi-corl-2019/2d3670a8af1e4c9d83f06a889e02862e\",\n \"SEVN-Test-NoisyGPS-1-v1-s1-10p\": \"mweiss17/navi-corl-2019/604adedd9fdd4f0bb18fff5d76a4400f\",\n \"SEVN-Test-NoisyGPS-1-v1-s2-10p\": \"mweiss17/navi-corl-2019/76078c37a6934a24bb644bda65fcff0e\",\n \"SEVN-Test-NoisyGPS-5-v1-s0-10p\": \"mweiss17/navi-corl-2019/60fb0b41644c4ba89949eb03ada53948\",\n \"SEVN-Test-NoisyGPS-5-v1-s1-10p\": \"mweiss17/navi-corl-2019/a887d1de91c0463e94985441bbf8b126\",\n \"SEVN-Test-NoisyGPS-5-v1-s2-10p\": \"mweiss17/navi-corl-2019/76078c37a6934a24bb644bda65fcff0e\",\n \"SEVN-Test-NoisyGPS-25-v1-s0-10p\": \"mweiss17/navi-corl-2019/281db6264d9d46fb8b0ada97ecd19903\",\n \"SEVN-Test-NoisyGPS-25-v1-s1-10p\": \"mweiss17/navi-corl-2019/f4f8901bd4944340bd8beb2c203ccca9\",\n \"SEVN-Test-NoisyGPS-25-v1-s2-10p\": \"mweiss17/navi-corl-2019/69a88388d483456c9174453956414f97\",\n \"SEVN-Test-NoisyGPS-100-v1-s0-10p\": \"mweiss17/navi-corl-2019/5824c0711976465cb1c2363434246961\",\n \"SEVN-Test-NoisyGPS-100-v1-s1-10p\": \"mweiss17/navi-corl-2019/79e37ee59d0e41b094eafb5a70f8df23\",\n \"SEVN-Test-NoisyGPS-100-v1-s2-10p\": \"mweiss17/navi-corl-2019/cb839ab2d3444f3aaadfb8497c7604ad\",\n}\n\nplot_info = {\n \"SEVN-Train-AllObs-Shaped-v1\": {'color': '#22a784', 'plot_name': 'AllObs'},\n \"SEVN-Test-NoisyGPS-1-v1\": {'color': '#fde724', 'plot_name': 'NoisyGPS-1'},\n \"SEVN-Test-NoisyGPS-5-v1\": {'color': '#fde724', 'plot_name': 'NoisyGPS-5'},\n \"SEVN-Test-NoisyGPS-25-v1\": {'color': '#fde724', 'plot_name': 'NoisyGPS-25'},\n \"SEVN-Test-NoisyGPS-100-v1\": {'color': '#fde724', 'plot_name': 'NoisyGPS-100'},\n}\n\ngps_exp_data = {}\nmin_numframes = 0000000\nmax_numframes = 100000000\nfor name, exp_id in gps_exp_ids.items():\n gps_exp_data[name] = {}\n experiment = api.get(exp_id)\n\n reward_mean = experiment.metrics_raw[reported_metrics[0]]\n reward_arr = np.array(reward_mean).transpose()\n reward_arr = reward_arr[:, np.where(np.logical_and(reward_arr[0] >= min_numframes,\n reward_arr[0] < max_numframes))[0]]\n gps_exp_data[name][reported_metrics[0]] = {\n 'mean': np.mean(reward_arr[1]),\n 'std': np.std(reward_arr[1]),\n 'raw_data': reward_arr\n }\n\n episodic_success_rate = experiment.metrics_raw[reported_metrics[1]]\n ep_succes_rate_arr = np.array(episodic_success_rate).transpose()\n ep_succes_rate_arr = ep_succes_rate_arr[:, np.where(np.logical_and(ep_succes_rate_arr[0] >= min_numframes,\n ep_succes_rate_arr[0] < max_numframes))[0]]\n gps_exp_data[name][reported_metrics[1]] = {\n 'mean': np.mean(ep_succes_rate_arr[1]),\n 'std': np.std(ep_succes_rate_arr[1]),\n 'raw_data': ep_succes_rate_arr\n }\n\n episode_length_mean = experiment.metrics_raw[reported_metrics[2]]\n ep_length_mean_arr = np.array(episode_length_mean).transpose()\n ep_length_mean_arr = ep_length_mean_arr[:, np.where(np.logical_and(ep_length_mean_arr[0] >= min_numframes,\n ep_length_mean_arr[0] < max_numframes))[0]]\n gps_exp_data[name][reported_metrics[2]] = {\n 'mean': np.mean(ep_length_mean_arr[1]),\n 'std': np.std(ep_length_mean_arr[1]),\n 'raw_data': ep_length_mean_arr\n }\n\n# Plotting Statistics\nfor metric in reported_metrics:\n fig = plt.figure()\n plt.title(metric, fontsize=18)\n plt.xlabel('Timesteps', fontsize=14)\n plt.ylabel(metric, fontsize=14)\n\n for key, val in 
gps_exp_data.items():\n key = \"-\".join(key.split('-')[:5])\n color = plot_info[key]['color']\n label = plot_info[key]['plot_name']\n\n data = val[metric][\"raw_data\"]\n plt.plot(running_mean(data[0], 100), running_mean(data[1], 100), color, label=label)\n\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n plt.legend(fontsize=14)\n plt.savefig('plots/gps_exp'+metric + \".png\")\n\n"
]
| [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.logical_and",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.max",
"matplotlib.pyplot.ticklabel_format",
"numpy.std",
"numpy.mean",
"numpy.insert",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.where",
"numpy.vstack",
"matplotlib.pyplot.ylabel"
]
]
|
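The running_mean helper in the row above relies on the cumulative-sum identity: after prepending a zero, the difference of cumsum entries N apart is exactly each window's sum, so the whole smoothing pass is O(n) regardless of window size. A quick numpy-only check (hypothetical data) that it matches a naive moving average:

```python
import numpy as np

def running_mean(x, N):
    # window sums via differences of the zero-padded cumulative sum
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)

x = np.random.randn(1000)
naive = np.convolve(x, np.ones(10) / 10, mode='valid')
assert np.allclose(running_mean(x, 10), naive)

print(running_mean(np.arange(5), 2))  # [0.5 1.5 2.5 3.5]
```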
maurock/Active-3D-Vision-and-Touch | [
"39e24de0ad3c3caad5d78b7cb351e95d4691c88c"
]
| [
"pterotactyl/utility/utils.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport random\nimport os\nfrom PIL import Image\nimport math\nimport json\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.transform import Rotation as R\nimport xml.etree.ElementTree as ET\nfrom scipy import ndimage\nfrom collections import namedtuple\n\nfrom pytorch3d.loss import chamfer_distance as cuda_cd\nfrom pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals\nfrom pytorch3d.ops.sample_points_from_meshes import _rand_barycentric_coords\nfrom pytorch3d.io.obj_io import load_obj, save_obj\nfrom pterotactyl.utility import pretty_render\nimport pterotactyl.objects as objects\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef load_mesh_vision(args, obj):\n\t# load obj file\n\tverts, faces = load_mesh_touch(obj)\n\n\t# get adjacency matrix infomation\n\tadj_info = adj_init(verts, faces, args)\n\treturn adj_info, verts\n\n# set seeds for consistency\ndef set_seeds(seed):\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\ttorch.cuda.manual_seed(seed)\n\trandom.seed(seed)\n\n\n# normalizes symetric, binary adj matrix such that sum of each row is 1\ndef normalize_adj(mx):\n\trowsum = mx.sum(1)\n\tr_inv = (1. / rowsum).view(-1)\n\tr_inv[r_inv != r_inv] = 0.\n\tmx = torch.mm(torch.eye(r_inv.shape[0]).to(mx.device) * r_inv, mx)\n\treturn mx\n\n\n# defines the adjacecny matrix for an object\ndef adj_init(verts, faces, args):\n\t# get generic adjacency matrix for vision charts\n\tadj = calc_adj(faces)\n\tadj_info = {}\n\tadj_info['origional'] = normalize_adj(adj.clone())\n\t# this combines the adjacency information of touch and vision charts\n\t# the output adj matrix has the first k rows corresponding to vision charts, and the last |V| - k\n\t# corresponding to touch charts. 
Similarly the first l faces correspond to vision charts, and the\n\t# remaining correspond to touch charts\n\tif args.use_touch:\n\t\tadj, faces = adj_fuse_touch(verts, faces, adj, args)\n\n\tadj = normalize_adj(adj)\n\tadj_info['adj'] = adj\n\tadj_info['faces'] = faces\n\treturn adj_info\n\n\n# combines graph for vision and touch charts to define a fused adjacency matrix\ndef adj_fuse_touch(verts, faces, adj, args):\n\tverts = verts.data.cpu().numpy()\n\thash = {}\n\tnumber_of_grasps = args.num_grasps\n\t# find vertices which have the same 3D position\n\tfor e, v in enumerate(verts):\n\t\tif v.tobytes() in hash:\n\t\t\thash[v.tobytes()].append(e)\n\t\telse:\n\t\t\thash[v.tobytes()] = [e]\n\n\t# load object information for generic touch chart\n\tif args.use_touch:\n\t\tchart_location = os.path.join(\n\t\t\tos.path.dirname(objects.__file__), \"touch_chart.obj\"\n\t\t)\n\t\tsheet_verts, sheet_faces = load_mesh_touch(chart_location)\n\t\tsheet_adj = calc_adj(sheet_faces)\n\n\t\t# central vertex for each touch chart that will communicate with all vision charts\n\t\tcentral_point = 4\n\t\tfingers = 1 if args.finger else 4\n\t\tcentral_points = [central_point + (i * sheet_adj.shape[0]) + adj.shape[0] for i in\n\t\t\t\t\t\t range(fingers * number_of_grasps)]\n\n\t\t# define and fill new adjacency matrix with vision and touch charts\n\t\tnew_dim = adj.shape[0] + (fingers * number_of_grasps * sheet_adj.shape[0])\n\t\tnew_adj = torch.zeros((new_dim, new_dim)).to(device)\n\t\tnew_adj[: adj.shape[0], :adj.shape[0]] = adj.clone()\n\t\tfor i in range(fingers * number_of_grasps):\n\t\t\tstart = adj.shape[0] + (sheet_adj.shape[0] * i)\n\t\t\tend = adj.shape[0] + (sheet_adj.shape[0] * (i + 1))\n\t\t\tnew_adj[start: end, start:end] = sheet_adj.clone()\n\t\tadj = new_adj\n\n\t\t# define new faces with vision and touch charts\n\t\tall_faces = [faces]\n\t\tfor i in range(fingers * number_of_grasps):\n\t\t\ttemp_sheet_faces = sheet_faces.clone() + verts.shape[0]\n\t\t\ttemp_sheet_faces += i * sheet_verts.shape[0]\n\t\t\tall_faces.append(temp_sheet_faces)\n\t\tfaces = torch.cat(all_faces)\n\n\t# update adjacency matrix to allow communication between vision and touch charts\n\tfor key in hash.keys():\n\t\tcur_verts = hash[key]\n\t\tif len(cur_verts) > 1:\n\t\t\tfor v1 in cur_verts:\n\t\t\t\tfor v2 in cur_verts: # vertices on the boundary of vision charts can communicate\n\t\t\t\t\tadj[v1, v2] = 1\n\t\t\t\tif args.use_touch:\n\t\t\t\t\tfor c in central_points: # touch and vision charts can communicate\n\t\t\t\t\t\tadj[v1, c] = 1\n\t\t\t\t\t\tadj[c, v1] = 1\n\n\treturn adj, faces\n\n\n# computes adjacency matrix from face information\ndef calc_adj(faces):\n\tv1 = faces[:, 0]\n\tv2 = faces[:, 1]\n\tv3 = faces[:, 2]\n\tnum_verts = int(faces.max())\n\tadj = torch.eye(num_verts + 1).to(faces.device)\n\n\tadj[(v1, v2)] = 1\n\tadj[(v1, v3)] = 1\n\tadj[(v2, v1)] = 1\n\tadj[(v2, v3)] = 1\n\tadj[(v3, v1)] = 1\n\tadj[(v3, v2)] = 1\n\n\treturn adj\n\n\n# sample points from a batch of meshes\ndef batch_sample(verts, faces, num=10000):\n\t# Pytorch3D based code\n\tbs = verts.shape[0]\n\tface_dim = faces.shape[0]\n\tvert_dim = verts.shape[1]\n\t# following pytorch3D convention shift faces to correctly index flatten vertices\n\tF = faces.unsqueeze(0).repeat(bs, 1, 1)\n\tF += vert_dim * torch.arange(0, bs).unsqueeze(-1).unsqueeze(-1).to(F.device)\n\t# flatten vertices and faces\n\tF = F.reshape(-1, 3)\n\tV = verts.reshape(-1, 3)\n\twith torch.no_grad():\n\t\tareas, _ = mesh_face_areas_normals(V, F)\n\t\tAr = areas.reshape(bs, 
-1)\n\t\tAr[Ar != Ar] = 0\n\t\tAr = torch.abs(Ar / Ar.sum(1).unsqueeze(1))\n\t\tAr[Ar != Ar] = 1\n\n\t\tsample_face_idxs = Ar.multinomial(num, replacement=True)\n\t\tsample_face_idxs += face_dim * torch.arange(0, bs).unsqueeze(-1).to(Ar.device)\n\n\n\t# Get the vertex coordinates of the sampled faces.\n\tface_verts = V[F]\n\tv0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]\n\n\t# Randomly generate barycentric coords.\n\tw0, w1, w2 = _rand_barycentric_coords(bs, num, V.dtype, V.device)\n\n\t# Use the barycentric coords to get a point on each sampled face.\n\tA = v0[sample_face_idxs] # (N, num_samples, 3)\n\tB = v1[sample_face_idxs]\n\tC = v2[sample_face_idxs]\n\tsamples = w0[:, :, None] * A + w1[:, :, None] * B + w2[:, :, None] * C\n\n\treturn samples\n\n\n# implemented from:\n# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py\n# MIT License\n# loads the initial mesh and returns vertex, and face information\ndef load_mesh_touch(obj):\n\tobj_info = load_obj(obj)\n\tverts = obj_info[0]\n\tfaces = obj_info[1].verts_idx\n\tverts = torch.FloatTensor(verts).to(device)\n\tfaces = torch.LongTensor(faces).to(device)\n\treturn verts, faces\n\n\n# returns the chamfer distance between a mesh and a point cloud\ndef chamfer_distance(verts, faces, gt_points, num=1000, repeat=3):\n\tpred_points= batch_sample(verts, faces, num=num)\n\n\tcd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)\n\tif repeat > 1:\n\t\tcds = [cd]\n\t\tfor i in range(repeat - 1):\n\t\t\tpred_points = batch_sample(verts, faces, num=num)\n\t\t\tcd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)\n\t\t\tcds.append(cd)\n\t\tcds = torch.stack(cds)\n\t\tcd = cds.mean(dim=0)\n\n\treturn cd\n\n# saves a point cloud as a .obj file\ndef save_points(file, points):\n\tlocation = f'{file}.obj'\n\ttry:\n\t\twrite_obj(location, points.data.cpu().numpy(), [])\n\texcept:\n\t\twrite_obj(location, points, [])\n\n# converts a voxel object to a point cloud\ndef extract_surface(voxel):\n\tconv_filter = torch.ones((1, 1, 3, 3, 3)).to(device)\n\tlocal_occupancy = F.conv3d(voxel.unsqueeze(\n\t\t0).unsqueeze(0), conv_filter, padding=1)\n\tlocal_occupancy = local_occupancy.squeeze(0).squeeze(0)\n\t# only elements with exposed faces\n\tsurface_positions = (local_occupancy < 27) * (local_occupancy > 0)\n\tpoints = torch.where(surface_positions)\n\tpoints = torch.stack(points)\n\tpoints = points.permute(1, 0)\n\tif device=='cuda:0':\n\t\treturn points.type(torch.cuda.FloatTensor)\n\telse:\n\t\treturn points.type(torch.FloatTensor)\n\n# saves a mesh as an .obj file\ndef write_obj(filename, verts, faces):\n\t\"\"\" write the verts and faces on file.\"\"\"\n\twith open(filename, 'w') as f:\n\t\t# write vertices\n\t\tf.write('g\\n# %d vertex\\n' % len(verts))\n\t\tfor vert in verts:\n\t\t\tf.write('v %f %f %f\\n' % tuple(vert))\n\n\t\t# write faces\n\t\tf.write('# %d faces\\n' % len(faces))\n\t\tfor face in faces:\n\t\t\tf.write('f %d %d %d\\n' % tuple(face))\n\n# makes the sphere of actions\nclass get_circle(object):\n\tdef __init__(self, num_points, rank=0):\n\t\taction_position = []\n\t\ta = 4 * np.pi / float(num_points)\n\t\td = math.sqrt(a)\n\t\tM_t = round(np.pi / d)\n\t\td_t = np.pi / M_t\n\t\td_phi = a / d_t\n\t\tsphere_positions = []\n\t\tfor i in range(0, M_t):\n\t\t\ttheta = np.pi * (i + .5) / M_t\n\t\t\tM_phi = round(2 * np.pi * math.sin(theta) / d_phi)\n\t\t\tfor j in range(0, M_phi):\n\t\t\t\tphi = 2 * np.pi * j / M_phi\n\t\t\t\tpoint = self.get_point(theta, 
phi)\n\t\t\t\tsphere_positions.append([theta, phi])\n\t\t\t\taction_position.append(point)\n\t\tself.points = torch.stack(action_position)\n\t\tself.sphere_points = sphere_positions\n\t\tif num_points != self.points.shape[0]:\n\t\t\tprint(f' we have {self.points.shape} points but want {num_points}')\n\t\t\texit()\n\n\tdef get_point(self, a, b):\n\t\tx = math.sin(a) * math.cos(b)\n\t\ty = math.sin(a) * math.sin(b)\n\t\tz = math.cos(a)\n\t\treturn torch.FloatTensor([x, y, z])\n\n\n\n# get the normal of a 3D triangle\ndef normal_from_triangle(a, b, c):\n\tA = b - a\n\tB = c - a\n\tnormal = np.cross(A, B)\n\tnormal = normalize_vector(normal.reshape(1, 1, 3))\n\treturn normal.reshape(3)\n\n# normalizes a vector\ndef normalize_vector(vector):\n\tn = np.linalg.norm(vector, axis=2)\n\tvector[:, :, 0] /= n\n\tvector[:, :, 1] /= n\n\tvector[:, :, 2] /= n\n\treturn vector\n\n# computes the quaternion of the rotation aligning vec1 with vec2 (Rodrigues' formula)\ndef quats_from_vectors(vec1, vec2):\n\tvec1 = np.array(vec1)\n\tvec2 = np.array(vec2)\n\n\ta, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)\n\tv = np.cross(a, b)\n\tc = np.dot(a, b)\n\ts = np.linalg.norm(v)\n\tif s == 0:\n\t\ts = 1\n\tkmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n\n\trotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))\n\tquat = R.from_matrix(rotation_matrix).as_quat()\n\n\treturn quat\n\n# combines two quaternions\ndef combine_quats(q1, q2):\n\tr1 = R.from_quat(q1).as_matrix()\n\tr2 = R.from_quat(q2).as_matrix()\n\tnew_q = R.from_matrix(np.matmul(r1, r2)).as_quat()\n\treturn new_q\n\n# converts an Euler rotation to a rotation matrix\ndef euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0], xyz=\"xyz\", degrees=False):\n\tr = R.from_euler(xyz, angles, degrees=degrees)\n\tpose = np.eye(4)\n\tpose[:3, 3] = translation\n\tpose[:3, :3] = r.as_matrix()\n\treturn pose\n\n# adds redundant faces\ndef add_faces(faces):\n\tf1 = np.array(faces[:, 0]).reshape(-1, 1)\n\tf2 = np.array(faces[:, 1]).reshape(-1, 1)\n\tf3 = np.array(faces[:, 2]).reshape(-1, 1)\n\tfaces_2 = np.concatenate((f1, f3, f2), axis=-1)\n\tfaces_3 = np.concatenate((f3, f2, f1), axis=-1)\n\tfaces = np.concatenate((faces, faces_2, faces_3), axis=0)\n\treturn faces\n\n# centers a pointcloud and scales to defined size\ndef scale_points(points, scale = 1.):\n\tfor i in range(3):\n\t\tpoints[:,i] -= points[:,i].min()\n\tpoints = points / points.max()\n\tpoints = points / scale\n\tfor i in range(3):\n\t\tverts_range = points[:, i].max()\n\t\tpoints[:, i] -= verts_range / 2.\n\treturn points\n\n# makes a urdf file pointing to a mesh\ndef make_urdf(verts, faces, urdf_location):\n\tobj_location = urdf_location.replace('.urdf', '.obj')\n\tfaces = add_faces(faces)\n\tsave_obj(obj_location, torch.FloatTensor(verts), torch.LongTensor(faces), 4)\n\n\tblank_location = os.path.join(os.path.dirname(objects.__file__), 'blank.urdf')\n\ttree = ET.parse(blank_location)\n\troot = tree.getroot()\n\troot.attrib['name'] = 'object.urdf'\n\n\troot[0][2][1][0].attrib['filename'] = obj_location\n\troot[0][3][1][0].attrib['filename'] = obj_location\n\ttree.write(urdf_location)\n\n# loads an obj file and scales it\ndef get_obj_data(obj_location, scale = 1.):\n\tobj_info = load_obj(obj_location)\n\tverts = obj_info[0].data.numpy()\n\tverts = scale_points(verts, scale)\n\tfaces = obj_info[1].verts_idx.data.numpy()\n\treturn verts, faces\n\n# converts a mesh to a voxel array by subdividing the mesh\ndef mesh_to_voxel(verts, faces, resolution):\n\t# 
maximum side lengths of the subdivided triangles\n\tsmallest_side = (1. / resolution) ** 2\n\n\t# center the mesh and scales to unit\n\tverts_max = verts.max()\n\tverts_min = verts.min()\n\tverts = (verts - verts_min) / (verts_max - verts_min) - 0.5\n\n\t# get all of the mesh triangles\n\tfaces = faces.clone()\n\tv1 = torch.index_select(verts, 0, faces[:, 0])\n\tv2 = torch.index_select(verts, 0, faces[:, 1])\n\tv3 = torch.index_select(verts, 0, faces[:, 2])\n\t# define points as the set of all vertices\n\tpoints = torch.cat((v1, v2, v3))\n\n\twhile True:\n\t\t# get maximum side length of all triangles\n\t\tside_1 = (torch.abs(v1 - v2) ** 2).sum(dim=1).unsqueeze(1)\n\t\tside_2 = (torch.abs(v2 - v3) ** 2).sum(dim=1).unsqueeze(1)\n\t\tside_3 = (torch.abs(v3 - v1) ** 2).sum(dim=1).unsqueeze(1)\n\t\tsides = torch.cat((side_1, side_2, side_3), dim=1)\n\t\tsides = sides.max(dim=1)[0]\n\n\t\t# identify triangles which are still too large\n\t\tkeep = sides > smallest_side\n\t\tif keep.sum() == 0:\n\t\t\tbreak\n\n\t\t# keep only the triangles which are too large and subdivide them\n\t\tv1 = v1[keep]\n\t\tv2 = v2[keep]\n\t\tv3 = v3[keep]\n\t\tv4 = (v1 + v3) / 2.\n\t\tv5 = (v1 + v2) / 2.\n\t\tv6 = (v2 + v3) / 2.\n\t\tdel (side_1, side_2, side_3, keep, sides)\n\n\t\t# add new vertices to set of points\n\t\tpoints = torch.cat((points, v4, v5, v6))\n\n\t\t# add subdivided triangles to the list of triangles\n\t\tvertex_set = [v1, v2, v3, v4, v5, v6]\n\t\tnew_traingles = [[0, 3, 4], [4, 1, 5], [4, 3, 5], [3, 2, 5]]\n\t\tnew_verts = []\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tif i == 0:\n\t\t\t\t\tnew_verts.append(vertex_set[new_traingles[i][j]])\n\t\t\t\telse:\n\t\t\t\t\tnew_verts[j] = torch.cat(\n\t\t\t\t\t\t(new_verts[j], vertex_set[new_traingles[i][j]]))\n\n\t\tv1, v2, v3 = new_verts\n\t\tdel (v4, v5, v6, vertex_set, new_verts)\n\tdel (v1, v2, v3)\n\tif points is None:\n\t\treturn None\n\n\t# scales points\n\tpoints = ((points + .5) * (resolution - 1)).long()\n\tpoints = torch.split(points.permute(1, 0), 1, dim=0)\n\tpoints = [m.unsqueeze(0) for m in points]\n\t# set grid points to on if a point exists inside them\n\tvoxel = torch.zeros((resolution, resolution, resolution)).cuda()\n\tvoxel[points] = 1\n\n\treturn voxel\n\n# converts a voxel grid to a pointcloud\ndef voxel_to_pointcloud(voxel):\n\tvoxel = voxel.float()\n\toff_positions = voxel == 0\n\tconv_filter = torch.ones((1, 1, 3, 3, 3))\n\tsurface_voxel = torch.zeros(voxel.shape).cuda()\n\tconv_filter = conv_filter.cuda()\n\tlocal_occupancy = F.conv3d(voxel.unsqueeze(0).unsqueeze(0), conv_filter, padding=1)\n\tlocal_occupancy = local_occupancy.squeeze(0).squeeze(0)\n\tsurface_positions = (local_occupancy < 27) * (local_occupancy > 0)\n\tsurface_voxel[surface_positions] = 1\n\tsurface_voxel[off_positions] = 0\n\tpoints = torch.where(surface_voxel != 0)\n\tpoints = torch.stack(points).permute(1, 0).float()\n\treturn points\n\n# implemented from:\n# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py\n# MIT License\ndef extract_ODMs(voxels):\n\tvoxels = voxels.data.cpu().numpy()\n\tdim = voxels.shape[0]\n\ta, b, c = np.where(voxels == 1)\n\tlarge = int(dim * 1.5)\n\tbig_list = [[[[-1, large] for j in range(dim)] for i in range(dim)] for k in range(3)]\n\t# over the whole object extract for each face the first and last occurrence of a voxel at each pixel\n\t# we take highest for convenience\n\tfor i, j, k in zip(a, b, c):\n\t\tbig_list[0][i][j][0] = (max(k, big_list[0][i][j][0]))\n\t\tbig_list[0][i][j][1] = (min(k, 
big_list[0][i][j][1]))\n\t\tbig_list[1][i][k][0] = (max(j, big_list[1][i][k][0]))\n\t\tbig_list[1][i][k][1] = (min(j, big_list[1][i][k][1]))\n\t\tbig_list[2][j][k][0] = (max(i, big_list[2][j][k][0]))\n\t\tbig_list[2][j][k][1] = (min(i, big_list[2][j][k][1]))\n\tODMs = np.zeros((6, dim, dim)) # will hold odms\n\tfor i in range(dim):\n\t\tfor j in range(dim):\n\t\t\tODMs[0, i, j] = dim - 1 - big_list[0][i][j][0] if big_list[0][i][j][0] > -1 else dim\n\t\t\tODMs[1, i, j] = big_list[0][i][j][1] if big_list[0][i][j][1] < large else dim\n\t\t\tODMs[2, i, j] = dim - 1 - big_list[1][i][j][0] if big_list[1][i][j][0] > -1 else dim\n\t\t\tODMs[3, i, j] = big_list[1][i][j][1] if big_list[1][i][j][1] < large else dim\n\t\t\tODMs[4, i, j] = dim - 1 - big_list[2][i][j][0] if big_list[2][i][j][0] > -1 else dim\n\t\t\tODMs[5, i, j] = big_list[2][i][j][1] if big_list[2][i][j][1] < large else dim\n\n\treturn ODMs\n\n# implemented from:\n# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py\n# MIT License\n# use orthographic depth maps to do space carving\ndef apply_ODMs(ODMs, dim):\n\tvoxel = np.ones((dim, dim, dim))\n\ta, b, c = np.where(ODMs > 0)\n\tfor x, i, j in zip(a, b, c):\n\t\tpos = int(ODMs[x, i, j])\n\t\tif x == 0:\n\t\t\tvoxel[i, j, -pos:] = 0\n\t\tif x == 1:\n\t\t\tvoxel[i, j, :pos] = 0\n\t\tif x == 2:\n\t\t\tvoxel[i, -pos:, j] = 0\n\t\tif x == 3:\n\t\t\tvoxel[i, :pos, j] = 0\n\t\tif x == 4:\n\t\t\tvoxel[-pos:, i, j] = 0\n\t\tif x == 5:\n\t\t\tvoxel[:pos, i, j] = 0\n\tvoxel[ndimage.binary_fill_holes(voxel)] = 1\n\treturn torch.LongTensor(voxel).cuda()\n\n# aligns a pointcloud to the size of a mesh\ndef realign_points(points, verts):\n\tpoints = points.float()\n\tverts = verts\n\tfor i in range(3):\n\t\tpoints[:, i] = points[:, i] - ((points[:, i].max() + points[:, i].min()) / 2.)\n\t\tv_range = verts[:, i].max() - verts[:, i].min()\n\t\tp_range = points[:, i].max() + 1 - points[:, i].min()\n\t\tpoints[:, i] = points[:, i] * v_range / p_range\n\n\treturn points\n\n# saves arguments for an experiment\ndef save_config(location, args):\n\tabs_path = os.path.abspath(location)\n\targs = vars(args)\n\targs['check_point'] = abs_path\n\n\tconfig_location = f'{location}/config.json'\n\twith open(config_location, 'w') as fp:\n\t\tjson.dump(args, fp, indent=4)\n\n\treturn config_location\n\n# loads arguments from an experiment and the model weights\ndef load_model_config(location):\n\tconfig_location = f'{location}/config.json'\n\twith open(config_location) as json_file:\n\t\tdata = json.load(json_file)\n\tweight_location = data['check_point'] + '/model'\n\targs = namedtuple(\"ObjectName\", data.keys())(*data.values())\n\treturn args, weight_location\n\n# for nicely visualizing depth images\ndef visualize_depth(depth, max_depth=0.025):\n\tdepth[depth > max_depth] = 0\n\tdepth = 255 * (depth / max_depth)\n\tdepth = depth.astype(np.uint8)\n\treturn depth\n\n# visualize the actions used by the policy\ndef visualize_actions(location, actions, args):\n\tactions = actions.view(-1).long().data.cpu().numpy()\n\tcircle = get_circle(args.num_actions)\n\tplt.hist(actions, bins=np.arange(0, args.num_actions+ 1 ))\n\tplt.title(\"actions histogram\")\n\tplt.savefig(location + '/histogram.png')\n\tplt.close()\n\n\tarray = np.zeros([args.num_actions * 2, args.num_actions * 4, 3])\n\tfor i in range(args.num_actions):\n\t\tx, y, z = circle.points[i]\n\t\tx = math.atan2(-x, y);\n\t\tx = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0);\n\t\ty = math.acos(z) / np.pi;\n\n\t\tx_co = int(y * 
args.num_actions * 12 / (2 * np.pi))\n\t\ty_co = int(x * args.num_actions * 24 / (2 * np.pi))\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\tarray[x_co - 1 + i, y_co - 1 + j] += 1.\n\tfor a in actions:\n\t\tx, y, z = circle.points[a]\n\t\tx = math.atan2(-x, y);\n\t\tx = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0);\n\t\ty = math.acos(z) / np.pi;\n\n\t\tx_co = int(y * args.num_actions * 12 / (2 * np.pi))\n\t\ty_co = int(x * args.num_actions * 24 / (2 * np.pi))\n\t\tfor i in range(3):\n\t\t\tfor j in range(3):\n\t\t\t\tarray[x_co - 1 + i, y_co - 1 + j] += 1.\n\tarray = array * 255. / array.max()\n\n\tif args.use_img:\n\t\tvisible_location = os.path.join(\n\t\t\tos.path.dirname(objects.__file__), \"visible.obj\"\n\t\t)\n\t\tseen_points = np.array(load_obj(visible_location)[0])\n\t\tseen_points = seen_points / np.sqrt(((seen_points ** 2).sum(axis=1))).reshape(-1, 1)\n\t\tfor point in seen_points:\n\t\t\tx, y, z = point\n\t\t\tx = math.atan2(-x, y);\n\t\t\tx = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0);\n\t\t\ty = math.acos(z) / np.pi;\n\n\t\t\tx_co = int(y * args.num_actions * 12 / (2 * np.pi))\n\t\t\ty_co = int(x * args.num_actions * 24 / (2 * np.pi))\n\t\t\tfor i in range(5):\n\t\t\t\tfor j in range(5):\n\t\t\t\t\tif array[x_co - 2 + i, y_co - 2 + j].sum() == 0:\n\t\t\t\t\t\tarray[x_co - 2 + i, y_co - 2 + j] = (255, 127, 80)\n\t\tarray[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)\n\n\n\t\tcheck_array = np.zeros([args.num_actions * 2, args.num_actions * 4])\n\t\tfor point in seen_points:\n\t\t\tx, y, z = point\n\t\t\tx = math.atan2(-x, y);\n\t\t\tx = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0);\n\t\t\ty = math.acos(z) / np.pi;\n\n\t\t\tx_co = int(y * args.num_actions * 12 / (2 * np.pi))\n\t\t\ty_co = int(x * args.num_actions * 24 / (2 * np.pi))\n\t\t\tfor i in range(3):\n\t\t\t\tfor j in range(3):\n\t\t\t\t\tcheck_array[x_co - 1 + i, y_co - 1 + j] = 100\n\n\t\ton = 0.\n\t\toff = 0.\n\t\tfor a in actions:\n\t\t\tx, y, z = circle.points[a]\n\t\t\tx = math.atan2(-x, y);\n\t\t\tx = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0);\n\t\t\ty = math.acos(z) / np.pi;\n\n\t\t\tx_co = int(y * args.num_actions * 12 / (2 * np.pi))\n\t\t\ty_co = int(x * args.num_actions * 24 / (2 * np.pi))\n\t\t\tif check_array[x_co, y_co] > 0:\n\t\t\t\ton += 1\n\t\t\telse:\n\t\t\t\toff += 1\n\n\t\tprint(f'percentage in vision is {on * 100 / (on+off):.2f} % for policy')\n\telse:\n\t\tarray[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)\n\tarray = array.astype(np.uint8)\n\tImage.fromarray(array).save(location + '/sphere_projection.png')\n\n\n\n\n\n\n# visualize the predicted meshes\ndef visualize_prediction(location, meshes, faces, names):\n\tdata = {}\n\tmeshes = meshes.data.cpu().numpy()\n\tfaces = faces.data.cpu().numpy()\n\tlocations = []\n\tfor n in names:\n\t\tn = '/'+ n.split('/')[-1] + '/'\n\t\tlocations.append(location + n)\n\t\tif not os.path.exists(locations[-1]):\n\t\t\tos.makedirs(locations[-1])\n\tdata['locations'] = locations\n\tpretty_render.render_representations(locations, names, meshes, faces)\n"
]
| [
[
"numpy.dot",
"torch.abs",
"torch.cat",
"torch.zeros",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.concatenate",
"numpy.all",
"torch.no_grad",
"torch.FloatTensor",
"torch.where",
"numpy.cross",
"torch.cuda.is_available",
"scipy.ndimage.binary_fill_holes",
"numpy.where",
"torch.ones",
"numpy.arange",
"numpy.eye",
"torch.eye",
"numpy.matmul",
"matplotlib.pyplot.close",
"torch.arange",
"torch.index_select",
"numpy.zeros",
"torch.LongTensor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"torch.stack",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.array",
"torch.cuda.manual_seed",
"numpy.random.seed",
"scipy.spatial.transform.Rotation.from_quat",
"torch.manual_seed",
"numpy.linalg.norm",
"numpy.ones"
]
]
|
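quats_from_vectors in the row above is Rodrigues' rotation formula in disguise: with v = a x b, c = a . b and s = |v|, the matrix I + K + K^2 (1 - c)/s^2 rotates a onto b, where K is the skew-symmetric matrix of v. A small numpy-only sanity check of that construction (a sketch, assuming non-antiparallel inputs just as the original does):

```python
import numpy as np

def rotation_between(vec1, vec2):
    """Rotation matrix R with R @ vec1_hat == vec2_hat (Rodrigues' formula)."""
    a = np.array(vec1, dtype=float); a /= np.linalg.norm(a)
    b = np.array(vec2, dtype=float); b /= np.linalg.norm(b)
    v = np.cross(a, b)                 # rotation axis, |v| = sin(angle)
    c = np.dot(a, b)                   # cos(angle)
    s = np.linalg.norm(v) or 1.0       # guard the parallel case, as the original does
    K = np.array([[0, -v[2], v[1]],
                  [v[2], 0, -v[0]],
                  [-v[1], v[0], 0]])   # skew-symmetric cross-product matrix of v
    return np.eye(3) + K + K @ K * ((1 - c) / s ** 2)

R = rotation_between([1, 0, 0], [0, 1, 1])
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), np.array([0, 1, 1]) / np.sqrt(2))
```

The file's version simply feeds this matrix to scipy's Rotation.from_matrix(...).as_quat() to get the quaternion.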
shawntan/compound-pcfg | [
"01577bf19f8170d5325678a8ea630185f4c37987"
]
| [
"compare_trees.py"
]
| [
"#!/usr/bin/env python3\nimport sys\nimport os\n\nimport argparse\nimport json\nimport random\nimport shutil\nimport copy\n\nimport torch\nfrom torch import cuda\nimport torch.nn as nn\nimport numpy as np\nimport time\nfrom utils import *\nimport re\n\nparser = argparse.ArgumentParser()\n\n# Data path options\nparser.add_argument('--tree1', default='')\nparser.add_argument('--tree2', default='')\nparser.add_argument('--length_cutoff', default=150, type = int)\n\ndef is_next_open_bracket(line, start_idx):\n for char in line[(start_idx + 1):]:\n if char == '(':\n return True\n elif char == ')':\n return False\n raise IndexError('Bracket possibly not balanced, open bracket not followed by closed bracket') \n\ndef get_between_brackets(line, start_idx):\n output = []\n for char in line[(start_idx + 1):]:\n if char == ')':\n break\n assert not(char == '(')\n output.append(char) \n return ''.join(output)\n\ndef get_tags_tokens_lowercase(line):\n output = []\n line_strip = line.rstrip()\n for i in range(len(line_strip)):\n if i == 0:\n assert line_strip[i] == '(' \n if line_strip[i] == '(' and not(is_next_open_bracket(line_strip, i)): # fulfilling this condition means this is a terminal symbol\n output.append(get_between_brackets(line_strip, i))\n #print 'output:',output\n output_tags = []\n output_tokens = []\n output_lowercase = []\n for terminal in output:\n terminal_split = terminal.split()\n assert len(terminal_split) == 2 # each terminal contains a POS tag and word \n output_tags.append(terminal_split[0])\n output_tokens.append(terminal_split[1])\n output_lowercase.append(terminal_split[1].lower())\n return [output_tags, output_tokens, output_lowercase] \n\ndef get_nonterminal(line, start_idx):\n assert line[start_idx] == '(' # make sure it's an open bracket\n output = []\n for char in line[(start_idx + 1):]:\n if char == ' ':\n break\n assert not(char == '(') and not(char == ')')\n output.append(char)\n return ''.join(output)\n\n\ndef get_actions(line):\n output_actions = []\n line_strip = line.rstrip()\n i = 0\n max_idx = (len(line_strip) - 1)\n while i <= max_idx:\n assert line_strip[i] == '(' or line_strip[i] == ')'\n if line_strip[i] == '(':\n if is_next_open_bracket(line_strip, i): # open non-terminal\n curr_NT = get_nonterminal(line_strip, i)\n output_actions.append('NT(' + curr_NT + ')')\n i += 1 \n while line_strip[i] != '(': # get the next open bracket, which may be a terminal or another non-terminal\n i += 1\n else: # it's a terminal symbol\n output_actions.append('SHIFT')\n while line_strip[i] != ')':\n i += 1\n i += 1\n while line_strip[i] != ')' and line_strip[i] != '(':\n i += 1\n else:\n output_actions.append('REDUCE')\n if i == max_idx:\n break\n i += 1\n while line_strip[i] != ')' and line_strip[i] != '(':\n i += 1\n assert i == max_idx \n return output_actions\n\n \ndef main(args):\n corpus_f1 = [0., 0., 0.] 
\n    sent_f1 = [] \n    with torch.no_grad():\n        for k, (tree1, tree2) in enumerate(zip(open(args.tree1, \"r\"), open(args.tree2))):\n            tree1 = tree1.strip()\n            action1 = get_actions(tree1)\n            tags1, sent1, sent_lower1 = get_tags_tokens_lowercase(tree1)\n            if len(sent1) > args.length_cutoff or len(sent1) == 1:\n                continue\n            gold_span1, binary_actions1, nonbinary_actions1 = get_nonbinary_spans(action1)\n            tree2 = tree2.strip()\n            action2 = get_actions(tree2)\n            tags2, sent2, sent_lower2 = get_tags_tokens_lowercase(tree2)\n            gold_span2, binary_actions2, nonbinary_actions2 = get_nonbinary_spans(action2)\n            pred_span_set = set(gold_span2[:-1]) #the last span in the list is always the\n            gold_span_set = set(gold_span1[:-1]) #trivial sent-level span so we ignore it\n            tp, fp, fn = get_stats(pred_span_set, gold_span_set) \n            corpus_f1[0] += tp\n            corpus_f1[1] += fp\n            corpus_f1[2] += fn\n            # sent-level F1 is based on L83-89 from https://github.com/yikangshen/PRPN/test_phrase_grammar.py\n            overlap = pred_span_set.intersection(gold_span_set)\n            prec = float(len(overlap)) / (len(pred_span_set) + 1e-8)\n            reca = float(len(overlap)) / (len(gold_span_set) + 1e-8)\n            if len(gold_span_set) == 0:\n                reca = 1.\n            if len(pred_span_set) == 0: \n                prec = 1.\n            f1 = 2 * prec * reca / (prec + reca + 1e-8)\n            sent_f1.append(f1)\n    tp, fp, fn = corpus_f1 \n    prec = tp / (tp + fp)\n    recall = tp / (tp + fn)\n    corpus_f1 = 2*prec*recall/(prec+recall) if prec+recall > 0 else 0.\n    print('Corpus F1: %.2f, Sentence F1: %.2f' %\n        (corpus_f1*100, np.mean(np.array(sent_f1))*100))\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    main(args)\n"
]
| [
[
"numpy.array",
"torch.no_grad"
]
]
|
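main() in the row above scores unlabeled parses as sets of spans, two ways: corpus F1 pools tp/fp/fn across all sentence pairs before the final harmonic mean (so long sentences weigh more), while sentence F1 averages a per-sentence score computed with the epsilon-smoothed formula. A minimal worked example of the per-sentence arithmetic on toy spans (plain Python, hypothetical data):

```python
pred_spans = {(0, 3), (1, 2), (4, 6)}   # predicted constituents (sent-level span removed)
gold_spans = {(0, 3), (4, 5), (4, 6)}   # gold constituents

overlap = pred_spans & gold_spans        # true positives
prec = len(overlap) / (len(pred_spans) + 1e-8)
reca = len(overlap) / (len(gold_spans) + 1e-8)
f1 = 2 * prec * reca / (prec + reca + 1e-8)
print('P=%.2f R=%.2f F1=%.2f' % (prec, reca, f1))  # P=0.67 R=0.67 F1=0.67
```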
zhenlingcn/scikit-obliquetree | [
"41d70b210799b57306a10b62f43605a9ddb2b6ad"
]
| [
"examples/simple_example.py"
]
| [
"from scikit_obliquetree.HHCART import HouseHolderCART\nfrom scikit_obliquetree.segmentor import MSE, MeanSegmentor\nfrom sklearn.datasets import load_boston\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.model_selection import cross_val_score\n\nX, y = load_boston(return_X_y=True)\nreg = BaggingRegressor(\n HouseHolderCART(MSE(), MeanSegmentor(), max_depth=3),\n n_estimators=100,\n n_jobs=-1,\n)\nprint(\"CV Score\", cross_val_score(reg, X, y))\n"
]
| [
[
"sklearn.model_selection.cross_val_score",
"sklearn.datasets.load_boston"
]
]
|
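Note that load_boston was removed from scikit-learn in version 1.2, so the example in the row above only runs against older releases. A sketch of the same bagged oblique-tree setup on a dataset scikit-learn still ships (assuming the HouseHolderCART / MSE / MeanSegmentor API shown above is unchanged):

```python
from scikit_obliquetree.HHCART import HouseHolderCART
from scikit_obliquetree.segmentor import MSE, MeanSegmentor
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score

X, y = fetch_california_housing(return_X_y=True)
reg = BaggingRegressor(
    HouseHolderCART(MSE(), MeanSegmentor(), max_depth=3),
    n_estimators=100,
    n_jobs=-1,
)
print("CV Score", cross_val_score(reg, X, y))
```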
stormxuwz/SeabirdCode | [
"943c38b0ef0272c04157700ee6ecc2e87f2c2aaa"
]
| [
"seabird/models/model_segmentation.py"
]
| [
"import numpy as np\nimport logging\n\n\n\ndef debugPlot(x, segList, createLineFunc, plotTitle):\n\timport matplotlib.pyplot as plt\n\tplt.figure()\n\tplt.plot(range(len(x)), x)\n\n\tfor seg in segList:\n\t\tplt.scatter(seg, x[seg], s=8)\n\t\tplt.plot(seg, createLineFunc(x[seg]))\n\tplt.title(plotTitle)\n\tplt.savefig(plotTitle + \".png\")\n\tplt.close()\n\n\nclass timeSeriesSegmentation(object):\n\tdef __init__(self,max_error):\n\t\tself.segmentList=None\n\t\tself.x=None\n\t\tself.max_error=max_error\n\n\tdef fit_predict(self,x):\n\t\traise ValueError(\"not implementated\")\n\n\tdef calculate_error(self,x,y):\n\t\treturn np.max(np.abs(x-y))\n\n\tdef plot(self):\n\t\timport matplotlib.pyplot as plt\n\t\tplt.figure()\n\t\tplt.plot(self.x,\"ro\")\n\t\tfor seg in self.segmentList:\n\t\t\tplt.plot(seg[1],seg[0],\"+-\")\n\t\tplt.show()\n\n\tdef createLine(self,x,method=\"regression\"):\n\t\tx = np.array(x)\n\t\tn=len(x)\n\n\t\tif method==\"simple\":\n\t\t\tline = np.linspace(x[0],x[-1],n)\n\n\t\telif method == \"regression\":\n\t\t\tline = np.poly1d(np.polyfit(range(n),x,1))(range(n))\n\t\t\n\t\telif method == \"poly\":\n\t\t\tline=np.poly1d(np.polyfit(range(n),x,2))(range(n))\n\n\t\treturn line\n\n\nclass slidingWindow(timeSeriesSegmentation):\n\t\"\"\"\n\tsegment signal using sliding window approach (not used)\n\t\"\"\"\n\tdef fit_predict(self,x):\n\t\tn=len(x)\n\t\tleftNode=0\n\t\tsegmentList=[]\n\t\tprint(n)\n\t\t\n\t\twhile leftNode<n-1:\n\t\t\tprint(leftNode)\n\t\t\tnewSeg = False\n\t\t\tfor rightNode in range(leftNode+3,n):\n\t\t\t\ttestSeg=x[leftNode:rightNode]\n\n\t\t\t\ttestLine = self.createLine(testSeg,\"regression\")\n\t\t\t\tsegError = self.calculate_error(testSeg, testLine)\n\n\t\t\t\tif segError>self.max_error:\n\t\t\t\t\tsegmentList.append([testLine,range(leftNode,rightNode)])\n\t\t\t\t\tleftNode=rightNode\n\t\t\t\t\tnewSeg = True\n\t\t\t\t\tbreak\n\n\t\t\tif newSeg is False:\n\t\t\t\tsegmentList.append([testLine,range(leftNode,rightNode)])\n\t\t\t\tleftNode = n-1\n\t\t\n\t\tself.segmentList=segmentList\n\t\tself.x=x\n\nclass bottomUp(timeSeriesSegmentation):\n\t\"\"\"\n\tsegment signal using sliding bottom up approach (not used)\n\t\"\"\"\n\n\tdef fit_predict(self,x):\n\t\t\"\"\"\n\t\tFunction to fit linear segments based on x\n\t\tArgs:\n\t\t\tx: input signal\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tn=len(x)\n\t\tsegmentIndexList = [[i,i+1] for i in range(0,n,2)]\n\t\terrorList = [self.mergeCost(x[segmentIndexList[i]], x[segmentIndexList[i+1]]) for i in range(len(segmentIndexList)-1)]\n\n\t\twhile True:\n\t\t\tminIndex = np.argmin(errorList)\n\t\t\tself.mergeRight(segmentIndexList,minIndex)\n\t\t\t\n\t\t\tif len(segmentIndexList) == 3:\n\t\t\t\tbreak\n\t\t\tif minIndex > 0:\n\t\t\t\terrorList[minIndex-1] = self.mergeCost(x[segmentIndexList[minIndex-1]],x[segmentIndexList[minIndex]])\n\n\t\t\tif minIndex < len(errorList)-1:\n\t\t\t\terrorList[minIndex+1] = self.mergeCost(x[segmentIndexList[minIndex]], x[segmentIndexList[minIndex+1]])\n\n\t\t\terrorList.pop(minIndex)\n\t\t\t\n\t\t\tif len(errorList)!=len(segmentIndexList)-1:\n\t\t\t\traise ValueError(\"error length not right\")\n\n\t\t\tif min(errorList)>self.max_error:\n\t\t\t\tbreak\n\n\t\tself.x = x\n\t\tself.segmentList=[[self.createLine(x[segIndex],\"regression\"),segIndex] for segIndex in segmentIndexList]\n\t\t\n\t\t# self.finalAdjust()\n\t\t\n\n\tdef finalAdjust(self):\n\t\tnSeg = len(self.segmentList)\n\t\t\n\t\tnewSegment = []\n\t\ti = 0\n\t\t\n\t\twhile i < nSeg - 1:\n\t\t\tnewSeg1, newSeg2 = 
self.splitAdjust(self.segmentList[i],self.segmentList[i+1])\n\t\t\t# print seg1, seg2\n\t\t\tself.segmentList[i] = newSeg1\n\t\t\tself.segmentList[i+1] = newSeg2\n\t\t\ti += 1\n\t\t\t# print newSeg1, newSeg2\n\n\n\tdef splitAdjust(self, seg1, seg2):\n\t\t# function to find the best split point given seg1 and seg2\n\t\t# Args:\n\t\t#\tseg1: [fitted value, idx]\n\t\t# \tseg2: [fitted value, idx]\n\t\t# print seg1[1], seg2[1]\n\n\t\tsegIdx = seg1[1] + seg2[1]\n\t\tsegX = self.x[segIdx]\n\t\t\n\t\tn = len(segIdx)\n\n\n\t\tminErr = self.max_error*100\n\t\tminErrIdx = 1\n\n\t\tfor i in range(1, n - 2):\n\t\t\ts1 = segX[:i]\n\t\t\ts2 = segX[i:]\n\t\t\t# print s1, s2\n\t\t\tif len(s1) > 2:\n\t\t\t\te1 = self.calculate_error(self.createLine(s1), s1)\n\t\t\telse:\n\t\t\t\te1 = 0\n\n\t\t\tif len(s2) > 2:\n\t\t\t\te2 = self.calculate_error(self.createLine(s2), s2)\n\t\t\telse:\n\t\t\t\te2 = 0\n\n\t\t\tif e1 + e2 < minErr:\n\t\t\t\tminErr = e1 + e2\n\t\t\t\tminErrIdx = i\n\t\n\t\t# print \"***\", minErrIdx, s1, s2\n\n\t\treturn [self.createLine(segX[:minErrIdx]), segIdx[:minErrIdx] ], [self.createLine(segX[minErrIdx:]), segIdx[minErrIdx:] ]\n\n\t\t\n\n\n\tdef mergeCost(self, leftSeg,rightSeg):\n\t\t\"\"\"\n\t\tfunction to calculate the error when merging the right segment\n\t\tArgs:\n\t\t\tleftSeg: left segment\n\t\t\trightSeg: the segment to merge\n\t\tReturns:\n\t\t\terror when merging the right segment\n\t\t\"\"\"\n\t\tallSeg=np.concatenate((leftSeg,rightSeg))\n\t\tline = self.createLine(allSeg)\n\t\treturn self.calculate_error(line, allSeg)\n\n\tdef mergeRight(self,segList,index):\n\t\t\"\"\"\n\t\tfunction to merge the segment of \"index\" with its right segment\n\t\tArgs:\n\t\t\tsegList: a list of segment\n\t\t\tindex: the segment to merge with its right segment\n\t\t\"\"\"\n\t\tsegList[index]=(segList[index]+segList[index+1]) # merge \n\t\tsegList.pop(index+1) # pop the right segment\n\n\nclass splitAndMerge(bottomUp):\n\n def fit_predict(self, x):\n n = len(x)\n x = np.array(x)\n segmentIndexList = self.randomInitialization(n)\n iterNum = 0\n\n while iterNum < n * 5:\n segmentIndexList_step1 = []\n\n converged = True\n\n\t\t\t# Split\n for i in range(len(segmentIndexList)):\n y = x[segmentIndexList[i]]\n y_fit = self.createLine(y)\n error = self.calculate_error(y, y_fit)\n if error > self.max_error:\n newSegs = self.split(segmentIndexList[i], y, y_fit)\n segmentIndexList_step1 = segmentIndexList_step1 + newSegs\n converged = False\n else:\n segmentIndexList_step1.append(segmentIndexList[i])\n\n segmentIndexList_step2 = segmentIndexList_step1.copy()\n\n # Merge\n if not converged:\n i = 0\n while i < len(segmentIndexList_step2) - 1:\n\n mergeRightCost = self.mergeCost(x[segmentIndexList_step2[i]], x[segmentIndexList_step2[i + 1]])\n\n if mergeRightCost < self.max_error:\n segmentIndexList_step2[i] = segmentIndexList_step2[i] + segmentIndexList_step2[i + 1]\n # then pop the right segment\n segmentIndexList_step2.pop(i + 1)\n else:\n i += 1\n\n # debugPlot(x, segmentIndexList_step2, self.createLine, \"itarNum=\" + str(iterNum) + \"_step2\")\n\n # Adjust\n if not converged:\n segmentIndexList = self.splitAdjust_overall(x, segmentIndexList_step2)\n else:\n segmentIndexList = segmentIndexList_step2.copy()\n\n # debugPlot(x, segmentIndexList, self.createLine, \"itarNum=\" + str(iterNum) + \"_step3\")\n\n if converged:\n break\n iterNum += 1\n\n if iterNum == n * 5:\n print(\"iter maximum reached\")\n # get the final results\n self.x = x\n self.segmentList = [[self.createLine(x[segIndex], 
\"regression\"), segIndex] for segIndex in segmentIndexList]\n\n def split(self, x, y, y_fit):\n # first find which points are larger than the maximum error\n # function return a list of new segments\n errorPoints = np.where(abs(y - y_fit) > self.max_error)[0]\n\n if len(errorPoints) >= 2:\n # splitPoint = (errorPoints[0] + errorPoints[1]) // 2\n splitPoint = (errorPoints[0] + errorPoints[-1]) // 2\n # splitPoint = len(x) // 2\n else:\n splitPoint = len(x) // 2\n\n newSegs = [[x[i] for i in range(splitPoint)], [x[i] for i in range(splitPoint, len(x))]]\n return newSegs\n\n def randomInitialization(self, n):\n splitPoint = n // 2\n splitPoint = np.random.randint(n // 3, 2 * n // 3)\n segmentIndexList = [[i for i in range(splitPoint)], [i for i in range(splitPoint, n)]]\n\n return segmentIndexList\n\n def splitAdjust_overall(self, x, segmentIndexList):\n newSegList = segmentIndexList.copy()\n nSeg = len(segmentIndexList)\n i = 0\n while i < nSeg - 1:\n newSeg1, newSeg2 = self.splitAdjust(x, newSegList[i], newSegList[i + 1])\n newSegList[i] = newSeg1\n newSegList[i + 1] = newSeg2\n i += 1\n return newSegList\n\n def splitAdjust(self, x, segIndexList1, segIndexList2):\n segIdx = segIndexList1 + segIndexList2\n segX = x[segIdx]\n minErr = self.max_error * 99999\n n = len(segIdx)\n\n if len(segIdx) < 4:\n return segIndexList1, segIndexList2\n\n for i in range(2, n - 1):\n s1 = segX[:i]\n s2 = segX[i:]\n if len(s1) > 2:\n e1 = self.calculate_error(self.createLine(s1), s1)\n else:\n e1 = 0\n\n if len(s2) > 2:\n e2 = self.calculate_error(self.createLine(s2), s2)\n else:\n e2 = 0\n\n if max(e1, e2) < minErr:\n minErr = max(e1, e2)\n minErrIdx = i\n\n return (segIdx[:(minErrIdx)], segIdx[minErrIdx:])\n\n"
]
| [
[
"numpy.abs",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.random.randint",
"numpy.argmin",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
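All three strategies in the file above share one primitive: fit a line to a candidate segment (createLine with the "regression" method) and score it by the maximum absolute residual (calculate_error); mergeCost is exactly this cost applied to the concatenation of two neighbouring segments. A compact numpy illustration of that primitive on toy data:

```python
import numpy as np

def segment_cost(seg):
    # least-squares line over the segment, scored by the max |residual|
    idx = np.arange(len(seg))
    line = np.poly1d(np.polyfit(idx, seg, 1))(idx)
    return np.max(np.abs(seg - line))

left = np.array([0.0, 1.0, 2.0])             # linear: cost ~ 0
right = np.array([3.0, 2.0, 1.0])            # also linear, opposite slope
merged = np.concatenate((left, right))       # peak-shaped: one line fits poorly

print(segment_cost(left), segment_cost(right))  # both ~ 0
print(segment_cost(merged))                     # large -> keep the segments split
```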
chakerouari/UNET_segmetation | [
"a7d9e9ccd31595d482f620cbf9a625a486f5f0df",
"a7d9e9ccd31595d482f620cbf9a625a486f5f0df",
"a7d9e9ccd31595d482f620cbf9a625a486f5f0df"
]
| [
"my_transforms.py",
"unet_explanation/model/unet_utils.py",
"DiceLoss.py"
]
| [
"import torchvision.transforms as transforms\nimport random\nimport torch.nn.functional as F\nimport numpy as np\n#transforms = {Resize, ToTensor, RandomCrop, ToPILImage}\n\nclass GrayScale(object):\n def __call__(self,sample):\n from torchvision.transforms import Grayscale\n Grayscale = Grayscale()\n sample['image'] = Grayscale(sample['image'])\n return sample\n\nclass Resize(object):\n \"\"\"\n Resize the input PIL Image to the given size.\n \"\"\"\n def __init__(self,img_size):\n assert isinstance(img_size , (int,tuple))\n self.img_size = img_size\n\n def __call__(self,sample):\n img , mask = sample['image'],sample['mask']\n Resize = transforms.Resize((self.img_size,self.img_size))\n sample['image'],sample['mask'] = Resize(img), Resize(mask)\n return sample\nclass RandomRotation(object):\n \"\"\"Rotate the image by angle.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n fill (3-tuple or int): RGB pixel fill value for area outside the rotated image.\n If int, it is used for all channels respectively.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n    \"\"\"\n\n    def __init__(self, degrees, resample=False, expand=False, center=None, fill=0):\n        if isinstance(degrees, numbers.Number):\n            if degrees < 0:\n                raise ValueError(\"If degrees is a single number, it must be positive.\")\n            self.degrees = (-degrees, degrees)\n        else:\n            if len(degrees) != 2:\n                raise ValueError(\"If degrees is a sequence, it must be of len 2.\")\n            self.degrees = degrees\n\n        self.resample = resample\n        self.expand = expand\n        self.center = center\n        self.fill = fill\n\n    @staticmethod\n    def get_params(degrees):\n        \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n        Returns:\n            sequence: params to be passed to ``rotate`` for random rotation.\n        \"\"\"\n        angle = random.uniform(degrees[0], degrees[1])\n\n        return angle\n\n    def __call__(self, img):\n        \"\"\"\n        Args:\n            img (PIL Image): Image to be rotated.\n\n        Returns:\n            PIL Image: Rotated image.\n        \"\"\"\n\n        angle = self.get_params(self.degrees)\n\n        return TF.rotate(img, angle, self.resample, self.expand, self.center, self.fill)\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n        format_string += ', resample={0}'.format(self.resample)\n        format_string += ', expand={0}'.format(self.expand)\n        if self.center is not None:\n            format_string += ', center={0}'.format(self.center)\n        format_string += ')'\n        return format_string\n\n\nclass ColorJitter(object):\n    def __init__(self,brightness=0, contrast=0, saturation=0, hue=0):\n        \"\"\"\n\n        :param brightness:\n        :param contrast:\n        :param saturation:\n        :param hue:\n        \"\"\"\n        from torchvision.transforms import ColorJitter\n        self.ColorJitter = ColorJitter(brightness, contrast, saturation, hue)\n\n    def __call__(self,sample):\n        return {\"image\":self.ColorJitter(sample[\"image\"]),\n                \"mask\" :sample[\"mask\"]}\n\n\n\n# class RandomCrop(object):\n#     \"\"\"Crop randomly the image in a sample\n#\n#     Args:\n#         output_size (tuple or int): Desired output size.\n#         If int, square crop is made\n#     \"\"\"\n#     def __init__(self,output_size):\n#         assert isinstance(output_size, (int,tuple))\n#         if isinstance(output_size, int):\n#             self.output_size = (output_size, output_size)\n#         else:\n#             assert len(output_size) == 2\n#             self.output_size = output_size\n#\n#     def __call__(self,sample):\n#         img, mask = sample['image'], sample['mask']\n#\n#         # h,w = img.shape[:2] # numpy img : H X W X C\n#         w,h = img.size\n#         new_h , new_w = self.output_size\n#\n#         top = np.random.randint(0, h - new_h)\n#         left = np.random.randint(0,w - new_w)\n#\n#         img = img[top:top + new_h,\n#               left: left + new_w]\n#         mask = mask[top:top + new_h,\n#               left: left + new_w]\n#\n#         sample['image'], sample['mask'] = img, mask\n#         return sample\n\nclass ToTensor(object):\n    \"\"\"convert ndarrays in sample to Tensors\"\"\"\n    def __call__(self,sample):\n        from torchvision.transforms import ToTensor\n        ToTensor = ToTensor()\n        img , mask = sample['image'],sample['mask']\n        sample['image'],sample['mask'] = ToTensor(img) ,ToTensor(mask)\n        return sample\n\n\n\n# class Rescale(object):\n#     \"\"\"\n#     Rescale the image in a sample to a given size\n#     \"\"\"\n#     def __init__(self,scale):\n#         self.scale = scale\n#\n#     def __call__(self,sample):\n#         import torchvision.transforms as transforms\n#         img , mask = sample['image'],sample['mask']\n#         Scale = transforms.Scale()\n#         resize = transforms.Resize((self.img_size,self.img_size))\n#         sample['image'],sample['mask'] = resize(img), resize(mask)\n#         return sample\n\n\nclass RandomVerticalFlip(object):\n    
\"\"\"Vertically flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n def __init__(self, p =0.5):\n self.p = p\n def __call__(self,sample):\n from torchvision.transforms.functional import vflip as vertical_flip\n img , mask = sample['image'],sample['mask']\n if random.random() < self.p:\n sample['image'], sample['mask'] = vertical_flip(img), vertical_flip(mask)\n return sample\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n def __init__(self, p = 0.5):\n self.p = p\n\n def __call__(self, sample):\n from torchvision.transforms.functional import hflip as horizontal_flip\n img , mask = sample['image'],sample['mask']\n if random.random() < self.p:\n sample['image'], sample['mask'] = horizontal_flip(img), horizontal_flip(mask)\n return sample\n\nclass ToPILImage(object):\n def __call__(self,sample):\n from torchvision.transforms import ToPILImage\n img , mask = sample['image'],sample['mask']\n ToPILImage = ToPILImage()\n sample['image'], sample['mask'] = ToPILImage(img),ToPILImage(mask)\n return sample\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass UNetConvBlock(nn.Module):\n \" [conv -> BN -> ReLU] -> [conv -> BN -> ReLU]\"\n def __init__(self, in_channels, out_channels, kernel_size = 3 , padding = True):\n \"\"\"\n\n :param in_channels:\n :param out_channels:\n :param kernel_size:\n :param padding:\n\n The Original paper uses VALID padding (i.e. no padding)\n The main benefit of using SAME padding is that the output feature map will have the same spatial dimensions\n as the input feature map.\n \"\"\"\n super().__init__()\n self.double_conv = nn.Sequential(\n # Usually Conv -> BatchNormalization -> Activation\n nn.Conv2d(in_channels , out_channels , kernel_size= kernel_size , padding = int(padding)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n\n nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=int(padding)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True))\n\n def forward(self,inp):\n return self.double_conv(inp)\n\n\nclass Down(nn.Module):\n \"\"\"\n Downscaling with maxpool and then Double Conv\n - 3x3 Conv2D -> BN -> ReLU\n - 3X3 Conv2D -> BN -> ReLU\n - MaxPooling\n \"\"\"\n def __init__(self, in_channels , out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n UNetConvBlock(in_channels,out_channels)\n )\n\n def forward(self,x):\n return self.maxpool_conv(x)\n\nclass Up(nn.Module):\n \"\"\"\n - Upsampling Convolution (\"Up-convolution\")\n - 3x3 Conv2D -> BN -> ReLU\n - 3X3 Conv2D -> BN -> ReLU\n\n Upsampling vs Transposed convolutions\n\n Transposed convolution (a.k.a.\"up-convolution or fractionally-strided convolutions or deconvolutions\")\n - The original paper uses this\n - detects more fine-grained detail\n\n Other implementation use bilinear upsampling, possibly followed by a 1x1 convolution.\n The benefit of using upsampling is that it has no parameters and if you include the 1x1 convolution,\n it will still have less parameters than the transposed convolution\n \"\"\"\n def __init__(self,in_channels , out_channels , bilinear = False):\n super(Up,self).__init__()\n\n if bilinear: # use the normal conv to reduce the number of channels\n self.up = nn.Upsample(scale_factor=2, mode= 'bilinear', align_corners = True)\n else: # use Transpose convolution (the one that official UNet used)\n self.up = nn.ConvTranspose2d(in_channels//2 , in_channels // 2, kernel_size = 2,stride=2 )\n\n self.conv = UNetConvBlock(in_channels,out_channels)\n\n def forward(self,x1,x2):\n # input dim is CHW\n x1 = self.up(x1)\n\n diffY = torch.tensor([x2.size()[2] - x1.size()[2]])\n diffX = torch.tensor([x2.size()[3] - x1.size()[3]])\n\n x1 = F.pad(x1 , [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n x = torch.cat([x2, x1] , dim = 1)\n out = self.conv(x)\n return out\n\n\nclass OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n return self.conv(x)",
"import numpy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass DiceLoss(nn.Module):\n def __init__(self, weight=None, size_average=True):\n super(DiceLoss, self).__init__()\n\n def forward(self, inputs, targets, smooth=1):\n \n #comment out if your model contains a sigmoid or equivalent activation layer\n inputs = torch.sigmoid(inputs) \n \n #flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n \n intersection = (inputs * targets).sum() \n dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth) \n \n return 1 - dice\n"
]
| [
[
"torch.nn.functional.rotate"
],
[
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.pad"
],
[
"torch.sigmoid"
]
]
|
Lupin1998/inv-ML | [
"9f3db461911748292dff18024587538eb66d44bf"
]
| [
"invMLEnc_toy/models/InvML_MLP.py"
]
| [
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\n\nclass InvLeakyReLU(nn.Module):\n \"\"\" Invertible Bi-LeakyReLU with alpha \"\"\"\n def __init__(self, alpha=2, inplace=False, invertible=False):\n super(InvLeakyReLU, self).__init__()\n self.alpha = alpha\n self.inplace = inplace\n self.invertible = invertible\n self.weight = None\n\n def set_invertible(self, invertible=False):\n # print('set invertible in LeakyReLU', invertible)\n self.invertible = invertible\n\n def forward(self, x):\n if self.invertible == False:\n return torch.max(1 / self.alpha * x, self.alpha * x)\n else:\n return torch.min(1 / self.alpha * x, self.alpha * x)\n \n def extra_repr(self):\n inplace_str = ', inplace=True' if self.inplace else ''\n return 'alpha_inplace={}{}'.format(self.negative_slope, inplace_str)\n\n\nclass InvML_MLP(nn.Module):\n \"\"\" InvML_MLP baseline \"\"\"\n\n def __init__(self, args, mode='encoder', device=\"cuda\"):\n\n super().__init__()\n self.args = args\n self.Structure = args['NetworkStructure'] # MLP\n self.epoch = 0\n self.device = device\n # 1. network\n self.name_list = ['input'] # input, encoder layers, decoder layers\n self.plot_index_list = [0] # index of layers for plot\n self.network = nn.ModuleList()\n self.layer_num = len(args['NetworkStructure']['layer']) - 1\n self.extraHead = nn.ModuleList()\n self.extraLayer = self.args['ExtraHead']['layer']\n # 2. add inv relue\n if args['ratio']['orth'] > 0: # add inv to leaky\n self.inv_leaky = True\n else:\n self.inv_leaky = False\n # 3. Enc, Dec weight index\n self.enc_weight_index = []\n self.dec_weight_index = []\n # Extra-Head index\n self.extra_index = []\n # 4. mode: ['encoder', 'decoder']\n self.mode = mode\n # 5. jump link for Decoder\n if self.args['AEWeight']['each'][-1] > 0:\n self.add_jump = False\n else:\n self.add_jump = True\n print(\"##### Using jump in Dec #####\")\n # 6. 
zero padding mode\n if args[\"InverseMode\"][\"mode\"] == \"ZeroPadding\":\n self.zero_padding = args[\"InverseMode\"][\"padding\"]\n tmp_padding = []\n else:\n self.zero_padding = []\n\n # Encoder\n for i in range(len(self.Structure[\"layer\"])-1):\n \n self.network.append(\n nn.Linear(self.Structure[\"layer\"][i], self.Structure[\"layer\"][i+1], bias=False)\n )\n self.name_list.append('EN{}_{}->{}'.format(i+1,\n self.Structure[\"layer\"][i], self.Structure[\"layer\"][i+1]))\n self.enc_weight_index.append(len(self.name_list) - 2) # add weight index\n # 1) set required_grad\n if self.Structure[\"Enc_require_gard\"][i] == 0:\n self.network[-1].weight.requires_grad = False\n \n # 2) add Extra-Head (project) for DR\n if len(self.extraLayer) > 0:\n if self.extraLayer[i] > 0:\n self.extraHead.append(\n nn.Linear(self.Structure[\"layer\"][i], self.extraLayer[i], bias=False)\n )\n self.extra_index.append( {\"index\":len(self.network)-1, \"extra\":len(self.extra_index)} )\n # 3) add weight padding\n if len(self.zero_padding) > 0:\n tmp_padding.append(self.zero_padding[i])\n \n # 4) add relu\n if self.Structure[\"relu\"][i] > 0:\n if self.args['ReluType'][\"type\"] == \"Leaky\":\n self.network.append(nn.LeakyReLU(self.args['ReluType'][\"Enc_alpha\"], inplace=False))\n elif self.args['ReluType'][\"type\"] == \"InvLeaky\":\n self.network.append(\n InvLeakyReLU(self.args['ReluType'][\"Enc_alpha\"] , inplace=False, invertible=False)\n )\n else:\n raise NotImplementedError\n self.name_list.append('ENRELU{}_{}->{}'.format(\n i+1, self.Structure[\"layer\"][i], self.Structure[\"layer\"][i+1]))\n # add relu padding\n if len(self.zero_padding) > 0:\n tmp_padding.append(self.zero_padding[i])\n self.plot_index_list.append(len(self.name_list) - 1)\n \n # Decoder (not sharing params with Encoder)\n for i in range(len(self.Structure[\"layer\"])-1, 0, -1): # 10 -> 1\n if self.Structure[\"inv_Dec\"] == 0:\n \n self.network.append(\n nn.Linear(self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1], bias=False)\n )\n self.name_list.append('DE{}*_{}->{}'.format(\n i-1, self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1]))\n self.dec_weight_index.append(len(self.name_list) - 2) # add weight index\n # set required_grad\n if self.Structure[\"Dec_require_gard\"][i-1] == 0:\n self.network[-1].weight.requires_grad = False\n \n # add relu\n if self.Structure[\"relu\"][i-1] > 0:\n if self.args['ReluType'][\"type\"] == \"Leaky\":\n self.network.append(nn.LeakyReLU(self.args['ReluType'][\"Dec_alpha\"]))\n elif self.args['ReluType'][\"type\"] == \"InvLeaky\":\n self.network.append(\n InvLeakyReLU(self.args['ReluType'][\"Enc_alpha\"], inplace=False, invertible=self.inv_leaky) # add inv\n )\n else:\n raise NotImplementedError\n self.name_list.append('DERELU{}*_{}->{}'.format(\n i-1, self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1]))\n else:\n # add relu\n if self.Structure[\"relu\"][i-1] > 0:\n if self.args['ReluType'][\"type\"] == \"Leaky\":\n self.network.append(nn.LeakyReLU(self.args['ReluType'][\"Dec_alpha\"]))\n elif self.args['ReluType'][\"type\"] == \"InvLeaky\":\n self.network.append(\n InvLeakyReLU(self.args['ReluType'][\"Enc_alpha\"], inplace=False, invertible=self.inv_leaky) # add inv\n )\n else:\n raise NotImplementedError\n self.name_list.append('DERELU{}*_{}->{}'.format(\n i-1, self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1]))\n # set Linear\n self.network.append(\n nn.Linear(self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1], bias=False)\n )\n 
self.name_list.append('DE{}*_{}->{}'.format(\n i-1, self.Structure[\"layer\"][i], self.Structure[\"layer\"][i-1]))\n self.dec_weight_index.append(len(self.name_list) - 2) # add weight index\n # set requires_grad\n if self.Structure[\"Dec_require_gard\"][i-1] == 0:\n self.network[-1].weight.requires_grad = False\n # add plot\n self.plot_index_list.append(len(self.name_list)-1)\n\n # padding for Enc and Dec\n if len(self.zero_padding) > 0:\n latent_pad = self.zero_padding[-1]\n self.zero_padding = []\n for t in tmp_padding:\n self.zero_padding.append(t)\n #self.zero_padding.append(latent_pad)\n for i in range(len(tmp_padding)-1, -1, -1):\n self.zero_padding.append(tmp_padding[i])\n \n def SetEpoch(self, epoch):\n self.epoch = epoch\n \n def SetMode(self, mode):\n self.mode = mode\n \n def params_transfer(self):\n \"\"\" transfer Enc's params to Dec, when Dec is frozen \"\"\"\n\n for i in range(self.layer_num):\n enc_i = self.enc_weight_index[i]\n dec_i = self.dec_weight_index[-i-1] if i > 0 else self.dec_weight_index[-1]\n \n row, col = self.network[dec_i].weight.shape\n if row == col:\n #self.network[dec_i].weight = Parameter(self.network[enc_i].weight.t() ) # transpose\n self.network[dec_i].weight = Parameter(torch.inverse(self.network[enc_i].weight) ) # inverse\n else:\n self.network[dec_i].weight = Parameter(torch.pinverse(self.network[enc_i].weight) ) # fake inverse\n if self.Structure[\"Dec_require_gard\"][i] == 0:\n self.network[dec_i].weight.requires_grad = False\n\n def GetGradual(self, step=[0,0,0]):\n \"\"\" gradual change loss weight [ascendant, descendant] \"\"\"\n if step[2] > 0: # 0 -> 1\n gradual = 0\n if self.epoch >= step[0]:\n if self.epoch < step[1]:\n gap = step[1] - step[0]\n gradual = (self.epoch - step[0]) / gap\n else:\n gradual = 1\n else: # 1 -> 0\n gradual = 1\n if self.epoch >= step[0]:\n if self.epoch < step[1]:\n gap = step[1] - step[0]\n gradual -= (self.epoch - step[0]) / gap\n else:\n gradual = 0\n return gradual\n\n def forward(self, input_data):\n \"\"\" baseline, add orth \"\"\"\n input_data = input_data.view(input_data.shape[0], -1)\n input_c = input_data\n # info\n output_info = [input_data, ] # store input and each layer output\n grad_info = [None] # store requires_grad\n extra_info = []\n pad_info = []\n # extra\n tmp_res = None\n extra_i = 0 # extra head\n use_jump = self.mode == 'decoder' or self.args[\"InverseMode\"][\"mode\"] == \"ZeroPadding\"\n use_jump = True\n\n for i, layer in enumerate(self.network):\n # 1. normal\n output_c = layer(input_c) # for add jump\n # save output before padding\n pad_info.append(output_c)\n\n # 2. zero padding, use for CS\n if len(self.zero_padding) > 0:\n if i < len(self.zero_padding):\n pad_bool = False\n if self.zero_padding[i] > 0 and self.zero_padding[i] < output_c.shape[1]:\n if self.mode == 'encoder' and i < self.layer_num * 2 - 1: # padding latent, not decoder\n pad_bool = True\n elif self.mode == 'decoder' and i < self.layer_num * 2 - 2: # not for decoder\n pad_bool = True\n \n if pad_bool:\n pad_num = self.zero_padding[i]\n pad_shape = (output_c.shape[0], output_c.shape[1] - pad_num)\n zeros = torch.zeros(pad_shape).to(self.device)\n # pad output\n output_c[:, self.zero_padding[i]:] = zeros # padding with zeros\n\n # 3. 
jump (Dec)\n if self.add_jump: # Dec without DR\n try:\n grad_info.append(layer.weight.requires_grad)\n if use_jump:\n if layer.weight.shape == (2, 50):\n tmp_res = input_c\n elif layer.weight.shape == (50, 2):\n output_c = tmp_res # only for 'decoder'\n except:\n grad_info.append(None)\n \n # 4. extra Head (Enc)\n if len(self.extra_index) > 0:\n if extra_i < len(self.extra_index):\n if self.extra_index[extra_i][\"index\"] == i:\n output_e = self.extraHead[ self.extra_index[extra_i][\"extra\"] ](input_c)\n extra_info.append(output_e)\n extra_i += 1\n # normal\n output_info.append(output_c)\n input_c = output_c\n \n # list of each layer output\n return {'output': output_info, 'weight': self.network.parameters(), \"grad\": grad_info, \\\n \"extra\": extra_info, \"padding\": pad_info}\n"
]
| [
[
"torch.max",
"torch.zeros",
"torch.pinverse",
"torch.min",
"torch.nn.ModuleList",
"torch.inverse",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
]
]
|
janiapurv/offset-human-interface | [
"e4aaba3886f846d9a611397e287079f722b9132c"
]
| [
"offset-game/envs/extract_info.py"
]
| [
"import numpy as np\n\n\ndef action_parameters(vehicles, parameters):\n centroid = get_centroid(vehicles)\n parameters['centroid_pos'] = centroid\n return parameters\n\n\ndef get_centroid(vehicles):\n \"\"\"Get the centroid of the vehicles\n \"\"\"\n centroid = []\n for vehicle in vehicles:\n centroid.append(vehicle.current_pos)\n centroid = np.mean(np.asarray(centroid), axis=0)\n return centroid[0:2] # only x and y\n"
]
| [
[
"numpy.asarray"
]
]
|
joymallyac/Fairway | [
"6aa11e4a182e7271059a473cd6857f91d1668e7f"
]
| [
"Measure.py"
]
| [
"import numpy as np\nimport copy,math\nfrom sklearn.metrics import confusion_matrix,classification_report\n\n\ndef get_counts(clf, x_train, y_train, x_test, y_test, test_df, biased_col, metric='aod'):\n \n clf.fit(x_train, y_train)\n y_pred = clf.predict(x_test)\n cnf_matrix = confusion_matrix(y_test, y_pred)\n\n TN, FP, FN, TP = confusion_matrix(y_test,y_pred).ravel()\n\n print(TN, FP, FN, TP)\n\n test_df_copy = copy.deepcopy(test_df)\n test_df_copy['current_pred_' + biased_col] = y_pred\n\n test_df_copy['TP_' + biased_col + \"_1\"] = np.where((test_df_copy['Probability'] == 1) &\n (test_df_copy['current_pred_' + biased_col] == 1) &\n (test_df_copy[biased_col] == 1), 1, 0)\n\n test_df_copy['TN_' + biased_col + \"_1\"] = np.where((test_df_copy['Probability'] == 0) &\n (test_df_copy['current_pred_' + biased_col] == 0) &\n (test_df_copy[biased_col] == 1), 1, 0)\n\n test_df_copy['FN_' + biased_col + \"_1\"] = np.where((test_df_copy['Probability'] == 1) &\n (test_df_copy['current_pred_' + biased_col] == 0) &\n (test_df_copy[biased_col] == 1), 1, 0)\n\n test_df_copy['FP_' + biased_col + \"_1\"] = np.where((test_df_copy['Probability'] == 0) &\n (test_df_copy['current_pred_' + biased_col] == 1) &\n (test_df_copy[biased_col] == 1), 1, 0)\n\n test_df_copy['TP_' + biased_col + \"_0\"] = np.where((test_df_copy['Probability'] == 1) &\n (test_df_copy['current_pred_' + biased_col] == 1) &\n (test_df_copy[biased_col] == 0), 1, 0)\n\n test_df_copy['TN_' + biased_col + \"_0\"] = np.where((test_df_copy['Probability'] == 0) &\n (test_df_copy['current_pred_' + biased_col] == 0) &\n (test_df_copy[biased_col] == 0), 1, 0)\n\n test_df_copy['FN_' + biased_col + \"_0\"] = np.where((test_df_copy['Probability'] == 1) &\n (test_df_copy['current_pred_' + biased_col] == 0) &\n (test_df_copy[biased_col] == 0), 1, 0)\n\n test_df_copy['FP_' + biased_col + \"_0\"] = np.where((test_df_copy['Probability'] == 0) &\n (test_df_copy['current_pred_' + biased_col] == 1) &\n (test_df_copy[biased_col] == 0), 1, 0)\n\n a = test_df_copy['TP_' + biased_col + \"_1\"].sum()\n b = test_df_copy['TN_' + biased_col + \"_1\"].sum()\n c = test_df_copy['FN_' + biased_col + \"_1\"].sum()\n d = test_df_copy['FP_' + biased_col + \"_1\"].sum()\n e = test_df_copy['TP_' + biased_col + \"_0\"].sum()\n f = test_df_copy['TN_' + biased_col + \"_0\"].sum()\n g = test_df_copy['FN_' + biased_col + \"_0\"].sum()\n h = test_df_copy['FP_' + biased_col + \"_0\"].sum()\n\n print(a,b,c,d,e,f,g,h)\n\n if metric=='aod':\n return calculate_average_odds_difference(a, b, c, d, e, f, g, h)\n elif metric=='eod':\n return calculate_equal_opportunity_difference(a, b, c, d, e, f, g, h) \n elif metric=='recall':\n return calculate_recall(TP,FP,FN,TN)\n elif metric=='far':\n return calculate_far(TP,FP,FN,TN)\n elif metric=='precision':\n return calculate_precision(TP,FP,FN,TN)\n elif metric=='accuracy':\n return calculate_accuracy(TP,FP,FN,TN)\n elif metric=='TPR':\n return calculate_TPR_difference(a, b, c, d, e, f, g, h)\n elif metric=='FPR':\n return calculate_FPR_difference(a, b, c, d, e, f, g, h)\n\n\n\ndef calculate_average_odds_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female):\n # TPR_male = TP_male/(TP_male+FN_male)\n # TPR_female = TP_female/(TP_female+FN_female)\n # FPR_male = FP_male/(FP_male+TN_male)\n # FPR_female = FP_female/(FP_female+TN_female)\n # average_odds_difference = abs(abs(TPR_male - TPR_female) + abs(FPR_male - FPR_female))/2\n FPR_diff = calculate_FPR_difference(TP_male , TN_male, FN_male,FP_male, 
TP_female , TN_female , FN_female, FP_female)\n TPR_diff = calculate_TPR_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female)\n average_odds_difference = (FPR_diff + TPR_diff)/2\n #print(\"average_odds_difference\",average_odds_difference)\n return round(average_odds_difference,2)\n\n\ndef calculate_equal_opportunity_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female):\n # TPR_male = TP_male/(TP_male+FN_male)\n # TPR_female = TP_female/(TP_female+FN_female) \n # equal_opportunity_difference = abs(TPR_male - TPR_female)\n #print(\"equal_opportunity_difference:\",equal_opportunity_difference)\n return calculate_TPR_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female)\n\ndef calculate_TPR_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female):\n TPR_male = TP_male/(TP_male+FN_male)\n TPR_female = TP_female/(TP_female+FN_female)\n print(\"TPR_male:\",TPR_male,\"TPR_female:\",TPR_female) \n diff = (TPR_male - TPR_female)\n return round(diff,2)\n\ndef calculate_FPR_difference(TP_male , TN_male, FN_male,FP_male, TP_female , TN_female , FN_female, FP_female):\n FPR_male = FP_male/(FP_male+TN_male)\n FPR_female = FP_female/(FP_female+TN_female)\n print(\"FPR_male:\",FPR_male,\"FPR_female:\",FPR_female) \n diff = (FPR_female - FPR_male) \n return round(diff,2)\n\n\ndef calculate_recall(TP,FP,FN,TN):\n if (TP + FN) != 0:\n recall = TP / (TP + FN)\n else:\n recall = 0\n return round(recall,2)\n\ndef calculate_far(TP,FP,FN,TN):\n if (FP + TN) != 0:\n far = FP / (FP + TN)\n else:\n far = 0\n return round(far,2)\n\ndef calculate_precision(TP,FP,FN,TN):\n if (TP + FP) != 0:\n prec = TP / (TP + FP)\n else:\n prec = 0\n return round(prec,2)\n\ndef calculate_accuracy(TP,FP,FN,TN):\n return round((TP + TN)/(TP + TN + FP + FN),2)\n\n\ndef measure_final_score(test_df, clf, X_train, y_train, X_test, y_test, biased_col, metric):\n df = copy.deepcopy(test_df)\n return get_counts(clf, X_train, y_train, X_test, y_test, df, biased_col, metric=metric)\n\n"
]
| [
[
"numpy.where",
"sklearn.metrics.confusion_matrix"
]
]
|
badarsh2/Virtual-Makeup | [
"065ed6d58c269b3be092ac084485277e125c9c5f"
]
| [
"foundation/foundation.py"
]
| [
"from __future__ import division\nimport cv2\nimport Image, numpy as np,math\nimport scipy as sp\nfrom numpy.linalg import eig, inv\nfrom scipy.interpolate import interp1d, InterpolatedUnivariateSpline\nfrom pylab import *\nfrom skimage import io, color\nfrom scipy import interpolate\n\n\n#Reqd color\nR,G,B = (200.,121.,46.)\nR,G,B = (234.,135.,103.)\n\n#Intensity low = 0.5, med = 0.6, high = 0.7\ninten=0.6\n\nup_left_end = 3\nup_right_end = 5\n\neye_lower_left_end = 5\neye_upper_left_end = 10\neye_lower_right_end = 15\neye_upper_right_end = 20\n\n\ndef fitEllipse(x,y):\n x = x[:,np.newaxis]\n y = y[:,np.newaxis]\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\n S = np.dot(D.T,D)\n C = np.zeros([6,6])\n C[0,2] = C[2,0] = 2; C[1,1] = -1\n E, V = eig(np.dot(inv(S), C))\n n = np.argmax(np.abs(E))\n a = V[:,n]\n return a\n\ndef ellipse_center(a):\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\n num = b*b-a*c\n x0=(c*d-b*f)/num\n y0=(a*f-b*d)/num\n return np.array([x0,y0])\n\n\ndef ellipse_angle_of_rotation( a ):\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\n return 0.5*np.arctan(2*b/(a-c))\n\n\ndef ellipse_axis_length( a ):\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\n up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n res1=np.sqrt(up/down1)\n res2=np.sqrt(up/down2)\n return np.array([res1, res2])\n\ndef ellipse_angle_of_rotation2( a ):\n b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]\n if b == 0:\n if a < c:\n return 0\n else:\n return np.pi/2\n else:\n if a < c:\n return np.arctan(2*b/(a-c))/2\n else:\n return np.pi/2 + np.arctan(2*b/(a-c))/2\n\ndef getEllipse(x,y):\n arc = 0.8\n R = np.arange(0,arc*np.pi, 0.01)\n a = fitEllipse(x,y)\n center = ellipse_center(a)\n phi = ellipse_angle_of_rotation(a)\n axes = ellipse_axis_length(a)\n return (center[0],center[1]),(axes[0],axes[1]),phi\n\ndef univariate_plot(lx=[],ly=[]):\n unew = np.arange(lx[0], lx[-1]+1, 1)\n f2 = InterpolatedUnivariateSpline(lx, ly)\n return unew,f2(unew)\n\ndef inter_plot(lx=[],ly=[],k1='quadratic'):\n unew = np.arange(lx[0], lx[-1]+1, 1)\n f2 = interp1d(lx, ly, kind=k1)\n return unew,f2(unew)\n\ndef getBoundaryPoints(x , y):\n tck,u = interpolate.splprep([x, y], s=0, per=1)\n unew = np.linspace(u.min(), u.max(), 10000)\n xnew,ynew = interpolate.splev(unew, tck, der=0)\n tup = c_[xnew.astype(int),ynew.astype(int)].tolist()\n coord = list(set(tuple(map(tuple, tup))))\n coord = np.array([list(elem) for elem in coord])\n return coord[:,0],coord[:,1]\n\ndef getInteriorPoints(x , y):\n intx = []\n inty = []\n def ext(a, b, i):\n a, b=round(a), round(b)\n intx.extend(arange(a, b, 1).tolist())\n inty.extend((ones(b-a)*i).tolist())\n x, y = np.array(x), np.array(y)\n xmin, xmax = amin(x), amax(x)\n xrang = np.arange(xmin, xmax+1, 1)\n for i in xrang:\n ylist = y[where(x==i)]\n ext(amin(ylist), amax(ylist), i)\n return intx, inty\n\ndef checkForSkin(IMG10):\n high,widt=IMG10.shape[:2]\n\n B1=np.reshape(np.float32(IMG10[:,:,0]),high*widt)#B\n G1=np.reshape(np.float32(IMG10[:,:,1]),high*widt)#G\n R1=np.reshape(np.float32(IMG10[:,:,2]),high*widt)#Rs\n\n #print high,widt\n h3=np.zeros((high,widt,3),np.uint8)\n\n #cv2.imshow(\"onetime\",h)\n \n\n tem=np.logical_and(np.logical_and(np.logical_and(np.logical_and(R1 > 95, G1 > 40),np.logical_and(B1 > 20, (np.maximum(np.maximum(R1,G1),B1) - np.minimum(np.minimum(R1,G1),B1)) > 15)),R1>B1),np.logical_and(np.absolute(R1-G1) > 
15,R1>G1))\n h5=np.array(tem).astype(np.uint8,order='C',casting='unsafe')\n\n h5=np.reshape(h5,(high,widt))\n h3[:,:,0]=h5\n h3[:,:,1]=h5\n h3[:,:,2]=h5\n #cv2.imshow(\"thirdtime\",h3)\n kernel1 = np.ones((3,3),np.uint8)\n closedH3=np.copy(h3)\n for i in range(5):\n closedH3 = cv2.erode(closedH3,kernel1)\n for i in range(5):\n closedH3 = cv2.dilate(closedH3,kernel1)\n #cv2.imshow(\"closedH3\",closedH3)\n # closedH3 = cv2.cvtColor(closedH3, cv2.COLOR_BGR2RGB)\n return closedH3\n\nfileface = np.loadtxt('pointface.txt')\npointsface = np.floor(fileface)\npoint_face_x = np.array((pointsface[:][:,0]))\npoint_face_y = np.array(pointsface[:][:,1])\n\nfile = np.loadtxt('pointlips.txt')\npoints = np.floor(file)\npoint_out_x = np.array((points[:len(points)//2][:,0]))\npoint_out_y = np.array(points[:len(points)//2][:,1])\npoint_in_x = (points[len(points)//2:][:,0])\npoint_in_y = points[len(points)//2:][:,1]\n\nfileeye = np.loadtxt('pointeyes.txt')\npointseye = np.floor(fileeye)\neye_point_down_x = np.array((pointseye[:eye_lower_left_end][:,0]))\neye_point_down_y = np.array(pointseye[:eye_lower_left_end][:,1])\neye_point_up_x = np.array(pointseye[eye_lower_left_end:eye_upper_left_end][:,0])\neye_point_up_y = np.array(pointseye[eye_lower_left_end:eye_upper_left_end][:,1])\neye_point_down_x_right = np.array((pointseye[eye_upper_left_end:eye_lower_right_end][:,0]))\neye_point_down_y_right = np.array(pointseye[eye_upper_left_end:eye_lower_right_end][:,1])\neye_point_up_x_right = np.array((pointseye[eye_lower_right_end:eye_upper_right_end][:,0]))\neye_point_up_y_right = np.array(pointseye[eye_lower_right_end:eye_upper_right_end][:,1])\n\nim = imread('Input.jpg')\nim2 = im.copy()\nheight, width = im.shape[:2]\n\nx_face = []\ny_face = []\nx_aux = []\ny_aux = []\n\n# Face\nlower_face = univariate_plot(point_face_x[:],point_face_y[:])\nx_face.extend(lower_face[0][::-1])\ny_face.extend(lower_face[1][::-1])\n\n# Upper face approximation\n(centerx,centery),(axesx,axesy),angel = getEllipse(point_face_x,point_face_y)\ncenterpt = (int(centerx),int(centery))\naxeslen = (int(axesx),int(axesy*1.2))\n# cv2.ellipse(im,centerpt,axeslen,angel,180,360,(0,255,0),2)\n# upper_ellipse = cv2.ellipse2Poly(centerpt,axeslen,int(angel),180,360,1)\n# upperellipse[1] = cv2.ellipse2Poly(centerpt,axeslen,int(angel),180,360,1)[:,1]\nellippoints = cv2.ellipse2Poly(centerpt,axeslen,int(angel),180,360,1)\nellippoints = np.floor(ellippoints)\nellipseabs = ellippoints[:,0].tolist()\nellipseord = ellippoints[:,1].tolist()\n# upper_ellipse = univariate_plot(ellipseabs, ellipseord)\n# upper_ellipse = inter_plot(ellipseabs, ellipseord, 'linear')\nx_face.extend(ellipseabs)\ny_face.extend(ellipseord)\n\nx_face.append(x_face[0])\ny_face.append(y_face[0])\n\nx_face, y_face = getBoundaryPoints(x_face, y_face)\n# print upper_ellipse[0], upper_ellipse[1]\n\n# imshow(im)\n# # plot(upper_ellipse[0], upper_ellipse[1], 'g-')\n# plot(x_face, y_face, 'go')\n# gca().set_aspect('equal', adjustable='box')\n# imsave('out1.jpg',im)\n# show()\nx, y = getInteriorPoints(x_face, y_face)\n\n#Lips\nl_u_l = inter_plot(point_out_x[:up_left_end],point_out_y[:up_left_end])\nl_u_r = inter_plot(point_out_x[up_left_end-1:up_right_end],point_out_y[up_left_end-1:up_right_end])\nl_l = inter_plot([point_out_x[0]]+point_out_x[up_right_end-1:][::-1].tolist(),[point_out_y[0]]+point_out_y[up_right_end-1:][::-1].tolist(),'cubic')\nlipinteriorx, lipinteriory = getInteriorPoints(l_u_l[0].tolist() + l_u_r[0].tolist() + l_l[0].tolist(),l_u_l[1].tolist() + l_u_r[1].tolist() + 
l_l[1].tolist())\nx_aux.extend(lipinteriorx)\ny_aux.extend(lipinteriory)\n\n#Eyes\ne_l_l = inter_plot(eye_point_down_x[:],eye_point_down_y[:],'cubic')\ne_u_l = inter_plot(eye_point_up_x[:],eye_point_up_y[:],'cubic')\nlefteyex, lefteyey = getInteriorPoints(e_l_l[0].tolist() + e_u_l[0].tolist(), e_l_l[1].tolist() + e_u_l[1].tolist())\nx_aux.extend(lefteyex)\ny_aux.extend(lefteyey)\n\ne_l_r = inter_plot(eye_point_down_x_right[:],eye_point_down_y_right[:],'cubic')\ne_u_r = inter_plot(eye_point_up_x_right[:],eye_point_up_y_right[:],'cubic')\nrighteyex, righteyey = getInteriorPoints(e_l_r[0].tolist() + e_u_r[0].tolist(), e_l_r[1].tolist() + e_u_r[1].tolist())\nx_aux.extend(righteyex)\ny_aux.extend(righteyey)\n\ntemp = im[x_aux, y_aux]\n\nval = color.rgb2lab((im[x,y]/255.).reshape(len(x),1,3)).reshape(len(x),3)\nvallips = color.rgb2lab((im[x_aux,y_aux]/255.).reshape(len(x_aux),1,3)).reshape(len(x_aux),3)\n# print sum(val[:,0])\nL = (sum(val[:,0])-sum(vallips[:,0]))/(len(val[:,0])-len(vallips[:,0]))\nA = (sum(val[:,1])-sum(vallips[:,1]))/(len(val[:,1])-len(vallips[:,1]))\nbB = (sum(val[:,2])-sum(vallips[:,2]))/(len(val[:,2])-len(vallips[:,2]))\n\nL1,A1,B1 = color.rgb2lab(np.array((R/255.,G/255.,B/255.)).reshape(1,1,3)).reshape(3,)\nval[:,0] += (L1-L)*inten\nval[:,1] += (A1-A)*inten\nval[:,2] += (B1-bB)*inten\n\nim[x,y] = color.lab2rgb(val.reshape(len(x),1,3)).reshape(len(x),3)*255\n\nscale = min(width/750, height/1000)\n# Blur Filter\nfilter = np.zeros((height,width))\ncv2.fillConvexPoly(filter,np.array(c_[y, x],dtype = 'int32'),1)\n# cv2.fillConvexPoly(filter,np.array(c_[yright, xright],dtype = 'int32'),1)\nplt.imshow(filter)\nsigma = (int(int(201 * scale)/2)*2) + 1\nfilter = cv2.GaussianBlur(filter,(sigma,sigma),0)\n\n# Erosion to reduce blur size\nkernel_size = int(12 * scale)\nkernel = np.ones((kernel_size,kernel_size),np.uint8)\nfilter = cv2.erode(filter,kernel,iterations = 4)\n\nalpha=np.zeros([height,width,3],dtype='float64')\nalpha[:,:,0]=filter\nalpha[:,:,1]=filter\nalpha[:,:,2]=filter\n\nimmask = cv2.imread('Input.jpg')\nskinalpha = checkForSkin(immask)\nimshow(skinalpha*255)\ngca().set_aspect('equal', adjustable='box')\nshow()\n\n# xspl, yspl = getBoundaryPoints(ellipseabs , ellipseord)\n# xspl, yspl = getInteriorPoints(xspl, yspl)\nim = (alpha*im+(1-alpha)*im2).astype('uint8')\nim = ((skinalpha)*im+(1-(skinalpha))*im2).astype('uint8')\nimshow(im)\n# plot(upper_ellipse[0], upper_ellipse[1], 'g-')\n# plot(lower_face[0], lower_face[1], 'g-')\ngca().set_aspect('equal', adjustable='box')\nimsave('out1.jpg',im)\nshow()"
]
| [
[
"numpy.dot",
"numpy.minimum",
"numpy.sqrt",
"numpy.arctan",
"numpy.ones_like",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.reshape",
"numpy.arange",
"numpy.copy",
"scipy.interpolate.interp1d",
"numpy.float32",
"numpy.zeros",
"numpy.linalg.inv",
"scipy.interpolate.splprep",
"scipy.interpolate.splev",
"numpy.floor",
"numpy.array",
"numpy.logical_and",
"numpy.absolute",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"numpy.loadtxt"
]
]
|
XuJiacong/test2 | [
"79b21b3eea1a402d4f401b771f78242411855524"
]
| [
"lib/models/hrunetv666.py"
]
| [
"import math\nimport torch\nimport numpy as np \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom collections import OrderedDict\nimport time\n\nBatchNorm2d = nn.BatchNorm2d\nbn_mom = 0.1\nalgc = False\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\n# Deep Pyramid Feature Fusion Module\nclass DPFFM(nn.Module):\n def __init__(self, inplanes=512, branch_planes=128, mdplanes=256, outplanes=128):\n super(DPFFM, self).__init__()\n \n self.scale0 = nn.Sequential(\n BatchNorm2d(inplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n BatchNorm2d(branch_planes, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n \n self.scale1 = nn.Sequential(\n BatchNorm2d(inplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n BatchNorm2d(branch_planes, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.pooling1 = nn.AvgPool2d(kernel_size=5, stride=2, padding=2)\n \n self.scale2 = nn.Sequential(\n BatchNorm2d(inplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n BatchNorm2d(branch_planes, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.pooling2 = nn.AvgPool2d(kernel_size=9, stride=4, padding=4)\n \n self.scale3 = nn.Sequential(\n BatchNorm2d(inplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\n BatchNorm2d(branch_planes, momentum=bn_mom),\n 
nn.ReLU(inplace=True)\n )\n self.pooling3 = nn.AvgPool2d(kernel_size=17, stride=8, padding=8)\n \n self.process0 = nn.Sequential(\n nn.Conv2d(branch_planes, branch_planes//2, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(branch_planes//2, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.process1 = nn.Sequential(\n nn.Conv2d(branch_planes, branch_planes//2, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(branch_planes//2, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.process2 = nn.Sequential(\n nn.Conv2d(branch_planes, branch_planes//2, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(branch_planes//2, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.process3 = nn.Sequential(\n nn.Conv2d(branch_planes, branch_planes//2, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(branch_planes//2, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n \n self.ffm1 = nn.Sequential(\n nn.Conv2d(branch_planes * 2, mdplanes, kernel_size=1, bias=False),\n BatchNorm2d(mdplanes, momentum=bn_mom),\n nn.ReLU(inplace=True)\n )\n self.ffm2 = nn.Sequential(\n nn.AdaptiveAvgPool2d(1), \n nn.Conv2d(mdplanes, mdplanes, kernel_size=1, bias=False),\n BatchNorm2d(mdplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(mdplanes, mdplanes, kernel_size=1, bias=False),\n BatchNorm2d(mdplanes, momentum=bn_mom),\n nn.Sigmoid(),\n #nn.Softmax(dim=1)\n )\n \n self.compression = nn.Sequential(\n nn.Conv2d(mdplanes, outplanes, kernel_size=1, bias=False),\n BatchNorm2d(outplanes, momentum=bn_mom)\n )\n\n self.shortcut = nn.Sequential(\n BatchNorm2d(inplanes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),\n BatchNorm2d(outplanes, momentum=bn_mom)\n )\n\n def forward(self, x):\n\n #x = self.downsample(x)\n width = x.shape[-1]\n height = x.shape[-2] \n x_list = []\n\n x_list.append(self.process0(self.scale0(x)))\n x1 = self.scale1(x)\n x_list.append(self.process1((F.interpolate(self.pooling1(x1),\n size=[height, width],\n mode='bilinear', align_corners=algc)+x1)))\n x2 = self.scale2(x)\n x_list.append(self.process2((F.interpolate(self.pooling2(x2),\n size=[height, width],\n mode='bilinear', align_corners=algc)+x2)))\n x3 = self.scale3(x)\n x_list.append(self.process3((F.interpolate(self.pooling3(x3),\n size=[height, width],\n mode='bilinear', align_corners=algc)+x3)))\n \n out = self.ffm1(torch.cat(x_list, 1))\n out = out + out * self.ffm2(out)\n out = self.compression(out) + self.shortcut(x)\n return out \n\n\nclass segmenthead(nn.Module):\n\n def __init__(self, inplanes, interplanes, outplanes, scale_factor=None):\n super(segmenthead, self).__init__()\n self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)\n self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)\n self.bn2 = BatchNorm2d(interplanes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)\n self.scale_factor = scale_factor\n\n def forward(self, x):\n \n x = self.conv1(self.relu(self.bn1(x)))\n out = self.conv2(self.relu(self.bn2(x)))\n\n if self.scale_factor is not None:\n height = x.shape[-2] * self.scale_factor\n width = x.shape[-1] * self.scale_factor\n out = F.interpolate(out,\n size=[height, width],\n mode='bilinear', align_corners=algc)\n\n return out\n\nclass Hrunetv6(nn.Module):\n\n def __init__(self, block, layers, num_classes=19, planes=32, head_planes=64, augment=True):\n super(Hrunetv6, self).__init__()\n\n self.augment = augment\n\n self.conv1 = 
nn.Sequential(\n nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),\n BatchNorm2d(planes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),\n BatchNorm2d(planes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n )\n\n self.relu = nn.ReLU(inplace=False)\n self.ReLU = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, planes, planes, layers[0])\n self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)\n\n self.compression3 = nn.Sequential(\n nn.Conv2d(planes * 4, planes * 2, kernel_size=1, bias=False),\n BatchNorm2d(planes * 2, momentum=bn_mom),\n )\n\n self.compression4 = nn.Sequential(\n nn.Conv2d(planes * 8, planes * 2, kernel_size=1, bias=False),\n BatchNorm2d(planes * 2, momentum=bn_mom),\n )\n\n self.down3 = nn.Sequential(\n nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom),\n )\n\n self.down4 = nn.Sequential(\n nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom),\n nn.ReLU(inplace=True),\n nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),\n BatchNorm2d(planes * 8, momentum=bn_mom)\n )\n \n self.mlayer_8 = self._make_single_layer(block, planes * 2, planes * 2, no_relu=False)\n \n \n #self._make_single_layer(block, planes * 2, planes * 2, no_relu=False)\n \n self.mlayer_16 = nn.Sequential(\n nn.Conv2d(planes * 4, planes * 4, kernel_size=1, padding=0, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom)\n )\n self.mlayer_32 = nn.Sequential(\n nn.Conv2d(planes * 8, planes * 4, kernel_size=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom)\n )\n\n self.layer3_ = self._make_single_layer(block, planes * 2, planes * 2)\n\n self.layer4_ = self._make_single_layer(block, planes * 2, planes * 2)\n\n self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1, stride=2)\n\n self.spp = DPFFM(inplanes=planes*16, branch_planes=planes*4, mdplanes=planes*8, outplanes=planes*4)\n \n self.move_8_1 = self._make_single_layer(block, planes * 2, planes * 2, no_relu=False)\n self.move_8_2 = self._make_layer(Bottleneck, planes * 2, planes * 2, 1)\n \n self.compression_32 = nn.Sequential(\n nn.Conv2d(planes * 4, planes * 2, kernel_size=1, bias=False),\n BatchNorm2d(planes * 2, momentum=bn_mom),\n nn.Sigmoid()\n )\n self.compression_16 = nn.Sequential(\n nn.Conv2d(planes * 4, planes * 2, kernel_size=1, bias=False),\n BatchNorm2d(planes * 2, momentum=bn_mom),\n nn.Sigmoid()\n )\n\n self.down_8_1 = nn.Sequential(\n nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom),\n nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n )\n self.down_8_2 = nn.Sequential(\n nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom)\n )\n \n self.up_32 = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(planes * 4, planes * 4, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom)\n )\n self.up_16 = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(planes * 4, planes * 4, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(planes * 4, momentum=bn_mom)\n )\n \n if self.augment:\n #self.seghead_extra1 = 
segmenthead(planes * 2, head_planes, num_classes)\n self.seghead_extra2 = segmenthead(planes * 2, head_planes, num_classes) \n\n self.final_layer = segmenthead(planes * 4, head_planes, num_classes)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n if i == (blocks-1):\n layers.append(block(inplanes, planes, stride=1, no_relu=True))\n else:\n layers.append(block(inplanes, planes, stride=1, no_relu=False))\n\n return nn.Sequential(*layers)\n \n def _make_single_layer(self, block, inplanes, planes, stride=1, no_relu=True):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\n )\n\n layer = block(inplanes, planes, stride, downsample, no_relu=no_relu)\n \n return layer\n\n def forward(self, x):\n\n width_output = x.shape[-1] // 8\n height_output = x.shape[-2] // 8\n layers = []\n\n x = self.conv1(x)\n x = self.layer1(x)\n x = self.ReLU(self.layer2(self.ReLU(x)))\n x_16 = self.layer3(x)\n layers.append(x_16)\n x_8 = self.layer3_(x)\n\n x_16 = self.ReLU(x_16 + self.down3(self.relu(x_8)))\n x_8 = x_8 + F.interpolate(\n self.compression3(self.relu(layers[0])),\n size=[height_output, width_output],\n mode='bilinear', align_corners=algc)\n if self.augment:\n temp1 = x_8\n \n x_8 = self.layer4_(self.relu(x_8))\n x_32 = self.layer4(x_16)\n layers.append(x_32)\n x_32 = self.ReLU(x_32 + self.down4(self.relu(x_8)))\n x_8 = x_8 + F.interpolate(\n self.compression4(self.relu(layers[1])),\n size=[height_output, width_output],\n mode='bilinear', align_corners=algc)\n if self.augment:\n temp2 = x_8\n \n x_8 = self.mlayer_8(self.relu(x_8))\n x_16 = self.mlayer_16(x_16)\n \n '''==========================='''\n x_64 = self.layer5(x_32)\n x_32 = self.mlayer_32(x_32)\n x_32 = x_32 + F.interpolate(\n self.spp(x_64),\n size=[height_output // 4, width_output // 4],\n mode='bilinear', align_corners=algc)\n '''==========================='''\n \n layers.append(x_8)\n x_8 = x_8 + x_8 * F.interpolate(self.compression_32(self.relu(x_32)),\n size=[height_output, width_output],\n mode='bilinear', align_corners=algc)\n\n x_16 = x_16 + F.interpolate(self.up_32(x_32 + self.down_8_1(layers[2])),\n size=[height_output // 2, width_output // 2],\n mode='bilinear', align_corners=algc)\n \n x_8 = self.move_8_1(x_8)\n layers.append(x_8)\n x_8 = x_8 + x_8 * F.interpolate(self.compression_16(self.relu(x_16)),\n size=[height_output, width_output],\n mode='bilinear', align_corners=algc)\n \n x_8 = self.move_8_2(x_8)\n x_8 = x_8 + F.interpolate(self.up_16(x_16 + self.down_8_2(layers[3])),\n size=[height_output, width_output],\n mode='bilinear', align_corners=algc)\n \n x = self.final_layer(x_8)\n\n if self.augment: \n #x_extra1 = self.seghead_extra1(temp1)\n x_extra2 = 
self.seghead_extra2(temp2)\n return [x_extra2, x]\n else:\n return x \n\ndef Hrunetv6_imagenet(cfg, pretrained=False):\n model = Hrunetv6(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, head_planes=64, augment=True)\n if pretrained:\n pretrained_state = torch.load(cfg.MODEL.PRETRAINED, map_location='cpu')['state_dict']\n model_dict = model.state_dict()\n #pretrained_state = {k[7:]: v for k, v in pretrained_state.items() if (k[7:] in model_dict and v.shape == model_dict[k[7:]].shape)}\n pretrained_state = {k: v for k, v in pretrained_state.items() if (k in model_dict and v.shape == model_dict[k].shape)}\n model_dict.update(pretrained_state)\n print(\"Loaded the pretrained model with {} components!\".format(len(pretrained_state)))\n model.load_state_dict(model_dict, strict = False)\n return model\n\ndef get_seg_model(cfg, **kwargs):\n model = Hrunetv6_imagenet(cfg, pretrained=True)\n return model\n\n\"\"\"\nx = torch.rand(2, 3, 1024, 1024).cuda()\nmodel = Hrunetv6(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, head_planes=64, augment=True)\npretrained_state = torch.load(\"D:/ImageNet/imagenet_test/checkpoints/imagenet/hrunetv6_city/hrunetv6.pth.tar\", map_location='cpu')['state_dict']\nmodel_dict = model.state_dict()\n#pretrained_state = {k[7:]: v for k, v in pretrained_state.items() if (k[7:] in model_dict and v.shape == model_dict[k[7:]].shape)}\npretrained_state = {k: v for k, v in pretrained_state.items() if (k in model_dict and v.shape == model_dict[k].shape)}\nmodel_dict.update(pretrained_state)\nprint(\"Loaded the pretrained model with {} components!\".format(len(pretrained_state)))\nmodel.load_state_dict(model_dict, strict = False)\nmodel.cuda()\ny = model(x)\n\n\"\"\"\n\"\"\"\nx = torch.rand(4, 3, 1024, 2048).cuda()\nmodel = Hrunetv6(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, head_planes=64, augment=False).to('cuda:0')\nnum_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nmodel.eval()\na = time.time()\nfor i in range(100):\n y = model(x)\nb = time.time()\nprint(400/(b-a))\n\"\"\"\n\n\n\n\n\n"
]
| [
[
"torch.nn.Sequential",
"torch.cat",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.AvgPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
]
|
JohnnyPeng18/coach | [
"1ee9e10747c535b387a00c946efa220efd114d47"
]
| [
"rl_coach/agents/ddpg_agent.py"
]
| [
"#\n# Copyright (c) 2017 Intel Corporation \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport copy\nfrom typing import Union\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom rl_coach.agents.actor_critic_agent import ActorCriticAgent\nfrom rl_coach.agents.agent import Agent\nfrom rl_coach.architectures.embedder_parameters import InputEmbedderParameters\nfrom rl_coach.architectures.head_parameters import DDPGActorHeadParameters, DDPGVHeadParameters\nfrom rl_coach.architectures.middleware_parameters import FCMiddlewareParameters\nfrom rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \\\n AgentParameters, EmbedderScheme\nfrom rl_coach.core_types import ActionInfo, EnvironmentSteps\nfrom rl_coach.exploration_policies.ou_process import OUProcessParameters\nfrom rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters\nfrom rl_coach.spaces import BoxActionSpace, GoalsSpace\n\n\nclass DDPGCriticNetworkParameters(NetworkParameters):\n def __init__(self, use_batchnorm=False):\n super().__init__()\n self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=use_batchnorm),\n 'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow)}\n self.middleware_parameters = FCMiddlewareParameters()\n self.heads_parameters = [DDPGVHeadParameters()]\n self.optimizer_type = 'Adam'\n self.batch_size = 64\n self.async_training = False\n self.learning_rate = 0.001\n self.adam_optimizer_beta2 = 0.999\n self.optimizer_epsilon = 1e-8\n self.create_target_network = True\n self.shared_optimizer = True\n self.scale_down_gradients_by_number_of_workers_for_sync_training = False\n # self.l2_regularization = 1e-2\n\n\nclass DDPGActorNetworkParameters(NetworkParameters):\n def __init__(self, use_batchnorm=False):\n super().__init__()\n self.input_embedders_parameters = {'observation': InputEmbedderParameters(batchnorm=use_batchnorm)}\n self.middleware_parameters = FCMiddlewareParameters(batchnorm=use_batchnorm)\n self.heads_parameters = [DDPGActorHeadParameters(batchnorm=use_batchnorm)]\n self.optimizer_type = 'Adam'\n self.batch_size = 64\n self.adam_optimizer_beta2 = 0.999\n self.optimizer_epsilon = 1e-8\n self.async_training = False\n self.learning_rate = 0.0001\n self.create_target_network = True\n self.shared_optimizer = True\n self.scale_down_gradients_by_number_of_workers_for_sync_training = False\n\n\nclass DDPGAlgorithmParameters(AlgorithmParameters):\n \"\"\"\n :param num_steps_between_copying_online_weights_to_target: (StepMethod)\n The number of steps between copying the online network weights to the target network weights.\n\n :param rate_for_copying_weights_to_target: (float)\n When copying the online network weights to the target network weights, a soft update will be used, which\n weight the new online network weights by rate_for_copying_weights_to_target\n\n :param num_consecutive_playing_steps: (StepMethod)\n The number of consecutive steps to act between every two training iterations\n\n 
:param use_target_network_for_evaluation: (bool)\n If set to True, the target network will be used for predicting the actions when choosing actions to act.\n Since the target network weights change more slowly, the predicted actions will be more consistent.\n\n :param action_penalty: (float)\n The amount by which to penalize the network on high action feature (pre-activation) values.\n This can prevent the actions features from saturating the TanH activation function, and therefore prevent the\n gradients from becoming very low.\n\n :param clip_critic_targets: (Tuple[float, float] or None)\n The range to clip the critic target to in order to prevent overestimation of the action values.\n\n :param use_non_zero_discount_for_terminal_states: (bool)\n If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state\n values. If set to False, the terminal states reward will be taken as the target return for the network.\n \"\"\"\n def __init__(self):\n super().__init__()\n self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)\n self.rate_for_copying_weights_to_target = 0.001\n self.num_consecutive_playing_steps = EnvironmentSteps(1)\n self.use_target_network_for_evaluation = False\n self.action_penalty = 0\n self.clip_critic_targets = None # expected to be a tuple of the form (min_clip_value, max_clip_value) or None\n self.use_non_zero_discount_for_terminal_states = False\n\n\nclass DDPGAgentParameters(AgentParameters):\n def __init__(self, use_batchnorm=False):\n super().__init__(algorithm=DDPGAlgorithmParameters(),\n exploration=OUProcessParameters(),\n memory=EpisodicExperienceReplayParameters(),\n networks=OrderedDict([(\"actor\", DDPGActorNetworkParameters(use_batchnorm=use_batchnorm)),\n (\"critic\", DDPGCriticNetworkParameters(use_batchnorm=use_batchnorm))]))\n\n @property\n def path(self):\n return 'rl_coach.agents.ddpg_agent:DDPGAgent'\n\n\n# Deep Deterministic Policy Gradients Network - https://arxiv.org/pdf/1509.02971.pdf\nclass DDPGAgent(ActorCriticAgent):\n def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):\n super().__init__(agent_parameters, parent)\n\n self.q_values = self.register_signal(\"Q\")\n self.TD_targets_signal = self.register_signal(\"TD targets\")\n self.action_signal = self.register_signal(\"actions\")\n\n @property\n def is_on_policy(self) -> bool:\n return False\n\n def learn_from_batch(self, batch):\n actor = self.networks['actor']\n critic = self.networks['critic']\n\n actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()\n critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()\n\n # TD error = r + discount*max(q_st_plus_1) - q_st\n next_actions, actions_mean = actor.parallel_prediction([\n (actor.target_network, batch.next_states(actor_keys)),\n (actor.online_network, batch.states(actor_keys))\n ])\n\n critic_inputs = copy.copy(batch.next_states(critic_keys))\n critic_inputs['action'] = next_actions\n q_st_plus_1 = critic.target_network.predict(critic_inputs)[0]\n\n # calculate the bootstrapped TD targets while discounting terminal states according to\n # use_non_zero_discount_for_terminal_states\n if self.ap.algorithm.use_non_zero_discount_for_terminal_states:\n TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1\n else:\n TD_targets = batch.rewards(expand_dims=True) + \\\n (1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1\n\n # 
clip the TD targets to prevent overestimation errors\n if self.ap.algorithm.clip_critic_targets:\n TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)\n\n self.TD_targets_signal.add_sample(TD_targets)\n\n # get the gradients of the critic output with respect to the action\n critic_inputs = copy.copy(batch.states(critic_keys))\n critic_inputs['action'] = actions_mean\n action_gradients = critic.online_network.predict(critic_inputs,\n outputs=critic.online_network.gradients_wrt_inputs[1]['action'])\n\n # train the critic\n critic_inputs = copy.copy(batch.states(critic_keys))\n critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)\n\n # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work\n result = critic.train_and_sync_networks(critic_inputs, TD_targets, use_inputs_for_apply_gradients=True)\n total_loss, losses, unclipped_grads = result[:3]\n\n # apply the gradients from the critic to the actor\n initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}\n gradients = actor.online_network.predict(batch.states(actor_keys),\n outputs=actor.online_network.weighted_gradients[0],\n initial_feed_dict=initial_feed_dict)\n\n # also need the inputs for when applying gradients so batchnorm's update of running mean and stddev will work\n if actor.has_global:\n actor.apply_gradients_to_global_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))\n actor.update_online_network()\n else:\n actor.apply_gradients_to_online_network(gradients, additional_inputs=copy.copy(batch.states(critic_keys)))\n\n return total_loss, losses, unclipped_grads\n\n def train(self):\n return Agent.train(self)\n\n def choose_action(self, curr_state):\n if not (isinstance(self.spaces.action, BoxActionSpace) or isinstance(self.spaces.action, GoalsSpace)):\n raise ValueError(\"DDPG works only for continuous control problems\")\n # convert to batch so we can run it through the network\n tf_input_state = self.prepare_batch_for_inference(curr_state, 'actor')\n if self.ap.algorithm.use_target_network_for_evaluation:\n actor_network = self.networks['actor'].target_network\n else:\n actor_network = self.networks['actor'].online_network\n\n action_values = actor_network.predict(tf_input_state).squeeze()\n\n action = self.exploration_policy.get_action(action_values)\n\n self.action_signal.add_sample(action)\n\n # get q value\n tf_input_state = self.prepare_batch_for_inference(curr_state, 'critic')\n action_batch = np.expand_dims(action, 0)\n if type(action) != np.ndarray:\n action_batch = np.array([[action]])\n tf_input_state['action'] = action_batch\n q_value = self.networks['critic'].online_network.predict(tf_input_state)[0]\n self.q_values.add_sample(q_value)\n\n action_info = ActionInfo(action=action,\n action_value=q_value)\n\n return action_info"
]
| [
[
"numpy.array",
"numpy.expand_dims",
"numpy.clip"
]
]
|
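A minimal runnable sketch of the numpy calls this row lists (`numpy.array`, `numpy.expand_dims`, `numpy.clip`), using fabricated placeholder values rather than real rl_coach batch objects:

```python
import numpy as np

# Placeholder batch data standing in for rl_coach's Batch accessors (shapes assumed).
rewards = np.array([[1.0], [0.5], [-0.2]])     # batch.rewards(expand_dims=True) -> shape (N, 1)
game_overs = np.array([[0.0], [0.0], [1.0]])   # 1.0 marks a terminal transition
q_st_plus_1 = np.array([[2.0], [1.5], [3.0]])  # critic target-network predictions
discount = 0.99

# Bootstrapped TD targets; terminal states contribute only their reward.
td_targets = rewards + (1.0 - game_overs) * discount * q_st_plus_1

# Optional clipping against value overestimation, as clip_critic_targets does above.
td_targets = np.clip(td_targets, -10.0, 10.0)  # (-10, 10) is an illustrative range

# choose_action turns a single action into a batch of one before querying the critic.
action = np.array([0.3, -0.7])
action_batch = np.expand_dims(action, 0)       # shape (1, action_dim)
print(td_targets.ravel(), action_batch.shape)
```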
mdobson/detroit-property-data-lookup | [
"c52d7126b43ccd0f7df87731cb12e211398691f4"
]
| [
"zillow/zillow_functions.py"
]
| [
"# -*- coding: utf-8 -*-\n# Zillow scraper functions, these are sourced at the top of zillow_runfile.py\n\nimport re as re\nimport numpy as np\nimport time\n#import zipcode\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\n\n#def zipcodes_list(st_items):\n# # If st_items is a single zipcode string.\n# if isinstance(st_items, str):\n# zc_objects = zipcode.islike(st_items)\n# # If st_items is a list of zipcode strings.\n# elif isinstance(st_items, list):\n# zc_objects = [n for i in st_items for n in zipcode.islike(str(i))]\n# else:\n# raise ValueError(\"arg 'st_items' must be of type str or list\")\n# \n# output = [str(i).split(\" \", 1)[1].split(\">\")[0] for i in zc_objects]\n# return(output)\n\ndef init_driver(file_path):\n # Starting maximized fixes https://github.com/ChrisMuir/Zillow/issues/1\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n driver = webdriver.Chrome(executable_path=file_path, \n chrome_options=options)\n driver.wait = WebDriverWait(driver, 10)\n return(driver)\n\n# Helper function for checking for the presence of a web element.\ndef _is_element_displayed(driver, elem_text, elem_type):\n if elem_type == \"class\":\n try:\n out = driver.find_element_by_class_name(elem_text).is_displayed()\n except (NoSuchElementException, TimeoutException):\n out = False\n elif elem_type == \"css\":\n try:\n out = driver.find_element_by_css_selector(elem_text).is_displayed()\n except (NoSuchElementException, TimeoutException):\n out = False\n else:\n raise ValueError(\"arg 'elem_type' must be either 'class' or 'css'\")\n return(out)\n\n# If captcha page is displayed, this function will run indefinitely until the \n# captcha page is no longer displayed (checks for it every 30 seconds).\n# Purpose of the function is to \"pause\" execution of the scraper until the \n# user has manually completed the captcha requirements.\ndef _pause_for_captcha(driver):\n while True:\n time.sleep(30)\n if not _is_element_displayed(driver, \"captcha-container\", \"class\"):\n break\n\n# Check to see if the page is currently stuck on a captcha page. 
If so, pause \n# the scraper until user has manually completed the captcha requirements.\ndef check_for_captcha(driver):\n if _is_element_displayed(driver, \"captcha-container\", \"class\"):\n print(\"\\nCAPTCHA!\\n\"\\\n \"Manually complete the captcha requirements.\\n\"\\\n \"Once that's done, if the program was in the middle of scraping \"\\\n \"(and is still running), it should resume scraping after ~30 seconds.\")\n _pause_for_captcha(driver)\n\ndef navigate_to_website(driver, site):\n driver.get(site)\n # Check to make sure a captcha page is not displayed.\n check_for_captcha(driver)\n\ndef click_buy_button(driver):\n try:\n button = driver.wait.until(EC.element_to_be_clickable(\n (By.CLASS_NAME, \"nav-header\")))\n button.click()\n time.sleep(10)\n except (TimeoutException, NoSuchElementException):\n raise ValueError(\"Clicking the 'Buy' button failed\")\n # Check to make sure a captcha page is not displayed.\n check_for_captcha(driver)\n\ndef enter_search_term(driver, search_term):\n if not isinstance(search_term, str):\n search_term = str(search_term)\n try:\n search_bar = driver.wait.until(EC.presence_of_element_located(\n (By.ID, \"citystatezip\")))\n button = driver.wait.until(EC.element_to_be_clickable(\n (By.CLASS_NAME, \"zsg-icon-searchglass\")))\n search_bar.clear()\n time.sleep(3)\n search_bar.send_keys(search_term)\n time.sleep(3)\n button.click()\n time.sleep(3)\n return(True)\n except (TimeoutException, NoSuchElementException):\n return(False)\n # Check to make sure a captcha page is not displayed.\n check_for_captcha(driver)\n\ndef test_for_no_results(driver):\n # Check to see if the \"zoom out\" msg exists (an indication that no results\n # were returned from the search).\n no_results = _is_element_displayed(driver, \".zoom-out-message\", \"css\")\n # If the zoom-out msg is not displayed, check for \"invalid zip\" msg.\n if not no_results:\n no_results = _is_element_displayed(driver, \"zsg-icon-x-thick\", \"class\")\n # Check to make sure a captcha page is not displayed.\n check_for_captcha(driver)\n return(no_results)\n\ndef get_html(driver):\n output = []\n keep_going = True\n while keep_going:\n # Pull page HTML\n try:\n output.append(driver.page_source)\n except TimeoutException:\n pass\n # Check to see if a \"next page\" link exists.\n keep_going = _is_element_displayed(driver, \"zsg-pagination-next\", \n \"class\")\n if keep_going:\n # Test to ensure the \"updating results\" image isnt displayed. \n # Will try up to 5 times before giving up, with a 5 second wait \n # between each try. \n tries = 5\n cover = _is_element_displayed(driver, \n \"list-loading-message-cover\", \n \"class\")\n while cover and tries > 0:\n time.sleep(5)\n tries -= 1\n cover = _is_element_displayed(driver, \n \"list-loading-message-cover\", \n \"class\")\n # If the \"updating results\" image is confirmed to be gone \n # (cover == False), click next page. 
Otherwise, give up on trying \n # to click thru to the next page of house results, and return the \n # results that have been scraped up to the current page.\n if not cover:\n try:\n driver.wait.until(EC.element_to_be_clickable(\n (By.CLASS_NAME, \"zsg-pagination-next\"))).click()\n time.sleep(3)\n # Check to make sure a captcha page is not displayed.\n check_for_captcha(driver)\n except TimeoutException:\n keep_going = False\n else:\n keep_going = False\n return(output)\n\n# Teardown webdriver.\ndef close_connection(driver):\n driver.quit()\n\n# Split the raw page source into segments, one for each home listing.\ndef get_listings(list_obj):\n output = []\n for i in list_obj:\n htmlSplit = i.split('\" id=\"zpid_')[1:]\n output += htmlSplit\n return(output)\n\n# Set of functions to extract specific data from an input html string.\nclass html_parser:\n def __init__(self, html):\n self.soup = BeautifulSoup(html, \"lxml\")\n self.card_info = self.get_card_info()\n \n # For most listings, card_info will contain info on number of bedrooms, \n # number of bathrooms, square footage, and sometimes price.\n def get_card_info(self):\n try:\n card = self.soup.find(\n \"span\", {\"class\" : \"zsg-photo-card-info\"}).get_text().split(u\" \\xb7 \")\n except (ValueError, AttributeError):\n card = np.nan\n if self._is_empty(card):\n card = np.nan\n return(card)\n \n def get_street_address(self):\n try:\n street = self.soup.find(\n \"span\", {\"itemprop\" : \"streetAddress\"}).get_text().strip()\n except (ValueError, AttributeError):\n street = np.nan\n if self._is_empty(street):\n street = np.nan\n return(street)\n \n def get_city(self):\n try:\n city = self.soup.find(\n \"span\", {\"itemprop\" : \"addressLocality\"}).get_text().strip()\n except (ValueError, AttributeError):\n city = np.nan\n if self._is_empty(city):\n city = np.nan\n return(city)\n \n def get_state(self):\n try:\n state = self.soup.find(\n \"span\", {\"itemprop\" : \"addressRegion\"}).get_text().strip()\n except (ValueError, AttributeError):\n state = np.nan\n if self._is_empty(state):\n state = np.nan\n return(state)\n \n def get_zipcode(self):\n try:\n zipcode = self.soup.find(\n \"span\", {\"itemprop\" : \"postalCode\"}).get_text().strip()\n except (ValueError, AttributeError):\n zipcode = np.nan\n if self._is_empty(zipcode):\n zipcode = np.nan\n return(zipcode)\n \n def get_price(self):\n price = np.nan\n # Look for price within the BeautifulSoup object.\n try:\n price = self.soup.find(\n \"span\", {\"class\" : \"zsg-photo-card-price\"}).get_text().strip()\n except (ValueError, AttributeError):\n if not self._is_empty(self.card_info):\n # If that fails, look for price within card_info.\n try:\n price = [n for n in self.card_info \n if any([\"$\" in n, \"K\" in n, \"k\" in n])]\n if len(price) > 0:\n price = price[0].split(\" \")\n price = [n for n in price if re.search(\"\\d\", n)]\n if len(price[0]) > 0:\n price = price[0]\n else:\n price = np.nan\n else:\n price = np.nan\n except (ValueError, AttributeError):\n price = np.nan\n if not self._is_empty(price):\n # Transformations to the price string.\n price = price.replace(\",\", \"\").replace(\"+\", \"\").replace(\"$\", \"\").lower()\n if \"k\" in price:\n price = price.split(\"k\")[0].strip()\n price = price + \"000\"\n if \"m\" in price:\n price = price.split(\"m\")[0].strip()\n if \".\" not in price:\n price = price + \"000000\"\n else:\n pricelen = len(price.split(\".\")[0]) + 6\n price = price.replace(\".\", \"\")\n price = price + ((pricelen - len(price)) * \"0\")\n if 
self._is_empty(price):\n price = np.nan\n else:\n price = np.nan\n return(price)\n \n def get_sqft(self):\n sqft = [n for n in self.card_info if \"sqft\" in n]\n if len(sqft) > 0:\n try:\n sqft = float(\n sqft[0].split(\"sqft\")[0].strip().replace(\",\", \"\").replace(\"+\", \"\")\n )\n except (ValueError, IndexError):\n sqft = np.nan\n if sqft == 0:\n sqft = np.nan\n else:\n sqft = np.nan\n return(sqft)\n \n def get_bedrooms(self):\n beds = [n for n in self.card_info if any([\"bd\" in n, \"tudio\" in n])]\n if len(beds) > 0:\n beds = beds[0].lower()\n if beds == \"studio\":\n return(0.0)\n try:\n beds = float(beds.split(\"bd\")[0].strip())\n except (ValueError, IndexError):\n beds = np.nan\n else:\n beds = np.nan\n return(beds)\n \n def get_bathrooms(self):\n baths = [n for n in self.card_info if \"ba\" in n]\n if len(baths) > 0:\n try:\n baths = float(baths[0].split(\"ba\")[0].strip())\n except (ValueError, IndexError):\n baths = np.nan\n if baths == 0:\n baths = np.nan\n else:\n baths = np.nan\n return(baths)\n \n def get_days_on_market(self):\n try:\n dom = self.soup.find_all(\n \"ul\", {\"class\" : \"zsg-list_inline zsg-photo-card-badge\"})\n if dom is not None:\n dom = [n.get_text().strip().lower() for n in dom]\n dom = [n for n in dom if \"zillow\" in n]\n if len(dom) > 0:\n dom = int(dom[0].split(\" \")[0])\n else:\n dom = np.nan\n else:\n dom = np.nan\n except (ValueError, AttributeError):\n dom = np.nan\n return(dom)\n \n def get_sale_type(self):\n try:\n sale_type = self.soup.find(\n \"span\", {\"class\" : \"zsg-photo-card-status\"}).get_text().strip()\n except (ValueError, AttributeError):\n sale_type = np.nan\n if self._is_empty(sale_type):\n sale_type = np.nan\n return(sale_type)\n \n def get_url(self):\n # Try to find url in the BeautifulSoup object.\n href = [n[\"href\"] for n in self.soup.find_all(\"a\", href = True)]\n url = [i for i in href if \"homedetails\" in i]\n if len(url) > 0:\n url = \"http://www.zillow.com/homes/for_sale/\" + url[0]\n else:\n # If that fails, contruct the url from the zpid of the listing.\n url = [i for i in href if \"zpid\" in i and \"avorite\" not in i]\n if len(url) > 0:\n zpid = re.findall(r\"\\d{8,10}\", url[0])\n if zpid is not None and len(zpid) > 0:\n url = \"http://www.zillow.com/homes/for_sale/\" \\\n + str(zpid[0]) \\\n + \"_zpid/any_days/globalrelevanceex_sort/29.759534,\" \\\n + \"-95.335321,29.675003,-95.502863_rect/12_zm/\"\n else:\n url = np.nan\n else:\n url = np.nan\n return(url)\n \n # Helper function for testing if an object is \"empty\" or not.\n def _is_empty(self, obj):\n if isinstance(obj, float) and np.isnan(obj):\n return(True)\n if any([len(obj) == 0, obj == \"null\"]):\n return(True)\n else:\n return(False)\n"
]
| [
[
"numpy.isnan"
]
]
|
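A small self-contained sketch of the `np.nan` sentinel pattern the `html_parser` class above relies on; the HTML fragment is fabricated, and `html.parser` is used here instead of `lxml` to avoid the extra dependency:

```python
import numpy as np
from bs4 import BeautifulSoup

# Fabricated listing-card fragment; the class name matches the scraper above.
html = '<span class="zsg-photo-card-price">$250,000</span>'
soup = BeautifulSoup(html, "html.parser")

def get_text_or_nan(soup, css_class):
    # Missing fields become np.nan, mirroring html_parser's getters.
    tag = soup.find("span", {"class": css_class})
    return tag.get_text().strip() if tag is not None else np.nan

price = get_text_or_nan(soup, "zsg-photo-card-price")
status = get_text_or_nan(soup, "zsg-photo-card-status")  # absent -> nan

# np.isnan only accepts floats, hence the isinstance guard in _is_empty above.
print(price, isinstance(status, float) and np.isnan(status))
```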
mickuz/doggofier | [
"3c75df79f3705168b6785d32970430619b531c50"
]
| [
"app/utils.py"
]
| [
"\"\"\"This module contains tools supporting the main application.\"\"\"\n\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom typing import Tuple\nfrom doggofier.models import ResNet50, VGG16\n\n\ndef transform_image(image_path: str) -> torch.Tensor:\n \"\"\"Prepares an image for inference by applying certain transforms.\n\n Parameters\n ----------\n image_path : str\n Path where an image is located.\n\n Returns\n -------\n torch.Tensor\n Image in a form of tensor ready to enter into the model.\n \"\"\"\n transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n image = Image.open(image_path)\n image = transform(image)\n image.unsqueeze_(0)\n\n return image\n\n\ndef load_model(\n model_name: str,\n model_path: str,\n num_classes: int\n) -> torch.nn.Module:\n \"\"\"Loads the model with trained parameters for inference.\n\n Parameters\n ----------\n model_name : str\n Name of the model to be used for inference. It can contain only\n 'resnet50' and 'vgg16' values.\n model_path : str\n A path where model state dictionary is stored.\n num_classes : int\n Number of classes in the dataset.\n\n Returns\n -------\n torch.nn.Module\n Model for inference.\n\n Raises\n ------\n ValueError\n When name of the model has an invalid value.\n \"\"\"\n if model_name == 'resnet50':\n model = ResNet50(num_classes, pretrained=False)\n elif model_name == 'vgg16':\n model = VGG16(num_classes, pretrained=False)\n else:\n raise ValueError('Wrong model!')\n model.load_state_dict(torch.load(model_path))\n model.eval()\n\n return model\n\n\ndef get_prediction(\n image: torch.Tensor,\n model: torch.nn.Module\n) -> Tuple[float, int]:\n \"\"\"Predicts the most likely category with its associated probability.\n\n Parameters\n ----------\n image : torch.Tensor\n Image in a form of tensor ready to enter into the model.\n model : torch.nn.Module\n Model for inference.\n\n Returns\n -------\n Tuple[float, int]\n Predicted category with its probability.\n \"\"\"\n output = model(image)\n log_softmax, prediction = output.max(1)\n probability = torch.exp(log_softmax).item()\n prediction = prediction.item()\n\n return probability, prediction\n"
]
| [
[
"torch.exp",
"torch.load"
]
]
|
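A runnable sketch of the inference flow in this row (transform -> model -> `torch.exp`); the tiny stand-in network and random tensor replace the doggofier models and a real image, and the `torch.load` line is commented out since no checkpoint file exists here:

```python
import torch
import torch.nn as nn
import torchvision.transforms as transforms

# Stand-in classifier emitting log-probabilities, like the models above.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 120), nn.LogSoftmax(dim=1))
# model.load_state_dict(torch.load("model.pth"))  # how load_model restores trained weights

transform = transforms.Compose([
    # Same ImageNet statistics as transform_image above.
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

image = transform(torch.rand(3, 224, 224)).unsqueeze(0)  # random tensor in place of a PIL image
model.eval()
with torch.no_grad():
    log_softmax, prediction = model(image).max(1)

probability = torch.exp(log_softmax).item()  # log-probability -> probability, as in get_prediction
print(probability, prediction.item())
```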
thoward27/udacity-datascience | [
"e605a2c6298eae30bfd28a466cf3a33f45717f3a"
]
| [
"small-projects/submission/tools/tester.py"
]
| [
"#!/usr/bin/pickle\n\n\"\"\" a basic script for importing student's POI identifier,\n and checking the results that they get from it \n \n requires that the algorithm, dataset, and features list\n be written to my_classifier.pkl, my_dataset.pkl, and\n my_feature_list.pkl, respectively\n\n that process should happen at the end of poi_id.py\n\"\"\"\n\nimport pickle\nimport sys\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nsys.path.append(\"../tools/\")\nfrom feature_format import featureFormat, targetFeatureSplit\n\nPERF_FORMAT_STRING = \"\\\n\\tAccuracy: {:>0.{display_precision}f}\\tPrecision: {:>0.{display_precision}f}\\t\\\nRecall: {:>0.{display_precision}f}\\tF1: {:>0.{display_precision}f}\\tF2: {:>0.{display_precision}f}\"\nRESULTS_FORMAT_STRING = \"\\tTotal predictions: {:4d}\\tTrue positives: {:4d}\\tFalse positives: {:4d}\\\n\\tFalse negatives: {:4d}\\tTrue negatives: {:4d}\"\n\ndef test_classifier(clf, dataset, feature_list, folds = 1000):\n data = featureFormat(dataset, feature_list, sort_keys = True)\n labels, features = targetFeatureSplit(data)\n cv = StratifiedShuffleSplit(labels, folds, random_state = 42)\n true_negatives = 0\n false_negatives = 0\n true_positives = 0\n false_positives = 0\n for train_idx, test_idx in cv: \n features_train = []\n features_test = []\n labels_train = []\n labels_test = []\n for ii in train_idx:\n features_train.append( features[ii] )\n labels_train.append( labels[ii] )\n for jj in test_idx:\n features_test.append( features[jj] )\n labels_test.append( labels[jj] )\n \n ### fit the classifier using training set, and test on test set\n clf.fit(features_train, labels_train)\n predictions = clf.predict(features_test)\n for prediction, truth in zip(predictions, labels_test):\n if prediction == 0 and truth == 0:\n true_negatives += 1\n elif prediction == 0 and truth == 1:\n false_negatives += 1\n elif prediction == 1 and truth == 0:\n false_positives += 1\n elif prediction == 1 and truth == 1:\n true_positives += 1\n else:\n print(\"Warning: Found a predicted label not == 0 or 1.\")\n print(\"All predictions should take value 0 or 1.\")\n print(\"Evaluating performance for processed predictions:\")\n break\n try:\n total_predictions = true_negatives + false_negatives + false_positives + true_positives\n accuracy = 1.0*(true_positives + true_negatives)/total_predictions\n precision = 1.0*true_positives/(true_positives+false_positives)\n recall = 1.0*true_positives/(true_positives+false_negatives)\n f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)\n f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)\n print(clf)\n print(PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5))\n print(RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives))\n print(\"\")\n dict_ = {\n 'accuracy': accuracy,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'f2': f2,\n 'total_predictions': total_predictions,\n 'true_positives': true_positives,\n 'false_positives': false_positives,\n 'false_negatives': false_negatives,\n 'true_negatives': true_negatives,\n }\n return dict_\n except:\n print(\"Got a divide by zero when trying out:\", clf)\n print(\"Precision or recall may be undefined due to a lack of true positive predicitons.\")\n\nCLF_PICKLE_FILENAME = \"my_classifier.pkl\"\nDATASET_PICKLE_FILENAME = \"my_dataset.pkl\"\nFEATURE_LIST_FILENAME = \"my_feature_list.pkl\"\n\ndef dump_classifier_and_data(clf, dataset, 
feature_list):\n    with open(CLF_PICKLE_FILENAME, \"wb\") as clf_outfile:\n        pickle.dump(clf, clf_outfile)\n    with open(DATASET_PICKLE_FILENAME, \"wb\") as dataset_outfile:\n        pickle.dump(dataset, dataset_outfile)\n    with open(FEATURE_LIST_FILENAME, \"wb\") as featurelist_outfile:\n        pickle.dump(feature_list, featurelist_outfile)\n\ndef load_classifier_and_data():\n    with open(CLF_PICKLE_FILENAME, \"rb\") as clf_infile:\n        clf = pickle.load(clf_infile)\n    with open(DATASET_PICKLE_FILENAME, \"rb\") as dataset_infile:\n        dataset = pickle.load(dataset_infile)\n    with open(FEATURE_LIST_FILENAME, \"rb\") as featurelist_infile:\n        feature_list = pickle.load(featurelist_infile)\n    return clf, dataset, feature_list\n\ndef main():\n    ### load up student's classifier, dataset, and feature_list\n    clf, dataset, feature_list = load_classifier_and_data()\n    ### Run testing script\n    test_classifier(clf, dataset, feature_list)\n\nif __name__ == '__main__':\n    main()\n"
]
| [
[
"sklearn.cross_validation.StratifiedShuffleSplit"
]
]
|
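`sklearn.cross_validation` was removed in scikit-learn 0.20; a short sketch of the same stratified resampling with the modern `sklearn.model_selection` API, where toy arrays replace the pickled POI dataset and `.split()` replaces iterating the constructor result:

```python
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

# Toy imbalanced labels standing in for the POI dataset (15 negatives, 5 positives).
features = np.random.rand(20, 3)
labels = np.array([0] * 15 + [1] * 5)

# test_classifier uses folds=1000; 5 splits keep this sketch fast.
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, random_state=42)
for train_idx, test_idx in cv.split(features, labels):
    # Stratification preserves the 3:1 class ratio, so every test fold has positives.
    assert labels[test_idx].sum() > 0
```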
sgomezsaez/SCARF-Evaluation | [
"a118039ddd62798ca93b78cb968d6ee8b15ec6f2"
]
| [
"utility_samples/de/uni-stuttgart/iaas/utilityFunctions/cost_VMs.py"
]
| [
"from matplotlib import pyplot as plt\nfrom numpy import *\nimport pylab as pl\nfrom matplotlib import rc\n\nrc('text', usetex=True)\n\n# Creating Plot\n#fig1 = plt.figure(figsize=(10, 8))\n#ax1 = fig1.add_subplot(111)\n\n#x = pl.frange(1,50)\n#log_serie = [1/(e / 2)**(y / 4) for y in x]\n#ax1.plot(x, log_serie, label = r\"u($\\mu$-topology$)\", marker='o', linestyle='--', linewidth=1.0)\n#ax1.set_ylabel('utility', fontsize=30)\n#ax1.set_xlabel(r\"$\\mu$-topology\", fontsize=30)\n#ax1.tick_params(axis='x', labelsize=20)\n#ax1.tick_params(axis='y', labelsize=20)\n#ax1.set_ylim([0,1])\n\n#trendline\n#z = polyfit(x, log_serie, 1)\n#p = poly1d(z)\n\n#ax1.plot(x, p(x), \"r--\", label = 'utility trend')\n#ax1.legend(loc='upper right', prop={'size':20})\n\n\n##### Using Step Function\nfig1 = plt.figure(figsize=(10, 8))\nax1 = fig1.add_subplot(111)\n\nx_threshold = pl.frange(1,400)\nconstant_serie = [1 for i in x_threshold]\n\nx = pl.frange(400,1000)\nx_variable = pl.frange(0,600)\nlog_serie = [math.pow(0.995,i) for i in x_variable]\n\nax1.plot(x_threshold, constant_serie, label = r\"u($\\mu$-topology$)\", linewidth=2.0, color='blue')\nax1.plot(x, log_serie, label = r\"u($\\mu$-topology$)\", linewidth=2.0, color='blue')\nax1.set_ylabel('Utility', fontsize=30)\nax1.set_xlabel(\"Montly Cost (U\\$)\", fontsize=30)\nax1.tick_params(axis='x', labelsize=20)\nax1.tick_params(axis='y', labelsize=20)\nax1.set_ylim([0,1.1])\n\nplt.axvspan(0, 400, color='grey', alpha=0.2)\n\nax1.annotate('Threshold', xy=(100, 0.5), xytext=(100, 0.5), fontsize=30)\n#plt.show()\n\n\nfig1.savefig(\"/Users/gomezsso/Documents/dissertation/Publications/Journal/2016_TOIT/gfx/utility_cost.pdf\", format='pdf')"
]
| [
[
"matplotlib.pyplot.axvspan",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
]
|
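The plot in this row encodes a piecewise utility-of-cost function; a tiny sketch of the same curve as a plain function, with the threshold 400 and decay base 0.995 read off the script above:

```python
import math

def cost_utility(monthly_cost, threshold=400.0, decay=0.995):
    # Full utility up to the threshold, then exponential decay beyond it,
    # matching the constant series and math.pow(0.995, i) series plotted above.
    if monthly_cost <= threshold:
        return 1.0
    return math.pow(decay, monthly_cost - threshold)

for cost in (100, 400, 600, 1000):
    print(cost, round(cost_utility(cost), 3))  # 1.0, 1.0, ~0.367, ~0.049
```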
TimurDzhumakaev/retailhero-recommender-workspace | [
"419576ab47d0cbb1aa2d50e1d1ea17c71c04726d"
]
| [
"src/nn_models.py"
]
| [
"import torch\nfrom torch import nn\n\n\nclass UserModel(nn.Module):\n def __init__(self, num_products, embedding_dim):\n super(UserModel, self).__init__()\n self._model = nn.Sequential(nn.Linear(num_products, embedding_dim),)\n\n def forward(self, x):\n return self._model(x)\n\n\nclass ItemModel(nn.Module):\n def __init__(self, num_products, embedding_dim):\n super(ItemModel, self).__init__()\n self._embeds = nn.Embedding(num_products, embedding_dim)\n\n def forward(self, x):\n return self._embeds(x)\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Embedding"
]
]
|
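A sketch of how the two towers above can be combined at inference time; the dot-product scoring step is an assumption, since this row only defines the modules:

```python
import torch
from torch import nn

num_products, embedding_dim = 1000, 32
user_tower = nn.Linear(num_products, embedding_dim)      # as in UserModel
item_tower = nn.Embedding(num_products, embedding_dim)   # as in ItemModel

user_vec = user_tower(torch.rand(1, num_products))       # e.g. a purchase-count vector
item_vecs = item_tower(torch.arange(num_products))       # all product embeddings, (1000, 32)

# Dot-product relevance scores and a top-5 recommendation list.
scores = item_vecs @ user_vec.squeeze(0)
print(scores.topk(5).indices.tolist())
```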
NiloMendonca/machinelearning-basics | [
"684ac2c6e557415b30e7b4f1f26135b72c30f4a1"
]
| [
"logistic-regression-classifier.py"
]
| [
"# Classification template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, [2, 3]].values\ny = dataset.iloc[:, 4].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Visualising the set results\ndef plotarGrafico(X_dados, y_dados, classifier, tipo_classificacao, nome):\n\tfrom matplotlib.colors import ListedColormap\n\tX_set, y_set = X_dados, y_dados\n\tX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n\t np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n\tplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n\t alpha = 0.75, cmap = ListedColormap(('pink', 'lightgreen')))\n\tplt.xlim(X1.min(), X1.max())\n\tplt.ylim(X2.min(), X2.max())\n\tfor i, j in enumerate(np.unique(y_set)):\n\t plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n\t c = ListedColormap(('red', 'green'))(i), label = j)\n\tplt.title('Classifier '+nome+' ('+tipo_classificacao+' set)')\n\tplt.xlabel('Age')\n\tplt.ylabel('Estimated Salary')\n\tplt.legend()\n\tplt.show()\n\n#############################################\n### Regressão Logistica\n# Fitting classifier to the Training set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train,y_train)\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# Plota os graficos\nplotarGrafico(X_train, y_train, classifier, 'Training', 'Logistic Regression')\nplotarGrafico(X_test, y_test, classifier, 'Test', 'Logistic Regression')\n\n#############################################\n### Regressão ARD bayesiana\n# Fitting classifier to the Training set\nfrom sklearn import linear_model\nclassifier = linear_model.ARDRegression()\nclassifier.fit(X_train,y_train)\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncmARD = confusion_matrix(y_test, y_pred.round())\n\n# Plota os graficos\nplotarGrafico(X_train, y_train, classifier, 'Training', 'ARD Regression')\nplotarGrafico(X_test, y_test, classifier, 'Test', 'ARD Regression')\n\n#############################################\n### Regressão ARD bayesiana\n# Fitting classifier to the Training set\nfrom sklearn import linear_model\nclassifier = linear_model.LinearRegression()\nclassifier.fit(X_train,y_train)\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncmLinear = confusion_matrix(y_test, y_pred.round())\n\n# Plota os graficos\nplotarGrafico(X_train, y_train, classifier, 'Training', 'Linear Regression')\nplotarGrafico(X_test, y_test, classifier, 'Test', 'Linear Regression')\n"
]
| [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.title",
"numpy.unique",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.ListedColormap",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"sklearn.linear_model.ARDRegression"
]
]
|
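The script above reuses `confusion_matrix` for regressors by rounding their continuous predictions; a self-contained sketch of that pattern on synthetic data (the `.clip(0, 1)` guard is an added safety measure, not in the original):

```python
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import confusion_matrix

rng = np.random.RandomState(0)
X = rng.rand(100, 2)
y = (X[:, 0] + X[:, 1] > 1).astype(int)  # synthetic stand-in for the Ads labels

cm = confusion_matrix(y, LogisticRegression(random_state=0).fit(X, y).predict(X))

# Regressor output is continuous, so it is rounded to 0/1 before scoring.
y_reg = LinearRegression().fit(X, y).predict(X)
cm_linear = confusion_matrix(y, y_reg.round().clip(0, 1).astype(int))
print(cm, cm_linear, sep="\n")
```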
mallika2011/IOT-controlling-motor | [
"d79f1977d767c7cb783e847cc9039d9440dba24e"
]
| [
"test.py"
]
| [
"import random\nimport matplotlib.pyplot as plt\nimport math\n\ndate=[]\ntime=[]\nd1=[]\nd2=[]\nrpm=[]\nflag=0\nf = open('data16.txt')\nline = f.readline()\nwhile line:\n lol=line.split('T')\n lee=lol[1].split('\\n')\n lol=lol[0].split()\n # print(lol[`/`])\n if lol[1]==\"20191110\":\n flag=1\n if flag==1:\n time.append(int(lee[0]))\n date.append(int(lol[1]))\n lol=lol[0].split('(')\n lol=lol[1].split(',')\n d1.append(int(lol[0]))\n rpm.append(int(lol[2]))\n lol=lol[1].split(')')\n d2.append(int(lol[0]))\n line = f.readline()\nf.close()\n\n#Plotting distances\nprint(date)\nplt.plot(d2,'-bo')\nplt.xlabel('time') \nplt.ylabel('Distance 2') \nplt.savefig('./distance2.png')\nplt.clf()\n\nplt.plot(d1,'-go')\nplt.xlabel('time') \nplt.ylabel('Distance 1') \nplt.savefig('./distance1.png')\nplt.clf()\n\nplt.plot(rpm,'-yo')\nplt.xlabel('time') \nplt.ylabel('RPM') \nplt.savefig('./rpm_o.png')\nplt.clf()\n\n#Daily traffic\ncurdate=-1\ncount=0\nsum=0\ntraffic=[]\nindex=0\nindices=[]\n\nfor i in range(len(d1)):\n if(curdate==date[i]):\n sum+=d1[i]+d2[i]\n count+=1\n else:\n if(count>0):\n sum/=count\n traffic.append(sum)\n indices.append(index)\n count=0\n sum=0\n index+=1\n curdate=date[i]\n sum+=d1[i]+d2[i]\n count+=1\n\nif(count>0):\n sum/=count\n traffic.append(sum)\n indices.append(index)\n\n# print(traffic,indices)\nplt.bar(indices,traffic, width=0.3,color = ['brown']) \nplt.xlabel('Day') \nplt.ylabel('Traffic') \nplt.savefig('./daily.png')\nplt.clf()\n\n#Daily traffic\ncurdate=20191120\ncount=[0]*24\nsum=0\ntraffic=[0]*24\nindices=[]\n\nfor i in range(len(d1)):\n if curdate==date[i]:\n tm=int(time[i]/10000)\n count[tm]+=1\n traffic[tm]+=d1[i]+d2[i]\n\nfor i in range(24):\n if count[i]>0:\n traffic[i]=traffic[i]/count[i]\n else: \n traffic[i]=0\n indices.append(i)\n\n\nplt.bar(indices,traffic,color = ['orange']) \nplt.xlabel('Hour') \nplt.ylabel('Traffic') \nplt.savefig('./hourly.png')\nplt.clf()\n# plt.show()\n\n"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
]
|
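A compact sketch of the hourly-averaging step from the script above, with fabricated readings in place of the parsed `data16.txt` rows and the `Agg` backend so `savefig` works headlessly (an assumption about the runtime environment):

```python
import matplotlib
matplotlib.use("Agg")  # headless backend so savefig needs no display
import matplotlib.pyplot as plt

# Fabricated (hour, d1 + d2 distance) readings.
readings = [(0, 12), (0, 18), (1, 7), (3, 21), (3, 19), (3, 20)]

totals, counts = [0.0] * 24, [0] * 24
for hour, distance in readings:
    totals[hour] += distance
    counts[hour] += 1

# Average per hour, zero where there were no readings, as in the script.
traffic = [t / c if c else 0 for t, c in zip(totals, counts)]

plt.bar(range(24), traffic, color="orange")
plt.xlabel("Hour")
plt.ylabel("Traffic")
plt.savefig("./hourly_example.png")
plt.clf()
```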
BenAlWalker/pytorch | [
"606a792dce8cb363dea2c0b39420236520a48236"
]
| [
"torch/testing/_internal/common_methods_invocations.py"
]
| [
"from functools import wraps, partial\nfrom itertools import product, chain, islice\nimport itertools\nimport collections\nimport copy\nfrom enum import Enum\nimport operator\nimport random\nimport unittest\nimport math\n\nimport torch\nimport numpy as np\nfrom torch._six import inf\nimport collections.abc\n\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nfrom torch.testing import make_non_contiguous, make_tensor\nfrom torch.testing._internal.common_dtype import (\n _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,\n floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,\n all_types, double_types, empty_types\n)\nfrom torch.testing._internal.common_device_type import \\\n (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride,\n toleranceOverride, tol, has_cusolver)\nfrom torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater\nfrom torch.testing._internal.common_utils import \\\n (is_iterable_of_tensors,\n random_symmetric_matrix, random_symmetric_psd_matrix,\n make_fullrank_matrices_with_distinct_singular_values,\n random_symmetric_pd_matrix, make_symmetric_matrices,\n make_symmetric_pd_matrices, random_square_matrix_of_rank,\n TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,\n torch_to_numpy_dtype_dict, TEST_WITH_ASAN,\n GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like,\n freeze_rng_state)\nimport torch.testing._internal.opinfo_helper as opinfo_helper\n\nfrom distutils.version import LooseVersion\n\nhas_scipy_fft = False\nif TEST_SCIPY:\n import scipy.special\n try:\n import scipy.fft\n has_scipy_fft = True\n except ModuleNotFoundError:\n pass\n\n\n# Reasonable testing sizes for dimensions\nL = 20\nM = 10\nS = 5\n\n# Unique value to distinguish default from anything else\n_NOTHING = object()\n\n\nclass DecorateInfo(object):\n \"\"\"Describes which test, or type of tests, should be wrapped in the given\n decorators when testing an operator. Any test that matches all provided\n arguments will be decorated. 
The decorators will only be applied if the\n active_if argument is True.\"\"\"\n\n __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']\n\n def __init__(self, decorators, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]\n self.cls_name = cls_name\n self.test_name = test_name\n self.device_type = device_type\n self.dtypes = dtypes\n self.active_if = active_if\n\n def is_active(self, cls_name, test_name, device_type, dtype):\n return (\n self.active_if and\n (self.cls_name is None or self.cls_name == cls_name) and\n (self.test_name is None or self.test_name == test_name) and\n (self.device_type is None or self.device_type == device_type) and\n (self.dtypes is None or dtype in self.dtypes)\n )\n\n\nclass SampleInput(object):\n \"\"\"Represents sample inputs to a function.\"\"\"\n\n __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']\n\n def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=\"\"):\n # input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).\n # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).\n # op with TensorList inputs do not support method or inplace variants.\n assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)\n self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input\n self.args = args\n self.kwargs = kwargs if kwargs is not None else {}\n self.output_process_fn_grad = output_process_fn_grad\n self.name = name\n\n # Specifies if `self.input` is broadcasted or not,\n # given that the operator supports broadcasting.\n # This field is used to verify the behavior for inplace variant.\n #\n # If a SampleInput is marked with `broadcasts_input=True`,\n # it is verified that we get a `RuntimerError` with this sample,\n # and inplace variant. Also inplace grad{grad} tests are skipped,\n # for such inputs (as they will error out otherwise).\n self.broadcasts_input = broadcasts_input\n\n def _repr_helper(self, formatter):\n # Helper function to return the details of the SampleInput as `str`\n # It consolidates all the fields of SampleInput and allows,\n # formatting the fields like `input`, `args`, etc with `formatter`\n # callable to customize the representation.\n # Look at `summary` method for example.\n arguments = [\n f'input={formatter(self.input)}',\n f'args={formatter(self.args)}',\n f'kwargs={formatter(self.kwargs)}',\n f'output_process_fn_grad={self.output_process_fn_grad}',\n f'broadcasts_input={self.broadcasts_input}',\n f'name={repr(self.name)}']\n\n return f'SampleInput({\", \".join(a for a in arguments if a is not None)})'\n\n def __repr__(self):\n return self._repr_helper(lambda x: x)\n\n def summary(self):\n # Returns the SampleInput details in a more\n # friendly format.\n # It formats `Tensor` and `TensorList`\n # in a more condensed representation.\n def formatter(arg):\n # Format any instance of `Tensor` (standalone, in list, or in dict)\n # by Tensor[TensorShape]\n # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4]\n if isinstance(arg, torch.Tensor):\n shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')\n return f\"Tensor[{shape}]\"\n elif isinstance(arg, dict):\n return {k: formatter(v) for k, v in arg.items()}\n elif is_iterable_of_tensors(arg):\n return \"TensorList[\" + \", \".join(map(formatter, arg)) + \"]\"\n elif isinstance(arg, (list, tuple)): # Handle list, tuple\n return \"(\" + \",\".join(map(formatter, arg)) + \")\"\n\n return repr(arg)\n\n return self._repr_helper(formatter)\n\n # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput\n def transform(self, f):\n def tt(t):\n def _tt(t):\n return f(t)\n\n if isinstance(t, torch.Tensor):\n return _tt(t)\n elif isinstance(t, torch.dtype):\n return _tt(t)\n elif isinstance(t, list):\n return list(map(tt, t))\n elif isinstance(t, tuple):\n return tuple(map(tt, t))\n elif isinstance(t, dict):\n return {k: tt(v) for k, v in t.items()}\n else:\n return t\n\n sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)\n return (sample_tt_input, tt_args, tt_kwargs)\n\n # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)\n # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them\n # Converts dtypes by remapping them using torch_to_numpy_dtype_dict\n def numpy(self):\n def to_numpy(t):\n if isinstance(t, torch.Tensor):\n return t.detach().cpu().numpy()\n elif isinstance(t, torch.dtype):\n return torch_to_numpy_dtype_dict[t]\n\n return self.transform(to_numpy)\n\n def noncontiguous(self):\n def to_noncontiguous(t):\n if isinstance(t, torch.Tensor):\n return noncontiguous_like(t)\n if isinstance(t, torch.dtype):\n return t\n\n return self.transform(to_noncontiguous)\n\n\nclass ErrorInput(object):\n \"\"\"\n A SampleInput that will cause the operation to throw an error plus information\n about the resulting error.\n \"\"\"\n\n __slots__ = ['sample_input', 'error_type', 'error_regex']\n\n def __init__(self, sample_input, *, error_type, error_regex):\n self.sample_input = sample_input\n self.error_type = error_type\n self.error_regex = error_regex\n\n\nclass AliasInfo(object):\n \"\"\"Class holds alias information. For example, torch.abs ->\n torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_\n \"\"\"\n\n def __init__(self, alias_name):\n self.name = alias_name\n self.op = _getattr_qual(torch, alias_name)\n self.method_variant = getattr(torch.Tensor, alias_name, None)\n self.inplace_variant = getattr(torch.Tensor, alias_name + \"_\", None)\n\n def __call__(self, *args, **kwargs):\n return self.op(*args, **kwargs)\n\n\n# Extension of getattr to support qualified names\n# e.g. 
_getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm\ndef _getattr_qual(obj, name, default=_NOTHING):\n try:\n for path in name.split('.'):\n obj = getattr(obj, path)\n return obj\n except AttributeError:\n if default is not _NOTHING:\n return default\n else:\n raise\n\n\n# test if a tensor is close to an integer\ndef close_to_int(x, eps=0.1):\n if x.is_complex():\n y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))\n else:\n y = torch.abs(torch.frac(x))\n return (y < eps) | (y > (1 - eps))\n\n\nNumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])\n\n\n# Note [OpInfos]\n# ~~~~~~~~~~~~~~\n#\n# The majority of this note was written shortly after the PyTorch 1.9 release.\n# If you notice it's out-of-date or think it could be improved then please\n# file an issue.\n#\n# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)\n# See also: \"Writing Test Templates\" in common_device_type.py to learn how to\n# parametrize a test template using OpInfos.\n# See also: PyTorch's GitHub wiki on running and writing tests\n# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests\n# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py\n#\n# An OpInfo is a collection of metadata related to a PyTorch operator. This\n# metadata is used to generate tests that validate properties of the operator,\n# like if it implements the correct gradient formula.\n#\n# WHY OPINFOS?\n# ~~~~~~~~~~~~\n#\n# OpInfos are principally intended to do three things:\n#\n# 1) to allow systematic testing over all PyTorch's operators\n# 2) to simplify operating testing by autogenerating many tests\n# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test\n# against every PyTorch operator\n#\n# All these goals are still a work in progress. Not every operator has an\n# OpInfo, and some operator tests that could be automatically generated\n# still have to be written manually.\n#\n# It's helpful to understand that OpInfos are both about test simplification and\n# modularity. PyTorch is a complicated framework with many interrelated systems,\n# too many for any one person to keep track of. An OpInfo can be thought of as the\n# interface between an operator implementer and those other systems. Instead of\n# requiring the implementer of torch.foo understand how to test its forward\n# mode AD or NNC support that's typically handled automatically just by\n# defining an OpInfo.\n#\n# It's often surprising to OpInfo writers that just implementing an OpInfo\n# typically can't verify an operator is actually implemented correctly:\n#\n# \"If an OpInfo doesn't validate my op works as expected, what's the point\n# of it?\"\n#\n# But the point of is the above. OpInfos are intended to let you focus on testing\n# the operator logic you're familiar with instead of having to write tests for\n# how the operator interacts with each of PyTorch's many systems.\n#\n# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES\n# validate your op works as expected, but that's only in special\n# cases. See below for details.\n#\n# WHAT'S AN OPINFO?\n# ~~~~~~~~~~~~~~~~~\n#\n# So what is an OpInfo? 
It's a Python class that describes an operator's properties,\n# like which dtypes it supports on the CPU and whether it has any aliases.\n# These properties can be divided into three categories:\n#\n# 1) Metadata describing the operator, like the operator's name and if it\n# \"supports\" the out kwarg.\n# 2) Test directives, like \"skips\" that tell the test suite to skip some\n# tests.\n# 3) A \"sample inputs\" function that generates valid inputs for the operator.\n#\n# OpInfo attributes are described in more detail below.\n#\n# THE SAMPLE INPUTS FUNCTION\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# The \"sample inputs\" function merits special elaboration. This function is\n# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator\n# as a black box. There's no structure for the test to understand or exploit.\n# Without \"sample inputs\" it wouldn't even know how to call the OpInfo's\n# operator. The sample input function saves the day by providing different\n# \"SampleInputs\" that can be used to call the operator. A sample input\n# function should have the following signature:\n#\n# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):\n#\n# And should return an iterable of SampleInputs (see the class description\n# above). Each SampleInput defines an \"input\", \"args\", \"kwargs\", an\n# \"output_process_fn_grad\" function, the \"broadcasts_input\" bool and a\n# \"name\".\n#\n# All the \"sample_inputs\" functions are invoked within a `torch.no_grad()`\n# environment for efficiency and correctness. As such remember to set the the\n# \"requires_grad\" flag on the inputs **after** performing any transformations\n# on them.\n#\n# The \"input\" is the first argument to the operator, or the tensor that\n# the method or inplace variants of the operator should be called on, and\n# should be on the requested device, of the requested dtype, and its\n# requires_grad attribute should be set to the requires_grad argument.\n#\n# \"args\" should contain positional arguments, and \"kwargs\" keyword arguments.\n#\n# \"output_process_fn_grad\" has an interesting name. It's a function that maps\n# the operator's output (when given the input, args, and kwargs) to the\n# portion of the output to gradcheck. For example, consider an operator\n# like torch.linalg.slogdet\n# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).\n# This operator returns a tuple of two tensors, but the first tensor\n# cannot be backwarded through. Its \"output_process_fn_grad\" filters\n# this output tuple to just the second argument, which we can call backward\n# on. Functions that produce a single tensor can ignore this argument.\n#\n# \"broadcasts_input\" is a bool indicated if the SampleInput causes the operator\n# to broadcast the \"input\" argument. This is important for tests to understand\n# because inplace variants of operations throw a runtime error if they\n# would broadcast their input arguments, so tests that work with inplace\n# variants filter SampleInputs that broadcast their input.\n#\n# \"name\" is a string that's just used for debugging. It appears when printing\n# the SampleInput.\n#\n# THE (OPTIONAL) ERROR INPUTS FUNCTION\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# OpInfos may optionally specify \"error inputs\" through an error function. 
If\n# specified test_errors in test_ops.py will call the op with these inputs\n# and validate that the desired error is thrown.\n#\n# Error inputs automate a common testing pattern where multiple inputs are\n# passed to an operation and the errors they thrown are reviewed. Tests\n# written in this style should be ported to the new OpInfo pattern.\n#\n# Error inputs are specified using the ErrorInputs class, which contains\n# a SampleInput (see above) and data about the expected error.\n#\n# OPINFO FILE ORGANIZATION\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# All OpInfos are currently defined in this file. Most OpInfo tests are defined\n# in test_ops.py, but some system-specific tests are defined in those\n# systems' test files, and subclass-specific tests are defined in the test\n# file that corresponds to that subclass (see the below).\n# Expect a reorganization in the future.\n#\n# WHAT'S TESTED?\n# ~~~~~~~~~~~~~~\n#\n# Every OpInfo in the op_db sequence has the following properties validated in\n# test_ops.py:\n#\n# - that its supported dtypes are specified correctly\n# - that the operation produces the same results when called with noncontiguous inputs\n# - that it supports the out= argument properly (if it allows out=),\n# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch\n# - that it works with the conjugate view bit properly\n# - that its function, method, and inplace variants perform the same operation\n# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all\n# do the same thing).\n# - that its inplace variant preserves the input's storage\n# - that its gradient formula is implemented correctly, and that it supports\n# gradgrad and complex grad and gradgrad and forward mode AD properly for\n# the op's function and inplace variants (method variants are skipped\n# to reduce test time).\n# - that the operation performs the same operation when traced or scripted\n# using the jit\n# - that the operation is autodifferentiated by the jit as expected\n# - that the operator's aliases, if any, perform the same operation and that\n# the jit understands the alias\n#\n# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,\n# and test_fx.py. These tests validate that operators work with NNC and FX\n# as expected.\n#\n# For performance, some of the above tests may only run on the first\n# SampleInput returned by an OpInfo's sample input function.\n#\n# In addition to these tests, some subclasses (discussed in the next section)\n# define additional tests.\n#\n# Critically, as mentioned above, what's not tested is that the operator\n# works as expected. When implementing an OpInfo an engineer must still\n# typically write one or more tests validating the operator's behavior.\n#\n# OPINFO (SUB)CLASSES\n# ~~~~~~~~~~~~~~~~~~~\n#\n# In addition to the OpInfo base class there are several specialized OpInfo\n# subclasses. For example, the UnaryUfuncInfo subclass is used for\n# unary elementwise operations. These operations have a common structure\n# that test_unary_ufuncs.py exploits with additional automated testing.\n# The automated testing in test_unary_ufuncs.py is so thorough, comparing\n# the operator to a NumPy reference function on a plethora of values, that\n# just implementing an OpInfo for a unary elementwise operation is often\n# sufficient testing.\n#\n# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a\n# very unique class of operations. 
These OpInfos aren't included in the\n# op_db sequence and have their own tests.\n#\n# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience\n# when writing OpInfos.\n#\n# TESTING A NEW OPERATOR\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# If you're adding a new operator to any of the following namespaces:\n# - torch\n# - torch.fft\n# - torch.linalg,\n# - torch.special\n# - torch.nn.functional\n# then you should typically add an OpInfo for it.\n#\n# As mentioned a couple times above, implementing an OpInfo is not\n# usually sufficient testing (unless the operator is a unary elementwise\n# operator). The OpInfo will only test the properties described in the\n# \"WHAT'S TESTED\" section. It DOES NOT verify that the operator is\n# implemented correctly.\n#\n# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to\n# be consumed by a variety of systems it can be hard to understand how to\n# deal with test failures or how to set the OpInfo metadata properly.\n#\n# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs\n# function must be defined, and the operator's dtypes must be specified.\n# Once that's done you should run the operator's tests in test_ops.py\n# (these can be filtered using the \"-k\" argument in pytest). Tests that\n# fail should provide an error message that describes what to change about\n# your OpInfo. You don't need to worry about changing an OpInfo's default\n# values unless a test yells at you.\n#\n# Similarly, if you're writing a test that consumes OpInfos then it's critical\n# your test provides a clear error message describing what to do when it\n# fails. You should not assume the OpInfo implementer is familiar with your\n# system.\n#\n# If you see a confusing error message while developing an OpInfo then please\n# file an issue describing what happened.\n#\n# This trial-and-error approach to writing an OpInfo can be frustrating,\n# but it's probably necessary as long as OpInfos don't require\n# learning about all the systems that consume them. One thing that can help\n# is the get_supported_dtypes() function defined in opinfo_helper.py. This\n# function can be used to programmatically specify the dtypes an operator\n# supports, and is especially useful if writing an OpInfo on a machine\n# without a CUDA device. See its documentation for more details.\n#\n# THE FUTURE OF OPINFOS AND OPINFO TESTING\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# In the future we expect OpInfo coverage to improve and cover\n# the great majority of PyTorch's (public) operators.\n#\n\n# Classes and methods for the operator database\nclass OpInfo(object):\n \"\"\"Operator information and helper functions for acquiring it.\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # An optional reference function that accepts ndarrays (AKA \"NumPy arrays\").\n # If given, the op will be compared with its reference on each of its sample inputs.\n # the following metadata describes the operator, its variants,\n # and its aliases, if any\n aliases=None, # iterable of aliases, e.g. 
(\"absolute\",) for torch.abs\n variant_test_name='', # additional string to include in the test name\n # this is useful when an op needs multiple OpInfos,\n # like divide does, often because it's really several\n # different ops behind the scenes\n op=None, # the function variant of the operation, populated as torch.<name> if None\n method_variant=_NOTHING, # explicitly specifies the method variant of the operator\n # if _NOTHING (default), the method variant will be autopopulated\n # if None, then the OpInfo specifies no method variant\n inplace_variant=_NOTHING, # explicitly specifies the inplace variant of the operator\n # if _NOTHING (default), the method variant will be autopopulated\n # if None, then the OpInfo specifies no method variant\n\n # the following metadata are test directives for skipping or\n # modifying tests\n skips=tuple(), # information about which tests to skip\n decorators=tuple(), # decorators to apply to generated tests\n\n # the following are pointers to functions to generate certain classes\n # of inputs\n sample_inputs_func=None, # function to generate sample inputs\n error_inputs_func=None, # function to generate inputs that will throw errors\n\n # the following metadata relates to dtype support and is tested for correctness in test_ops.py\n dtypes, # dtypes this function works with on the CPU,\n # inherited by other device types that don't specify their own dtypes\n\n # the following dtypesIf... options override the dtypes value\n # on their respective device types\n dtypesIfCPU=None, # dtypes this function is expected to work with on the CPU,\n # typically unnecessary since it's (now) redundant with the dtypes kwarg above\n dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA\n dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM\n backward_dtypes=None, # backward dtypes this function is expected to work with\n backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU\n backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA\n backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM\n default_test_dtypes=None, # dtypes to test with by default. Tests are instantiated with\n # these dtypes for the op unless otherwise specified.\n # This is helpful in reducing the test matrix.\n # the following metadata describes the operators out= support\n supports_out=True, # whether the op supports the out kwarg\n # defaults to True, if the op does not allow the out kwarg or\n # supports it incorrectly then test_out in test_ops.py should fail\n safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments\n\n # the following metadata relates to autograd support\n supports_autograd=True, # whether the operation supports backward mode AD\n # if true, gradient correctness is tested in test_ops.py\n # using the op's sample inputs\n supports_gradgrad=None, # whether the op supports second order gradients\n # if true, gradgrad correctness is tested in test_ops.py\n # defaults to support_autograd's value\n # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below\n supports_fwgrad_bwgrad=False, # whether the ops supports second order gradients via\n # forward-over-reverse. If True, forward-over-reverse gradgrad correctness\n # is tested. 
If False, test that forward grad is not implemented.\n # Defaults to False.\n supports_inplace_autograd=None, # whether the operation supports inplace autograd\n # if true, tested in test_ops.py\n # defaults to supports_autograd's value\n supports_forward_ad=False, # Whether the operation support forward mode AD\n # If the value is True, we check that the gradients are correct\n # If the value is False, we test that forward grad is not implemented\n gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck\n check_batched_grad=None, # whether to check batched grad when doing gradcheck\n # defaults to support_autograd's value\n check_batched_gradgrad=None, # whether to check batched grad grad when doing gradgradcheck\n # default's to support_gradgrad's value\n check_batched_forward_grad=None, # whether to check batched forward grad when doing gradcheck\n # defaults to the value of `supports_forward_ad`\n check_inplace_batched_forward_grad=None, # whether to check batched forward grad when doing gradcheck\n # defaults to the value of `check_batched_forward_grad`\n gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck\n gradcheck_fast_mode=None, # Whether to use the fast implmentation for gradcheck/gradgradcheck.\n # When set to None, defers to the default value provided by the wrapper\n # function around gradcheck (testing._internal.common_utils.gradcheck)\n\n # the following metadata relates to JIT support and is tested for correctness in test_ops.py\n aten_name=None, # name of the corresponding aten:: operator\n assert_autodiffed=False, # if a op's aten::node is expected to be symbolically autodiffed\n autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a\n # DifferentiableGraph when autodiffed. 
Ex: ['aten::add', 'aten::mm'],\n # default is populated to be ['aten::(name of Python operator)']\n autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups\n # inside of DifferentiableGraphs when this operation is autodiffed.\n # Ex: ['aten::add', 'aten::mm'], defaults to an empty list\n # Note: currently no ops use fusible nodes\n\n # the following metadata relates to sparse support and is used in test_sparse.py\n supports_sparse=False, # whether the op supports sparse inputs\n\n supports_scripting=True, # only run tracing tests\n # the following metadata relates to sparse csr support and is used in test_sparse_csr.py\n supports_sparse_csr=False, # whether the op supports sparse csr inputs\n # the following metadata relates to complex support and is checked in test_ops.py\n test_conjugated_samples=True,\n test_neg_view=True,\n assert_jit_shape_analysis=False, # assert that jit shape analysis fully propagates shape\n ):\n\n dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)\n # Validates the dtypes are generated from the dispatch-related functions\n for dtype_list in dtypes_args:\n assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))\n\n self.name = name\n self.ref = ref\n self.aten_name = aten_name if aten_name is not None else name\n self.variant_test_name = variant_test_name\n\n # Attribute to verify dynamic_dtypes are used.\n self.dynamic_dtypes = any(map(lambda dtypes: isinstance(\n dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))\n\n if self.dynamic_dtypes:\n # Make sure `dtyesIfCUDA` is dynamic, if dynamic dispatch is used for CPU\n # This is because, below we set dtypesIfCUDA to dtypes if they are None.\n assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \\\n (f\"To use dynamic dypes for operator {name}, \"\n \"acquire the dtypes dynamically for argument `dtypesIfCUDA`.\"\n \"This is to ensure that CUDA dtypes are acquired correctly as they\"\n \"differ from CPU dtypes occasionally\")\n\n self.dtypes = set(dtypes)\n\n # NOTE: backward dtypes must be acquired before forward dtypes\n # since they fallback to explicit (not implicit!) 
specifications of\n # forward dtypes\n self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes\n self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (\n backward_dtypes if backward_dtypes is not None\n else dtypesIfCPU if dtypesIfCPU is not None\n else dtypes)\n self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (\n backward_dtypes if backward_dtypes is not None\n else dtypesIfCUDA if dtypesIfCUDA is not None\n else dtypes)\n self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (\n backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None\n else backward_dtypes if backward_dtypes is not None\n else dtypesIfROCM if dtypesIfROCM is not None\n else dtypesIfCUDA if dtypesIfCUDA is not None\n else dtypes)\n\n self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes\n self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes\n self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA\n\n self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None\n\n # NOTE: if the op is unspecified it is assumed to be under the torch namespace\n self.op = op if op else _getattr_qual(torch, self.name)\n method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant\n # attributes like real, imag are not callable\n self.method_variant = method_variant if callable(method_variant) else None\n inplace_name = name + \"_\"\n self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \\\n if inplace_variant is _NOTHING else inplace_variant\n self.operator_variant = getattr(operator, name, None)\n\n self.supports_out = supports_out\n self.safe_casts_outputs = safe_casts_outputs\n\n self.decorators = (*decorators, *skips)\n\n # We run the sampling functions without tracking the gradiends of the creation of inputs\n self.sample_inputs_func = torch.no_grad()(sample_inputs_func)\n self.error_inputs_func = error_inputs_func\n\n self.assert_autodiffed = assert_autodiffed\n self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []\n if autodiff_nonfusible_nodes is None:\n self.autodiff_nonfusible_nodes = ['aten::' + self.name]\n else:\n self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes\n\n # Autograd support\n\n # Autograd flags that don't depend on backward AD\n self.supports_autograd = supports_autograd\n self.supports_forward_ad = supports_forward_ad\n self.gradcheck_fast_mode = gradcheck_fast_mode\n self.gradcheck_wrapper = gradcheck_wrapper\n self.gradcheck_nondet_tol = gradcheck_nondet_tol\n\n # Autograd flags that depend on backward AD only\n # - If setting has been explicitly set, raise error if inconsistent\n if supports_gradgrad is None:\n supports_gradgrad = supports_autograd\n else:\n assert not (supports_gradgrad and not supports_autograd), (\n \"supports_gradgrad refines the part of autograd is supported, so it should \"\n \"not be set if supports_autograd is False\")\n if check_batched_grad is None:\n check_batched_grad = supports_autograd or supports_forward_ad\n else:\n assert not (check_batched_grad and not (supports_autograd or supports_forward_ad)), (\n \"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so \"\n \"it should not be set if supports_autograd is False\")\n if check_batched_gradgrad is None:\n 
\n # Autograd flags that depend on backward AD only\n # - If setting has been explicitly set, raise error if inconsistent\n if supports_gradgrad is None:\n supports_gradgrad = supports_autograd\n else:\n assert not (supports_gradgrad and not supports_autograd), (\n \"supports_gradgrad refines the part of autograd that is supported, so it should \"\n \"not be set if supports_autograd is False\")\n if check_batched_grad is None:\n check_batched_grad = supports_autograd or supports_forward_ad\n else:\n assert not (check_batched_grad and not (supports_autograd or supports_forward_ad)), (\n \"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so \"\n \"it should not be set if both supports_autograd and supports_forward_ad are False\")\n if check_batched_gradgrad is None:\n check_batched_gradgrad = supports_gradgrad\n else:\n assert not (check_batched_gradgrad and not supports_gradgrad), (\n \"check_batched_gradgrad refines the part of autograd that will be checked (by \"\n \"gradgradcheck), so it should not be set if supports_gradgrad \"\n \"is False.\")\n if check_batched_forward_grad is None:\n check_batched_forward_grad = supports_forward_ad\n else:\n assert not (check_batched_forward_grad and not supports_forward_ad), (\n \"check_batched_forward_grad should only be used when supports_forward_ad \"\n \"is True. It is used to disable the test in the specific cases \"\n \"where the op supports forward ad but fails to compute \"\n \"batched forward grad.\")\n\n if check_inplace_batched_forward_grad is None:\n check_inplace_batched_forward_grad = check_batched_forward_grad\n else:\n assert not (check_inplace_batched_forward_grad and not check_batched_forward_grad), (\n \"check_inplace_batched_forward_grad should only be used when check_batched_forward_grad \"\n \"is True. It is used to disable the test in the specific cases \"\n \"where the op supports batched forward grad but fails to compute batched forward \"\n \"grad for the inplace variant of the op.\")\n\n assert not (supports_fwgrad_bwgrad and not supports_autograd), (\n \"supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be \"\n \"True if backward ad is also checked, i.e., supports_autograd should be True.\", self.name)\n\n self.supports_fwgrad_bwgrad = supports_fwgrad_bwgrad\n\n self.supports_gradgrad = supports_gradgrad\n self.check_batched_grad = check_batched_grad\n self.check_batched_gradgrad = check_batched_gradgrad\n self.check_batched_forward_grad = check_batched_forward_grad\n self.check_inplace_batched_forward_grad = check_inplace_batched_forward_grad\n\n # Autograd flags that depend on both forward AD and backward AD\n if supports_inplace_autograd is None:\n supports_inplace_autograd = supports_autograd or supports_forward_ad\n else:\n assert not (supports_inplace_autograd and not supports_autograd and not supports_forward_ad), (\n \"supports_inplace_autograd refines the part of autograd that is supported, so \"\n \"it should not be set if both supports_autograd and supports_forward_ad are False\")\n self.supports_inplace_autograd = supports_inplace_autograd\n\n self.supports_sparse = supports_sparse\n self.supports_sparse_csr = supports_sparse_csr\n\n self.aliases = ()\n if aliases is not None:\n self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]\n\n self.supports_scripting = supports_scripting\n self.assert_jit_shape_analysis = assert_jit_shape_analysis\n\n self.test_conjugated_samples = test_conjugated_samples\n self.test_neg_view = test_neg_view\n
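\n # Minimal illustrative declaration (hedged; not an actual op_db entry, and\n # the reference/dtype helpers here are assumptions for illustration only):\n #\n # OpInfo('hypothetical_op',\n # ref=np.add,\n # dtypes=all_types_and(torch.bool),\n # supports_out=False,\n # sample_inputs_func=sample_inputs_binary_pwise)\n #\n # Entries like this are collected in op_db and consumed by generic suites\n # such as test_ops.py and test_jit.py.\n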
\n def __call__(self, *args, **kwargs):\n \"\"\"Calls the function variant of the operator.\"\"\"\n return self.op(*args, **kwargs)\n\n def get_op(self):\n \"\"\"Returns the function variant of the operator, torch.<op_name>.\"\"\"\n return self.op\n\n def get_method(self):\n \"\"\"Returns the method variant of the operator, torch.Tensor.<op_name>.\n Returns None if the operator has no method variant.\n \"\"\"\n return self.method_variant\n\n def get_inplace(self):\n \"\"\"Returns the inplace variant of the operator, torch.Tensor.<op_name>_.\n Returns None if the operator has no inplace variant.\n \"\"\"\n return self.inplace_variant\n\n def get_operator_variant(self):\n \"\"\"Returns the operator variant of the operator, e.g. operator.neg\n Returns None if the operator has no operator variant.\n \"\"\"\n return self.operator_variant\n\n def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs but with the tensor input or first\n tensor in a sequence input conjugated.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n\n conj_samples = list(samples)\n\n def conjugate(tensor):\n _requires_grad = tensor.requires_grad\n tensor = tensor.conj()\n return tensor.requires_grad_(_requires_grad)\n\n # Iterate over the materialized list; `samples` itself may be a one-shot\n # generator that the list() call above has already consumed.\n for sample in conj_samples:\n # Note: it is assumed that the input here is either a tensor or tensorlist\n if isinstance(sample.input, torch.Tensor):\n sample.input = conjugate(sample.input)\n else:\n sample.input[0] = conjugate(sample.input[0])\n\n return tuple(conj_samples)\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs.\n\n These samples should be sufficient to test the function works correctly\n with autograd, TorchScript, etc.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n\n if 'include_conjugated_inputs' in kwargs and kwargs.get('include_conjugated_inputs'):\n conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)\n samples_list = list(samples)\n samples_list.extend(conj_samples)\n samples = tuple(samples_list)\n\n return samples\n\n def error_inputs(self, device, **kwargs):\n \"\"\"\n Returns an iterable of ErrorInputs.\n \"\"\"\n return self.error_inputs_func(self, device, **kwargs)\n\n def get_decorators(self, test_class, test_name, device, dtype):\n '''Returns the decorators targeting the given test.'''\n result = []\n for decorator in self.decorators:\n if isinstance(decorator, DecorateInfo):\n if decorator.is_active(test_class, test_name, device, dtype):\n result.extend(decorator.decorators)\n else:\n result.append(decorator)\n return result\n\n def supported_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.dtypesIfCPU\n if device_type == 'cuda':\n return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA\n else:\n return self.dtypes\n\n def supported_backward_dtypes(self, device_type):\n if not self.supports_autograd:\n return set()\n\n backward_dtypes = None\n if device_type == 'cpu':\n backward_dtypes = self.backward_dtypesIfCPU\n elif device_type == 'cuda':\n backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA\n else:\n backward_dtypes = self.backward_dtypes\n\n allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)\n return set(allowed_backward_dtypes).intersection(backward_dtypes)\n\n def supports_complex_autograd(self, device_type):\n if device_type == 'cpu':\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)\n if device_type == 'cuda':\n if TEST_WITH_ROCM:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypes)\n\n def supports_dtype(self, dtype, device_type):\n return dtype in self.supported_dtypes(device_type)\n\n def default_test_dtypes(self, device_type):\n \"\"\"Returns the default dtypes used to test this operator on the device.\n\n Equal to the operator's default_test_dtypes filtered to remove dtypes\n not supported by the device.\n \"\"\"\n supported = self.supported_dtypes(device_type)\n return (supported if self._default_test_dtypes is None\n else supported.intersection(self._default_test_dtypes))\n\n @property\n def formatted_name(self):\n \"\"\"Returns a formatted full name for this OpInfo that can be used in test names.\"\"\"\n variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''\n return '{}{}'.format(self.name.replace('.', '_'), variant)\n
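\n\n# For instance (derived directly from the property above): an OpInfo named\n# 'linalg.norm' with variant_test_name='subgradients_at_zero' has\n# formatted_name == 'linalg_norm_subgradients_at_zero'.\n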
\n\ndef _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):\n \"\"\"Generates input tensors for testing reduction operators\"\"\"\n yield make_tensor([], device, dtype, requires_grad=requires_grad)\n yield make_tensor([2], device, dtype, requires_grad=requires_grad)\n yield make_tensor([3, 5], device, dtype, requires_grad=requires_grad)\n yield make_tensor([3, 2, 1, 2], device, dtype, requires_grad=requires_grad)\n\n\ndef _generate_reduction_kwargs(ndim, supports_multiple_dims=True):\n \"\"\"Generates a subset of all valid dim and keepdim kwargs given ndim that\n is appropriate for testing reduction operators.\n \"\"\"\n\n # Test default dim and keepdim\n yield {}\n\n # Test reducing innermost and outermost dimensions\n yield {'dim': 0, 'keepdim': True}\n yield {'dim': -1, 'keepdim': False}\n\n # Test reducing middle dimension\n if ndim > 2:\n yield {'dim': ndim // 2, 'keepdim': True}\n\n if supports_multiple_dims:\n # Test reducing all dimensions\n yield {'dim': tuple(range(ndim)), 'keepdim': False}\n\n # Test reducing both first and last dimensions\n if ndim > 1:\n yield {'dim': (0, -1), 'keepdim': True}\n\n # Test reducing every other dimension starting with the second\n if ndim > 3:\n yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}\n\n\ndef sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for reduction operators.\"\"\"\n\n # TODO(@heitorschueroff) Once all reduction operators are using\n # ReductionOpInfo use op_info.supports_multiple_dims directly.\n supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)\n\n # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo\n # use op_info.generate_args_kwargs directly.\n generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))\n\n inputs: List[SampleInput] = []\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):\n kwargs.update(reduction_kwargs)\n inputs.append(SampleInput(\n t.clone().requires_grad_(requires_grad),\n args=args,\n kwargs=kwargs))\n\n return inputs\n
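\n\n# Worked example (derived from the generators above; not additional test data):\n# for a 4-D input with supports_multiple_dims=True, _generate_reduction_kwargs\n# yields\n# {}, {'dim': 0, 'keepdim': True}, {'dim': -1, 'keepdim': False},\n# {'dim': 2, 'keepdim': True}, {'dim': (0, 1, 2, 3), 'keepdim': False},\n# {'dim': (0, -1), 'keepdim': True}, {'dim': (1, 3), 'keepdim': False}\n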
\n\ndef _generate_masked_op_mask(input_shape, device, **kwargs):\n yield None\n yield make_tensor(input_shape, device, torch.bool, requires_grad=False)\n if len(input_shape) > 2:\n # broadcast last mask dimension:\n yield make_tensor(input_shape[:-1] + (1,), device, torch.bool, requires_grad=False)\n # broadcast middle mask dimension:\n yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], device, torch.bool, requires_grad=False)\n # broadcast first mask dimension:\n yield make_tensor((1,) + input_shape[1:], device, torch.bool, requires_grad=False)\n # mask.ndim < input.ndim\n yield make_tensor(input_shape[1:], device, torch.bool, requires_grad=False)\n # mask.ndim == 1\n yield make_tensor(input_shape[-1:], device, torch.bool, requires_grad=False)\n # masks that require broadcasting of inputs (mask.ndim >\n # input.ndim) will not be supported, however, we may\n # reconsider this if there will be demand on this kind of\n # degenerate cases.\n\n\ndef sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked reduction operators.\n\n A masked reduction operator is a reduction operator with a trailing\n optional mask argument. A mask is a bool tensor with the same\n shape as the input, or a shape that is broadcastable to the input shape.\n \"\"\"\n inputs: List[SampleInput] = []\n kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims\n for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):\n for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):\n sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)\n inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n if (not requires_grad and dtype.is_floating_point and\n sample_input.input.ndim == 2 and mask is not None and\n mask.shape == sample_input.input.shape):\n for v in [torch.inf, -torch.inf, torch.nan]:\n t = sample_input.input.clone()\n t.diagonal()[:] = v\n inputs.append(SampleInput(t.detach().requires_grad_(requires_grad),\n args=sample_input_args,\n kwargs=sample_input_kwargs))\n\n return inputs\n\n\ndef sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked norm.\n \"\"\"\n inputs: List[SampleInput] = []\n for ord in [2.0, 1, float('inf'), float('-inf'), 0]:\n for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()\n inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\n\ndef sample_inputs_masked_var(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked var.\n \"\"\"\n inputs: List[SampleInput] = []\n for unbiased in [False, True]:\n for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):\n if sample_input.args:\n dim = sample_input.args[0]\n sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]\n sample_input_kwargs = sample_input.kwargs.copy()\n else:\n dim = sample_input.kwargs.get('dim')\n sample_input_args = sample_input.args\n sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)\n if requires_grad:\n inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)\n orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),\n dim, keepdim=True, mask=inmask)\n if orig_count.min() <= int(unbiased):\n # Skip samples that lead to singularities in var\n # computation, resulting in nan values both in var and\n # autograd output that test_grad_fn cannot handle\n # correctly.\n continue\n 
inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\n\n# NOTE [Reductions]:\n#\n# For testing purposes, we relax the definition of a reduction operator\n# as defined in the docstring below. We do this to capture operators with\n# a similar API so they can be tested automatically. However...\n#\n# Strictly speaking a reduction operator is an operator that can reduce an\n# array to a single scalar value and that can be computed from the partial\n# result of reducing subarrays. This usually means that the reduction operation\n# should be commutative and associative. This definition is important when it\n# comes to implementation as it determines how a reduction can be parallelized.\n#\n# For example, many summary statistics such as median, mode and quantile cannot\n# be computed from partial results because these are sorting and counting based\n# algorithms that need information that would be lost in the reduced value.\nclass ReductionOpInfo(OpInfo):\n \"\"\"Reduction operator information.\n\n An operator is a reduction operator if it reduces one or more dimensions of\n the input tensor to a single value. Reduction operators must implement the\n following signature:\n\n - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`\n\n ReductionOpInfo tests that reduction operators implement a consistent API.\n Optional features such as reducing over multiple dimensions are captured in\n the optional keyword parameters of the ReductionOpInfo constructor.\n\n If a reduction operator does not yet implement the full required API of\n reduction operators, this should be documented by skipping the failing\n tests rather than adding optional parameters to ReductionOpInfo.\n\n NOTE\n The API for reduction operators has not yet been finalized and some\n requirements may change.\n\n See tests in test/test_reductions.py\n \"\"\"\n\n def __init__(\n self, name, *,\n\n # The identity value for the operator if it has one.\n identity: Optional[Any] = None,\n\n # The nan policy for the operator if it implements one.\n # - propagate: NaN values are propagated to the output\n # - omit: NaN values are discarded during the reduction\n nan_policy: Optional[str] = None,\n\n # Whether the operator supports reducing multiple dimensions.\n supports_multiple_dims: bool = True,\n\n # Whether the operator promotes integral to floating point dtypes.\n promotes_int_to_float: bool = False,\n\n # Whether the operator promotes all integral dtypes to int64.\n promotes_int_to_int64: bool = False,\n\n # If a specific dtype is given, then the operator always returns that\n # dtype irrespective of the input dtype. If None, the operator returns\n # the dtype according to the type promotion rules above.\n result_dtype: Optional[torch.dtype] = None,\n\n # ReductionOpInfo tests generate their own input, dim and keepdim\n # arguments and call this function to generate tuples of extra args and\n # kwargs to use when calling the op. 
This is required for operators that\n # have other required parameters besides the input tensor.\n generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),\n\n # Options from the OpInfo base class\n **kwargs,\n ):\n assert nan_policy in (None, 'propagate', 'omit')\n\n # These are mutually exclusive options\n assert not (result_dtype and promotes_int_to_float)\n assert not (result_dtype and promotes_int_to_int64)\n assert not (promotes_int_to_float and promotes_int_to_int64)\n\n # Default sample_inputs_func for ReductionOpInfo which augments sample\n # inputs from sample_inputs_reduction with the args and kwargs from\n # generate_args_kwargs. This is only used if sample_inputs_func is None.\n def sample_inputs_func(*args, **kwargs):\n kwargs['supports_multiple_dims'] = supports_multiple_dims\n kwargs['generate_args_kwargs'] = generate_args_kwargs\n return sample_inputs_reduction(*args, **kwargs)\n\n # Override OpInfo defaults and call base class __init__\n kwargs.setdefault('inplace_variant', None)\n kwargs.setdefault('sample_inputs_func', sample_inputs_func)\n kwargs.setdefault('default_test_dtypes', (\n torch.uint8, torch.int64, torch.float16, torch.bfloat16, torch.float32, torch.complex64))\n super(ReductionOpInfo, self).__init__(name, **kwargs)\n\n self.identity = identity\n self.nan_policy = nan_policy\n self.supports_multiple_dims = supports_multiple_dims\n self.promotes_int_to_float = promotes_int_to_float\n self.promotes_int_to_int64 = promotes_int_to_int64\n self.result_dtype = result_dtype\n self.generate_args_kwargs = generate_args_kwargs\n\n\ndef sample_inputs_unary(op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs):\n if not op_kwargs:\n op_kwargs = {}\n\n low, high = op_info.domain\n low = low if low is None else low + op_info._domain_eps\n high = high if high is None else high - op_info._domain_eps\n\n if op_info.supports_sparse_csr:\n # Tensors with dim=2 for sparse CSR testing\n yield SampleInput(make_tensor((L, L), device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad), kwargs=op_kwargs)\n else:\n # Creates a 1D, empty, and scalar tensor\n for shape in ((L,), (1, 0, 3), ()):\n yield SampleInput(make_tensor(shape, device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad), kwargs=op_kwargs)\n\n\n# Metadata class for unary \"universal functions (ufuncs)\" that accept a single\n# tensor and have common properties like:\nclass UnaryUfuncInfo(OpInfo):\n \"\"\"Operator information for 'universal unary functions (unary ufuncs).'\n These are functions of a single tensor with common properties like:\n - they are elementwise functions\n - the input shape is the output shape\n - they typically have method and inplace variants\n - they typically support the out kwarg\n - they typically have NumPy or SciPy references\n See NumPy's universal function documentation\n (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details\n about the concept of ufuncs.\n \"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n default_test_dtypes=(\n torch.uint8, torch.long, torch.half, torch.bfloat16,\n torch.float32, torch.cfloat), # dtypes which tests check by default\n domain=(None, None), # the [low, high) domain of the function\n handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)\n handles_extremals=True, # whether the op correctly 
handles extremal values (like inf)\n handles_complex_extremals=True, # whether the op correctly handles complex extremals (like inf - infj)\n supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle\n sample_inputs_func=sample_inputs_unary,\n sample_kwargs=lambda device, dtype, input: ({}, {}),\n supports_sparse=False,\n reference_numerics_filter=None, # Filter for singular input values for test_reference_numerics_normal\n **kwargs):\n super(UnaryUfuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n default_test_dtypes=default_test_dtypes,\n sample_inputs_func=sample_inputs_func,\n supports_sparse=supports_sparse,\n **kwargs)\n self.ref = ref\n self.domain = domain\n self.handles_large_floats = handles_large_floats\n self.handles_extremals = handles_extremals\n self.handles_complex_extremals = handles_complex_extremals\n self.supports_complex_to_float = supports_complex_to_float\n self.reference_numerics_filter = reference_numerics_filter\n\n # test_unary_ufuncs.py generates its own inputs to test the consistency\n # of the operator on sliced tensors, non-contig tensors, etc.\n # `sample_kwargs` is a utility function to provide kwargs\n # along with those inputs if required (eg. clamp).\n # It should return two dictionaries, the first holding kwargs for the\n # torch operator and the second for the reference NumPy operator.\n self.sample_kwargs = sample_kwargs\n\n # Epsilon to ensure grad and gradgrad checks don't test values\n # outside a function's domain.\n self._domain_eps = 1e-5\n\ndef sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n\n args_cases = (\n # Cases with tensor indices.\n (torch.tensor([1, 2, 3]),),\n (torch.tensor(1),),\n (torch.tensor([1, 2, 3]), 1),\n (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),\n # Cases with list of indices.\n ((2, 4),),\n ((2, 4), 1),\n ((2, 4), -1),\n # Cases with integer section.\n (3,),\n (3, 1),\n (3, -1),\n )\n\n for args in args_cases:\n yield SampleInput(make_input((S, S, S)), args=args)\n\n\ndef sample_inputs_linalg_det(op_info, device, dtype, requires_grad, **kwargs):\n kw = dict(device=device, dtype=dtype)\n inputs = [\n make_tensor((S, S), **kw),\n make_tensor((1, 1), **kw), # 1x1\n random_symmetric_matrix(S, **kw), # symmetric\n random_symmetric_psd_matrix(S, **kw), # symmetric_psd\n random_symmetric_pd_matrix(S, **kw), # symmetric_pd\n\n random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null\n random_square_matrix_of_rank(S, 1, **kw), # rank1\n random_square_matrix_of_rank(S, 2, **kw), # rank2\n\n make_fullrank_matrices_with_distinct_singular_values(S, S, **kw), # full rank\n make_tensor((3, 3, S, S), **kw), # batched\n make_tensor((3, 3, 1, 1), **kw), # batched_1x1\n random_symmetric_matrix(S, 3, **kw), # batched_symmetric\n random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd\n random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd\n make_fullrank_matrices_with_distinct_singular_values(S, 3, 3, **kw), # batched fullrank\n make_tensor((0, 0), **kw),\n make_tensor((0, S, S), **kw),\n ]\n for t in inputs:\n t.requires_grad = requires_grad\n return [SampleInput(t) for t in inputs]\n\ndef sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def 
make_singular_matrix_batch_base(size, rank):\n assert size[-1] == size[-2]\n assert rank > 0 and rank < size[-1]\n\n n = size[-1]\n a = make_arg(size[:-2] + (n, rank)) / 10\n b = make_arg(size[:-2] + (rank, n)) / 10\n x = a @ b\n lu, pivs, _ = torch.linalg.lu_factor_ex(x)\n p, l, u = torch.lu_unpack(lu, pivs)\n u_diag_abs = u.diagonal(0, -2, -1).abs()\n u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values\n u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices\n u.diagonal(0, -2, -1).div_(u_diag_abs_largest)\n u[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps\n matrix = p @ l @ u\n\n matrix.requires_grad_(requires_grad)\n return matrix\n\n def sample_generator():\n for batch, size in product(((), (2,), (2, 2)), range(6)):\n shape = batch + (size, size)\n for rank in range(1, size):\n yield make_singular_matrix_batch_base(shape, rank)\n\n return [SampleInput(t) for t in sample_generator()]\n\n\ndef sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs):\n make_fullrank = make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n make_arg_fullrank = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)\n # (<matrix_size>, (<batch_sizes, ...>))\n test_sizes = [\n (1, ()),\n (2, (0,)),\n (2, (2,)),\n ]\n\n for matrix_size, batch_sizes in test_sizes:\n size = batch_sizes + (matrix_size, matrix_size)\n for n in (0, 3, 5):\n yield SampleInput(make_arg(size), args=(n,))\n for n in [-4, -2, -1]:\n yield SampleInput(make_arg_fullrank(*size), args=(n,))\n\ndef sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((6,), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((6, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),\n SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),)\n\ndef sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),\n SampleInput(make_tensor((S, S, 6), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),)\n\ndef error_inputs_hsplit(op_info, device, **kwargs):\n err_msg1 = (\"torch.hsplit requires a tensor with at least 1 dimension, \"\n \"but got a tensor with 0 dimensions!\")\n si1 = SampleInput(make_tensor((),\n dtype=torch.float32,\n device=device),\n args=(0,),)\n err_msg2 = (f\"torch.hsplit attempted to split along dimension 1, \"\n f\"but the size of the dimension {S} \"\n f\"is not divisible by the split_size 0!\")\n si2 = SampleInput(make_tensor((S, S, S),\n dtype=torch.float32,\n device=device),\n args=(0,),)\n return (ErrorInput(si1, error_type=RuntimeError, error_regex=err_msg1),\n ErrorInput(si2, error_type=RuntimeError, error_regex=err_msg2),)\n\ndef error_inputs_vsplit(op_info, device, **kwargs):\n err_msg1 = (\"torch.vsplit requires a tensor with at least 2 dimension, \"\n \"but got a tensor with 1 dimensions!\")\n si1 = SampleInput(make_tensor((S,),\n 
dtype=torch.float32,\n device=device),\n args=(0,),)\n err_msg2 = (f\"torch.vsplit attempted to split along dimension 0, \"\n f\"but the size of the dimension {S} \"\n f\"is not divisible by the split_size 0!\")\n si2 = SampleInput(make_tensor((S, S, S),\n dtype=torch.float32,\n device=device),\n args=(0,),)\n return (ErrorInput(si1, error_type=RuntimeError, error_regex=err_msg1),\n ErrorInput(si2, error_type=RuntimeError, error_regex=err_msg2),)\n\ndef error_inputs_dsplit(op_info, device, **kwargs):\n err_msg1 = (\"torch.dsplit requires a tensor with at least 3 dimension, \"\n \"but got a tensor with 1 dimensions!\")\n si1 = SampleInput(make_tensor((S,),\n dtype=torch.float32,\n device=device),\n args=(0,),)\n err_msg2 = (f\"torch.dsplit attempted to split along dimension 2, \"\n f\"but the size of the dimension {S} \"\n f\"is not divisible by the split_size 0!\")\n si2 = SampleInput(make_tensor((S, S, S),\n dtype=torch.float32,\n device=device),\n args=(0,),)\n return (ErrorInput(si1, error_type=RuntimeError, error_regex=err_msg1),\n ErrorInput(si2, error_type=RuntimeError, error_regex=err_msg2),)\n\ndef sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs):\n # Each test case consists of the sizes in the chain of multiplications\n # e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)\n test_cases = [\n [1, 2, 1],\n [2, 0, 2],\n [0, 2, 2],\n [2, 2, 2, 2],\n [2, 3, 4, 5],\n [5, 4, 0, 2],\n [2, 4, 3, 5, 3, 2]\n ]\n\n result = []\n for sizes in test_cases:\n tensors = []\n for size in zip(sizes[:-1], sizes[1:]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n tensors.append(t)\n result.append(SampleInput(tensors))\n\n return result\n\ndef sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):\n sizes = ((2, 2), (2, 3, 2))\n ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)\n dims = ((-2, -1), (-1, 0))\n\n inputs: List[SampleInput] = []\n for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(ord, dim, keepdim)))\n\n return inputs\n\ndef sample_inputs_linalg_norm(op_info, device, dtype, requires_grad, **kwargs):\n test_sizes = [\n (S,),\n (0,),\n (S, S),\n (0, 0),\n (S, 0),\n (0, S),\n (S, S, S),\n (0, S, S),\n (S, 0, S),\n (0, 0, 0),\n ]\n\n vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)\n matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)\n\n inputs = []\n\n for test_size in test_sizes:\n is_vector_norm = len(test_size) == 1\n is_matrix_norm = len(test_size) == 2\n\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype, low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n keepdim=keepdim)))\n\n if not (is_vector_norm or is_matrix_norm):\n continue\n\n ords = vector_ords if is_vector_norm else matrix_ords\n\n for ord in ords:\n\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim)))\n\n if ord in ['nuc', 'fro']:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n ord=ord,\n keepdim=keepdim,\n dim=(0, 1))))\n return inputs\n\ndef sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad)\n\n # input shape, output shape, output stride, output storage offset\n test_cases = [\n ((1,), (1,), (1,), 0),\n ((3, 3), (2, 2), (1, 2), 0),\n ((3, 3), (2, 2), (1, 2), 1),\n ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),\n ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),\n ]\n\n samples = []\n\n for input_shape, output_shape, stride, storage_offset in test_cases:\n input_t = make_arg(input_shape)\n kwargs = dict(storage_offset=storage_offset)\n samples.append(SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs))\n\n return samples\n\ndef sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):\n inputs = (\n (0,),\n (0, 1),\n (0, 1, 2, 3),\n )\n\n rvals = [1, 2, 4]\n\n products = product(inputs, rvals, [False, True])\n\n samples = []\n\n for input_data, r, with_replacement in products:\n input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad)\n kwargs = dict(r=r, with_replacement=with_replacement)\n\n samples.append(SampleInput(input_t, kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # constructs 1-D tensors with varying number of elements\n a = make_arg((0,))\n b = make_arg((0, 1))\n c = make_arg((0, 1, 2, 3))\n\n samples = []\n\n # sample with only 1 tensor\n samples.append(SampleInput(\n a\n ))\n\n # sample with 2 tensors\n samples.append(SampleInput(\n a,\n args=(b,)\n ))\n\n # sample with 3 tensors\n samples.append(SampleInput(\n a,\n args=(b, c)\n ))\n\n return tuple(samples)\n\ndef sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input_shape, dict of dim and eps\n cases: Tuple[tuple, dict] = ( # type: ignore[assignment]\n ((S, S), {'dim': 1}),\n ((S, 2), {'dim': -1}),\n ((S,), {'dim': 0, 'eps': 0.5}),\n ((), {'dim': 0}),\n ((S, S, M), {'dim': 2}),\n ((S, S), {})\n )\n\n for input_shape, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs)\n # Test for Broadcasting\n yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})\n yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})\n yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})\n\ndef sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n # Ordered as: input shape, kwargs for training, momentum, eps\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}),\n ((3, 2, 4), {'training': False, 'momentum': -1.2}),\n ((3, 1), {'training': True, 'momentum': 0.0}),\n ((0,), {'training': True}),\n ((0,), {'training': False}),\n ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}),\n ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}),\n ((2, 1), {}),\n )\n\n for input_shape, kwargs in cases:\n # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)\n channels = input_shape[1] if len(input_shape) > 1 else 0\n weight = make_arg(channels) if channels > 0 
else None\n bias = make_arg(channels) if channels > 0 else None\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = make_arg_without_requires_grad(channels, low=0)\n\n yield SampleInput(\n make_arg(input_shape),\n args=(\n running_mean,\n running_var,\n weight,\n bias\n ),\n kwargs=kwargs\n )\n\n # Checking for permutations of weights and biases as `None`\n weights = [channels, None, None]\n biases = [None, channels, None]\n is_training = [True, False, False]\n\n for weight_channels, bias_channels, training in zip(weights, biases, is_training):\n # thread the `None` permutations for weight and bias through to the op\n yield SampleInput(\n make_arg(input_shape),\n args=(\n running_mean,\n running_var,\n make_arg(weight_channels) if weight_channels else None,\n make_arg(bias_channels) if bias_channels else None\n ),\n kwargs={'training': training}\n )\n\n # Test case for no optional kwargs\n # running_mean and running_var are required in evaluation mode (training: False) but not in training mode\n yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True})\n\ndef sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n (()),\n ((S, )),\n ((S, S)),\n ((S, M, S))\n )\n\n for shape in cases:\n yield SampleInput(make_arg(shape))\n\ndef sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n (()),\n ((S, )),\n ((S, S)),\n ((S, M, S))\n )\n\n for shape in cases:\n for weight in [-1., 0., 0.8, 1.]:\n weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)\n yield SampleInput(make_arg(shape), kwargs=dict(weight=weight_tensor))\n\n if len(shape) >= 2:\n channel_size = shape[1]\n yield SampleInput(make_arg(shape), kwargs=dict(weight=make_arg((channel_size,))))\n\ndef sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (2,), '2'),\n ((S, S), (0,), '0'),\n ((S, S), (0.5,), '0_5'),\n ((S, S), (1,), '1'),\n ((S, S), (3,), '3'),\n ((S, S), (-1,), 'neg_1'),\n ((S, S), (-2,), 'neg_2'),\n ((S, S), (-0.5,), 'neg_0_5'),\n ((S, S), (-1.5,), 'neg_1_5'),\n )\n\n cases_nonzero_input = (\n ((S, S, S), (1.5,), '1_5_default'),\n ((S, S, S), (1.5, 1), '1_5_dim'),\n ((S, S, S), (1.5, -1), '1_5_neg_dim'),\n ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),\n ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),\n )\n\n cases_negdim_base = (\n ((S, S), (-2, 1,), 'neg_2_2_dim'),\n ((S, S), (-1, 1,), 'neg_1_2_dim'),\n ((S, S), (0, 1,), '0_2_dim'),\n ((S, S), (1, 1,), '1_2_dim'),\n ((S, S), (2, 1,), '2_2_dim'),\n ((S, S), (3, 1,), '3_2_dim'),\n ((S, S, S), (2, 1), '2_dim'),\n ((S, S, S), (3, 1), '3_dim'),\n ((S, S, S), (2, 1, True), 'keepdim_2_dim'),\n ((S, S, S), (3, 1, True), 'keepdim_3_dim'),\n ((), (2, 0), '2_dim_scalar'),\n ((), (3, 0), '3_dim_scalar'),\n ((), (2, 0, True), 'keepdim_2_dim_scalar'),\n ((), (3, 0, True), 'keepdim_3_dim_scalar'),\n )\n\n cases_negdim = []\n for case in cases_negdim_base:\n cases_negdim.append(case)\n shape, args, name = case\n new_args = copy.deepcopy(list(args))\n new_args[1] *= -1\n cases_negdim.append((shape, tuple(new_args), name.replace(\"_dim\", \"_neg_dim\")))\n\n for shape, args, name in itertools.chain(cases, cases_negdim):\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n for shape, args, name in cases_nonzero_input:\n yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)\n
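\n\n# For example (derived from the loop above): the entry ((S, S), (2, 1,), '2_2_dim')\n# in cases_negdim_base also generates ((S, S), (2, -1), '2_2_neg_dim'), so each\n# positive-dim case is exercised with a matching negative-dim twin.\n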
\n\ndef sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (), 'default'),\n ((S, S), ('fro',), 'fro_default'),\n ((S, S), ('fro', [0, 1],), 'fro'),\n )\n\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n\ndef sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), ('nuc',), 'nuc'),\n ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),\n )\n\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n\ndef sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (-inf,), '-inf'),\n ((S, S), (inf,), 'inf'),\n ((S, S), (inf, 1,), 'inf_2_dim'),\n ((S, S), (inf, -1,), 'inf_2_neg_dim'),\n )\n\n for shape, args, name in cases:\n yield SampleInput(make_arg(shape), args=args, name=name)\n\n\ndef sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):\n size_1D = (S,)\n size_2D = (2, 2)\n\n test_cases = [\n # input size, ord, dim args\n (size_1D, 2, None),\n (size_1D, 2, (0,)),\n (size_1D, 0, None),\n (size_1D, 0, (0,)),\n (size_1D, 0.9, None),\n (size_1D, 0.9, (0,)),\n (size_1D, 1, None),\n (size_1D, 1, (0,)),\n (size_1D, -2.1, None),\n (size_1D, -2.1, (0,)),\n (size_1D, inf, None),\n (size_1D, inf, (0,)),\n (size_1D, -inf, None),\n (size_1D, -inf, (0,)),\n\n (size_2D, 2, None),\n (size_2D, 2, (0,)),\n (size_2D, 2, (-1, 0)),\n (size_2D, 0, None),\n (size_2D, 0, (0,)),\n (size_2D, 0, (-1, 0)),\n (size_2D, 0.9, None),\n (size_2D, 0.9, (0,)),\n (size_2D, 0.9, (-1, 0)),\n (size_2D, 1, None),\n (size_2D, 1, (0,)),\n (size_2D, 1, (-1, 0)),\n (size_2D, -2.1, None),\n (size_2D, -2.1, (0,)),\n (size_2D, -2.1, (-1, 0)),\n (size_2D, inf, None),\n (size_2D, inf, (0,)),\n (size_2D, inf, (-1, 0)),\n (size_2D, -inf, None),\n (size_2D, -inf, (0,)),\n (size_2D, -inf, (-1, 0)),\n ]\n inputs = []\n\n for test_size, ord, dim in test_cases:\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim,\n dim=dim)))\n\n return inputs\n\n\n# Metadata class for binary \"universal functions (ufuncs)\" that accept two\n# tensors and have common properties\nclass BinaryUfuncInfo(OpInfo):\n \"\"\"Operator information for 'universal binary functions (binary ufuncs).'\n These are functions of two tensors with common properties like:\n - they are elementwise functions\n - the output shape is determined by broadcasting the input shapes\n - they typically have method and inplace variants\n - they typically support the out kwarg\n - they typically have NumPy or SciPy references\n See NumPy's universal function documentation\n (https://numpy.org/doc/stable/reference/ufuncs.html) for more details\n about the concept of ufuncs.\n \"\"\"\n def __init__(self, name, *,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float\n always_returns_bool=False, # Set to true if the op always returns bool tensors\n **kwargs):\n super().__init__(name, 
**kwargs)\n\n # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.\n if lhs_make_tensor_kwargs is None:\n lhs_make_tensor_kwargs = {}\n self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs\n\n if rhs_make_tensor_kwargs is None:\n rhs_make_tensor_kwargs = {}\n self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs\n\n self.promotes_int_to_float = promotes_int_to_float\n self.always_returns_bool = always_returns_bool\n\ndef _resolve_binary_pwise_kwargs(\n op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None\n):\n \"\"\"Resolves default values for :func:`sample_inputs_binary_pwise`.\n\n By default :attr:`op_kwargs`, :attr:`lhs_make_tensor_kwargs`, and :attr:`rhs_make_tensor_kwargs` are just empty\n dictionaries. In case :attr:`op_info` is a :class:`BinaryUfuncInfo`, :attr:`BinaryUfuncInfo.lhs_make_tensor_kwargs`\n and :attr:`BinaryUfuncInfo.rhs_make_tensor_kwargs` will be used as defaults.\n \"\"\"\n if op_kwargs is None:\n op_kwargs = {}\n if lhs_make_tensor_kwargs is None:\n lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}\n if rhs_make_tensor_kwargs is None:\n rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}\n\n return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs\n\n\ndef sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n *,\n python_scalars=False,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n scalar = make_arg((), **rhs_make_tensor_kwargs)\n if python_scalars:\n scalar = scalar.item() # type: ignore[assignment]\n\n shapes = [\n ((), scalar),\n ((S,), scalar),\n ((S, 1), (S,)),\n ((M, S), scalar),\n ((S, M, S), (M, S)),\n ((S, M, S), (S, M, S)),\n ((M, 1, S), (M, S)),\n ((M, 1, S), (1, M, S)),\n ((0, 1, 3), (0, 10, 3))\n ]\n\n for shape_lhs, shape_rhs_or_scalar in shapes:\n lhs = make_arg(shape_lhs, **lhs_make_tensor_kwargs)\n if isinstance(shape_rhs_or_scalar, tuple):\n # shape\n rhs = make_arg(shape_rhs_or_scalar, **rhs_make_tensor_kwargs)\n broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs\n else:\n # scalar\n rhs = shape_rhs_or_scalar # type: ignore[assignment]\n broadcasts_input = False\n\n yield SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input)\n\n\ndef sample_inputs_add_sub(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=False,\n alpha=1,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n yield from sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=python_scalars,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n **kwargs,\n )\n\n lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, 
**lhs_make_tensor_kwargs)\n rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)\n yield SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False)\n\ndef sample_inputs_isclose(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=False,\n op_kwargs=None,\n lhs_make_tensor_kwargs=None,\n rhs_make_tensor_kwargs=None,\n **kwargs,\n):\n op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(\n op_info,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n )\n\n yield from sample_inputs_binary_pwise(\n op_info,\n device,\n dtype,\n requires_grad,\n python_scalars=python_scalars,\n op_kwargs=op_kwargs,\n lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,\n rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,\n **kwargs,\n )\n\n rtols = [0., 1e-7]\n atols = [0., 1e-7]\n equal_nans = [False, True]\n\n products = product(rtols, atols, equal_nans)\n\n for rtol, atol, equal_nan in products:\n lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)\n rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)\n\n yield SampleInput(lhs, args=(rhs,),\n kwargs=dict(op_kwargs, rtol=rtol, atol=atol, equal_nan=equal_nan))\n\ndef sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n return (SampleInput(make_arg((1, 2))),\n SampleInput(make_arg((2,))),\n SampleInput(make_arg(())))\n\n\ndef sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_arg_conj(size):\n return make_arg(size).conj().requires_grad_(requires_grad)\n\n first_shape, second_shape = (S, M), (M, S)\n\n yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),))\n\n if dtype.is_complex:\n yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),))\n\n\ndef sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):\n alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)\n beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)\n tests_list = [\n ((2, 3), (2, 2), (2, 3), False)\n ]\n tests_with_lhs_broadcasting = [\n ((1,), (2, 2), (2, 3), True),\n ((), (2, 2), (2, 3), True)\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n\n sample_inputs = []\n\n for shape_a, shape_b, shape_c, broadcasts_input in test_cases:\n sample_inputs.append(\n SampleInput(\n make_tensor(shape_a, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape_b, device, dtype,\n requires_grad=requires_grad),\n make_tensor(shape_c, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},\n broadcasts_input=broadcasts_input))\n\n if dtype.is_complex:\n shape = (3, 3)\n sample_inputs.append(\n SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape, device, dtype).mH.requires_grad_(requires_grad),\n make_tensor(shape, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},))\n sample_inputs.append(\n SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor(shape, device, dtype,\n 
requires_grad=requires_grad),\n make_tensor(shape, device, dtype).mH.requires_grad_(requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},))\n return sample_inputs\n\ndef sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_arg_conj(size):\n return make_arg(size).conj().requires_grad_(requires_grad)\n\n sample_inputs = []\n sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg((S, )),)))\n if dtype.is_complex:\n # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)\n # is tested in test_conj_view (which tests operations with only conjugated input tensor\n # -- not conjugated arg tensors)\n sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg_conj((S, )),)))\n return sample_inputs\n\ndef sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n test_cases = (((S,), (S, M), (M,), 1, 1, False),\n ((S,), (S, M), (M,), 0.2, 0.6, False),\n )\n\n test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),\n ((1,), (S, M), (M,), 0.2, 0.6, True),\n ((), (S, M), (M,), 1, 1, True),\n ((), (S, M), (M,), 0.2, 0.6, True),\n )\n\n cases = test_cases + test_cases_with_broadcast\n\n # addmv computes: beta * input + alpha * (mat @ vec)\n for size, mat, vec, beta, alpha, broadcasts_input in cases:\n yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)\n\ndef sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting\n test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n\n for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:\n if dtype.is_complex:\n beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)\n yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),\n kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)\n yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)\n\ndef sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [(((S, S), (S, S), (S, S)), False),\n (((S, S), (S, 1), (1, S)), False),\n (((1,), (S, S, 1), (1, S)), True),\n (((), (), ()), 
False),\n (((S, S), (), ()), True),\n (((), (S, S, 1), (1, S)), True)\n ]\n\n sample_inputs = []\n for input_args, broadcasts_input in test_cases:\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(\n args[0],\n args=args[1:],\n broadcasts_input=broadcasts_input))\n\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(\n args[0],\n args=args[1:],\n kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n sample_inputs = []\n for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:\n args = (make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch1_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch2_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(\n args[0].clone().requires_grad_(requires_grad),\n args=(args[1].clone().requires_grad_(requires_grad),\n args[2].clone().requires_grad_(requires_grad)),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),\n broadcasts_input=broadcasts_input))\n\n if dtype.is_complex:\n shapes = [(S, S, S), (S, M, S), (S, S, M)]\n args = (make_tensor(shapes[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(shapes[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(shapes[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n sample_inputs.append(\n SampleInput(\n args[0].transpose_(-1, 1),\n args=(args[1].transpose(-1, 1).conj().requires_grad_(requires_grad),\n args[2].transpose(-1, 1).conj().requires_grad_(requires_grad)),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):\n input1 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n input2 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n broadcasts_input=True)\n\n if dtype.is_complex:\n alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j\n elif dtype.is_floating_point:\n alpha, beta = 0.2, 0.6\n else:\n alpha, beta = 2, 3\n\n input3 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n input4 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha),\n broadcasts_input=True)\n\n return (input1, input2, input3, input4)\n\ndef sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\n\ndef sample_inputs_xlog1py(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # same shape\n yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))\n # rhs broadcast\n yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))\n # all zero `x`\n x = make_arg((S, S))\n x.fill_(0)\n yield SampleInput(x, args=(make_arg((S, S), low=-1),))\n\n # randomly zero-masked `x`\n x = make_arg((S, S))\n y = make_arg((S, S), low=-1)\n x[torch.rand(x.shape) > 0.5] = 0\n yield SampleInput(x, args=(y,))\n\n # Scalar x\n # `input` has to be a tensor\n # yield SampleInput(0, args=(make_arg((S, S), low=-1),))\n # yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))\n\n # Scalar y\n yield SampleInput(make_arg((S, S)), args=(-0.5,))\n yield SampleInput(make_arg((S, S)), args=(1.2,))\n\ndef sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = ((), (S, S, S), (S,))\n\n for shape in cases:\n yield(SampleInput(make_arg(shape)))\n\n\ndef sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs):\n inputs = (\n ((), (0,), True),\n ((S, S), (1,), True),\n ((S, S), (1,), False)\n )\n samples = []\n\n for shape, dim, keepdim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim, keepdim)))\n\n return tuple(samples)\n\ndef sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):\n inputs = [\n ((), {}),\n ((S, S), {}),\n ((0, S, 0), {}),\n ((S,), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. 
We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), {'dtype': torch.double}),\n ((S,), {'device': 'cpu'}),\n ((S,), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), {'device': 'cuda'}))\n\n samples = []\n for shape, kwargs in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, kwargs=kwargs))\n\n return tuple(samples)\n\ndef get_independent_tensor(tensor):\n return tensor.clone().requires_grad_(tensor.requires_grad)\n\ndef sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):\n samples = []\n low = 2\n high = 10\n\n for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):\n # With high\n samples.append(SampleInput(\n sample.input,\n args=(high,) + sample.args,\n kwargs=sample.kwargs))\n # With low and high\n samples.append(SampleInput(\n get_independent_tensor(sample.input),\n args=(low, high,) + sample.args,\n kwargs=sample.kwargs))\n return tuple(samples)\n\ndef sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):\n inputs = [\n ((), (), {}),\n ((S, S), (2, 0), {}),\n ((0, S, 0), (3, 2, 2), {}),\n ((S,), (2, 3), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), (10,), {'dtype': torch.double}),\n ((S,), (1, 1, 12), {'device': 'cpu'}),\n ((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), (7, 2), {'device': 'cuda'}))\n\n samples = []\n for input_shape, output_shape, kwargs in inputs:\n t = make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):\n def get_val(dtype):\n return make_tensor([], 'cpu', dtype).item()\n\n samples = []\n for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):\n # The scalar we are passing to new_full must be the same dtype\n # as the one of the resulting tensor\n use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype\n samples.append(SampleInput(\n sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs))\n return tuple(samples)\n\ndef sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):\n def get_val(dtype):\n return make_tensor([], 'cpu', dtype).item()\n\n inputs = [\n ((), get_val(dtype), {}),\n ((S, S), get_val(dtype), {}),\n ((0, S, 0), get_val(dtype), {}),\n ((S,), get_val(dtype), {'dtype': dtype, 'device': device}),\n # Hard-code some dtypes/devices. 
We want to test cases where the\n # (dtype, device) is different from the input's (dtype, device)\n ((S,), get_val(torch.double), {'dtype': torch.double}),\n ((S,), get_val(dtype), {'device': 'cpu'}),\n ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),\n ]\n if torch.cuda.is_available():\n inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))\n\n samples = []\n for shape, fill_value, kwargs in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs))\n\n return tuple(samples)\n\ndef sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs):\n cases = [\n ([3], 3, dict()),\n ([10], 3, dict()),\n ([3, 10], 3, dict()),\n ([3], 3, dict(replacement=False)),\n ([3], 3, dict(replacement=True)),\n ([3, 4], 4, dict(replacement=True)),\n ([3, 4], 4, dict(replacement=False)),\n ]\n\n samples = []\n for shape, num_samples, kwargs in cases:\n t = make_tensor(shape, device, dtype,\n low=0, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(num_samples,), kwargs=kwargs))\n return tuple(samples)\n\ndef sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs):\n def get_value_or_make_tensor(value_or_shape):\n if isinstance(value_or_shape, list):\n return make_tensor(value_or_shape, device, dtype,\n low=0, high=None,\n requires_grad=requires_grad)\n return value_or_shape\n\n samples = []\n for value_or_mean_shape, value_or_std_shape, kwargs in cases:\n mean = get_value_or_make_tensor(value_or_mean_shape)\n std = get_value_or_make_tensor(value_or_std_shape)\n samples.append(SampleInput(mean, args=(std,), kwargs=kwargs))\n return tuple(samples)\n\ndef sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs):\n # value_or_size, value_or_size, kwargs\n cases = [\n ([], [], {}),\n ([3], [3], {}),\n ([3, 4, 2], [3, 4, 2], {}),\n ([2, 3], 1.1, {}),\n ]\n\n return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs)\n\ndef sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs):\n cases = [\n ([3, 4], 0.3, {}),\n ]\n return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs)\n\ndef sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs):\n shapes = [\n [3],\n [],\n [0, 3],\n [2, 3, 4],\n ]\n\n samples = []\n for shape in shapes:\n t = make_tensor(shape, device, dtype,\n low=0, high=1,\n requires_grad=requires_grad)\n samples.append(SampleInput(t))\n return tuple(samples)\n\ndef sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs):\n inputs = (\n ((S, S, S), 0),\n ((S, S, S), 1),\n ((), 0),\n )\n samples = []\n\n for large_number in (True, False):\n for shape, dim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n\n if large_number and t.dim() > 0:\n t[0] = 10000\n samples.append(SampleInput(t, args=(dim,)))\n\n return tuple(samples)\n\ndef sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):\n return (SampleInput((make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))),)\n\n\ndef sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n cases = (((S, S, S), (2, 1, 0.5)),\n ((S, S, S), (2, -1, 0.5)),\n ((S, S, S), (1, 2, 3)),\n ((S, S, S), (float('inf'), 2, 0.5)),\n )\n\n 
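# Each args triple above is (p, dim, maxnorm): torch.renorm rescales every slice\n    # taken along `dim` whose p-norm exceeds maxnorm so that its norm becomes\n    # maxnorm, leaving the remaining slices untouched. Minimal usage sketch\n    # (illustrative, not part of the original suite):\n    #   t = torch.arange(9.).view(3, 3)\n    #   t.renorm(p=2, dim=0, maxnorm=5)  # rows with 2-norm > 5 are scaled to norm 5\n    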
for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((1, 2, 3), (-1, -2)),\n ((1, 2, 3), (-1, 2)),\n ((1, 2, 3), (1, -2)),\n ((1, 2, 3), (1, 2)),\n ((), (0, 0)),\n ((1, ), (0, 0)),\n ((M, M), (0, 1)),\n ((S, S, S), (2, 0)), )\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\ndef sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S))\n return (SampleInput(make_arg(shape)) for shape in shapes)\n\ndef sample_inputs_T(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n shapes = ((), (M, M))\n return (SampleInput(make_arg(shape)) for shape in shapes)\n\n\ndef sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates invertible inputs for linear algebra ops\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n make_fn = make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 0]\n\n for batch, n in product(batches, ns):\n yield SampleInput(make_arg(*batch, n, n))\n\ndef sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to\n test the backward method of `linalg_pinv`. 
That way we always preserve the rank of the\n    input no matter the perturbations applied to it by the gradcheck.\n    Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.\n    \"\"\"\n    batches = [(), (0, ), (2, ), (1, 1)]\n    # a size of at least 30 is required to cause failures for the previous implicit implementation\n    # of the pinv's backward method, though it is slow.\n    size = [0, 3, 50]\n\n    for batch, m, n in product(batches, size, size):\n        for k in range(min(3, min(m, n))):\n            # Note that by making the columns of `a` and `b` orthonormal we make sure that\n            # the product matrix `a @ b.t()` has condition number 1 when restricted to its image\n            a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)\n            b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)\n            yield SampleInput(a, args=(b,))\n\n\ndef sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n).\n    Their matrix product can be used to generate a tensor of shape (*, m, n) of rank k.\n    \"\"\"\n\n    batches = [(), (0, ), (2, ), (1, 1)]\n    size = [1, 5, 10]\n\n    for batch, m, n in product(batches, size, size):\n        for k in range(min(3, min(m, n))):\n            a = make_tensor((*batch, m, k), device, dtype, requires_grad=requires_grad)\n            b = make_tensor((*batch, n, k), device, dtype, requires_grad=requires_grad)\n            yield SampleInput(a, args=(b,), kwargs=kwargs)\n\n\ndef clone_sample(sample, **kwargs):\n    \"\"\"\n    Given a SampleInput, this function analyzes its input, args and kwargs,\n    and produces a copy with each non-Tensor entry copied by reference\n    and each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)`.\n    \"\"\"\n\n    def clone_tensor(t):\n        if isinstance(t, torch.Tensor):\n            return t.clone().requires_grad_(t.requires_grad)\n        else:\n            return t\n\n    sample_kwargs = kwargs if kwargs else sample.kwargs\n\n    return SampleInput(\n        clone_tensor(sample.input),\n        args=tuple(map(clone_tensor, sample.args)),\n        kwargs=dict(((k, clone_tensor(v)) for k, v in sample_kwargs.items()))\n    )\n\n\ndef sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):\n    for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs):\n        *batch, m, k = sample.input.shape\n        *_, n, _ = sample.args[0].shape\n\n        # NOTE: since svd_lowrank relies on a non-rank-revealing SVD,\n        # it inherits the problem of unstable behavior with repeated\n        # singular values, including zeros.\n        # Since we want to avoid (repeated) zeros as singular values,\n        # we can only use k for q.\n        # This issue could be resolved by using a rank-revealing SVD\n        # which does not include \"zero\" singular values.\n        op_kwargs = {\n            'q': k,\n            'M': None\n        }\n\n        # without M specified\n        yield clone_sample(sample, **op_kwargs)\n\n        # now with M\n        # TODO: fix bug in the documentation for svd_lowrank:\n        # M has to be (*, m, n), and not (*, 1, n) as written\n        # in the documentation\n        op_kwargs['M'] = make_tensor((*batch, m, n), device, dtype, requires_grad=requires_grad)\n        yield clone_sample(sample, **op_kwargs)\n\ndef chunk_iter(iterable, size):\n    it = iter(iterable)\n    while True:\n        chunk = tuple(islice(it, size))\n        if not chunk:\n            break\n        yield chunk\n\ndef sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):\n    # we reuse samples from svd_lowrank which come in group of two with\n    # 
kwarg['M'] = None and with kwarg['M'] = <some tensor>\n samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs)\n for s1, s2 in chunk_iter(samples, 2):\n del s1.kwargs['M']\n del s2.kwargs['M']\n s1.kwargs['center'] = False\n s2.kwargs['center'] = True\n yield s1\n yield s2\n\ndef sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # autograd is not supported for inputs with zero number of elements\n shapes = ((S, S),\n (2, S, S),\n (2, 1, S, S), )\n\n for shape in shapes:\n yield SampleInput(make_arg(shape))\n\ndef np_sinc_with_fp16_as_fp32(x):\n # Wraps numpy's sinc function so that fp16 values are promoted to fp32\n # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated\n # at 0 for fp16.\n if x.dtype == np.float16:\n return np.sinc(x.astype(np.float32))\n else:\n return np.sinc(x)\n\ndef sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n return tuple(\n SampleInput(\n make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(shape,)) for size, shape in test_cases)\n\ndef sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)\n\n samples: List[SampleInput] = []\n for shape, *other_shapes in test_cases:\n samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))\n\n return samples\n\ndef sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n test_cases: Tuple[tuple] = (((1, S), (2, S), (3, S),),)\n\n samples: List[SampleInput] = []\n for shape, *other_shapes in test_cases:\n samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))\n\n return samples\n\ndef sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n (S, S, S),\n (S,),\n (),\n )\n\n sample_inputs = []\n for size in test_cases:\n tensor1 = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)\n tensor2 = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(tensor1, args=(tensor2,)))\n sample_inputs.append(SampleInput(tensor1, args=(2,)))\n\n return tuple(sample_inputs)\n\n\ndef sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):\n small_S = 2\n test_cases = (\n ((S, S, 2), (S, S + 1, 2)),\n ((S, S), (S, S)),\n ((S, S, S), (S, S, S)),\n ((3, 5), (3, 5)),\n ((2, 3, 5), (2, 3, 5)),\n ((1, 2, 3), (1, 2, 3)),\n ((1, 1), (S, 1)),\n ((0, 5), (4, 5)),\n ((4, 5), (0, 5)),\n ((0, 4, 5), (3, 5)),\n ((4, 5), (0, 3, 5)),\n ((0, 4, 5), (1, 3, 5)),\n ((1, 4, 5), (0, 3, 5)),\n # Using S here would make this one test take 9s\n ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),\n ((small_S, 1, 1, small_S), (1, small_S, small_S)),\n ((1, 1, small_S), (small_S, 1, small_S, small_S)),\n )\n\n samples = []\n for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:\n # FIXME add an override for JIT and revert 
0. back to 0\n # since it's accepted by eager\n for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float(\"inf\")]:\n for t1_size, t2_size in test_cases:\n # The args should never be non-contiguous as this is not supported in the backward\n samples.append(SampleInput(\n make_tensor(t1_size, device, dtype, requires_grad=requires_grad),\n args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad), p, cm)))\n\n return samples\n\n\ndef sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n low=None, high=None, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1,)),\n ((), (1,)),\n ((S, S, S), (make_arg(()),)))\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (), False),\n ((S, S, S), (1,), False),\n ((S,), (1,), False),\n ((), (), False),\n )\n test_cases_lhs_broadcasting = (\n ((S, 1, S), (S, S, S), True),\n ((1,), (S, S, S), True),\n ((1, S), (1, 1, S), True),\n ((), (0,), True),\n ((), (S, S, S), True),\n )\n cases = test_cases + test_cases_lhs_broadcasting\n sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n broadcasts_input=broadcasts_input)\n for first_shape, second_shape, broadcasts_input in cases)\n equal_tensors_non_bool = (\n ([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),\n ([[[6, 5]], [[1, -5]]]),\n ([[2], [-1]]),\n ([0, -6]),\n ([3],),\n )\n equal_tensors_bool = (\n ([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),\n ([[[1, 1]], [[1, 0]]]),\n ([[1], [0]]),\n ([0, 1]),\n ([1],),\n )\n more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool\n more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),))\n for elements in more_cases)\n sample_inputs = [*sample_inputs, *more_inputs]\n return tuple(sample_inputs)\n\n\ndef sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors, args=(0,)),)\n\ndef sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment]\n ((S, S), (S, S), {'dim': -1}),\n ((S, S), (S, S), {'dim': 1}),\n ((M, S), (S, S), {'dim': 0}), # different shapes\n ((1, 2, 3), (1, 2, 3), {'dim': -2}),\n ((0,), (0,), {'dim': 0}), # empty tensor\n ((0, S), (S, S), {'dim': 0}),\n ((1,), (1,), {}) # dim not passed, fallback to default\n )\n\n for input_shape1, input_shape2, kwargs in cases:\n yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)\n\ndef sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return 
(SampleInput(tensors),)\n\ndef sample_inputs_hypot(op_info, device, dtype, requires_grad, **kwargs):\n    input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n    args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n\n    return (\n        SampleInput(input, args=(args,)),\n    )\n\ndef sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):\n    return (\n        SampleInput(\n            make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(0, gather_variable((S, S), 1, M, True, device=device))),\n        SampleInput(\n            make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),\n        SampleInput(\n            make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n        # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006\n        SampleInput(\n            make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(0, torch.tensor([], dtype=torch.uint8, device=device))),\n        SampleInput(\n            make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n    )\n\ndef _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o):\n    for i in range(1 if dim == 0 else m):\n        for j in range(1 if dim == 1 else n):\n            for k in range(1 if dim == 2 else o):\n                ii = [i, j, k]\n                ii[dim] = slice(0, idx.size(dim) + 1)\n                idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]\n\ndef error_inputs_gather(op_info, device, **kwargs):\n    # src is [1, 2]\n    #        [3, 4]\n    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)\n\n    # idx is [0, 0]\n    #        [1, 0]\n    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)\n\n    # Index should be smaller than self except on dimension 1\n    bad_src = make_tensor((1, 1), device=device, dtype=torch.float32)\n    yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), error_type=RuntimeError,\n                     error_regex=\"Size does not match at dimension 0\")\n\n    # Index must have long dtype\n    bad_idx = idx.to(torch.int32)\n    yield ErrorInput(SampleInput(src, args=(1, bad_idx)), error_type=RuntimeError,\n                     error_regex=\"Expected dtype int64 for index\")\n\n    # TODO: FIXME\n    # out.dtype must match src.dtype\n    # Creates new src & idx since SampleInputs can't share tensors\n    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)\n    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)\n    out = torch.empty((2, 2), device=device, dtype=torch.float64)\n    yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), error_type=RuntimeError,\n                     error_regex=\"Expected out tensor to have dtype\")\n\n    # src and index tensors must have the same # of dimensions\n    # idx too few dimensions\n    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)\n    idx = torch.tensor((0, 0), device=device, dtype=torch.long)\n    yield ErrorInput(SampleInput(src, args=(1, idx)), error_type=RuntimeError,\n                     error_regex=\"Index tensor must have the same number of dimensions\")\n\n    # src too few dimensions\n    src = torch.tensor((1, 2), device=device, dtype=torch.float32)\n    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)\n    yield ErrorInput(SampleInput(src, args=(0, idx)), error_type=RuntimeError,\n                     error_regex=\"Index tensor must have the same number of dimensions\")\n\n    # index out of bounds\n    # NOTE: this 
ErrorInput is guarded because bounds checking does not occur on CUDA devices\n if torch.device(device).type == 'cpu':\n src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)\n idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long)\n yield ErrorInput(SampleInput(src, args=(1, idx,)), error_type=RuntimeError,\n error_regex=\"index 23 is out of bounds for dimension\")\n\n# Error inputs for scatter\ndef error_inputs_scatter_and_scatter_add(op_info, device, **kwargs):\n # Error when self.dtype != src.dtype (and src is not a scalar)\n src = make_tensor((2, 5), device=device, dtype=torch.float32)\n idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)\n dst = torch.zeros((3, 5), device=device, dtype=torch.double)\n yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_type=RuntimeError,\n error_regex=\"Expected self.dtype to be equal to src.dtype\")\n\n # Index dtype must be long\n src = make_tensor((2, 5), device=device, dtype=torch.float32)\n idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32)\n dst = torch.zeros((3, 5), device=device, dtype=torch.float32)\n yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_type=RuntimeError,\n error_regex=\"Expected dtype int64 for index\")\n\n # Index and destination must have the same number of dimensions\n src = make_tensor((2, 5), device=device, dtype=torch.float32)\n idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)\n dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32)\n yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_type=RuntimeError,\n error_regex=\"Index tensor must have the same number of dimensions as self tensor\")\n\n # Index and src must have the same number of dimensions when src is not a scalar\n src = make_tensor((2, 5, 2), device=device, dtype=torch.float32)\n idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)\n dst = torch.zeros((3, 5), device=device, dtype=torch.float32)\n yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_type=RuntimeError,\n error_regex=\"Index tensor must have the same number of dimensions as src tensor\")\n\n # Index out of bounds\n # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices\n if torch.device(device).type == 'cpu':\n src = make_tensor((2, 5), device=device, dtype=torch.float32)\n idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)\n dst = torch.zeros((3, 5), device=device, dtype=torch.float32)\n yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_type=RuntimeError,\n error_regex=\"index 34 is out of bounds for dimension 0 with size 3\")\n\ndef sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S), 1, S, True, device=device), 0)),\n\n # `indices` broadcast\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),\n\n # `self` broadcast\n SampleInput(make_tensor((1, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),\n\n # without `dim` arg\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), )),\n 
SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device),)),\n )\n\n\ndef sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):\n test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]\n ((S, S, S), {}),\n ((S, S, S), {'dim': 1}),\n ((S, S, S), {'dim': 1, 'keepdim': True}),\n ((), {'dim': 0}),\n ((), {}),\n ((), {'dim': 0, 'keepdim': True}),\n )\n\n samples: List[SampleInput] = []\n for shape, kwargs in test_cases:\n samples.append(SampleInput(\n make_tensor(shape, device, dtype, requires_grad=requires_grad),\n kwargs=kwargs))\n\n return samples\n\ndef sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n test_cases = (\n ((1,), 0, None, None),\n ((S,), 0, None, None),\n ((S, 1), 0, None, None),\n ((S, 1), 1, None, None),\n ((S, S), 0, None, None),\n ((S, S), 1, None, None),\n ((S, S), 0, (1, S), (2, S)),\n ((S, S), 0, None, (2, S)),\n ((S, S, S), 1, None, None),\n ((S, S, S), 2, None, None),\n ((S, S, S), 1, (S, 1, S), (S, 1, S)),\n ((S, S, S), 2, (S, S, 1), (S, S, 1)),\n ((S, S, S), 2, (S, S, S), (S, S, S)),)\n\n sample_inputs = []\n for size, dim, size_prepend, size_append in test_cases:\n prepend_size = 0 if (size_prepend is None) else size_prepend[dim]\n append_size = 0 if (size_append is None) else size_append[dim]\n dim_size = size[dim] + prepend_size + append_size\n for n in range(dim_size):\n input_tensor = make_arg(size)\n prepend = make_arg(size_prepend) if size_prepend else None\n append = make_arg(size_append) if size_append else None\n sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))\n\n # add some samples with n > dim_size\n sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))\n sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))\n\n return sample_inputs\n\ndef sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n sample_inputs = []\n for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):\n input_tensor = make_arg(size)\n weight_tensor = make_arg(size) if weighted else None\n\n sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n bins_tensor = make_arg((bin_ct + 1,))\n sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n return sample_inputs\n\ndef sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))\n bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))\n\n sample_inputs = []\n for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):\n input_tensor = make_arg(size)\n bin_ct = bin_ct_pattern[:size[-1]]\n weight_tensor = make_arg(size[:-1]) if weighted else None\n\n sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),\n kwargs=dict(weight=weight_tensor, density=density)))\n\n bins_tensor = [make_arg(ct + 1) for ct in 
bin_ct]\n        sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),\n                                         kwargs=dict(weight=weight_tensor, density=density)))\n\n    return sample_inputs\n\ndef sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n    sample_inputs = []\n    for size, min, max in product(sizes, [0, -10], [0, 10]):\n        # construct sample input omitting bins arg\n        sample_inputs.append(SampleInput(make_arg(size),\n                                         kwargs=dict(min=min, max=max)))\n\n        # construct sample inputs with a few different bins values\n        for bins in [1, 3, 10]:\n            sample_inputs.append(SampleInput(make_arg(size),\n                                             kwargs=dict(bins=bins, min=min, max=max)))\n\n    return sample_inputs\n\ndef sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    sample_inputs = []\n\n    for size, weighted in product((S, M), [False, True]):\n        input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)\n        weight_tensor = make_arg((size,)) if weighted else None\n\n        max_val = int(input_tensor.max().item())\n\n        for minlength in [0, max_val // 2, max_val, 2 * max_val]:\n            sample_inputs.append(SampleInput(input_tensor,\n                                             kwargs=dict(weights=weight_tensor, minlength=minlength)))\n\n    return sample_inputs\n\ndef sample_inputs_bucketize(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n    sample_inputs = []\n\n    for size, out_int32, right in product(sizes, [False, True], [False, True]):\n        input_tensor = make_arg(size)\n        boundaries = make_arg((S,)).msort()\n\n        sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),\n                                         kwargs=dict(out_int32=out_int32, right=right)))\n\n    return sample_inputs\n\ndef sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))\n    inputs = []\n    for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):\n        unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)\n        input_tensor = make_arg(size, noncontiguous=noncontiguous)\n        if np.prod(size) == 0:\n            boundary_tensor = unsorted_tensor\n            sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)\n        else:\n            boundary_tensor, sorter = torch.sort(unsorted_tensor)\n        side = \"right\" if right else \"left\"\n\n        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))\n        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))\n\n        inputs.append(\n            SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))\n        inputs.append(\n            SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))\n    return inputs\n\ndef sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs):\n    sample_inputs = []\n    test_cases_float = (\n        ((S,), None, None, 1),\n        ((S,), 2., None, 1),\n        ((S, S), None, None, 2),\n        ((S, S), [2.0, 2.1], None, 1),\n        ((S, S), [2.0, 2.1], (0, 1), 1),\n        ((4, 4, 4), [2., 1.], (0, 1), 
2),\n )\n for size, spacing, dim, edge_order in test_cases_float:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))\n\n test_cases_tensor = (\n ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),\n ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),\n )\n for size, coordinates, dim, edge_order in test_cases_tensor:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n coordinates_tensor_list = []\n for coords in coordinates:\n # `coords` will always contain floating point values and Python 3.10 does not support this\n # implicit conversion to an integer using `__int__`\n # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed\n a = torch.tensor(coords, device=device)\n coordinates_tensor_list.append(a.to(dtype))\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n test_args = [\n ([1, 2],),\n (slice(0, 3),),\n ([slice(0, 3), 1],),\n ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),\n ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),\n ([slice(None), slice(None), [0, 3]],),\n ([slice(None), [0, 3], slice(None)],),\n ([[0, 3], slice(None), slice(None)],),\n ([[0, 3], [1, 2], slice(None)],),\n ([[0, 3], ],),\n ([[0, 3], slice(None)],),\n ([[0, 3], Ellipsis],),\n ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),\n (index_variable(2, S, device=device),),\n (mask_not_all_zeros((S,)),),\n ]\n\n for args in test_args:\n yield SampleInput(make_arg((S, S, S)), args=args)\n\ndef sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n inputs = []\n for accumulate in [False, True]:\n # Test with indices arg\n inputs.append(SampleInput(\n make_arg((S, S,)),\n args=((index_variable(2, S, device=device),), make_arg((2, S))),\n kwargs=dict(accumulate=accumulate)))\n\n # Test with mask arg\n mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))\n inputs.append(SampleInput(\n make_arg((S, S)),\n args=((mask, ), make_arg((S,))),\n kwargs=dict(accumulate=accumulate)))\n\n return inputs\n\ndef sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):\n def small_3d_unique():\n res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)\n res = res.to(dtype).requires_grad_(requires_grad)\n return res\n\n def large_1d_unique():\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype).requires_grad_(requires_grad)\n return res\n\n samples = []\n # Test case for large tensor.\n samples.append(SampleInput(large_1d_unique()))\n\n # Test cases for small 3d tensors.\n # Imitates legacy tests from test/test_torch.py\n dims = range(-3, 3)\n flag = [True, False]\n for dim, descending, stable in product(dims, flag, flag):\n # default schema without stable sort\n samples.append(SampleInput(small_3d_unique(),\n args=(dim, descending)))\n # schema with stable sort, no CUDA support yet\n if torch.device(device).type == 'cpu':\n samples.append(\n SampleInput(small_3d_unique(),\n kwargs=dict(dim=dim, descending=descending, stable=stable))\n )\n\n # Test 
cases for scalar tensor\n    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))\n    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),\n                               args=(0,)))\n    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),\n                               args=(0, True)))\n\n    # Test cases for stable sort\n    samples.append(SampleInput(small_3d_unique(),\n                               kwargs=dict(stable=True)))\n    samples.append(SampleInput(small_3d_unique(),\n                               kwargs=dict(dim=0, stable=True)))\n    samples.append(SampleInput(small_3d_unique(),\n                               kwargs=dict(dim=0, descending=True, stable=True)))\n    return samples\n\ndef sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    sizes = ((), (S,), (S, S), (S, S, S))\n    samples = []\n    for x_size in sizes:\n        # threshold and values args must be numbers\n        samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))\n    return samples\n\ndef sample_inputs_argsort(*args, **kwargs):\n    return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs) if \"stable\" not in sample_input.kwargs]\n\ndef sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):\n    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n    sample_inputs = []\n    for shape, sorted, return_inverse, return_counts, dim in \\\n            product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):\n        # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim\n        if 0 in shape and shape.index(0) != dim:\n            continue\n\n        # skip invalid dim args\n        if dim is not None and (dim < -len(shape) or dim >= len(shape)):\n            continue\n\n        kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)\n\n        # construct a test case with only one distinct value\n        input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n        # construct a test case with mixed 0s and 1s\n        input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\\\n            .to(dtype).requires_grad_(requires_grad)\n        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n        # construct a test case with many different values\n        input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))\n\n    return sample_inputs\n\ndef sample_inputs_unique_consecutive(*args, **kwargs):\n    for sample_input in sample_inputs_unique(*args, **kwargs):\n        if not sample_input.kwargs[\"sorted\"]:\n            sample_input.kwargs.pop(\"sorted\")\n            yield sample_input\n\ndef sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    args_for_binary_op = (\n        ((S, S, S), (S, S, S),),\n        ((S, S, S), (S,),),\n        ((S,), (S, S, S),),\n        ((S, 1, S), (S, S),),\n        ((S, S), (S, S),),\n        ((), (),),\n        ((S, S, S), (),),\n        ((), (S, S, S),),\n    )\n    inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n                                           low=None, high=None,\n                                           requires_grad=requires_grad),\n                               args=(make_tensor(other_tensor, device, dtype,\n                                                 low=None, high=None,\n                                                 requires_grad=requires_grad),),))\n                  for input_tensor, other_tensor in args_for_binary_op)\n    return inputs\n\n\ndef sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):\n    
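# F.adaptive_avg_pool1d averages the last dimension down to the requested output\n    # size regardless of the input length. Minimal usage sketch (illustrative, not\n    # part of the original suite):\n    #   x = torch.randn(3, 8, 8)\n    #   torch.nn.functional.adaptive_avg_pool1d(x, 5).shape  # torch.Size([3, 8, 5])\n    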
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n ((0, 8, 8), (5,)),\n ((3, 8, 8), 5),\n ((3, 8, 8), 1)\n )\n\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\ndef sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n ((1, 8, 8, 8), (5, 7)),\n ((2, 8, 8, 8), (None, 7)),\n ((1, 8, 4, 3), (5, None)),\n ((1, 8, 4, 3), (None, None)),\n ((1, 8, 4, 3), (5)),\n )\n\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\n\ndef sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n ((0, 8, 8, 8, 8), (5, 7, 4)),\n ((1, 8, 4, 3, 7), (None, None, None)),\n ((1, 8, 4, 3, 7), (1, 1, 1)),\n ((3, 3, 8, 8, 6), (5, 7, None)),\n ((1, 3, 8, 8, 6), (5, None, 2)),\n ((3, 3, 8, 8, 6), (None, 3, 2)),\n )\n\n for input_shape, output_size in cases:\n yield SampleInput(make_arg(input_shape), args=(output_size,))\n\ndef sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8), (5,)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((3, 4, 4), 3),\n ((3, 4, 4), 1)\n )\n\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\ndef sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8, 8), (5, 7)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((1, 4, 4, 4), (2, 3)),\n ((2, 4, 4, 4), (None, 3)),\n ((2, 4, 4, 4), (1, 1)),\n ((1, 4, 4, 3), (3, None)),\n ((1, 4, 4, 3), (None, None)),\n ((1, 4, 4, 3), (3)),\n )\n\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\n\ndef sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as (input shape, output size)\n cases = (\n # ((0, 8, 8, 8, 8), (5, 7, 4)),\n # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]\n ((1, 4, 4, 3, 5), (None, None, None)),\n ((1, 4, 4, 3, 5), (1, 1, 1)),\n ((3, 3, 4, 4, 6), (2, 3, None)),\n ((1, 3, 4, 4, 6), (3, None, 2)),\n ((3, 3, 4, 4, 6), (None, 3, 2)),\n )\n\n for shapes, return_idx in product(cases, (True, False)):\n yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))\n\nclass _TestParamsMaxPoolBase(object):\n\n def __init__(self):\n self.kwargs = {\n 'kernel_size': [3],\n 'stride': [2, None],\n 'ceil_mode': [True, False],\n 'padding': [0, 1],\n 'dilation': [1],\n 'return_indices': [True, False]\n }\n\n self.shapes = [\n [1, 2, None], # batch\n [2], # channels\n [3, 6] # signal\n ]\n\n def 
_gen_shape(self):\n for shape in product(*self.shapes):\n # shape[0] is None indicates missing batch dimension\n if shape[0] is None:\n shape = shape[1:]\n\n yield shape, torch.contiguous_format\n # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format\n if len(self.shapes) == 4 and len(shape) == 4:\n yield shape, torch.channels_last\n\n def _gen_kwargs(self):\n keys = self.kwargs.keys()\n for values in product(*self.kwargs.values()):\n yield dict(zip(keys, values))\n\n def gen_input_params(self):\n yield from product(self._gen_shape(), self._gen_kwargs())\n\nclass _TestParamsMaxPool1d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3,)]\n self.kwargs['stride'] += [(2,)]\n self.kwargs['padding'] += [(1,)]\n self.kwargs['dilation'] += [(1,)]\n\nclass _TestParamsMaxPool2d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3, 2)]\n self.kwargs['stride'] += [(2, 1)]\n self.kwargs['padding'] += [(1, 1)]\n self.kwargs['dilation'] += [(1, 2)]\n\n self.shapes.append([6])\n\nclass _TestParamsMaxPool3d(_TestParamsMaxPoolBase):\n\n def __init__(self):\n super().__init__()\n self.kwargs['kernel_size'] += [(3, 2, 3)]\n self.kwargs['stride'] += [(2, 1, 2)]\n self.kwargs['dilation'] += [(1, 2, 1)]\n\n self.shapes.append([6])\n self.shapes.append([5])\n\ndef sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n params_generator_type_dict = {\n 'nn.functional.max_pool1d': _TestParamsMaxPool1d,\n 'nn.functional.max_pool2d': _TestParamsMaxPool2d,\n 'nn.functional.max_pool3d': _TestParamsMaxPool3d,\n }\n\n params_generator = params_generator_type_dict[op_info.name]()\n for (shape, memory_format), kwargs in params_generator.gen_input_params():\n arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)\n yield SampleInput(arg, kwargs=kwargs)\n\ndef sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((2, 1, 4, 5), {'p': 1., 'dim': 2}),\n ((2, 3, 4, 5), {'p': 2., 'dim': 1}),\n ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),\n ((1, 3, 4, 5), {'p': -1., 'dim': 1}),\n ((1, 3, 4, 5), {'p': 0., 'dim': -1}),\n ((), {'p': 1.2, 'dim': 0}),\n ((2, 3, 4, 5), {}),\n ((2, 3, 4, 5), {'eps': 1e-4}))\n\n for input_shape, kwargs in cases:\n yield SampleInput(make_arg(input_shape), kwargs=kwargs)\n\ndef sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4), (3, 3, 3), (3,),\n {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),\n ((2, 2, 4), (2, 2, 4), (4,),\n {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),\n ((1, 1, 4), (1, 1, 4), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),\n ((1, 1, 4), (1, 2, 3), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5), (4, 8, 3), None,\n {})\n )\n\n for input_shape, weight, bias, 
kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n\ndef sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4, 4), (3, 3, 3, 3), (3,),\n {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),\n ((2, 2, 4, 4), (2, 2, 4, 5), (4,),\n {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),\n ((1, 1, 4, 5), (1, 1, 4, 3), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),\n ((1, 1, 4, 3), (1, 2, 3, 4), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5, 5), (4, 8, 3, 3), None,\n {})\n )\n\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\ndef sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, output_padding, groups, dilation)\n cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),\n {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),\n ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),\n {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),\n ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),\n ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,\n {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),\n ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,\n {})\n )\n\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n\ndef sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias,\n # and a dict of values of (stride, padding, dilation, groups)\n cases: Tuple = (\n ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),\n ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),\n ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),\n ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),\n # With defaults\n ((1, 4, 5), (3, 4, 3), None, {}),\n )\n\n # TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged\n # Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck\n # in test/test_nn.py\n\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n\ndef 
sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as shapes for input, weight, bias\n # and a dict of values of (stride, padding, groups, dilation)\n cases: Tuple = (\n ((1, 3, 4, 4), (3, 3, 3, 3), (3,),\n {'stride': (2, 2), 'padding': 2, 'groups': 1}),\n ((2, 4, 8, 8), (2, 2, 3, 3), (2,),\n {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),\n ((1, 2, 4, 3), (4, 2, 3, 4), None,\n {'stride': 2, 'padding': 1, 'groups': 1}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 2, 'padding': \"valid\"}),\n ((1, 4, 5, 5), (1, 4, 2, 3), (1,),\n {'stride': 1, 'padding': \"same\", 'dilation': 3}),\n # Below are the group related samples from common_nn.py\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),\n ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),\n ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),\n ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),\n ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),\n ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),\n # With defaults\n ((1, 4, 5, 5), (3, 4, 3, 3), None, {}),\n )\n\n for input_shape, weight, bias, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(\n make_arg(weight),\n make_arg(bias) if bias is not None else bias\n ), kwargs=kwargs)\n\n\ndef sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, num groups, and eps\n cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment]\n ((1, 6, 3), 2, 0.5),\n ((2, 6, 3), 2, -0.5),\n ((1, 2), 1, None),\n ((0, 2), 1, None),\n )\n\n for input_shape, num_groups, eps in cases:\n # Shape of weight and bias should be the same as num_channels\n weight = make_arg(input_shape[1])\n bias = make_arg(input_shape[1])\n kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}\n yield SampleInput(\n make_arg(input_shape),\n args=(num_groups,),\n kwargs=kwargs\n )\n # Without any optional args\n yield SampleInput(make_arg((1, 2)), args=(1,))\n\n\ndef sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)\n\n # Ordered as: input shape, kwargs for momentum, eps\n cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]\n ((S, S, S), {'momentum': 0.5, 'eps': 0.6}),\n ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),\n ((3, 2, 4), {'momentum': -1.2}),\n ((3, 2, 4), {'momentum': 0.0}),\n ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),\n ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),\n )\n\n for input_shape, kwargs in cases:\n # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)\n channels = input_shape[1]\n weight = make_arg(channels)\n bias = make_arg(channels)\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = 
make_arg_without_requires_grad(channels, low=0)\n new_kwargs = {\n 'running_mean': running_mean,\n 'running_var': running_var,\n 'weight': weight,\n 'bias': bias,\n **kwargs\n }\n\n yield SampleInput(\n make_arg(input_shape),\n args=(),\n kwargs=new_kwargs\n )\n\n # Checking for permutations of weights and biases as `None`\n # instance_norm assumes that if there's a bias, there's a weight\n weights = [channels, None]\n biases = [None, None]\n\n for weight_channels, bias_channels in zip(weights, biases):\n running_mean = make_arg_without_requires_grad(channels, low=0)\n running_var = make_arg_without_requires_grad(channels, low=0)\n yield SampleInput(\n make_arg(input_shape),\n args=(),\n kwargs={\n 'running_mean': running_mean,\n 'running_var': running_var,\n 'weight': make_arg(weight_channels) if weight_channels is not None else None,\n 'bias': make_arg(bias_channels) if bias_channels is not None else None\n }\n )\n\n # Test case for no optional kwargs\n yield SampleInput(make_arg((1, 2, 3)), kwargs={})\n\n\ndef sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, normalized_shape and a kwarg dict for eps\n cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 2, 3), (1, 2, 3), {'eps': 0.5}),\n ((2, 2, 3), (2, 3), {'eps': -0.5}),\n ((1,), (1,), {}),\n ((1, 2), (2,), {}),\n ((0, 1), (1,), {}),\n )\n\n for input_shape, normalized_shape, kwargs in cases:\n # Shape of weight and bias should be the same as normalized_shape\n weight = make_arg(normalized_shape)\n bias = make_arg(normalized_shape)\n yield SampleInput(\n make_arg(input_shape),\n args=(normalized_shape, weight, bias),\n kwargs=kwargs\n )\n # Without any optional args\n yield SampleInput(make_arg((1, 2)), args=((2,),))\n\n # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,\n # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400\n\n # With weight and a `None` bias\n # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))\n\n # With `None` weight and bias (tests failing for this, see the link above)\n # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))\n\ndef sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Ordered as input shape, size and a kwarg dict for alpha, beta, and k\n cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]\n ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),\n ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),\n ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),\n ((1, 6, 3), 2, {'alpha': 3e-05}),\n ((1, 6, 3), 2, {'beta': 0.5}),\n ((1, 6, 3), 2, {'k': 1.25}),\n ((1, 6, 3), 2, {}),\n ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),\n )\n\n for input_shape, size, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)\n\n\ndef sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs):\n N = 5\n # make sure we are testing -3 -> 3 range. 
default is -10 -> 10 so maybe unnecessary ?\n tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_linear(self, device, dtype, requires_grad, **kwargs):\n features_options = [[3, 4], [8, 8]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for has_bias, (in_feat, out_feat), batch_shape in \\\n itertools.product([True, False], features_options, batch_options):\n input_tensor = create_tensor(batch_shape + [in_feat])\n weight = create_tensor([out_feat, in_feat])\n if not has_bias:\n sample_inputs.append(SampleInput(input_tensor, args=(weight,)))\n continue\n\n bias = create_tensor([out_feat])\n sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))\n return sample_inputs\n\ndef sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs):\n features_options = [[3, 4, 5], [8, 8, 8]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \\\n itertools.product([True, False], features_options, batch_options):\n input_tensor1 = create_tensor(batch_shape + [in_feat1])\n input_tensor2 = create_tensor(batch_shape + [in_feat2])\n weight = create_tensor([out_feat, in_feat1, in_feat2])\n if not has_bias:\n sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))\n continue\n bias = create_tensor([out_feat])\n sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))\n\n return sample_inputs\n\ndef sample_inputs_glu(self, device, dtype, requires_grad, **kwargs):\n features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]\n batch_options: List[List[int]] = [\n [], # no batch\n [0],\n [8],\n [2, 3],\n ]\n create_tensor = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-2, high=2)\n\n sample_inputs = []\n for features, batch_shape in itertools.product(features_options, batch_options):\n ndim = len(features) + len(batch_shape)\n for dim in range(ndim):\n input_tensor = create_tensor(batch_shape + features)\n dim_size = input_tensor.size(dim)\n if dim_size > 0 and dim_size % 2 == 0:\n sample_inputs.append(SampleInput(input_tensor, args=(dim,)))\n\n return sample_inputs\n\ndef sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs):\n N, C = 2, 3\n D = 4\n S = 3\n L = 5\n\n align_corners_options: Tuple[Any, ...] 
= (None,)\n if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):\n align_corners_options = (True, False, None)\n ranks_for_mode = {\n 'nearest': [1, 2, 3],\n 'linear': [1],\n 'bilinear': [2],\n 'bicubic': [2],\n 'trilinear': [3],\n 'area': [1, 2, 3]\n }\n\n def shape(size, rank, with_batch_channel=True):\n if with_batch_channel:\n return tuple([N, C] + ([size] * rank))\n return tuple([size] * rank)\n\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-1, high=1)\n\n sample_inputs = []\n for align_corners in align_corners_options:\n for rank in ranks_for_mode[mode]:\n sample_inputs.extend([\n SampleInput(make_arg(shape(D, rank)),\n args=(shape(S, rank, False), None, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(shape(L, rank, False), None, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(None, 1.7, mode, align_corners)),\n SampleInput(make_arg(shape(D, rank)),\n args=(None, 0.6, mode, align_corners)),\n ])\n\n return sample_inputs\n\ndef sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):\n N, C = 2, 3\n D = 4\n S = 3\n L = 5\n\n ranks_for_mode = {\n 'nearest': [1, 2, 3],\n 'bilinear': [2],\n }\n\n def shape(size, rank, with_batch_channel=True):\n if with_batch_channel:\n return tuple([N, C] + ([size] * rank))\n return tuple([size] * rank)\n\n make_arg = partial(make_tensor, device=device, dtype=dtype,\n requires_grad=requires_grad, low=-1, high=1)\n\n sample_inputs = []\n for rank in ranks_for_mode[mode]:\n sample_inputs.extend([\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(size=shape(S, rank, False))),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(size=shape(L, rank, False))),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(scale_factor=1.7)),\n SampleInput(make_arg(shape(D, rank)),\n kwargs=dict(scale_factor=0.6)),\n ])\n\n return sample_inputs\n\ndef sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs):\n N = 5\n tensors = []\n for _ in range(1, N):\n for approximate in ['none', 'tanh']:\n tensors.append(SampleInput(\n make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n requires_grad=requires_grad, low=-3, high=3),\n kwargs=dict(approximate=approximate)))\n return tensors\n\ndef sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_reduction_with_dim = (\n ((S, S, S), (1,),),\n ((S, S, S), (1, True, ),),\n ((), (0,),),\n ((), (0, True,),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=args,))\n for input_tensor, args in args_for_reduction_with_dim)\n return inputs\n\ndef sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n inputs.append(SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),))\n return inputs\n\ndef _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs):\n yield from _generate_reduction_inputs(device, dtype, requires_grad)\n yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)\n yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)\n\ndef sample_inputs_nan_reduction(supports_multiple_dims):\n # Generates sample inputs for reduction ops that 
contain the input tensor\n # and dim and keepdim kwargs. If a reduction op needs to test additional\n # args/kwargs then create a separate sample_inputs function\n def fn(op_info, device, dtype, requires_grad):\n inputs = []\n\n for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t.clone().requires_grad_(requires_grad)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),\n kwargs=kwargs))\n\n return inputs\n\n return fn\n\ndef sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs):\n test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1, requires_grad=requires_grad))\n test_interpolations = ['linear', 'midpoint']\n\n inputs = []\n for quantiles in test_quantiles:\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),\n args=(quantiles,)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):\n # Interpolation kwarg for now is only supported when providing both dim and keepdim\n kwargs.setdefault('dim', 0)\n kwargs.setdefault('keepdim', False)\n for interpolation in test_interpolations:\n kwargs['interpolation'] = interpolation\n inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),\n args=(quantiles,), kwargs=kwargs))\n\n return inputs\n\ndef sample_inputs_reduction_count_nonzero(*args, **kwargs):\n \"\"\"Sample inputs for count_nonzero\"\"\"\n samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)\n # count_nonzero does not support keepdim yet\n for sample in samples:\n sample.kwargs.pop('keepdim', None)\n return samples\n\ndef sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs):\n N = 10\n tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,\n requires_grad=requires_grad)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size\n cases = (((1, 3, 9, 9), 3),\n ((1, 3, 9, 9), (4, 4)),\n ((1, 3, 9, 9), (6, 6)),\n ((2, 3, 9, 9), (3, 3)),\n ((1, 1, 4, 4), (2, 2)),\n ((1, 2, 6, 6), (4, 4)))\n\n samples = []\n\n for input_shape, kernel_size in cases:\n for return_indices in [False, True]:\n # test case passing a single output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2), return_indices=return_indices)\n ))\n\n # test case passing a tuple output size\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_size=(2, 3), return_indices=return_indices)\n ))\n\n # test case passing an output ratio\n samples.append(SampleInput(\n make_arg(input_shape),\n args=(kernel_size,),\n kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)\n ))\n\n return samples\n\ndef sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # Order: input_shape, kernel_size\n cases = (((2, 3, 5, 5, 5), (2, 2, 2)),\n ((1, 2, 6, 5, 4), 2),\n ((1, 2, 5, 6, 5), (2, 3, 2)),\n ((1, 2, 6, 6, 6), (2, 3, 2)),\n ((1, 1, 7, 6, 7), (2, 3, 4)),\n ((1, 
1, 4, 5, 4), (2, 2, 1)),\n             ((1, 1, 8, 7, 6), (4, 3, 2)),\n             ((0, 1, 4, 5, 4), (2, 2, 1)))\n\n    samples = []\n\n    for input_shape, kernel_size in cases:\n        for return_indices in [False, True]:\n            # test case passing a single output size\n            samples.append(SampleInput(\n                make_arg(input_shape),\n                args=(kernel_size,),\n                kwargs=dict(output_size=(2), return_indices=return_indices)\n            ))\n\n            # test case passing a tuple output size\n            samples.append(SampleInput(\n                make_arg(input_shape),\n                args=(kernel_size,),\n                kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)\n            ))\n\n            # test case passing an output ratio\n            samples.append(SampleInput(\n                make_arg(input_shape),\n                args=(kernel_size,),\n                kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)\n            ))\n\n    return samples\n\ndef sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override\n    cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),\n             ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),\n             ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),\n             ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),\n             ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),\n             ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))\n\n    for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:\n        yield SampleInput(make_arg(input_shape),\n                          args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))\n    # Case with just input_shape and kernel_size; the trailing comma is needed so\n    # that (3, 3) is passed as a single kernel_size arg rather than unpacked into\n    # kernel_size=3, stride=3\n    yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))\n\ndef sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    # Order: input_shape, kernel_size, kwargs\n    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [\n        ((2, 3, 9), (3,), dict()),\n        ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),\n        ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),\n        ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),\n        ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),\n        ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),\n        ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),\n        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),\n        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),\n    ]\n\n    for input_shape, kernel_size, kwargs in cases:\n        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)\n\ndef sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override\n    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [\n        ((2, 3, 3, 4, 4), (2, 2, 2), dict()),\n        ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,\n                                  count_include_pad=False, divisor_override=2)),\n        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,\n                                          count_include_pad=True, divisor_override=2)),\n        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), 
ceil_mode=False)),\n ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,\n count_include_pad=False, divisor_override=2)),\n ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,\n count_include_pad=True, divisor_override=-2)),\n ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,\n count_include_pad=True, divisor_override=None)),\n ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,\n count_include_pad=True, divisor_override=None)),\n ]\n\n for input_shape, kernel_size, kwargs in cases:\n yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)\n\ndef sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):\n def get_tensor_input(size):\n return make_tensor(size, device, dtype, requires_grad=requires_grad)\n\n inputs = []\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))\n\n inputs.append(SampleInput(get_tensor_input(()), args=(1,)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))\n\n return inputs\n\ndef sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)\n arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(arg_a, args=(arg_b,)))\n return inputs\n\n\ndef sample_inputs_igamma_igammac(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, low=1e-3)\n cases = (((S, S), (S, S), False),\n ((S, S), (S, ), False),\n ((S, ), (S, S), True),\n ((), (), False))\n\n for shape, other_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(other_shape, requires_grad=False),),\n broadcasts_input=broadcasts_input)\n\n\ndef sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))\n ps = (2, 4)\n\n for size_x, size_y, p in product(sizes, sizes, ps):\n yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index(op_info, device, dtype, requires_grad, **kwargs):\n # target.index_select(dim, idx)\n select = op_info.name == \"index_select\"\n # target.index_add(dim, idx, source, *, alpha=1)\n add = op_info.name == \"index_add\"\n # target.index_copy(dim, idx, source)\n copy = op_info.name == \"index_copy\"\n # target.index_fill(dim, idx, value)\n fill = op_info.name == 
\"index_fill\"\n\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n    make_permutation = partial(torch.randperm, device=device, dtype=torch.int64)\n\n    def make_idx(n):\n        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)\n\n    shapes = [(), (1,), (S, S)]\n    # extra parameter for add\n    alphas = (-1, 0, 2) if add else (None,)\n\n    for shape, alpha in product(shapes, alphas):\n        t = make_arg(shape)\n        args = []\n\n        # dim. We handle the scalar case\n        dim = 1 if t.ndim == 2 else 0\n        args.append(dim)\n\n        # idx They need to be different for copy and add to be deterministic\n        make_idx_fn = make_permutation if copy or add else make_idx\n        idx = make_idx_fn(t.shape[dim] if t.ndim != 0 else 1)\n        args.append(idx)\n\n        # source\n        if copy or add:\n            args.append(make_arg(shape))\n        elif fill:\n            # A weird number to catch errors\n            args.append(make_arg((1,)).item())\n\n        args = tuple(args)\n        kwargs = {} if alpha is None else {\"alpha\": alpha}\n\n        yield SampleInput(t, args=args, kwargs=kwargs)\n\ndef sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    args = (\n        ((S, S, S), (),),\n        ((S, S, S), (1, ),),\n        ((S, S, S), (1, True, ),),\n        ((), (),),\n        ((), (0,),),\n        ((), (0, True,),),\n    )\n    inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n                                           low=None, high=None,\n                                           requires_grad=requires_grad),\n                               args=args,))\n                  for input_tensor, args in args)\n    return inputs\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    # Generic inputs\n    idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]\n    idx_list = [idx, -idx - 1]\n    for idx, acc in product(idx_list, (True, False)):\n        yield SampleInput(input=make_arg((S, S)),\n                          args=(idx.clone(),\n                                make_arg((S,)),\n                                acc))\n\n    # Scalar cases\n    scalar_sizes = [(), (1,)]\n    tgt_gen = (make_arg(size) for size in scalar_sizes)\n    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n    src_gen = (make_arg(size) for size in scalar_sizes)\n    for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):\n        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),\n                          args=(idx.clone(),\n                                src.clone().requires_grad_(requires_grad),\n                                acc))\n\n    # Empty cases\n    tgt_sizes = [(0,), (), (1,), (3, 2)]\n    tgt_gen = (make_arg(size) for size in tgt_sizes)\n    idx = make_idx((0,), high=1)\n    src = make_arg((0,))\n    # iterate the generator tgt_gen here (not the leftover tensor `tgt` from the\n    # scalar loop above, which would be iterated elementwise)\n    for tgt, acc in product(tgt_gen, (True, False)):\n        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),\n                          args=(idx.clone(),\n                                src.clone().requires_grad_(requires_grad),\n                                acc))\n\ndef sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    # Generic inputs: take S elements out of S * S\n    index = make_idx((S,), high=(S * S))\n    for idx in (index, -index - 1):\n        yield SampleInput(input=make_arg((S, S)), args=(idx,))\n\n    # Scalar cases\n    scalar_sizes = [(), (1,)]\n    src_gen = (make_arg(size) for size in scalar_sizes)\n    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n    for src, idx in product(src_gen, idx_gen):\n        
yield SampleInput(input=src.clone().requires_grad_(requires_grad),\n args=(idx.clone(),))\n\n # Empty cases\n src_sizes = [(0,), (), (1,), (3, 2)]\n src_gen = (make_arg(size) for size in src_sizes)\n\n idx = make_idx((0,), high=1)\n for src in src_gen:\n yield SampleInput(input=src.clone().requires_grad_(requires_grad),\n args=(idx.clone(),))\n\ndef sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=([0, 1, 2, 3], [3, 2, 1, 0])),\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=([0, -1, -2, -3], [-3, -2, -1, -0]))\n )\n\n\ndef sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)\n shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))\n\n if requires_grad:\n # Tests for variant_consistency_jit, grad, gradgrad\n # are slower. Use smaller bags of `rep_dims` and `shapes`\n # in this case.\n rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]\n shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]\n\n samples = []\n for rep_dim, shape in product(rep_dims, shapes):\n # `torch.repeat` errors for `len(rep_dims) < t.dim()`,\n # so we filter such combinations.\n if op_info.name == 'repeat' and len(rep_dim) < len(shape):\n continue\n samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))\n\n return samples\n\n\ndef sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_args = (\n ((S, S, S), (1, 2, 2)),\n ((S, S, S), (-1, 2, 2)),\n ((S, S, S), (1, 0, 0)),\n ((S, S, S), (-1, 0, 0)),\n )\n\n for shape, args in shapes_and_args:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n yield SampleInput(tensor, args=args)\n\ndef sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):\n y_shape_x_shape_and_kwargs = [\n ((2, 3), (2, 3), {}),\n ((2, 3), (2, 3), {'dim': 1}),\n ((6,), (6,), {}),\n ((6,), None, {}),\n # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad\n # See Issue #{61619}\n # ((6,0), (6,0), {}),\n ((2, 3), (1, 3), {}),\n ((3, 3), (3, 3), {}),\n ((3, 3), (3, 3), {'dim': -2}),\n ((5,), None, {'dx': 2.0}),\n ((2, 2), None, {'dx': 3.0})\n ]\n samples = []\n for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:\n y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n if x_shape is not None:\n x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))\n else:\n samples.append(SampleInput(y_tensor, kwargs=kwarg))\n return samples\n\ndef sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):\n\n y_shape_x_shape_and_kwargs = [\n ((2, 3), (2, 3), {}),\n ((2, 3), (2, 3), {'dim': 1}),\n ((6,), (6,), {}),\n ((6,), None, {}),\n # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad\n # See Issue #{61619}\n # ((6,0), (6,0), {}),\n ((2, 3), (1, 3), {}),\n ((3, 3), (3, 3), {}),\n ((3, 3), (3, 3), {'dim': -2}),\n ((5,), None, {'dx': 2.0}),\n ((2, 2), None, {'dx': 3.0})\n ]\n 
samples = []\n    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:\n        y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,\n                               requires_grad=requires_grad)\n        if x_shape is not None:\n            x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,\n                                   requires_grad=requires_grad)\n            samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))\n        else:\n            samples.append(SampleInput(y_tensor, kwargs=kwarg))\n    return samples\n\ndef sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):\n    shapes_and_axes = [\n        ((3, 4, 5), 0),\n        ((3, 4, 5), 1),\n        ((3, 4, 5), 3),\n        ((3, 4, 5), -1),\n        ((3, 4, 5), -3),\n        ((), 0)\n    ]\n\n    samples = []\n    for shape, axis in shapes_and_axes:\n        tensor = make_tensor(shape, device, dtype, low=None, high=None,\n                             requires_grad=requires_grad)\n        samples.append(SampleInput(tensor, args=(axis,),))\n\n    return samples\n\n\ndef sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):\n    shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))\n    kernel_sizes = (2, (2, 2), (3, 3))\n    dilations = (1, 2, (1, 2))\n    paddings = (0, 1, (1, 1))\n    strides = (1, 2, (1, 2))\n\n    cases = product(shapes, kernel_sizes, dilations, paddings, strides)\n    for shape, kernel_size, dilation, padding, stride in cases:\n        tensor = make_tensor(shape, device, dtype, requires_grad=requires_grad)\n        yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))\n\n    # With default args\n    yield SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),\n                      args=((3, 3),))\n\n\ndef sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):\n    shapes_and_args = (\n        ((S, 1, S, 1), ()),\n        ((1, 1, 1, 1), ()),\n        ((S, 1, S, 1), (1,)),\n        ((S, 1, S, 1), (-1,)),\n        ((S, 1, S, 1), (2,)),\n        ((S, 1, S, 1), (-2,)),\n        ((), (0, )),\n    )\n\n    for shape, args in shapes_and_args:\n        tensor = make_tensor(shape, device, dtype, low=None, high=None,\n                             requires_grad=requires_grad)\n\n        yield SampleInput(tensor, args=args)\n\n\ndef sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):\n    assert mode in ('constant', 'reflect', 'replicate', 'circular')\n    if mode in ['reflect', 'replicate']:\n        cases: tuple = (  # type: ignore[assignment]\n            ((1, 3), (1, 2)),\n            ((1, 3), (0, 1)),\n            ((0, 3, 3), (1, 2)),\n            ((0, 3, 3), (0, 1)),\n            ((1, 3, 3), (1, 2)),\n            ((1, 3, 3), (0, 1)),\n            ((1, 3, 3), (0, 2, 0, 1)),\n            ((0, 3, 3, 3), (0, 2, 0, 1)),\n            ((3, 3, 5, 5), (0, 2, 0, 1)),\n            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),\n            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n            ((1, 3, 4, 4), (-1, 1, -2, 1)),\n        )\n    elif mode == 'constant':\n        cases = (\n            ((1, 3), (1, 2)),\n            ((1, 3), (0, 1)),\n            ((1, 3), (0, 2, 0, 1)),\n            ((0, 3, 3), (1, 2)),\n            ((0, 3, 3), (0, 1)),\n            ((0, 3, 3), (0, 2, 0, 1)),\n            ((0, 3, 3), (1, 1, 1, 1, 1, 1)),\n            ((1, 3, 3), (1, 2)),\n            ((1, 3, 3), (0, 1)),\n            ((1, 3, 3), (0, 2, 0, 1)),\n            ((1, 3, 3), (1, 1, 1, 1, 1, 1)),\n            ((0, 3, 3, 3), (1, 2)),\n            ((0, 3, 3, 3), (0, 1)),\n            ((0, 3, 3, 3), (0, 2, 0, 1)),\n            ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n            ((3, 3, 5, 5), (1, 2)),\n            ((3, 3, 5, 5), (0, 1)),\n            ((3, 3, 5, 5), (0, 2, 0, 1)),\n            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),\n            ((1, 3, 3, 3, 3), (1, 2)),\n            ((1, 3, 3, 3, 3), (0, 1)),\n            ((1, 3, 3, 3, 3), (0, 2, 0, 1)),\n            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n            ((1, 3, 4, 4), (-1, 1, -2, 1)),\n        )\n    else:  # mode == 'circular'\n        if dtype == torch.bool:\n            # test_dtypes fails on ASAN for this case with the error:\n            # runtime error: load of value 190, which is not a valid value for type 'bool'\n            # Reference: 
https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562\n            # Reference Issue: https://github.com/pytorch/pytorch/issues/63034\n            cases = (\n                ((2, 3, 3), (1, 2)),\n                ((1, 3, 3), (1, 2)),\n            )\n        else:\n            cases = (\n                ((0, 3, 3), (1, 2)),\n                ((0, 3, 3), (0, 1)),\n                ((1, 3, 3), (1, 2)),\n                ((1, 3, 3), (0, 1)),\n                ((0, 3, 3, 3), (0, 2, 0, 1)),\n                ((3, 3, 5, 5), (0, 2, 0, 1)),\n                ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),\n                ((1, 3, 4, 4), (-1, 1, -2, 1)),\n            )\n\n    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    if mode == 'constant':\n        # Default args\n        yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))\n\n    if mode in ['reflect', 'replicate', 'circular']:\n        for shape, pad in cases:\n            yield SampleInput(make_inp(shape), args=(pad, mode))\n    else:  # mode == 'constant'\n        for pad_value in (1., 2.):\n            for shape, pad in cases:\n                yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))\n\n\n# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet\n# Creates matrices with a positive nonzero determinant\ndef sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):\n    def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):\n        u, s, vh = torch.linalg.svd(A, full_matrices=False)\n        s.clamp_(min=min_singular_value)\n        A = (u * s.unsqueeze(-2)) @ vh\n        det = A.det()\n        if sign is not None:\n            if A.dim() == 2:\n                if (det < 0) ^ (sign < 0):\n                    A[0, :].neg_()\n            else:\n                cond = ((det < 0) ^ (sign < 0)).nonzero()\n                if cond.size(0) > 0:\n                    for i in range(cond.size(0)):\n                        A[list(cond[i])][0, :].neg_()\n        return A\n\n    # cases constructed using make_tensor()\n    tensor_shapes = (\n        (S, S),\n        (1, 1),\n        (3, 3, S, S),\n        (3, 3, 1, 1)\n    )\n\n    for shape in tensor_shapes:\n        t = make_tensor(shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t).requires_grad_(requires_grad)\n        yield SampleInput(d)\n\n    # cases constructed using:\n    # 1) make_symmetric_matrices\n    # 2) make_symmetric_pd_matrices\n    # 3) make_fullrank_matrices_with_distinct_singular_values\n    symmetric_shapes = (\n        (S, S),\n        (3, S, S),\n    )\n\n    def _helper(constructor, *shape, **kwargs):\n        t = constructor(*shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)\n        yield SampleInput(d)\n\n    for shape in symmetric_shapes:\n        # _helper is a generator, so its samples must be forwarded with\n        # `yield from`; a bare call would create and discard the generator\n        yield from _helper(make_symmetric_matrices, *shape)\n        yield from _helper(make_symmetric_pd_matrices, *shape)\n        yield from _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)\n\n\ndef np_unary_ufunc_integer_promotion_wrapper(fn):\n    # Wrapper that passes PyTorch's default scalar\n    # type as an argument to the wrapped NumPy\n    # unary ufunc when given an integer input.\n    # This mimics PyTorch's integer->floating point\n    # type promotion.\n    #\n    # This is necessary when NumPy promotes\n    # integer types to double, since PyTorch promotes\n    # integer types to the default scalar type.\n\n    # Helper to determine if promotion is needed\n    def is_integral(dtype):\n        return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]\n\n    @wraps(fn)\n    def wrapped_fn(x):\n        # As the default dtype can change, acquire it when function is called.\n        # NOTE: Promotion in PyTorch is from integer types to the default dtype\n        np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n\n        if is_integral(x.dtype):\n            return fn(x.astype(np_dtype))\n        return fn(x)\n\n    return wrapped_fn\n\ndef sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):\n    nd_tensor = partial(make_tensor, (S, S + 1, S + 2), 
device=device,\n dtype=dtype, requires_grad=requires_grad)\n oned_tensor = partial(make_tensor, (31,), device=device,\n dtype=dtype, requires_grad=requires_grad)\n\n if self.ndimensional == SpectralFuncType.ND:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(s=(8,))),\n SampleInput(oned_tensor()),\n\n *(SampleInput(nd_tensor(),\n kwargs=dict(dim=dim))\n for dim in [-1, -2, -3, (0, -1)]),\n ]\n elif self.ndimensional == SpectralFuncType.TwoD:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(s=(6, 8))),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=0)),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=(0, -1))),\n SampleInput(nd_tensor(),\n kwargs=dict(dim=(-3, -2, -1))),\n ]\n else:\n return [\n SampleInput(nd_tensor(),\n kwargs=dict(n=10, dim=1, norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor(),\n kwargs=dict(n=7)),\n SampleInput(oned_tensor()),\n\n *(SampleInput(nd_tensor(),\n kwargs=dict(dim=dim))\n for dim in [-1, -2, -3]),\n ]\n\ndef sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_input(()), kwargs=dict(repeats=2)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),\n SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))\n ]\n\nSpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))\n\n# Metadata class for Fast Fourier Transforms in torch.fft.\nclass SpectralFuncInfo(OpInfo):\n \"\"\"Operator information for torch.fft transforms. 
\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # Reference implementation (probably in np.fft namespace)\n dtypes=floating_and_complex_types(),\n ndimensional: SpectralFuncType,\n sample_inputs_func=sample_inputs_spectral_ops,\n decorators=None,\n **kwargs):\n decorators = list(decorators) if decorators is not None else []\n decorators += [\n skipCPUIfNoFFT,\n skipCUDAIfRocm,\n ]\n\n super().__init__(name=name,\n dtypes=dtypes,\n decorators=decorators,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n self.ref = ref\n self.ndimensional = ndimensional\n\n\ndef sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs):\n def mt(shape, **kwargs):\n return make_tensor(shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n yield SampleInput(mt(100), kwargs=dict(n_fft=10))\n\n for center in [False, True]:\n yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))\n yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))\n\n window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(\n mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))\n yield SampleInput(\n mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))\n if not dtype.is_complex:\n yield SampleInput(\n mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))\n\n\ndef sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs):\n def mt(shape, **kwargs):\n real_shape = shape if dtype.is_complex else shape + (2,)\n return make_tensor(real_shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n\n yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))\n yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))\n yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))\n\n for center in [False, True]:\n yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))\n yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))\n\n window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(mt((10, 10, 6)), kwargs=dict(\n n_fft=10, window=window, center=center, return_complex=dtype.is_complex))\n yield SampleInput(mt((10, 10, 10)), kwargs=dict(\n n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))\n\n real_window = window if not dtype.is_complex else window.real\n yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))\n\n\ndef sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs):\n def mt(shape, **kwargs):\n return make_tensor(shape, device=device, dtype=dtype,\n requires_grad=requires_grad, **kwargs)\n\n yield SampleInput(mt((9, 10)))\n yield SampleInput(mt((50,)), kwargs=dict(dim=0))\n yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))\n yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))\n yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))\n\n\nclass ShapeFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for Shape manipulating operations like tile and roll\"\"\"\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n sample_inputs_func=None,\n **kwargs):\n super(ShapeFuncInfo, self).__init__(name,\n dtypes=dtypes,\n 
dtypesIfCUDA=dtypesIfCUDA,\n                                            dtypesIfROCM=dtypesIfROCM,\n                                            sample_inputs_func=sample_inputs_func,\n                                            **kwargs)\n        self.ref = ref\n\ndef sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):\n    if same_size:\n        return [make_tensor((N, N), device, dtype, noncontiguous=noncontiguous) for _ in range(N)]\n    else:\n        return [make_tensor((N - i, N - i), device, dtype, noncontiguous=noncontiguous) for i in range(N)]\n\n\ndef get_foreach_method_names(name):\n    # get torch inplace reference function\n    op_name = \"_foreach_\" + name\n    inplace_op_name = \"_foreach_\" + name + \"_\"\n\n    op = getattr(torch, op_name, None)\n    inplace_op = getattr(torch, inplace_op_name, None)\n\n    ref = getattr(torch, name, None)\n    ref_inplace = getattr(torch.Tensor, name + \"_\", None)\n    return op, inplace_op, ref, ref_inplace\n\nclass ForeachFuncInfo(OpInfo):\n    \"\"\"Early version of a specialized OpInfo for foreach functions\"\"\"\n    def __init__(self,\n                 name,\n                 dtypes=floating_and_complex_types(),\n                 dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n                 dtypesIfROCM=None,\n                 safe_casts_outputs=True,\n                 supports_alpha_param=False,\n                 sample_inputs_func=sample_inputs_foreach,\n                 **kwargs):\n        super().__init__(\n            \"_foreach_\" + name,\n            dtypes=dtypes,\n            dtypesIfCUDA=dtypesIfCUDA,\n            dtypesIfROCM=dtypesIfROCM,\n            safe_casts_outputs=safe_casts_outputs,\n            sample_inputs_func=sample_inputs_func,\n            **kwargs\n        )\n\n        foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)\n        self.method_variant = foreach_method\n        self.inplace_variant = foreach_method_inplace\n        self.ref = torch_ref_method\n        self.ref_inplace = torch_ref_inplace\n        self.supports_alpha_param = supports_alpha_param\n\n        if name == \"norm\":\n            self.ref = torch.linalg.vector_norm\n\n\ndef sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False, **kwargs):\n    # Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices\n    from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n    inputs = (\n        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix\n        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices\n        random_hermitian_pd_matrix(S, dtype=dtype, device=device),  # single matrix\n        random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices\n    )\n    test_cases = (torch.linalg.cholesky(a) for a in inputs)\n    out = []\n    for a in test_cases:\n        a.requires_grad = requires_grad\n        out.append(SampleInput(a))\n        out.append(SampleInput(a.clone().requires_grad_(requires_grad), kwargs=dict(upper=True)))\n    return out\n\ndef sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):\n    from torch.testing._internal.common_utils import random_well_conditioned_matrix\n\n    device = torch.device(device)\n\n    drivers: Tuple[str, ...]\n    if device.type == 'cuda':\n        drivers = ('gels',)\n    else:\n        drivers = ('gels', 'gelsy', 'gelss', 'gelsd')\n\n    # we generate matrices of shape (..., n + delta, n)\n    deltas: Tuple[int, ...]\n    if device.type == 'cpu' or has_cusolver():\n        deltas = (-1, 0, +1)\n    # only square systems if cuSOLVER is not available,\n    # because we solve an lstsq problem with a transposed matrix in the backward\n    else:\n        deltas = (0,)\n\n    out = []\n    for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):\n        shape = batch + (3 + delta, 3)\n        a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)\n        a.requires_grad_(requires_grad)\n        b = make_tensor(shape, 
device, dtype, low=None, high=None, requires_grad=requires_grad)\n out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))\n return out\n\ndef sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.householder_product (torch.orgqr).\n The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.\n Empty, square, rectangular, batched square and batched rectangular input is generated.\n \"\"\"\n # Each column of the matrix is getting multiplied many times leading to very large values for\n # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.\n # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n # m = n = S, k = S - 2\n SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n # m = S, n = S -1, k = S - 2\n SampleInput(make_tensor((S, S - 1), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n )\n\n return samples\n\ndef sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs):\n # create a helper function wrapping `make_tensor`\n make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def gen_inputs():\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n tf = [True, False]\n for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):\n reflectors = make_input((*batch, m, n))\n tau = make_input((*batch, min(m, n)))\n other_matrix_shape = (m, n) if left else (n, m)\n other = make_input((*batch, *other_matrix_shape))\n kwargs = {\"left\": left, \"transpose\": transpose}\n yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)\n\n return tuple(gen_inputs())\n\ndef sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always positive-definite input for torch.linalg.cholesky using\n random_hermitian_pd_matrix.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this 
function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 0]\n out = []\n for batch, n, upper in product(batches, ns, [True, False]):\n a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n out.append(SampleInput(a, kwargs={\"upper\": upper}))\n return out\n\ndef sample_inputs_symeig(op_info, device, dtype, requires_grad=False, **kwargs):\n out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n\n for o in out:\n o.kwargs = {\"upper\": bool(np.random.choice([True, False])),\n \"eigenvectors\": True}\n # A gauge-invariant function\n o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))\n yield o\n\ndef sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.eig\n \"\"\"\n def out_fn(output):\n return output[0], abs(output[1])\n\n samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n for sample in samples:\n sample.output_process_fn_grad = out_fn\n yield sample\n\ndef sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.eigh/eigvalsh with UPLO=\"U\" or \"L\" keyword argument.\n \"\"\"\n def out_fn(output):\n if isinstance(output, tuple):\n # eigh function\n return output[0], abs(output[1])\n else:\n # eigvalsh function\n return output\n\n # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input\n samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n for sample in samples:\n sample.kwargs = {\"UPLO\": np.random.choice([\"L\", \"U\"])}\n sample.output_process_fn_grad = out_fn\n yield sample\n\n\ndef sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False, **kwargs):\n def out_fn(output):\n return output[1]\n\n samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n for sample in samples:\n sample.output_process_fn_grad = out_fn\n yield sample\n\n\ndef sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.pinv with hermitian=False keyword argument.\n \"\"\"\n for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs):\n real_dtype = o.input.real.dtype if dtype.is_complex else dtype\n # requires_grad path for rtol tensor is not implemented\n for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):\n o = clone_sample(o)\n o.kwargs = {\"rtol\": rtol}\n yield o\n\n\ndef sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates input for torch.linalg.pinv with hermitian=True keyword argument.\n \"\"\"\n for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs):\n o.kwargs = {\"hermitian\": True}\n yield o\n\ndef sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):\n \"\"\"\n This function generates always solvable 
input for torch.linalg.solve\n We sample a fullrank square matrix (i.e. invertible) A\n The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.\n The second input is generated as the product of 'batches', 'ns' and 'nrhs'.\n In total this function generates 18 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices.\n 'ns' gives 0x0 and 5x5 matrices.\n and 'nrhs' controls the number of vectors to solve for:\n () - using 1 as the number of vectors implicitly\n (1,) - same as () but explicit\n (3,) - solve for 3 vectors.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.\n torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow\n 1D tensors (vectors) as the right-hand-side.\n Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,\n 'vector_rhs_allowed' may be removed here as well.\n \"\"\"\n make_fullrank = make_fullrank_matrices_with_distinct_singular_values\n make_a = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)\n make_b = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n batches = [(), (0, ), (2, )]\n ns = [5, 0]\n if vector_rhs_allowed:\n nrhs = [(), (1,), (3,)]\n else:\n nrhs = [(1,), (3,)]\n\n for n, batch, rhs in product(ns, batches, nrhs):\n yield SampleInput(make_a(*batch, n, n), args=(make_b((batch + (n,) + rhs)),))\n\n\ndef sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n bs = (1, 2, 0)\n ns = (3, 0)\n ks = (1, 3, 0)\n\n for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)):\n with torch.no_grad():\n if b == 1:\n A = make_arg((n, n)) if left else make_arg((k, k))\n B = make_arg((n, k))\n else:\n A = make_arg((b, n, n)) if left else make_arg((b, k, k))\n B = make_arg((b, n, k))\n if uni:\n # Not really necessary, but writing it for consistency\n A.diagonal(0, -2, -1).fill_(1.)\n else:\n d = A.diagonal(0, -2, -1)\n d[d.abs() < 1e-6] = 1.\n if upper:\n A.triu_()\n else:\n A.tril_()\n kwargs = {\"upper\": upper, \"left\": left, \"unitriangular\": uni}\n if requires_grad:\n for grad_A, grad_B in product((True, False), repeat=2):\n # Either A or B needs to have a gradient\n if not grad_A and not grad_B:\n continue\n yield SampleInput(\n A.clone().requires_grad_(grad_A),\n args=(B.clone().requires_grad_(grad_B),),\n kwargs=kwargs)\n else:\n yield SampleInput(A, args=(B,), kwargs=kwargs)\n\ndef sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always solvable input for legacy solve functions\n (the ones that are not in torch.linalg module).\n The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation\n should have b.ndim >= 2, vectors are not allowed.\n Also the arguments order is swapped.\n \"\"\"\n out = sample_inputs_linalg_solve(\n op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False\n )\n\n # Reverses tensor order\n for sample in out:\n sample.input, sample.args = sample.args[0], (sample.input,)\n yield sample\n\n\ndef sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n out = 
sample_inputs_linalg_cholesky_inverse(\n op_info, device, dtype, requires_grad=False\n )\n\n for sample in out:\n psd_matrix = sample.input\n sample.input = make_tensor(psd_matrix.shape, device, dtype, requires_grad=requires_grad, low=None, high=None)\n sample.args = (psd_matrix.requires_grad_(requires_grad),)\n\n return out\n\n\ndef sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_fullrank_matrices_with_distinct_singular_values,\n dtype=dtype, device=device, requires_grad=requires_grad)\n\n # not needed once OpInfo tests support Iterables\n batch_shapes = ((), (3,), (3, 3))\n for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):\n shape = batch_shape + (S + size_delta, S)\n input = make_arg(*shape)\n yield SampleInput(input, args=(True, get_infos))\n\ndef sample_inputs_linalg_lu_factor(op_info, device, dtype, requires_grad=False, **kwargs):\n # When calling `lu_factor` we need to assure that the matrix is invertible\n make_fn = make_tensor if \"ex\" in op_info.name else make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # not needed once OpInfo tests support Iterables\n batch_shapes = ((), (3,), (3, 3))\n # pivot=False only supported in CUDA\n pivots = (True, False) if torch.device(device).type == \"cuda\" else (True,)\n deltas = (-2, -1, 0, +1, +2)\n for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas):\n shape = batch_shape + (S + delta, S)\n # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple!\n A = make_arg(shape) if \"ex\" in op_info.name else make_arg(*shape)\n yield SampleInput(A, kwargs={\"pivot\": pivot})\n\n\ndef sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n make_fn = make_fullrank_matrices_with_distinct_singular_values\n make_a = partial(make_fn, dtype=dtype, device=device)\n make_b = partial(make_tensor, dtype=dtype, device=device)\n\n batches = ((), (0, ), (2, ))\n ns = (5, 3, 0)\n nrhs = (0, 1, 6)\n\n for n, batch, rhs in product(ns, batches, nrhs):\n shape_a = batch + (n, n)\n a = make_a(*shape_a)\n lu, pivs = a.lu()\n lu = lu.contiguous()\n\n shape_b = batch + (n, rhs)\n b = make_b(shape_b)\n\n grads = (False,) if not requires_grad else (True, False)\n # we try all possible combinations of requires_grad for each input\n for lu_grad, b_grad in product(grads, grads):\n # when requires_grad == True, at least one input has to have requires_grad enabled\n if requires_grad and not lu_grad and not b_grad:\n continue\n\n lu_ = lu.clone()\n lu_.requires_grad_(lu_grad)\n b_ = b.clone()\n b_.requires_grad_(b_grad)\n yield SampleInput(b_, args=(lu_, pivs))\n\ndef sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):\n for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):\n lu_data, pivots = torch.linalg.lu_factor(lu_sample.input)\n lu_data.requires_grad_(requires_grad)\n yield SampleInput(lu_data, args=(pivots,))\n\n\ndef sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))\n\n for arg in args:\n yield SampleInput(make_arg((S, S, S)), args=arg)\n\n\ndef sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n args = ((1, (0, 1),),\n (1, (1, 2),),\n (1, (1, -1),),\n ())\n\n for arg in args:\n yield SampleInput(make_arg((S, S, S)), args=arg)\n\n\ndef sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):\n tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,\n requires_grad=requires_grad)\n tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype,\n requires_grad=requires_grad)\n\n return [\n SampleInput(tensor_nd()),\n SampleInput(tensor_nd(), kwargs=dict(dim=1)),\n SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)),\n SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)),\n SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)),\n\n SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)),\n SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)),\n ]\n\n\ndef _generate_correlation_inputs(device, dtype, requires_grad, **kwargs):\n shapes = [(2,), (1, 2), (3, 2), (2, 3)]\n for shape in shapes:\n yield make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n\ndef sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):\n return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]\n\n\ndef sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n for t in _generate_correlation_inputs(device, dtype, requires_grad):\n inputs.append(SampleInput(t))\n num_observations = t.numel() if t.ndimension() < 2 else t.size(1)\n fweights = make_tensor((num_observations,), device, torch.int, low=1, high=10)\n aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)\n for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):\n inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),\n kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))\n return inputs\n\n\ndef sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n make_fullrank = make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)\n\n is_linalg_svd = (op_info.name == \"linalg.svd\")\n batches = [(), (0, ), (3, )]\n ns = [0, 3, 5]\n\n def uniformize(usv):\n S = usv[1]\n k = S.shape[-1]\n U = usv[0][..., :k]\n Vh = usv[2] if is_linalg_svd else usv[2].mH\n Vh = Vh[..., :k, :]\n return U, S, Vh\n\n def fn_U(usv):\n U, _, _ = uniformize(usv)\n return U.abs()\n\n\n def fn_S(usv):\n return uniformize(usv)[1]\n\n def fn_Vh(usv):\n # We also return S to test\n _, S, Vh = uniformize(usv)\n return S, Vh.abs()\n\n def fn_UVh(usv):\n U, S, Vh = uniformize(usv)\n return U @ Vh, S\n\n fns = (fn_U, fn_S, fn_Vh, fn_UVh)\n\n fullmat = 'full_matrices' if is_linalg_svd else 'some'\n\n for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns):\n shape = batch + (n, k)\n yield SampleInput(make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn)\n\n\ndef sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = [((1, 2, 3, 4), (0, 2, 3, 1)),\n ((1, 2, 3, 4), (0, -2, -1, 1)),\n ((), ()),\n ((1, 2, 3, 4), (2, 1, 3, 0))]\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=(args,))\n\n\n# 
Based on erstwhile method_tests tests & some tensor_op_tests for pow\ndef sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n samples = []\n\n if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:\n test_cases = (\n ((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),\n ((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, requires_grad, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, requires_grad, False),\n )\n tests_require_resizing = (\n ((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),\n ((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),\n ((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),\n )\n cases = test_cases + tests_require_resizing\n\n samples = []\n for (shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,\n high_e, additive_e, e_grad, broadcasts_input) in cases:\n si = SampleInput((make_arg(shape_b, low=low_b, high=high_b) + additive_b).requires_grad_(b_grad),\n args=((make_arg(shape_e, low=low_e, high=high_e) + additive_e).requires_grad_(e_grad),),\n broadcasts_input=broadcasts_input)\n samples.append(si)\n\n tensor_scalar_inputs = (\n ((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),\n ((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))\n )\n more_samples = list(SampleInput(\n (make_arg(shape, high=high, low=low) + additive).requires_grad_(b_grad),\n args=exp)\n for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)\n\n samples = [*samples, *more_samples]\n elif dtype in [torch.complex64, torch.complex128]:\n args_tuple = (\n ((2, 2), 0, 5, requires_grad, (3.14,)),\n ((), 0, 1, requires_grad, (3.14,)),\n ((), 0, 1, requires_grad, (3.14j,))\n )\n samples = list(SampleInput(\n (make_arg(shape, high=high, low=low) + 1e-3 * (1 + 1j)).requires_grad_(b_grad),\n args=arg)\n for shape, low, high, b_grad, arg in args_tuple)\n else: # integral dtype\n exp_tuple = (1, 2, 3)\n samples = list(SampleInput(\n make_arg((2, 2), requires_grad=requires_grad),\n args=(arg,))\n for arg in exp_tuple)\n samples.append(SampleInput(\n make_arg((2, 2), requires_grad=requires_grad),\n args=(make_arg((2, 2), requires_grad=requires_grad),)))\n return tuple(samples)\n\ndef sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 2, 0]\n\n for batch, m, n in product(batches, ns, ns):\n yield SampleInput(make_arg(batch + (m, n)))\n\n\ndef sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):\n N = 10\n tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,\n requires_grad=requires_grad)) for _ in range(1, N)]\n return tensors\n\ndef sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):\n eigvecs = make_tensor((S, S), device=device, dtype=dtype,\n low=None, high=None)\n eigvals = make_tensor((S,), device=device, dtype=dtype,\n low=None, high=None)\n # we produce only diagonalizable inputs which do not have\n # complex eigenvalues for real inputs, as there is no\n # backward implementation for real inputs with complex\n # eigenvalues yet.\n input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()\n 
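# (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse() == V @ diag(w) @ V^{-1},\n # so `input` has eigenvalues `eigvals` and eigenvectors `eigvecs` by construction.\n 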
input.requires_grad_(requires_grad)\n\n def process_output(eigpair):\n eigvals, eigvecs = eigpair\n if dtype.is_complex:\n # eig produces eigenvectors which are normalized to 1 norm.\n # Note that if v is an eigenvector, so is v * e^{i \\phi},\n # and |v| = |v * e^{i \\phi}| = 1.\n # This, however, makes the eigenvector backward computation process\n # rather unstable unless the objective function is gauge-invariant,\n # that is if f(z) == f(|z|), for example.\n # Hence for complex inputs we ignore the phases and return only\n # the absolute values.\n return eigvals, eigvecs.abs()\n else:\n return eigvals, eigvecs\n\n return [\n SampleInput(\n input,\n kwargs=dict(eigenvectors=True),\n output_process_fn_grad=process_output\n ),\n ]\n\n\ndef sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):\n def c(t):\n return t.clone().requires_grad_(requires_grad)\n\n x = make_tensor((3,), device, dtype, requires_grad=requires_grad)\n y = make_tensor((4,), device, dtype, requires_grad=requires_grad)\n A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad)\n B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)\n C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)\n D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad)\n E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)\n H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad)\n I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)\n\n inputs = []\n\n # Vector operations\n inputs.append(SampleInput([c(x)], args=('i->',))) # sum\n inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer\n\n # Matrix operations\n inputs.append(SampleInput([c(A)], args=(\"ij->i\",))) # col sum\n inputs.append(SampleInput([c(A), c(B)], args=(\"ij,kj->ik\",))) # matmul\n inputs.append(SampleInput([c(A), c(E)], args=(\"ij,Ab->ijAb\",))) # matrix outer product\n\n # Tensor operations\n inputs.append(SampleInput([c(C), c(D)], args=(\"aij,ajk->aik\",))) # batch matmul\n inputs.append(SampleInput([c(D), c(E)], args=(\"aij,jk->aik\",))) # tensor matrix contraction\n inputs.append(SampleInput([c(C), c(B)], args=(\"ijk,ik->j\",))) # non contiguous\n\n # Test diagonals\n inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace\n\n # Test ellipsis\n inputs.append(SampleInput([c(H)], args=(\"i...->...\",)))\n inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',)))\n\n return inputs\n\n\ndef sample_inputs_linalg_qr_geqrf(op_info, device, dtype, requires_grad=False, **kwargs):\n # QR is just well defined when the matrix is full rank\n make_fullrank = make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad)\n\n batches = [(), (0,), (2, ), (1, 1)]\n ns = [5, 2, 0]\n\n for batch, (m, n) in product(batches, product(ns, ns)):\n shape = batch + (m, n)\n yield SampleInput(make_arg(*shape))\n\ndef sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n sizes = ((S, M, S), (S, 0, M))\n all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())\n\n for size, dims in product(sizes, all_dims):\n yield SampleInput(make_arg(size), kwargs={\"dims\": dims})\n\ndef sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n if autodiffed:\n samples = (\n ((S, S, S), 1.5, False),\n ((), 1.5, False),\n )\n else:\n cases = (\n ((S, S, S), (), False),\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (S,), False),\n )\n\n # Sample inputs with scalars as torch tensors\n # FIXME It does not work for make_arg((1,), exclude_zero=True)\n cases_with_tensor_scalar = (\n ((), make_arg((), exclude_zero=True), False),\n )\n\n # Sample inputs with broadcasting\n cases_with_broadcasting = (\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S, S), True),\n ((), (S, S, S), True),\n )\n\n samples = cases + cases_with_tensor_scalar + cases_with_broadcasting # type: ignore[assignment]\n\n for shape, arg_other, broadcasts_input in samples:\n if isinstance(arg_other, tuple):\n arg = make_arg(arg_other, exclude_zero=True)\n else:\n # arg_other is a scalar or torch.tensor\n arg = arg_other\n yield(SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input))\n\n# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!\ndef sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):\n x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n\n def detach(tensor):\n return tensor.clone().detach_().requires_grad_(requires_grad)\n\n return [\n SampleInput(detach(x), args=(lb, ub)),\n SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),\n SampleInput(detach(x), args=(detach(lb[:, :1]),)),\n ]\n\ndef sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n if dtype is torch.uint8:\n min_max_vals = ((2, 5), (3, 7))\n else:\n min_max_vals = ((0, 1), (-1, 1))\n\n output = [SampleInput(\n tensor.clone().requires_grad_(requires_grad),\n args=vals) for tensor, vals in product(tensors, min_max_vals)]\n output += [\n SampleInput(tensors[0].clone().requires_grad_(requires_grad),\n args=(0.5, None)),\n SampleInput(tensors[0].clone().requires_grad_(requires_grad),\n args=(None, 0.5))]\n empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)\n output.append(SampleInput(empty_tensor, args=(0.0, 1.0)))\n return output\n\ndef sample_kwargs_clamp_scalar(device, dtype, input):\n if dtype is torch.uint8:\n min_val, max_val = (random.randint(1, 3), random.randint(4, 8))\n elif dtype.is_floating_point:\n min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]\n else:\n min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))\n return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}\n\ndef sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):\n sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),\n 
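# the dimension holding the cross product components must have size 3\n 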
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))\n sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),\n args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),\n kwargs={'dim': 1})\n sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),\n args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),),\n kwargs={'dim': -1})\n\n return (sample0, sample1, sample2)\n\ndef sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_zeros(dim_select):\n assert len(dim_select) == 2\n result = make_arg(3 * (S,))\n result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n return result\n\n for dim in range(3):\n yield SampleInput(make_arg((S, S, S)), args=(dim,))\n # Scalar tensors and empty tensor\n for size in [(), (1,), (0,)]:\n yield SampleInput(make_arg(size), args=(0,))\n\n yield SampleInput(prod_zeros([0, 1]), args=(1,))\n yield SampleInput(prod_zeros([0, 2]), args=(1,))\n yield SampleInput(prod_zeros([1, 2]), args=(1,))\n\n # test dtype kwarg\n yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})\n\ndef sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):\n return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]\n\ndef sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((), device, dtype, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor(*shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n cases = [\n # no broadcast\n ((S, S, S), (S, S, S), False),\n # broadcast rhs\n ((S, S, S), (S, S), False),\n\n # scalar\n ((S, S), 3.14, False),\n # scalar positive zero\n ((S, S), 0.0, False),\n # scalar negative zero\n ((S, S), -0.0, False),\n ]\n\n # broadcast lhs\n cases.append(((S, S), (S, S, S), True))\n # broadcast all\n cases.append(((S, 1, S), (M, S), True))\n\n for input_shape, arg_val, broadcasts_input in cases:\n if isinstance(arg_val, tuple):\n arg = _make_tensor(*arg_val)\n else:\n # arg_val is scalar\n arg = arg_val\n\n yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)\n\ndef sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_single_zero():\n result = make_arg(2 * (S,))\n result[0, 1] = 0\n return result\n\n for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n # only Tensor, ignore other inputs\n yield SampleInput(sample.input.clone().requires_grad_(requires_grad))\n yield sample\n\n # Generates samples with keepdim = True\n for 
sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n sample.kwargs['keepdim'] = True\n yield sample\n\n yield SampleInput(prod_single_zero())\n yield SampleInput(make_arg((3, 3, 3)), args=(1,))\n yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})\n\n # test zero scalar tensor\n zero = make_arg(())\n zero.zero_()\n yield SampleInput(zero.clone().requires_grad_(requires_grad))\n yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,))\n yield SampleInput(zero.clone().requires_grad_(requires_grad),\n args=(0,),\n kwargs={'keepdim': True})\n\ndef error_inputs_neg(op_info, device, **kwargs):\n si = SampleInput(torch.tensor((False, True), device=device))\n msg = (\"Negation, the `\\\\-` operator, on a bool tensor is not supported.\"\n \" If you are trying to invert a mask, use the `\\\\~` or\"\n \" `logical_not\\\\(\\\\)` operator instead.\")\n return (ErrorInput(si, error_type=RuntimeError, error_regex=msg),)\n\ndef sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (\n ((S, S), (S, S), False),\n ((S, S), (S,), False),\n ((S, ), (S, S), True)\n )\n\n for shape, other_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(shape), args=(make_arg(other_shape),), broadcasts_input=broadcasts_input)\n\n\ndef sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):\n vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))\n\n tensors = (\n make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n args = ((), (2,), (-2,), (1,), (2,))\n\n samples = []\n for tensor, arg in product(tensors, args):\n samples.append(SampleInput(tensor.clone().requires_grad_(requires_grad), args=arg))\n\n return samples + [vec_sample]\n\ndef sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # Shapes for 2D Tensors\n shapes_2d = ((M, M), (3, 5), (5, 3))\n\n # Shapes for 3D Tensors\n shapes_3d = ((M, M, M),)\n\n kwargs_2d = (dict(), dict(offset=2), dict(offset=-2), dict(offset=1))\n kwargs_3d = (dict(offset=1, dim1=1, dim2=2),\n dict(offset=2, dim1=0, dim2=1),\n dict(offset=-2, dim1=0, dim2=1))\n\n for shape, kwarg in chain(product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d)):\n yield SampleInput(make_arg(shape), kwargs=kwarg)\n\n\ndef sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # Shapes for 2D Tensors\n shapes_2d = ((M, M), (3, 5), (5, 3))\n\n # Shapes for 3D Tensors\n shapes_3d = ((M, M, M),)\n\n args_2d = ((), (2,), (-2,), (1,))\n args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))\n\n for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):\n input_ = make_arg(input_shape)\n # We can programmatically figure out the right shape for src:\n # It should be the same size as input.diagonal(other_args...)\n if not isinstance(arg, tuple):\n arg_tuple = (arg,)\n else:\n arg_tuple = arg\n src_shape = input_.diagonal(*arg_tuple).size()\n src = make_arg(src_shape)\n yield 
SampleInput(input_, args=(src, *arg_tuple))\n\n\ndef sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),\n SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)\n\ndef sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs):\n batch_size, num_classes = shape = (2, 3)\n reductions = (\"mean\", \"sum\", \"none\")\n\n input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [\n (shape, dict()),\n ((*shape, 1), dict()),\n ((*shape, 1, 2), dict()),\n ((*shape, 1, 2, 3), dict()),\n *[(shape, dict(reduction=reduction)) for reduction in reductions],\n *[\n (\n shape,\n dict(\n weight=make_tensor((num_classes,), device=device, dtype=dtype),\n reduction=reduction,\n ),\n )\n for reduction in reductions\n ],\n (shape, dict(ignore_index=1)),\n ]\n\n sample_inputs = []\n for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)):\n input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n if probabilities_target:\n # ignore_index is not supported for probabilities target\n if \"ignore_index\" in kwargs:\n continue\n\n target = make_tensor(\n input_shape,\n low=0,\n high=1,\n device=device,\n dtype=dtype,\n requires_grad=requires_grad,\n )\n else:\n target = make_tensor(\n (batch_size, *input_shape[2:]),\n low=0,\n high=num_classes,\n device=device,\n dtype=torch.long,\n )\n\n if \"ignore_index\" in kwargs and torch.all(target == kwargs[\"ignore_index\"]):\n # make sure at least one item in target is not ignored\n target[0] = random.sample(set(range(num_classes)) - {kwargs[\"ignore_index\"]}, 1)[0]\n\n sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))\n\n return sample_inputs\n\n# Used for log_softmax, softmax, softmin\ndef sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = [\n ((S, ), (0, )),\n ((S, S), (0, )),\n ((S, S), (1, )),\n ((S, S), (-1, )),\n ((S, M, S), (2, )),\n ]\n\n # PyTorch on XLA throws an error when passed with dim argument for 0d tensor.\n # See https://github.com/pytorch/xla/issues/3061 for more details.\n if torch.device(device).type != 'xla':\n cases.append(((), (0, )))\n\n return [\n SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)\n for shape, dim in cases\n ]\n\n\ndef sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):\n \"\"\"Sample inputs for masked softmax, log_softmax, and softmin.\n\n Masked normalization operator is a reduction operator with\n trailing mask optional argument. 
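Input elements at positions where the\n mask is False are excluded from the normalization. 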
A mask is a bool tensor with the\n same shape as input or a shape that is broadcastable to input\n shape.\n \"\"\"\n inputs: List[SampleInput] = []\n for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs):\n for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):\n sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)\n inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\n\ndef sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):\n \"\"\"Sample inputs for masked normalize.\n \"\"\"\n inputs: List[SampleInput] = []\n for ord in [2.0, 1, float('inf'), float('-inf'), 0]:\n for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):\n sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()\n inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad),\n args=sample_input_args, kwargs=sample_input_kwargs))\n return inputs\n\ndef sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):\n low, high = op_info.domain\n\n # Note: Operator is very sensitive at points near the\n # start and end of domain and leads to NaN for float16\n # if domain_eps is 1e-5.\n domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2\n\n low = low + domain_eps\n high = high - domain_eps\n\n samples = (\n SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n )\n\n return samples\n\ndef sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n # isin has two paths based on the size of elements and test_elements.\n # if elements.numel() < 10 * pow(test_elements.numel(), 0.145):\n yield SampleInput(make_arg((L,)), args=(make_arg((S,)),))\n # else:\n yield SampleInput(make_arg((S,)), args=(make_arg((L,)),))\n\ndef sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),\n broadcasts_input=True)\n\n\ndef sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))\n yield 
SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))\n\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg(())),\n broadcasts_input=True)\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, 10),\n broadcasts_input=True)\n\n\ndef sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn(M, M, device=device) > 0,)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M,), device=device) > 0,)),\n\n SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n )\n\n return samples\n\ndef sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),\n )\n\n return samples\n\ndef sample_inputs_matmul(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (((L,), (L,)),\n ((S, M), (M,)),\n ((M,), (M, S)),\n ((S, M), (M, S)),\n ((S, 0), (0, M)),\n ((S, S, M), (M,)),\n ((S, S, M), (M, S)),\n ((S, S, 0), (0, S)),\n ((M,), (S, M, S)),\n ((S, M), (S, M, S)),\n ((0, 0), (S, 0, 0)),\n ((S, S, M, M), (S, S, M, S)),\n ((S, S, M, M), (M,)),\n ((M,), (S, S, M, S)))\n sample_inputs = []\n for lhs_shape, rhs_shape in test_cases:\n lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n if op_info.name == 'matmul':\n sample_inputs.append(SampleInput(lhs, args=(rhs,)))\n elif op_info.name == '__rmatmul__':\n sample_inputs.append(SampleInput(rhs, args=(lhs,)))\n else:\n raise RuntimeError(\"`op_info.name` must be 'matmul' or '__rmatmul__'\")\n return tuple(sample_inputs)\n\n\ndef sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,\n requires_grad: bool,\n *, variant: str) -> List[SampleInput]:\n if variant == 'variadic':\n def make_inputs(\n tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,\n List[torch.Tensor]],\n Tuple[torch.Tensor, ...]]:\n return tensors[0], tuple(tensors[1:])\n elif variant == 'list':\n def make_inputs(\n tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,\n List[torch.Tensor]],\n Tuple[torch.Tensor, ...]]:\n return tensors, ()\n else:\n raise ValueError(\n 'Unsupported variant, must be one of {\"variadic\", \"list\"}. 
'\n f'Got \"{variant}\".')\n\n SCALAR = torch.Size([])\n VECTOR = torch.Size([3])\n test_cases: List[List[torch.Size]] = [\n [SCALAR],\n [VECTOR],\n [VECTOR, SCALAR],\n [VECTOR, SCALAR, VECTOR],\n [VECTOR, SCALAR, VECTOR, SCALAR],\n ]\n\n sample_inputs = []\n for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}):\n input, args = make_inputs(\n [make_tensor(shape, device, dtype, requires_grad=requires_grad)\n for shape in shapes])\n sample_inputs.append(SampleInput(input=input, args=args,\n kwargs=dict(indexing=indexing)))\n return sample_inputs\n\n\ndef sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\ndef sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape):\n return make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\n\ndef sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n for shape, n in product(tensor_shapes, ns):\n yield SampleInput(make_arg(shape), args=(n,))\n\n\ndef sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n # Since the accepted lower bound for input\n # to mvlgamma depends on `p` argument,\n # the following function computes the lower bound\n # which we pass to `make_tensor`.\n def compute_min_val(p):\n return (p - 1.) / 2\n\n for shape, n in product(tensor_shapes, ns):\n min_val = compute_min_val(n)\n if not dtype.is_floating_point:\n # Round-up minimum value for integral dtypes\n min_val += 1\n yield SampleInput(make_arg(shape, low=min_val), args=(n,))\n\n\n# Since `mvlgamma` has multiple entries,\n# there are multiple common skips for the additional\n# entries. 
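One entry exists per value of the argument `p`, and the\n# redundant entries skip most generic test suites. 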
Following function is a helper to that end.\ndef skips_mvlgamma(skip_redundant=False):\n skips = (\n # outside domain values are hard error for mvlgamma op.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_float_domains'),\n )\n if skip_redundant:\n # Redundant tests\n skips = skips + ( # type: ignore[assignment]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n )\n return skips\n\n\n# To test reference numerics against multiple values of argument `p`,\n# we make multiple OpInfo entries with each entry corresponding to different value of p.\n# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.\n# Class `MvlGammaInfo` already contains the basic information related to the operator;\n# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which\n# differ between the entries.\nclass MvlGammaInfo(UnaryUfuncInfo):\n def __init__(self, variant_test_name, domain, skips, sample_kwargs):\n super(MvlGammaInfo, self).__init__(\n 'mvlgamma',\n ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,\n aliases=('special.multigammaln',),\n variant_test_name=variant_test_name,\n domain=domain,\n decorators=(precisionOverride({torch.float16: 5e-2}),),\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half),\n sample_inputs_func=sample_inputs_mvlgamma,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=skips,\n sample_kwargs=sample_kwargs)\n\n\ndef sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):\n low, _ = op_info.domain\n\n if requires_grad:\n low = 0 + op_info._domain_eps\n\n return (SampleInput(make_tensor((L,), device, dtype,\n low=low,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=low,\n requires_grad=requires_grad)))\n\n\ndef sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n samples = (SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),\n args=(make_arg((S,), low=2, requires_grad=False),)),\n SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),\n args=(3.,)),\n )\n\n return samples\n\n\n# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`\n# supports `exclude` argument.\n# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617\ndef sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):\n\n samples = (SampleInput(make_tensor((S,), device, dtype,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n requires_grad=requires_grad)))\n\n if requires_grad and op_info.op == torch.special.i0e:\n # NOTE: `i0e`'s first-order gradient is not continuous\n # at `0`, hence we don't test `i0e` with any input being `0`.\n # TODO: Remove this when `make_tensor` supports excluding `0`.\n for sample in samples:\n t = sample.input\n t[t == 0] = torch.finfo(dtype).eps # type: ignore[index]\n elif requires_grad and op_info.op != torch.special.i0e:\n # Special Case for gradient\n # Sample with `0` in the input\n t = make_tensor((S,), device, dtype,\n requires_grad=requires_grad)\n t[0] = 0\n\n samples += (SampleInput(t),) # type: ignore[assignment]\n\n return samples\n\n\ndef sample_inputs_rsub(op_info, device, dtype, requires_grad, other_scalar, **kwargs):\n make_arg = partial(make_tensor, 
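# dtype is intentionally not bound here; each sample picks it below\n 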
device=device)\n\n shapes = ((S, S), (S,), ()) if not other_scalar else ((),)\n # We are doing y - a*x, where y may be a scalar or a tensor\n # If y is a scalar, y may be of any dtype that can be cast to the dtype of x\n # a may always be of any dtype that can be cast to the dtype of x\n if dtype.is_complex:\n dtypes_a = (torch.int32, torch.float32, dtype)\n elif dtype.is_floating_point:\n dtypes_a = (torch.int32, dtype)\n else:\n dtypes_a = (dtype, )\n dtypes_y = dtypes_a if other_scalar else (dtype,)\n\n for shape_x, shape_y, dtype_y, dtype_a in product(shapes, shapes, dtypes_y, dtypes_a):\n requires_grad_y = (requires_grad and\n not other_scalar and\n (dtype_y.is_floating_point or dtype_y.is_complex))\n\n x = make_arg(shape_x, dtype=dtype, requires_grad=requires_grad)\n y = make_arg(shape_y, dtype=dtype_y, requires_grad=requires_grad_y)\n if other_scalar:\n y = y.item()\n a = make_arg((), dtype=dtype_a).item()\n yield SampleInput(x, args=(y,), kwargs={\"alpha\": a})\n\ndef sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),\n SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),\n SampleInput(_make_tensor_helper(()), args=(0,)),\n ]\n\n if supports_dtype_kwargs:\n # NOTE: if `dtype` is not same as input, then inplace variants fail with\n # `provided dtype must match the dtype of self tensor in cumsum`\n samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))\n\n return samples\n\n\ndef sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((), (0, 1, 1)),\n ((S, S, S, S), (0, 3, 1)),\n ((S, S, S, S), (1, 3, 1)),\n ((S, S, S, S), (2, 3, 1)),\n ((S, S, S, S), (3, 3, 1)),\n ((S, S, S, S), (0, 3, 2)),\n ((S, S, S, S), (1, 3, 2)),\n ((S, S, S, S), (2, 3, 2)),\n ((S, S, S, S), (3, 3, 2)),\n ((S, S, S, S), (0, 4, 1)),\n ((S, S, S, S), (1, 4, 1)),\n ((S, S, S, S), (2, 4, 1)),\n ((S, S, S, S), (3, 4, 1)),\n ((M,), (0, 3, 1)),\n ((M,), (0, 3, 2)),\n ((M,), (0, 3, 3)),\n ((1000,), (0, 3, 11)),\n ((1000,), (0, 2, 27)),\n ((10, 10), (0, 1, 2)),\n ((10, 10), (1, 2, 3)),\n ((10, 10), (1, 2, 2)),\n ((S, S, S), (2, 3, 2)),\n )\n\n sample_inputs = []\n for shape, arguments in test_cases:\n sample_inputs += [SampleInput(make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=arguments)]\n return sample_inputs\n\n\ndef sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n cases = (\n ((S, S, S), (S, S, S), False),\n ((), (), False),\n ((S, S, S), (S,), False),\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S), True),\n )\n\n for x_shape, y_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),\n broadcasts_input=broadcasts_input)\n\n\ndef sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n if list_args:\n cases = (\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),\n ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),\n ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)\n )\n else:\n cases = ( # type: 
ignore[assignment]\n ((S, S, S), (2,)),\n ((S, S, S), (S, 1)),\n )\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),\n ((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),\n ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),\n )\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs):\n def apply_grad(t):\n if dtype in floating_types_and(torch.float16, torch.bfloat16):\n t.requires_grad_(requires_grad)\n\n def large_1d_unique(dtype, device):\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n samples = []\n # Test case for large tensor.\n largesample = SampleInput(large_1d_unique(dtype, device))\n\n sample = SampleInput(make_tensor((S, M, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n return [largesample, sample]\n\ndef sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n samples = (\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),\n # broadcast rhs with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),\n # broadcast rhs and weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),\n # broadcast lhs\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # scalar broadcast_lhs\n SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # tensor broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),\n broadcasts_input=True),\n # no broadcast with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))),\n # broadcast lhs with weight tensor\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True),\n # broadcast lhs and weight tensor\n SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True),\n # broadcast lhs and weight tensor variant\n SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True),\n )\n\n if dtype.is_complex:\n samples = samples + ( # type: ignore[assignment]\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, 
S)), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),\n )\n\n return samples\n\ndef sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):\n cases = (\n ((2, 2, 2), (2, 2, 2), (2)),\n ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),\n )\n samples = []\n for first_shape, second_shape, dims in cases:\n samples.append(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n kwargs=dict(dims=dims,)))\n return tuple(samples)\n\ndef sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S), (M, L)),\n )\n\n sample_inputs = []\n for input_shape, other_shape in test_cases:\n input = make_tensor(input_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample = SampleInput(input, args=(other,))\n sample_inputs.append(sample)\n return tuple(sample_inputs)\n\ndef sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n )\n ),\n SampleInput(\n make_tensor((), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n (_tensor(()), (0, zero.clone().detach(), 2.5)),\n )\n\n samples = []\n for tensor, args in test_cases:\n samples.append(SampleInput(tensor, args=args))\n\n if not requires_grad:\n samples.append(SampleInput(\n tensor.clone().detach(),\n args=args, kwargs={'reduce': 'add'}\n ))\n\n if dtype.is_floating_point:\n samples.append(SampleInput(\n tensor.clone().detach(),\n args=args, kwargs={'reduce': 'multiply'}\n ))\n\n return samples\n\ndef sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n 
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\ndef sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _index(shape, max_index):\n return torch.from_numpy(np.random.choice(max_index, size=shape)).to(dtype=torch.int64, device=device)\n\n reduces = [\"sum\", \"prod\", \"mean\", \"amax\", \"amin\"]\n shapes_and_dims = [((M,), 1), ((M, S), 2), ((M, M, S), 3), ((1, M, M, S), 4)]\n\n sample_inputs = []\n\n for ((shape, dim), reduce) in itertools.product(shapes_and_dims, reduces):\n for d in range(dim):\n # Generate a random maximum integer that can appear in index array\n max_index = np.random.randint(1, shape[d] * 2)\n index = _index(shape, max_index)\n sample_inputs.append(\n SampleInput(\n _tensor(shape),\n args=(d, index, reduce),\n )\n )\n\n return sample_inputs\n\ndef sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):\n samples = (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),)\n\n return samples\n\n\ndef sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n cases = (((M, M), ()),\n ((M, M), (2,),),\n ((S, M, M), ()),\n ((S, M, M), (2,)),\n ((3, 3, S, S), ()),)\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n yield SampleInput(make_arg((S, M, S)))\n yield SampleInput(make_arg(()))\n\n\ndef sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(make_arg((S, S)))\n\n\ndef sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n # list of tuples (shape, shape) defining the shapes of the input and output tensors\n sample_shapes = [\n ((), ()),\n ((S), (1)),\n ((S, S), (1, 1)),\n ((S, S), (1, S)),\n ((S, S), (S, S)),\n ((S, S, S), (S, 1, S)),\n ]\n\n samples = []\n\n for input_shape, output_shape in sample_shapes:\n input_t = make_arg(input_shape)\n samples.append(SampleInput(input_t, args=(output_shape,)))\n\n return samples\n\ndef sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1, 1)),\n )\n\n for shape, args_or_shape in cases:\n # Update `args` based on operator\n if op_info.name == 'resize_':\n # resize_ takes shape/tuple of ints,\n args = (args_or_shape, )\n elif op_info.name == 'resize_as_':\n # resize_as_ takes another tensor\n args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]\n else:\n raise ValueError(\"sample_inputs_resize_ops is being used with incorrect operator\")\n\n 
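# requires_grad is set only on the input; the resize argument never needs it\n 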
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\ndef sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S * S, S)),\n ((S * S, S), (S, S, S)),\n ((S * S, S), (S, -1, S)),\n ((S * S * 2, S), (S, -1)),\n ((S,), (S,)),\n ((), ()),\n ((), (1,)))\n\n for case in cases:\n shape, args = case\n inp = make_arg(shape, requires_grad=requires_grad)\n yield(SampleInput(inp, args=(args, )))\n\n if op_info.name != \"view\" and len(shape) >= 2:\n yield(SampleInput(\n inp.clone().transpose(0, 1).requires_grad_(requires_grad),\n args=(args, )))\n\ndef sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n for case in cases:\n shape, shape_other = case\n inp = make_arg(shape, requires_grad=requires_grad)\n yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))\n\n if op_info.name != \"view_as\" and len(shape) >= 2:\n yield(SampleInput(\n inp.clone().transpose(0, 1).requires_grad_(requires_grad),\n args=(make_arg(shape_other, requires_grad=False),)))\n\ndef sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs):\n input_list = []\n shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),)\n make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n samples = []\n for shape in shapes:\n input_list.append(make_tensor_partial(shape))\n samples.append(SampleInput(make_tensor_partial(shape)))\n samples.append(SampleInput(input_list, ))\n return samples\n\ndef sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs):\n input_list = []\n cases: Tuple[tuple, tuple] = ( # type: ignore[assignment]\n ((S, 2, 1), (S, 3, 1)),\n ((S), (S, 5)), ((), (1, S))\n )\n make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n for shape1, shape2 in cases:\n input_list.append(SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]))\n\n return input_list\n\ndef sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n shapes = ((S, S, S), (S, S), (S, ), (),)\n make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n for shape in shapes:\n samples.append(SampleInput(make_tensor_partial(shape)))\n if len(shape) > 1:\n samples.append(SampleInput(make_tensor_partial(shape), kwargs=dict(start_dim=1, end_dim=-1)))\n return samples\n\ndef sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1, 2)),\n ((S, S, S), (-1, 2)),\n ((S, S, S), (-1, -1)),\n ((S, S, S), (1, -1)),\n ((S,), (0, 2))\n )\n\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n\ndef sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S, S), (1, 2)),\n ((S, S, S), (S, S), (-1, 2)),\n ((S, S, S), (S, S), (-1, -1)),\n ((S, S, S), (S, S), (1, -1)),\n ((S,), (), (0, 2))\n )\n\n for input_shape, src_shape, args in cases:\n input_ = make_arg(input_shape)\n src = make_arg(src_shape)\n yield SampleInput(input_, 
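# src's shape must equal input_.select(*args).shape\n 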
args=(src, *args))\n\n\ndef sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),\n ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),\n ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),\n ((L, L, L), (L, L, L,), (1, 0, L, 1)),\n ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),\n ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),\n ((L, L, L), (L, L, L,), (2, 0, L, 1)),\n ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),\n ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),\n )\n\n for input_shape, src_shape, args in cases:\n input_ = make_arg(input_shape)\n src = make_arg(src_shape)\n yield SampleInput(input_, args=(src, *args))\n\n\ndef sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n scalar: Union[int, float, complex] = 3\n\n if dtype.is_floating_point:\n scalar = 3.14\n elif dtype.is_complex:\n scalar = 3.14j\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),\n SampleInput(_make_tensor_helper(()), args=(scalar,)),\n ]\n\n return samples\n\n\ndef sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1, S), (-1, S, -1)),\n ((S, 1, S), (-1, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\ndef sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n shapes = ((),\n (2, 3))\n memory_format_options = [None, torch.contiguous_format]\n\n for shape, memory_format in itertools.product(shapes, memory_format_options):\n yield SampleInput(make_arg(shape),\n kwargs={'memory_format': memory_format} if memory_format else {})\n\ndef sample_inputs_conversion_channels_last(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n return [\n # Channels last case: input must be 4d\n SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})\n\n ]\n\ndef sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n for shape, shape_other in cases:\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(shape_other, requires_grad=False), )))\n\n\ndef sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n def make_bool_mask(shape):\n # Make sure at least one element is nonzero,\n # except for empty tensor\n mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\n\n if mask_t.numel() == 0:\n return mask_t\n elif mask_t.numel() == 1:\n mask_t.fill_(True)\n return mask_t\n\n if mask_t.sum() == 0:\n def random_index(shape):\n return tuple(map(lambda max_idx: random.randint(0, 
max_idx), shape))\n\n mask_t[random_index(mask_t.shape)] = True\n return mask_t\n\n return mask_t\n\n cases = (((M, M), (M, M), (M, M), False),\n ((M, 1, M), (M, M), (M, M, 1), True),\n ((), (), (), False),\n ((M, 1, M), (), (M, M, 1), True),\n ((), (M, M), (), True),)\n\n for shape, mask_shape, other_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(shape),\n args=(make_bool_mask(mask_shape), make_arg(other_shape)),\n broadcasts_input=broadcasts_input)\n\ndef sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))\n\n inputs = []\n for shape in sizes:\n # construct input without any non-zero elements\n zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)\n inputs.append(zeros)\n\n # construct input with mixed zero and non-zero elements\n mixed = make_arg(shape).requires_grad_(False)\n mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\n mixed[mask_t] = 0\n inputs.append(mixed)\n\n for input_t, as_tuple in product(inputs, [False, True]):\n yield(SampleInput(input_t.clone().requires_grad_(requires_grad),\n kwargs=dict(as_tuple=as_tuple)))\n\ndef sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (2,)),\n ((S, S, S), (S, 1)),\n ((S, S, S), (S, -1)))\n\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\ndef sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n test_cases = [\n (_tensor((S, S, S)), (2,)),\n (_tensor((S, S, S)), (2, 1,)),\n (_tensor((S, S, S)), (2, -1,)),\n (_tensor((S, S, S)), (2, 1, True,)),\n (_tensor((S, S, S)), (2, -1, True,)),\n (_tensor((S,)), (2, 0,)),\n (_tensor((S,)), (2, 0, True,)),\n (_tensor(()), (1,)),\n (_tensor(()), (1, 0,)),\n (_tensor(()), (1, 0, True))\n ]\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\ndef error_inputs_kthvalue(op_info, device, **kwargs):\n # tests overlapping output fails\n t = make_tensor(10, dtype=torch.float32, device=device)\n indices = torch.empty((), device=device, dtype=torch.long)\n si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)})\n\n k_out_of_range_err = \"selected number k out of range for dimension\"\n return (ErrorInput(si, error_type=RuntimeError, error_regex=\"unsupported operation\"),\n ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),\n ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),\n ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)),\n error_type=RuntimeError, error_regex=k_out_of_range_err),)\n\ndef sample_inputs_dropout(op_info, device, dtype, requires_grad, *,\n train=None, min_input_dim=None, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n if min_input_dim:\n # Create cases with dim ranging from min_input_dim to min_input_dim + 2 (inclusive)\n cases = [(S,) * i for i in range(min_input_dim, min_input_dim + 3)]\n else:\n cases = [(S, S), (S,), ()]\n p_vals = [0.0, 
0.5, 1.0]\n # This is to handle special case for feature_alpha_dropout which has different\n # supported dtypes depending on `train` parameter\n training_vals = [train] if train is not None else [True, False]\n\n for case, p, training in product(cases, p_vals, training_vals):\n yield SampleInput(make_arg(case), kwargs=dict(p=p, training=training))\n yield SampleInput(make_arg(case), kwargs=dict())\n\n\ndef sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape):\n return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_long_input(shape, *, low, high, noncontiguous=False):\n return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,\n noncontiguous=noncontiguous)\n\n def make_per_sample_weight(flag, idx):\n # a tensor of float / double weights, or None\n # to indicate all weights should be taken to be 1\n if flag:\n return make_input(idx.shape)\n return None\n\n offsets = torch.tensor([0, 3], device=device, dtype=torch.long)\n for generate_per_sample_weight in (True, False):\n for mode in ('sum', 'mean', 'max'):\n # per_sample_weights is only supported for mode='sum' (got mode='****')\n if generate_per_sample_weight and mode in ('mean', 'max'):\n continue\n\n # 1-D index tensor\n idx = make_long_input((S,), low=0, high=M)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'offsets': offsets, 'mode': mode,\n 'per_sample_weights': per_sample_weights})\n\n idx = make_long_input((S,), low=0, high=M, noncontiguous=True)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'offsets': offsets, 'mode': mode,\n 'per_sample_weights': per_sample_weights})\n\n # bag with zero length\n idx = make_long_input((S,), low=0, high=M, noncontiguous=True)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),\n 'mode': mode,\n 'per_sample_weights': per_sample_weights})\n\n # 2-D index tensor\n idx = make_long_input((S, S), low=0, high=M)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})\n\n idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((M, S)), args=(idx,),\n kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})\n\n # The gradient vector at `padding_idx` is not updated.\n # Negative padding_idx\n idx = make_long_input((6,), low=0, high=S)\n idx[0] = 4\n idx[4] = 4\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((S, S)), args=(idx,),\n kwargs={'padding_idx': -1, 'offsets': offsets,\n 'mode': mode, 'per_sample_weights': per_sample_weights},)\n\n idx = make_long_input((3, 3), low=0, high=S)\n # Positive padding_idx\n idx[0, 0] = 2\n idx[1, 1] = 2\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(make_input((S, S)), args=(idx,),\n kwargs={'padding_idx': 2, 'mode': mode,\n 'per_sample_weights': per_sample_weights},)\n\n idx = make_long_input((6, ), low=0, high=S)\n weights = make_input((S, 
S))\n offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)\n\n if not requires_grad:\n # Following inputs return different gradient from the numerical gradient.\n # This is expected and relevant tests are present in `test_nn.py`.\n\n # Due to inplace renorming of weight, the numerical gradient doesn't match the\n # analytical gradient.\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S)) * 2\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'max_norm': 1., 'mode': mode,\n 'per_sample_weights': per_sample_weights},)\n\n idx = make_long_input((6, ), low=0, high=S)\n weights = make_input((S, S)) * 2\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'max_norm': 1., 'norm_type': 1.0,\n 'mode': mode, 'offsets': offsets,\n 'per_sample_weights': per_sample_weights},)\n\n if mode != 'max':\n # Scale the gradient based on the inverse frequency of a particular index.\n # Note : max mode does not support sparse weights\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 1\n idx[0, 1] = 1\n weights = make_input((S, S))\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'scale_grad_by_freq': True, 'mode': mode,\n 'per_sample_weights': per_sample_weights},)\n\n # gradcheck not implemented for sparse tensors.\n # Note : max mode does not support sparse weights\n idx = make_long_input((6, ), low=0, high=S)\n weights = make_input((S, S))\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'sparse': True, 'offsets': offsets,\n 'mode': mode, 'per_sample_weights': per_sample_weights})\n\n idx = make_long_input((6, ), low=0, high=S)\n idx[0] = 1 # freq more than 1\n idx[1] = 1 # freq more than 1\n idx[3] = 0 # padding_idx\n weights = make_input((S, S)) * 2\n per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)\n yield SampleInput(weights, args=(idx,),\n kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,\n 'max_norm': 1., 'offsets': offsets,\n 'mode': mode, 'per_sample_weights': per_sample_weights})\n\n
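\n# Illustrative sketch (added for clarity; not part of the original test suite):\n# how the `offsets` kwarg exercised above partitions a flat 1-D index tensor\n# into bags for torch.nn.functional.embedding_bag:\n#\n#   weight = torch.randn(10, 3)                 # 10 embeddings of dim 3\n#   idx = torch.tensor([1, 2, 4, 5, 4, 3])      # flat indices\n#   offsets = torch.tensor([0, 3])              # bag 0 = idx[0:3], bag 1 = idx[3:]\n#   out = torch.nn.functional.embedding_bag(idx, weight, offsets, mode='sum')\n#   assert out.shape == (2, 3)                  # one reduced row per bag\n\n\ndef sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape):\n return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_long_input(shape, *, low, high):\n return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)\n\n # 0-D index tensor\n idx = make_long_input((), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n # 1-D index tensor\n idx = make_long_input((S,), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n # 2-D index tensor\n idx = make_long_input((S, S), low=0, high=M)\n yield SampleInput(make_input((M, S)), args=(idx,),)\n\n if not requires_grad:\n # Following inputs return different gradient from the numerical gradient.\n # This is expected and relevant tests are present in `test_nn.py`.\n\n # The gradient vector at `padding_idx` is not updated.\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 2\n idx[1, 1] = 2\n yield SampleInput(make_input((S, 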
S)), args=(idx,), kwargs={'padding_idx': 2},)\n\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 4\n idx[1, 1] = 4\n yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)\n\n # Due to inplace renorming of weight, the numerical gradient doesn't match the\n # analytical gradient.\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)\n\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)\n\n # Scale the gradient based on the inverse frequency of a particular index.\n idx = make_long_input((2, 2), low=0, high=S)\n idx[0, 0] = 1\n idx[0, 1] = 1\n weights = make_input((S, S))\n yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)\n\n # gradcheck not implemented for sparse tensors.\n idx = make_long_input((2, 2), low=0, high=S)\n weights = make_input((S, S))\n yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})\n\n idx = make_long_input((3, 3), low=0, high=S)\n idx[0, 0] = 1 # freq more than 1\n idx[0, 1] = 1 # freq more than 1\n idx[1, 0] = 0 # padding_idx\n weights = make_input((S, S)) * 2\n yield SampleInput(weights, args=(idx,),\n kwargs={'sparse': True, 'scale_grad_by_freq': True,\n 'padding_idx': 0, 'max_norm': 1.})\n\n\ndef sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):\n def make_input(shape, *, low, high):\n return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)\n\n shapes = ((), (S,), (L, M, S))\n num_classess = (-1, 10)\n\n return [\n SampleInput(\n make_input(\n shape,\n low=0,\n high=10 if num_classes == -1 else num_classes // 2,\n ),\n kwargs=dict(num_classes=num_classes),\n )\n for shape, num_classes in itertools.product(shapes, num_classess)\n ]\n\ndef sample_inputs_softplus(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_input()),\n SampleInput(make_input(), kwargs=dict(beta=3)),\n SampleInput(make_input(low=1), kwargs=dict(threshold=1)),\n ]\n\ndef sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = make_fullrank_matrices_with_distinct_singular_values\n\n def make_input():\n return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad)\n\n # lhs / rhs shape can have any number of dimensions as long as their product equals 12\n shapes = [\n ((2, 2, 3), (12, 1)),\n ((4, 3), (6, 1, 2)),\n ]\n\n samples = []\n for shape_lhs, shape_rhs in shapes:\n inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()\n inp.requires_grad_(requires_grad)\n samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))\n\n return samples\n\ndef sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):\n a_shapes = [(2, 3, 6), (3, 4, 4, 3)]\n # Zero-dim tensors are not supported in NumPy, so we skip them for now.\n # NumPy is used in reference check tests.\n # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.\n # a_shapes += [(0, 0, 1, 2, 3, 0)]\n dimss = [None, (0, 2)]\n\n for a_shape, dims in itertools.product(a_shapes, dimss):\n a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)\n b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)\n yield 
SampleInput(a, args=(b,), kwargs=dict(dims=dims))\n\ndef sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n shapes_and_kwargs = [\n ((), None),\n ((S,), dict(reduction=\"mean\")),\n ((S,), dict(reduction=\"sum\")),\n ((S,), dict(reduction=\"none\")),\n ((S, S), None),\n ((S, S, S), None),\n ]\n\n return [\n SampleInput(_make_tensor(shape), args=(_make_tensor(shape),), kwargs=kwargs)\n for shape, kwargs in shapes_and_kwargs\n ]\n\ndef sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n batch_size = 2\n num_channels = 3\n modes = (\"bilinear\", \"nearest\")\n align_cornerss = (False, True)\n padding_modes = (\"zeros\", \"border\", \"reflection\")\n\n sample_inputs = []\n for dim in (2, 3):\n\n modes_ = (*modes, \"bicubic\") if dim == 2 else modes\n\n for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):\n sample_inputs.append(\n SampleInput(\n _make_tensor((batch_size, num_channels, *[S] * dim)),\n args=(_make_tensor((batch_size, *[S] * dim, dim)),),\n kwargs=dict(\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners,\n )\n )\n )\n\n return sample_inputs\n\ndef sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def make_target(shape):\n shape = () if len(shape) == 1 else (shape[0], )\n t = torch.randint(0, 2, shape, device=device, dtype=torch.long)\n # Label with -1 or 1\n t = t * 2 - 1\n target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad)\n return target\n\n shapes = ((S, S), (S,))\n reductions = ('none', 'mean', 'sum')\n for s, r in product(shapes, reductions):\n yield SampleInput(\n make_input(s),\n args=(make_input(s), make_target(s)),\n kwargs=dict(reduction=r, margin=random.uniform(-1, 1))\n )\n\ndef sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):\n input_length = 50\n batch = 16\n num_char = 20\n target_length = 30\n\n def make_log_probs(s):\n t = make_tensor(s, device=device, dtype=dtype)\n log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)\n return log_probs\n\n reductions = ('none', 'mean', 'sum')\n zero_inf = (True, False)\n for r, z in product(reductions, zero_inf):\n log_probs = make_log_probs((input_length, batch, num_char))\n targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)\n input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)\n target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)\n\n yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))\n\ndef sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n shape = (2, 3)\n num_classes = shape[1]\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n # FIXME: Derivative wrt. 
weight not implemented\n make_weight = partial(make_tensor, shape=(num_classes,), device=device, dtype=dtype, requires_grad=False)\n\n def make_target(shape, zeros=False):\n s = (shape[0], *shape[2:]) if len(shape) > 1 else ()\n if zeros:\n return torch.zeros(s, device=device, dtype=torch.long)\n else:\n return make_tensor(s,\n low=0,\n high=shape[1] if len(shape) > 1 else shape[0],\n device=device,\n dtype=torch.long)\n\n\n def gen_shape_kwargs():\n # Batched, non-batched and 2d\n shapes = (shape, (num_classes,), shape + (2, 2))\n reductions = ('none', 'mean', 'sum')\n for reduction, s in product(reductions, shapes):\n yield make_input(s), make_target(s), dict(reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)\n yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)\n t = make_target(s)\n ignore = num_classes // 2\n # If \"mean\", nll returns NaN, so it's not differentiable at those points\n if t.eq(ignore).all() and reduction == \"mean\":\n t.fill_(0)\n yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)\n # Test ignoring all the targets\n # If \"mean\", nll returns NaN, so it's not differentiable at those points\n if reduction != \"mean\":\n yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)\n\n for input, target, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target,), kwargs=kwargs)\n\ndef sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):\n yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))\n mask = torch.tensor([[0, 1, 0, 1, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 0, 1, 0],\n [1, 0, 1, 1, 0],\n [1, 0, 0, 1, 0]], dtype=torch.bool, device=device)\n t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)\n t[mask] = 0\n yield SampleInput(t)\n\n t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)\n t[mask] = 0\n yield SampleInput(t)\n\n t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)\n yield SampleInput(t)\n\n yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))\n yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))\n\ndef _generate_sample_shape_reduction():\n shapes = ((S,), (S, S), (S, S, S))\n reductions = ('none', 'mean', 'sum')\n for s, r in product(shapes, reductions):\n yield s, r\n\ndef sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0\n make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def gen_shape(shape):\n yield shape\n # Broadcast\n yield (*shape[:-1], 1)\n yield shape[:-1]\n\n def gen_shape_kwargs():\n for s, r in _generate_sample_shape_reduction():\n for t_s, v_s in product(gen_shape(s), gen_shape(s)):\n yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)\n yield (\n _make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(full=True, reduction=r)\n )\n yield (\n _make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(eps=random.uniform(1e-6, 1e-3), reduction=r)\n )\n yield (\n 
_make_tensor(s), _make_tensor(t_s), make_var(v_s),\n dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)\n )\n\n for input, target, var, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target, var, ), kwargs=kwargs)\n\ndef _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n for s, r in _generate_sample_shape_reduction():\n yield _make_tensor(s), _make_tensor(s), dict(reduction=r)\n\ndef sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):\n for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n d['margin'] = random.uniform(-9, 9)\n yield SampleInput(input, args=(target, ), kwargs=d)\n\ndef sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):\n for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):\n d['delta'] = random.uniform(1e-3, 9)\n yield SampleInput(input, args=(target, ), kwargs=d)\n\ndef sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):\n _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def gen_shape_kwargs():\n for s, r in _generate_sample_shape_reduction():\n for li in (True, False):\n for f in (True, False):\n i1 = _make_tensor(s)\n i2 = _make_tensor(s)\n # For Poisson NLL Loss,\n # target is assumed to be from\n # Poisson Distribution which\n # always has positive samples\n t1 = _make_tensor(s, low=0)\n t2 = _make_tensor(s, low=0)\n\n with torch.no_grad():\n if not li:\n i1.abs_()\n i2.abs_()\n t1.abs_()\n t2.abs_()\n\n yield (\n i1, t1,\n dict(log_input=li, full=f, reduction=r)\n )\n yield (\n i2, t2,\n dict(log_input=li, full=f,\n eps=random.uniform(1e-8, 1e-3),\n reduction=r)\n )\n\n for input, target, kwargs in gen_shape_kwargs():\n yield SampleInput(input, args=(target, ), kwargs=kwargs)\n\ndef sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):\n make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n shape = (3,)\n batched_shape = (2, *shape)\n shapes_and_kwargs = [\n (shape, None),\n (batched_shape, None),\n (shape, dict(keepdim=True)),\n (batched_shape, dict(keepdim=True)),\n (shape, dict(p=5.0)),\n (shape, dict(p=-1.0)),\n (shape, dict(eps=1.0)),\n ]\n\n return [\n SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs\n ]\n\ndef sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):\n return [\n SampleInput(\n make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad),\n kwargs=dict(upscale_factor=upscale_factor),\n )\n for upscale_factor in (1, 3)\n ]\n\ndef sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):\n return [\n SampleInput(\n make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad),\n kwargs=dict(downscale_factor=downscale_factor),\n )\n for downscale_factor in (1, 3)\n ]\n\ndef sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n sample_shapes = [(), (S), (S, S, S)]\n atols = [1e-2, 1e-16]\n rtols = [1e-1, 0.5]\n eps = 1e-8\n for s, rtol, atol in product(sample_shapes, rtols, atols):\n # close sample\n t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n close = (t + 
atol).detach().requires_grad_(requires_grad)\n close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol))\n samples.append(close_sample)\n\n # random sample\n a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)\n r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol))\n samples.append(r_sample)\n\n return samples\n\n\ndef sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs):\n make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n shapes_and_reduction = [\n ((2,), \"mean\"),\n ((2, 3), \"mean\"),\n ((2, 3, 4), \"mean\"),\n ((2,), \"none\"),\n ((2,), \"batchmean\"),\n ((2,), \"sum\"),\n ]\n\n sample_inputs = []\n for (shape, reduction), log_target in itertools.product(shapes_and_reduction, (True, False)):\n # input should be log-probability, i.e. lie in (-inf, 0]\n input = make(shape, low=None, high=0)\n # target should be a probability by default, i.e. lie in [0, 1], and a log-probability if log_target is set,\n # i.e. lie in (-inf, 0]\n target = make(shape, low=None, high=0) if log_target else make(shape, low=0, high=1)\n sample_inputs.append(\n SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target))\n )\n return sample_inputs\n\ndef sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n return [\n SampleInput(make_input(())),\n SampleInput(make_input((2,))),\n SampleInput(make_input((2, 2))),\n SampleInput(make_input((2,)), kwargs=dict(offset=1)),\n SampleInput(make_input((2,)), kwargs=dict(offset=-1)),\n ]\n\n\nforeach_unary_op_db: List[OpInfo] = [\n ForeachFuncInfo('exp'),\n ForeachFuncInfo('acos'),\n ForeachFuncInfo('asin'),\n ForeachFuncInfo('atan'),\n ForeachFuncInfo('cos'),\n ForeachFuncInfo('cosh'),\n ForeachFuncInfo('log'),\n ForeachFuncInfo('log10'),\n ForeachFuncInfo('log2'),\n ForeachFuncInfo('tan'),\n ForeachFuncInfo('tanh'),\n ForeachFuncInfo('sin'),\n ForeachFuncInfo('sinh'),\n\n ForeachFuncInfo(\n 'neg',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex(),\n sample_inputs_func=sample_inputs_foreach,\n safe_casts_outputs=False,\n ),\n\n ForeachFuncInfo(\n 'sqrt',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'ceil',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'erf',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'erfc',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'expm1',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'floor',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'log1p',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'round',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'frac',\n 
dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'reciprocal',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'sigmoid',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half),\n ),\n\n ForeachFuncInfo(\n 'trunc',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n ),\n\n ForeachFuncInfo(\n 'abs',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n safe_casts_outputs=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n]\n\nforeach_binary_op_db: List[OpInfo] = [\n ForeachFuncInfo(\n \"add\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_alpha_param=True,\n ),\n ForeachFuncInfo(\n \"sub\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_alpha_param=True,\n ),\n ForeachFuncInfo(\n \"mul\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n ),\n ForeachFuncInfo(\n \"div\",\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n ),\n]\n\nforeach_pointwise_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"addcmul\",\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n ),\n ForeachFuncInfo(\n \"addcdiv\",\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n ),\n]\n\nforeach_minmax_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"maximum\",\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool),\n ),\n ForeachFuncInfo(\n \"minimum\",\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool),\n ),\n]\n\nforeach_reduce_op_db: List[ForeachFuncInfo] = [\n ForeachFuncInfo(\n \"norm\",\n dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ),\n]\n\ndef reference_sign(x):\n if x.dtype == np.bool_:\n # `np.sign` doesn't support `bool`.\n # >>> np.sign(True)\n # ufunc 'sign' did not contain a loop\n # with signature matching types dtype('bool') -> dtype('bool')\n return np.sign(x, dtype=np.uint8).astype(np.bool_)\n return np.sign(x)\n\n\ndef reference_sgn(x):\n # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.\n # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j.\n # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input)\n if x.dtype not in [np.complex64, np.complex128]:\n return reference_sign(x)\n\n out = (x / np.abs(x))\n if out.ndim == 0:\n # Handle x == 0 case\n if (x == 0):\n # Can't assign to np.complex object\n # So make a new one.\n return np.array(complex(0, 0), dtype=x.dtype)\n return out\n\n # Handle x == 0 case\n mask = (x == 
0)\n out[mask] = complex(0, 0)\n return out\n\n\ndef reference_sigmoid(x):\n # 'scipy.special.expit' not supported for the input types\n if x.dtype in [np.complex64, np.complex128]:\n return (1 / (1 + np.exp(-x)))\n return scipy.special.expit(x)\n\n\ndef reference_logsigmoid(x):\n return np.where(\n x < 0,\n x - np.log1p(np.exp(x)),\n -np.log1p(np.exp(-x)))\n\n\ndef reference_hardsigmoid(x):\n intermediate = x / 6 + 0.5\n y = np.clip(intermediate, 0, None)\n return np.where(y > 1, 1, y).astype(x.dtype)\n\n\ndef reference_lgamma(x):\n # scipy.special.gammaln returns `-inf` when input is `-inf`.\n # While Pytorch, C and C++, all return `inf` when input is `-inf`.\n # Reference:\n # https://en.cppreference.com/w/cpp/numeric/math/lgamma\n # https://en.cppreference.com/w/c/numeric/math/lgamma\n\n # To handle the above discrepancy,\n # we replace -inf with inf so values\n # that were originally -inf map to inf as expected\n if x.dtype.kind == 'f':\n x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)\n\n out = scipy.special.gammaln(x)\n\n if x.dtype == np.float16:\n # `scipy.special.gammaln` returns output of float32 when input is float16,\n # while `torch.lgamma` preserves `float16`. But due to smaller range of float16,\n # Pytorch version outputs `inf` while SciPy returns finite values.\n out = out.astype(np.float16)\n\n return out\n\ndef reference_polygamma(x, n):\n # WEIRD `scipy.special.polygamma` behavior\n # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype\n # dtype('float64')\n # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype\n # dtype('float32')\n #\n # Thus we cast output to the default torch dtype.\n np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n return scipy.special.polygamma(n, x).astype(np_dtype)\n\n\ndef reference_mvlgamma(x, d):\n if x.dtype == np.float16:\n return scipy.special.multigammaln(x, d).astype(np.float16)\n\n return scipy.special.multigammaln(x, d)\n\ndef reference_softplus(input, beta=1, threshold=20):\n non_linear = input * beta <= threshold\n output = input.copy()\n output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta\n return output\n\n\ndef reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:\n if num_classes == -1:\n num_classes = int(np.amax(a) + 1)\n\n idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes\n one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)\n np.put(one_hot, idcs, 1)\n return one_hot.reshape(*a.shape, -1)\n\n\ndef reference_mse_loss(input, target, reduction=\"mean\"):\n se = (input - target) ** 2\n if reduction == \"mean\":\n return np.mean(se)\n elif reduction == \"sum\":\n return np.sum(se)\n else: # reduction == \"none\"\n return se\n\n\ndef wrapper_set_seed(op, *args, **kwargs):\n \"\"\"Wrapper to set seed manually for some functions like dropout\n See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.\n \"\"\"\n with freeze_rng_state():\n torch.manual_seed(42)\n return op(*args, **kwargs)\n\n\ndef reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):\n feature_size = np.prod(normalized_shape)\n inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload]\n mean = inp_view.mean(axis=-1, keepdims=True)\n var = inp_view.var(axis=-1, ddof=0, keepdims=True)\n Y = (inp_view - mean) / np.sqrt(var + eps)\n if weight is None and bias is not None:\n Y = Y + bias.reshape(-1)\n elif weight is not None and 
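bias is None:\n Y = Y * weight.reshape(-1)\n elif weight is not None and bias is not None:\n Y = Y * weight.reshape(-1) + bias.reshape(-1)\n return Y.reshape(*inp.shape)\n\n# Illustrative check (added for clarity; not part of the original file) of what\n# reference_layer_norm computes: normalization over the trailing\n# `normalized_shape` dims with biased variance (ddof=0), e.g.\n#\n#   x = np.arange(6, dtype=np.float64).reshape(2, 3)\n#   y = reference_layer_norm(x, (3,))\n#   # each row of y has mean ~0 and variance ~1 (up to eps)\n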
\ndef reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):\n inp_view = inp\n if np.prod(inp.shape) != 0:\n inp_view = inp.reshape((inp.shape[0], num_groups, -1))\n mean = inp_view.mean(axis=-1, keepdims=True)\n var = inp_view.var(axis=-1, ddof=0, keepdims=True)\n Y = (inp_view - mean) / np.sqrt(var + eps)\n Y = Y.reshape(inp.shape)\n if weight is not None:\n # weight is a vector of length equal to the channel\n if len(Y.shape) > 2:\n weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:]))\n Y = Y * weight\n if bias is not None:\n # bias is a vector of length equal to the channel\n if len(Y.shape) > 2:\n bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:]))\n Y = Y + bias\n return Y\n\n\n# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't\n# have an out_int32 arg. Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into\n# stacked 1D cases\ndef reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):\n side = 'right' if (right or side == 'right') else 'left'\n if len(sorted_sequence.shape) == 1:\n ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)\n return ret.astype(np.int32) if out_int32 else ret\n elif sorted_sequence.shape[0] == 0:\n if sorter is not None:\n sorter = sorter.flatten()\n ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)\n ret = ret.astype(np.int32) if out_int32 else ret\n return ret.reshape(boundary.shape)\n else:\n # numpy searchsorted only supports 1D inputs so we split up ND inputs\n orig_shape = boundary.shape\n num_splits = np.prod(sorted_sequence.shape[:-1])\n splits = range(0, num_splits)\n sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)\n if sorter is not None:\n sorter = sorter.reshape(num_splits, -1)\n\n split_sequence = [sorted_sequence[i] for i in splits]\n split_boundary = [boundary[i] for i in splits]\n split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]\n\n split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)\n for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]\n split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret\n return np.stack(split_ret).reshape(orig_shape)\n\n\ndef gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):\n \"\"\"Gradcheck wrapper for functions that take Hermitian matrices as input.\n\n They require a modified function because the finite-difference algorithm\n for calculating derivatives does not preserve the Hermitian property of the input.\n \"\"\"\n return op(input + input.mH, *args, **kwargs)\n\n\ndef gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):\n \"\"\"Gradcheck wrapper for functions that take lower or upper triangular matrices as input.\n\n They require a modified function because the finite-difference algorithm\n for calculating derivatives does not preserve the triangular property of the input.\n `idx` is used to specify which `args[idx]` is to be triangularized.\n \"\"\"\n triangular_arg = args[idx].triu() if upper else args[idx].tril()\n return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)\n\n
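\n# Illustrative usage sketch (added for clarity; not part of the original file):\n# running gradcheck through an op whose input must stay lower-triangular,\n# e.g. torch.cholesky_inverse:\n#\n#   A = torch.randn(3, 3, dtype=torch.double)\n#   L = torch.linalg.cholesky(A @ A.mT + torch.eye(3, dtype=torch.double))\n#   torch.autograd.gradcheck(\n#       lambda t: gradcheck_wrapper_triangular_input(torch.cholesky_inverse, t, upper=False),\n#       (L.requires_grad_(),))\n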
\ndef gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):\n \"\"\"Gradcheck wrapper for masked operations.\n\n When mask is specified, replaces masked-out elements with zeros.\n\n Use for operations that produce non-finite masked-out elements,\n for instance, for minimum and maximum reductions.\n \"\"\"\n output = op(input, *args, **kwargs)\n mask = kwargs.get('mask')\n if mask is not None:\n output_mask = torch._masked._output_mask(op, input, *args, **kwargs)\n output = torch.where(output_mask, output, output.new_zeros([]))\n return output\n\n\ndef reference_reduction_numpy(f, supports_keepdims=True):\n \"\"\"Wraps a NumPy reduction operator.\n\n The wrapper function will forward dim, keepdim, mask, and identity\n kwargs to the wrapped function as the NumPy equivalent axis,\n keepdims, where, and initial kwargs, respectively.\n\n Args:\n f: NumPy reduction operator to wrap\n supports_keepdims (bool, optional): Whether the NumPy operator accepts\n keepdims parameter. If it does not, the wrapper will manually unsqueeze\n the reduced dimensions if it was called with keepdim=True. Defaults to True.\n\n Returns:\n Wrapped function\n\n \"\"\"\n @wraps(f)\n def wrapper(x: np.ndarray, *args, **kwargs):\n # Copy keys into a set\n keys = set(kwargs.keys())\n\n dim = kwargs.pop('dim', None)\n keepdim = kwargs.pop('keepdim', False)\n\n if 'dim' in keys:\n dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n # NumPy reductions don't accept dim=0 for scalar inputs\n # so we convert it to None if and only if dim is equivalent\n if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:\n kwargs['axis'] = None\n else:\n kwargs['axis'] = dim\n\n if 'keepdim' in keys and supports_keepdims:\n kwargs['keepdims'] = keepdim\n\n if 'mask' in keys:\n mask = kwargs.pop('mask')\n if mask is not None:\n kwargs['where'] = mask.cpu().numpy()\n\n if 'identity' in keys:\n identity = kwargs.pop('identity')\n if identity is not None:\n if identity.dtype is torch.bfloat16:\n identity = identity.cpu().to(torch.float32)\n else:\n identity = identity.cpu()\n kwargs['initial'] = identity.numpy()\n\n if 'unbiased' in keys:\n unbiased = kwargs.pop('unbiased')\n if unbiased is not None:\n kwargs['ddof'] = int(unbiased)\n\n result = f(x, *args, **kwargs)\n\n # Unsqueeze reduced dimensions if NumPy does not support keepdims\n if keepdim and not supports_keepdims and x.ndim > 0:\n dim = list(range(x.ndim)) if dim is None else dim\n result = np.expand_dims(result, dim)\n\n return result\n\n return wrapper\n\n\ndef reference_std_var(f):\n \"\"\"Forwards unbiased/correction kwargs as NumPy's equivalent ddof\"\"\"\n g = reference_reduction_numpy(f)\n\n @wraps(g)\n def wrapper(x: np.ndarray, *args, **kwargs):\n assert not ('unbiased' in kwargs and 'correction' in kwargs)\n\n if 'unbiased' in kwargs:\n kwargs['ddof'] = int(kwargs.pop('unbiased'))\n elif 'correction' in kwargs:\n kwargs['ddof'] = kwargs.pop('correction')\n\n return g(x, *args, **kwargs)\n\n return wrapper\n\n\ndef generate_std_var_kwargs(t: torch.Tensor, **kwargs):\n \"\"\"Generates unbiased/correction kwargs for std/var operators\"\"\"\n yield ((), {'unbiased': True})\n yield ((), {'unbiased': False})\n\n # Currently, calling std with correction is only enabled when\n # both dim and keepdim are provided.\n if 'dim' in kwargs and 'keepdim' in kwargs:\n yield ((), {'correction': 0})\n yield ((), {'correction': 1})\n\n numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()\n yield ((), {'correction': numel // 2})\n
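\n# Illustrative sketch (added for clarity; not part of the original file): the\n# wrapper above maps torch-style reduction kwargs onto their NumPy spellings:\n#\n#   ref_sum = reference_reduction_numpy(np.sum)\n#   ref_sum(np.ones((2, 3)), dim=1, keepdim=True)   # -> array([[3.], [3.]])\n#   # `dim` was forwarded as `axis`, `keepdim` as `keepdims`\n\ndef ref_pairwise_distance(input1, 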
input2):\n pass\n\n\n# Operator database (sorted alphabetically)\nop_db: List[OpInfo] = [\n UnaryUfuncInfo('abs',\n aliases=('absolute', ),\n ref=np.abs,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat]),\n # Reference: https://github.com/pytorch/pytorch/issues/49224\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.int8], active_if=TEST_WITH_ASAN),\n # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)\n # We can break the logic of the loop over all possible types but it is OK.\n # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',\n dtypes=[torch.cfloat, torch.cdouble]),\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=complex_types()),\n # Forward-over-reverse gradgrad might be wrong for complex (see above):\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',\n dtypes=complex_types()),\n ),\n supports_inplace_autograd=False,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n supports_sparse_csr=True,\n supports_forward_ad=True),\n # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)\n UnaryUfuncInfo('acos',\n aliases=('arccos', ),\n ref=np.arccos,\n domain=(-1, 1),\n handles_complex_extremals=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-1,\n torch.complex64: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_method_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n # NOTE: the derivative for inplace acosh is not implemented\n UnaryUfuncInfo('acosh',\n aliases=('arccosh', ),\n ref=np.arccosh,\n domain=(1, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, 
torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n # Reference: https://github.com/pytorch/pytorch/issues/50692\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_method_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n ),\n # acosh is not defined at x < 1 (real) or |z| < 1 (complex)\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1),\n safe_val=2)),\n BinaryUfuncInfo('add',\n # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate\n ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \\\n else np.add(input, np.multiply(alpha, other)),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),\n supports_inplace_autograd=False,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics_extremal_values',\n dtypes=(torch.complex64, torch.complex128)),\n )),\n BinaryUfuncInfo('mul',\n aliases=('multiply',),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True)),\n BinaryUfuncInfo('sub',\n # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate\n ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),\n aliases=('subtract',),\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=partial(sample_inputs_add_sub, alpha=2, python_scalars=True),\n supports_inplace_autograd=False,\n decorators=(\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}),\n 'TestBinaryUfuncs', 'test_reference_numerics'),\n ),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 'test_reference_numerics',\n dtypes=(torch.uint8,)),\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestBinaryUfuncs',\n 
'test_reference_numerics_small_values',\n dtypes=(torch.uint8,)),\n )),\n OpInfo('addmm',\n # This addmm OpInfo is for when alpha and beta are not both equal to 1.\n # alpha=beta=1 is tested in the following opinfo, because that special case will\n # trigger addmm being decomposed by a jit pass.\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_addmm),\n OpInfo('addmm',\n # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.\n variant_test_name='decomposed',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],\n sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),\n OpInfo('addmv',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_addmv),\n OpInfo('addbmm',\n ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),\n np.multiply(np.asarray(alpha, dtype=batch1.dtype),\n np.sum(np.matmul(batch1, batch2), axis=0))),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n backward_dtypesIfROCM=floating_types_and(torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),\n torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestCommon', 'test_reference_testing')],\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+\n # and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),\n # addbmm does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # https://github.com/pytorch/pytorch/issues/55907\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_addbmm),\n OpInfo('baddbmm',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_types_and(torch.float16,\n *[torch.bfloat16] if SM53OrLater else [],\n torch.complex64, 
torch.complex128),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),\n DecorateInfo(\n toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestMathBits', 'test_conj_view', device_type='cuda')],\n sample_inputs_func=sample_inputs_baddbmm),\n OpInfo('dot',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_dot_vdot,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('vdot',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_dot_vdot,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('bmm',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+\n # and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),\n ),\n sample_inputs_func=sample_inputs_bmm),\n OpInfo('mv',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_mv),\n OpInfo('addr',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/50747\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),\n ),\n sample_inputs_func=sample_inputs_addr,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('addcmul',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_inplace_autograd=False,\n skips=(\n # TODO: update sample inputs with for_inplace_variant kwarg to support this test\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n OpInfo('addcdiv',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # TODO: update sample 
inputs with for_inplace_variant kwarg to support this test\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestCommon',\n 'test_variant_consistency_eager'),),\n sample_inputs_func=sample_inputs_addcmul_addcdiv),\n UnaryUfuncInfo('asin',\n aliases=('arcsin', ),\n ref=np.arcsin,\n domain=(-1, 1),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),\n 'TestUnaryUfuncs', device_type='cuda'),\n precisionOverride({torch.bfloat16: 1e-2}),\n ],\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n # NOTE: derivative for inplace asinh is not implemented\n UnaryUfuncInfo('asinh',\n aliases=('arcsinh', ),\n ref=np.arcsinh,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('atan',\n aliases=('arctan', ),\n ref=np.arctan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n BinaryUfuncInfo('atan2',\n aliases=('arctan2',),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n promotes_int_to_float=True,\n sample_inputs_func=sample_inputs_atan2,\n skips=(\n # TypeError: atan2(): argument 'other' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n UnaryUfuncInfo('atanh',\n aliases=('arctanh', ),\n ref=np.arctanh,\n domain=(-1, 1),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat],\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n OpInfo('allclose',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ref=np.allclose,\n supports_autograd=False,\n supports_forward_ad=False,\n sample_inputs_func=sample_inputs_allclose,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('broadcast_to',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_broadcast_to),\n OpInfo('broadcast_tensors',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_broadcast_tensors),\n OpInfo('block_diag',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_block_diag),\n BinaryUfuncInfo('bitwise_and',\n dtypes=integral_types_and(torch.bool),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_binary_pwise,\n skips=(\n # RuntimeError: \"bitwise_and_cuda\" not implemented for 'Half'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n )),\n UnaryUfuncInfo('bitwise_not',\n ref=np.bitwise_not,\n dtypes=integral_types_and(torch.bool),\n supports_autograd=False),\n BinaryUfuncInfo('bitwise_left_shift',\n op=torch.bitwise_left_shift,\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_bitwise_shift,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n # FIXME: Undefined behavior sanitizer: shift exponent -9 is negative\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', device_type='cpu'),\n )),\n BinaryUfuncInfo('bitwise_right_shift',\n op=torch.bitwise_right_shift,\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_bitwise_shift,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n # FIXME: Undefined behavior sanitizer\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', device_type='cpu'),\n )),\n OpInfo('combinations',\n op=torch.combinations,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_combinations),\n OpInfo('cartesian_prod',\n 
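# For reference (editorial doctest, same style as elsewhere in this file), cartesian_prod behaves like itertools.product over 1-D tensors:\n           # >>> torch.cartesian_prod(torch.tensor([1, 2]), torch.tensor([3, 4]))\n           # tensor([[1, 3],\n           #         [1, 4],\n           #         [2, 3],\n           #         [2, 4]])\n           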
op=torch.cartesian_prod,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_cartesian_prod,\n skips=(\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"),\n 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n )),\n OpInfo('cdist',\n dtypes=floating_types(),\n supports_out=False,\n supports_gradgrad=False,\n assert_autodiffed=False,\n sample_inputs_func=sample_inputs_cdist),\n UnaryUfuncInfo('ceil',\n ref=np.ceil,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n OpInfo('cholesky',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n OpInfo('cholesky_inverse',\n dtypes=floating_and_complex_types(),\n backward_dtypes=floating_types(),\n # TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs\n # with complex dtype.\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_cholesky_inverse,\n gradcheck_wrapper=gradcheck_wrapper_triangular_input,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True\n # for complex tensors\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # cholesky_inverse does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),)),\n OpInfo('cholesky_solve',\n op=torch.cholesky_solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_cholesky_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cholesky_solve does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),),),\n OpInfo('chunk',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_chunk,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('clone',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_clone,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('contiguous',\n op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_contiguous,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n autodiff_fusible_nodes=['aten::contiguous'],\n assert_jit_shape_analysis=True,\n supports_out=False),\n OpInfo('sum_to_size',\n op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n 
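# For reference, sum_to_size reduces a tensor down to a broadcast-compatible shape:\n           # >>> torch.ones(2, 3).sum_to_size((1, 3))\n           # tensor([[2., 2., 2.]])\n           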
sample_inputs_func=sample_inputs_sum_to_size,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           supports_out=False,\n           skips=(\n               # RuntimeError: inputSet && outputSet\n               # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":118\n               DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),),),\n    OpInfo('symeig',\n           dtypes=floating_and_complex_types(),\n           check_batched_grad=False,\n           check_batched_gradgrad=False,\n           sample_inputs_func=sample_inputs_symeig,\n           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n    # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. tensors\n    OpInfo('clamp',\n           aliases=('clip',),\n           dtypes=all_types_and(torch.bfloat16),\n           dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n           assert_autodiffed=True,\n           sample_inputs_func=sample_inputs_clamp),\n    UnaryUfuncInfo('clamp',\n                   variant_test_name='scalar',\n                   aliases=('clip', ),\n                   decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),\n                   ref=np.clip,\n                   dtypes=all_types_and(torch.bfloat16),\n                   dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n                   assert_autodiffed=True,\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   skips=(\n                       # Reference: https://github.com/pytorch/pytorch/issues/54841\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                    device_type='cpu', dtypes=[torch.bfloat16]),\n                   ),\n                   sample_kwargs=sample_kwargs_clamp_scalar,\n                   sample_inputs_func=sample_inputs_clamp_scalar),\n    UnaryUfuncInfo('positive',\n                   ref=np.positive,\n                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n                   supports_out=False,\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   ),\n    UnaryUfuncInfo('conj',\n                   ref=np.conj,\n                   dtypes=all_types_and_complex_and(torch.bool,\n                                                    torch.bfloat16, torch.half),\n                   supports_sparse=True,\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   supports_out=False),\n    UnaryUfuncInfo('conj_physical',\n                   ref=np.conj,\n                   dtypes=all_types_and_complex_and(torch.bool,\n                                                    torch.bfloat16, torch.half),\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   supports_sparse=True,\n                   supports_sparse_csr=True,\n                   skips=(\n                       # RuntimeError: inputSet && outputSet\n                       # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":118,\n                       # please report a bug to PyTorch.\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),\n                       DecorateInfo(unittest.skip(\"Skipped! 
conj_physical_ not implemented for sparse\"),\n 'TestSparseUnaryUfuncs', 'test_inplace'),\n )),\n OpInfo('resolve_conj',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_real,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n ),\n OpInfo('resolve_neg',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_real,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n ),\n OpInfo('view_as_real',\n dtypes=complex_types(),\n supports_forward_ad=True,\n supports_out=False,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_view_as_real,\n test_conjugated_samples=False,\n ),\n OpInfo('view_as_complex',\n dtypes=floating_types_and(torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n test_neg_view=False,\n sample_inputs_func=sample_inputs_view_as_complex,\n skips=(\n # RuntimeError: Tensor must have a last dimension with stride 1\n DecorateInfo(unittest.expectedFailure, \"TestCommon\", \"test_noncontiguous_samples\"),\n )),\n BinaryUfuncInfo('complex',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_complex,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n # TypeError: complex(): argument 'imag' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n BinaryUfuncInfo('copysign',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n promotes_int_to_float=True,\n sample_inputs_func=sample_inputs_copysign,\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True),\n OpInfo('corrcoef',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_corrcoef,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n UnaryUfuncInfo('cos',\n ref=np.cos,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n UnaryUfuncInfo('cosh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n OpInfo('cov',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_cov,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('cross',\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half),\n sample_inputs_func=sample_inputs_cross,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True),\n OpInfo('linalg.cross',\n ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),\n op=torch.linalg.cross,\n dtypes=all_types_and_complex(),\n dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.half),\n aten_name='linalg_cross',\n sample_inputs_func=sample_inputs_cross,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True),\n OpInfo('cumsum',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # cumsum does not handle correctly out= dtypes\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n ),\n sample_inputs_func=sample_inputs_cumulative_ops),\n OpInfo('cumprod',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # cumprod does not handle correctly out= dtypes\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n ),\n # gradgradcheck fails in fast_mode=True: #56275\n sample_inputs_func=sample_inputs_cumprod,\n gradcheck_fast_mode=False),\n OpInfo('cummax',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('cummin',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n 
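# note: unlike cumsum/cumprod, cummax/cummin take no dtype kwarg, hence supports_dtype_kwargs=False above\n           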
supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n UnaryUfuncInfo('deg2rad',\n ref=np.radians,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n OpInfo('diff',\n op=torch.diff,\n # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append\n # are set as None when converting to numpy\n ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (\n np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append)\n ),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_diff),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='no_rounding_mode',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True),\n skips=(\n # 69913: RuntimeError: CUDA error: an illegal memory access was encountered\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n ),),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='trunc_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode=\"trunc\", python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True),\n skips=(\n # 69913: RuntimeError: CUDA error: an illegal memory access was encountered\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n ),),\n BinaryUfuncInfo('div',\n aliases=('divide',),\n variant_test_name='floor_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode=\"floor\", python_scalars=True),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs=dict(exclude_zero=True),\n skips=(\n # 69913: 
RuntimeError: CUDA error: an illegal memory access was encountered\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n device_type='cuda', dtypes=[torch.double, torch.cdouble]),\n ),),\n BinaryUfuncInfo('true_divide',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n promotes_int_to_float=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_binary_pwise,\n rhs_make_tensor_kwargs=dict(exclude_zero=True)),\n UnaryUfuncInfo('exp',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/issues/48010\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n OpInfo('expand',\n op=lambda self, shape: self.expand(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_jit_shape_analysis=True,\n supports_out=False),\n OpInfo('expand_as',\n op=lambda self, other: self.expand_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_expand_as,\n supports_out=False,\n skips=(\n DecorateInfo(unittest.skip(\"Second argument does not need gradient\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n ),\n OpInfo('diag',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_diag),\n OpInfo('diag_embed',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_diagonal_diag_embed),\n OpInfo('diagonal',\n # They are not strictly aliases as they have diverging defaults, but we can see them as aliases for testing purposes\n # If we add tests that test the function against the alias, make linalg.diagonal into its own OpInfo\n aliases=('linalg.diagonal',),\n dtypes=all_types_and_complex_and(torch.bool, 
torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_diagonal_diag_embed),\n OpInfo('diagonal_scatter',\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_diagonal_scatter),\n BinaryUfuncInfo('eq',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n BinaryUfuncInfo('fmax',\n op=torch.fmax,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_max_min_binary,\n skips=(\n # RuntimeError: \"max_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n # TypeError: fmax(): argument 'other' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs'),\n )),\n BinaryUfuncInfo('fmin',\n op=torch.fmin,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_max_min_binary,\n skips=(\n # RuntimeError: \"min_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n # TypeError: fmin(): argument 'other' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs'),\n )),\n BinaryUfuncInfo('fmod',\n ref=np.fmod,\n dtypes=all_types_and(torch.float16),\n rhs_make_tensor_kwargs={'exclude_zero': True},\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_fmod_remainder,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)),\n )),\n BinaryUfuncInfo('fmod',\n ref=np.fmod,\n variant_test_name='autodiffed',\n dtypes=all_types_and(torch.float16, torch.bool),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n rhs_make_tensor_kwargs={'exclude_zero': True},\n sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)),\n )),\n BinaryUfuncInfo('remainder',\n ref=np.remainder,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n rhs_make_tensor_kwargs={'exclude_zero': True},\n sample_inputs_func=sample_inputs_fmod_remainder,\n skips=(\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bfloat16,)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)),\n )),\n BinaryUfuncInfo('remainder',\n ref=np.remainder,\n variant_test_name='autodiffed',\n dtypes=all_types_and(torch.float16, torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),\n supports_forward_ad=True,\n 
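# note: remainder follows the sign of the divisor (like Python's %), while fmod above follows the sign of the dividend (like C's fmod)\n                    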
supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n rhs_make_tensor_kwargs={'exclude_zero': True},\n sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True),\n decorators=(\n # Fails on XLA\n # False is not true : Tensors failed to compare as equal!\n # Attempted to compare equality of tensors with different dtypes\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n ),\n skips=(\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bfloat16,)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)),\n )),\n UnaryUfuncInfo('frac',\n ref=lambda x: np.modf(x)[0],\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # Reference for disabling extremals\n # https://github.com/pytorch/pytorch/issues/51948\n handles_extremals=False),\n SpectralFuncInfo('fft.fft',\n aten_name='fft_fft',\n ref=np.fft.fft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n ),\n SpectralFuncInfo('fft.fft2',\n aten_name='fft_fft2',\n ref=np.fft.fft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],\n ),\n SpectralFuncInfo('fft.fftn',\n aten_name='fft_fftn',\n ref=np.fft.fftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],\n ),\n SpectralFuncInfo('fft.hfft',\n aten_name='fft_hfft',\n ref=np.fft.hfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.hfft2',\n aten_name='fft_hfft2',\n ref=scipy.fft.hfft2 if has_scipy_fft else None,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.hfftn',\n aten_name='fft_hfftn',\n ref=scipy.fft.hfftn if has_scipy_fft else None,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.rfft',\n aten_name='fft_rfft',\n ref=np.fft.rfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfft2',\n aten_name='fft_rfft2',\n ref=np.fft.rfft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n 
check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.rfftn',\n aten_name='fft_rfftn',\n ref=np.fft.rfftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.ifft',\n aten_name='fft_ifft',\n ref=np.fft.ifft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.ifft2',\n aten_name='fft_ifft2',\n ref=np.fft.ifft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ifftn',\n aten_name='fft_ifftn',\n ref=np.fft.ifftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfft',\n aten_name='fft_ihfft',\n ref=np.fft.ihfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False),\n SpectralFuncInfo('fft.ihfft2',\n aten_name='fft_ihfft2',\n ref=scipy.fft.ihfftn if has_scipy_fft else None,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfftn',\n aten_name='fft_ihfftn',\n ref=scipy.fft.ihfftn if has_scipy_fft else None,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 2e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.irfft',\n aten_name='fft_irfft',\n ref=np.fft.irfft,\n ndimensional=SpectralFuncType.OneD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.irfft2',\n aten_name='fft_irfft2',\n ref=np.fft.irfft2,\n ndimensional=SpectralFuncType.TwoD,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.irfftn',\n aten_name='fft_irfftn',\n ref=np.fft.irfftn,\n ndimensional=SpectralFuncType.ND,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n OpInfo('fft.fftshift',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_fftshift,\n supports_out=False,\n supports_forward_ad=True,\n 
supports_fwgrad_bwgrad=True,\n ),\n OpInfo('fft.ifftshift',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_fftshift,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('stft',\n decorators=[\n skipCPUIfNoFFT,\n DecorateInfo(unittest.skip(\"Skipped! stft does not match the native function\"),\n 'TestJit', 'test_variant_consistency_jit'),\n ],\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_stft,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_out=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n ),\n OpInfo('istft',\n decorators=[\n skipCPUIfNoFFT,\n DecorateInfo(unittest.skip(\"Skipped! istft does not match the native function\"),\n 'TestJit', 'test_variant_consistency_jit'),\n # gradcheck fails on ROCm (gh-68429)\n DecorateInfo(skipCUDAIfRocm, 'TestGradients', 'test_fn_grad'),\n ],\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_istft,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_out=False,\n ),\n UnaryUfuncInfo('floor',\n ref=np.floor,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n OpInfo('flip',\n op=torch.flip,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_flip,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('fliplr',\n op=torch.fliplr,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('flipud',\n op=torch.flipud,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n UnaryUfuncInfo('i0',\n ref=np_unary_ufunc_integer_promotion_wrapper(\n scipy.special.i0) if TEST_SCIPY else _NOTHING,\n aliases=('special.i0',),\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 5e-1}),),\n backward_dtypesIfCPU=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n backward_dtypesIfROCM=floating_types(),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_i0_i1),\n UnaryUfuncInfo('special.i0e',\n aten_name='special_i0e',\n ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 3e-1}),),\n backward_dtypesIfCPU=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n backward_dtypesIfROCM=floating_types(),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_i0_i1,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.i1',\n aten_name='special_i1',\n ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool),\n 
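# i1 is the order-1 modified Bessel function of the first kind\n                   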
sample_inputs_func=sample_inputs_i0_i1,\n                   safe_casts_outputs=True,\n                   decorators=(\n                       DecorateInfo(toleranceOverride({\n                           torch.float32: tol(atol=1e-4, rtol=0),\n                           torch.bool: tol(atol=1e-4, rtol=0)})),\n                   ),\n                   skips=(\n                       # TODO: FIXME: jiterator does not support casting to complex outs\n                       DecorateInfo(unittest.skip(\"FIXME: Jiterator does not support complex outs!\"),\n                                    \"TestUnaryUfuncs\",\n                                    \"test_out_arg_all_dtypes\",\n                                    device_type='cuda'),\n                   ),\n                   supports_fwgrad_bwgrad=True,\n                   supports_forward_ad=True),\n    UnaryUfuncInfo('special.i1e',\n                   aten_name='special_i1e',\n                   ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,\n                   dtypes=all_types_and(torch.bool),\n                   dtypesIfCUDA=all_types_and(torch.bool),\n                   sample_inputs_func=sample_inputs_i0_i1,\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   safe_casts_outputs=True),\n    UnaryUfuncInfo('special.ndtr',\n                   aten_name='special_ndtr',\n                   decorators=(precisionOverride({torch.bfloat16: 5e-3,\n                                                  torch.float16: 5e-4}),),\n                   ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,\n                   dtypes=all_types_and(torch.bool, torch.bfloat16),\n                   dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   safe_casts_outputs=True,\n                   skips=(\n                       # Dispatch stub: unsupported device typemeta\n                       DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='meta'),\n                       # (ROCm) Memory access fault by GPU node-4 (Agent handle: 0x55cebc9e8430) on address 0x7fa17b757000\n                       DecorateInfo(unittest.skip(\"Skipped! ROCm memory exception\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n                                    device_type='cuda', dtypes=[torch.float64], active_if=TEST_WITH_ROCM),\n                   )),\n    BinaryUfuncInfo('floor_divide',\n                    dtypes=all_types_and(torch.half, torch.bfloat16),\n                    sample_inputs_func=sample_inputs_binary_pwise,\n                    supports_autograd=False,\n                    rhs_make_tensor_kwargs=dict(exclude_zero=True),\n                    ),\n    UnaryUfuncInfo('frexp',\n                   op=torch.frexp,\n                   ref=np.frexp,\n                   dtypes=floating_types_and(torch.half, torch.bfloat16),\n                   dtypesIfCUDA=floating_types_and(torch.half),\n                   # skip testing torch.frexp as it is not supported by the ROCm platform yet\n                   decorators=[],\n                   supports_out=False,\n                   supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   skips=(\n                       # skips the tests below as torch.frexp returns tuple-like (mantissa, exponent) as outputs,\n                       # while these tests currently require output to be a single tensor.\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_non_contig_expand'),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_variant_consistency'),\n\n                       # skips test_reference_numerics due to an error in Windows CI.\n                       # np.frexp returns the exponent as np.intc dtype on the Windows platform,\n                       # and np.intc does not have a corresponding torch dtype\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n                                    active_if=IS_WINDOWS),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n                                    active_if=IS_WINDOWS),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n                                    active_if=IS_WINDOWS),\n                   )),\n    BinaryUfuncInfo('ge',\n                    aliases=('greater_equal',),\n                    dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n                    
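# comparison ops return torch.bool regardless of the input dtypes, e.g.\n                    # >>> torch.ge(torch.tensor([1, 2]), torch.tensor([2, 2]))\n                    # tensor([False,  True])\n                    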
always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('geqrf',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_qr_geqrf,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n BinaryUfuncInfo('gt',\n aliases=('greater',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n UnaryUfuncInfo('imag',\n ref=np.imag,\n dtypes=complex_types(),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.\n check_batched_forward_grad=False,\n skips=(\n # Skip since real and imag don't have out variants.\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('gradient',\n dtypes=floating_and_complex_types_and(torch.int8, torch.int16,\n torch.int32, torch.int64,\n torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # following tests give a runtime error with undefined value tensor\n # see discussion : https://github.com/pytorch/pytorch/issues/56660\n # RuntimeError:\n # Arguments for call are not valid.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950\n # 69925: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='cuda'),\n # (ROCm) Memory exception on virtual address 0x7f6f3deb7000, node id 4: Page not present\n DecorateInfo(unittest.skip(\"Skipped! 
ROCm memory exception\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.float64, torch.complex128], active_if=TEST_WITH_ROCM),\n ),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_gradient),\n OpInfo('inverse',\n op=torch.inverse,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('isin',\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.half),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_isin),\n OpInfo('kthvalue',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_kthvalue,\n error_inputs_func=error_inputs_kthvalue),\n BinaryUfuncInfo('le',\n aliases=('less_equal',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('linalg.det',\n op=torch.linalg.det,\n aliases=('det', ),\n dtypes=floating_and_complex_types(),\n backward_dtypes=floating_and_complex_types(),\n aten_name='linalg_det',\n sample_inputs_func=sample_inputs_linalg_det,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm,\n DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],\n check_batched_gradgrad=False,\n supports_inplace_autograd=False),\n OpInfo('linalg.det',\n op=torch.linalg.det,\n variant_test_name='singular',\n aliases=('det', ),\n dtypes=double_types(),\n backward_dtypes=double_types(),\n aten_name='linalg_det',\n sample_inputs_func=sample_inputs_linalg_det_singular,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm,\n DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],\n check_batched_gradgrad=False,\n supports_inplace_autograd=False,\n skips=(\n # These tests started breaking after touching the SVD.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad', device_type='cpu',\n dtypes=(torch.complex128,), active_if=IS_WINDOWS),\n # Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed\n # Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad', device_type='cuda',\n dtypes=(torch.complex128,)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n )),\n OpInfo('linalg.cholesky',\n aten_name='linalg_cholesky',\n dtypes=floating_and_complex_types(),\n # TODO: RuntimeError: While computing batched gradients,\n # got: vmap: Calling Tensor.as_strided is not supported\n # unless the batch dims being vmapped over are at the front of the tensor (in memory layout).\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('linalg.cholesky_ex',\n aten_name='linalg_cholesky_ex',\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n 
supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_linalg_cholesky,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('linalg.cond',\n aten_name='linalg_cond',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_cond,\n check_batched_gradgrad=False,\n check_batched_forward_grad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],),\n OpInfo('linalg.eig',\n aten_name='linalg_eig',\n op=torch.linalg.eig,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_eig,\n check_batched_forward_grad=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Forward-over-reverse gradgrad might be incorrect\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'),),),\n OpInfo('linalg.eigvals',\n aten_name='linalg_eigvals',\n op=torch.linalg.eigvals,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_forward_grad=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('linalg.eigh',\n aten_name='linalg_eigh',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n check_batched_forward_grad=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Forward-over-reverse gradgrad might be incorrect\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',\n dtypes=complex_types()),),\n ),\n OpInfo('linalg.eigvalsh',\n aten_name='linalg_eigvalsh',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_eigh,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n check_batched_forward_grad=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )),\n OpInfo('linalg.householder_product',\n aten_name='linalg_householder_product',\n op=torch.linalg.householder_product,\n aliases=('orgqr', ),\n dtypes=floating_and_complex_types(),\n # TODO: backward uses in-place operations that vmap doesn't like\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_householder_product,\n decorators=[\n skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,\n DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})),\n ]),\n OpInfo('linalg.lstsq',\n aten_name='linalg_lstsq',\n 
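# linalg.lstsq returns a named 4-tuple (solution, residuals, rank, singular_values)\n           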
dtypes=floating_and_complex_types(),\n           supports_out=True,\n           sample_inputs_func=sample_inputs_linalg_lstsq,\n           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n           skips=(\n               # we skip gradient checks for this suite as they are tested in\n               # variant_test_name='grad_oriented'\n               DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n           )),\n    OpInfo('linalg.lstsq',\n           aten_name='linalg_lstsq',\n           variant_test_name='grad_oriented',\n           # gradchecks for forward AD fail with multi-Tensor outputs\n           op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0],\n           supports_out=False,\n           dtypes=floating_and_complex_types(),\n           sample_inputs_func=sample_inputs_linalg_lstsq,\n           supports_autograd=True,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n           skips=(\n               # these tests do not work when passing a lambda for op\n               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n               DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n           )),\n    OpInfo('linalg.matrix_power',\n           aliases=('matrix_power',),\n           aten_name='linalg_matrix_power',\n           dtypes=floating_and_complex_types(),\n           supports_inplace_autograd=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           check_batched_grad=False,\n           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],\n           sample_inputs_func=sample_inputs_linalg_matrix_power,\n           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n           ),\n    OpInfo('linalg.multi_dot',\n           # Need this lambda because gradcheck does not work with TensorList inputs\n           aten_name='linalg_multi_dot',\n           dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n           dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n           supports_inplace_autograd=False,\n           # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)\n           check_batched_grad=False,\n           check_batched_gradgrad=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # https://github.com/pytorch/pytorch/issues/66357\n           check_batched_forward_grad=False,\n           sample_inputs_func=sample_inputs_linalg_multi_dot,\n           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n           skips=(\n               # https://github.com/pytorch/pytorch/issues/67470\n               DecorateInfo(unittest.skip(\"67470!\"), 'TestCommon', 'test_noncontiguous_samples'),\n               # Fails on XLA.\n               # AssertionError: False is not true : Tensors failed to compare as equal!\n               DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n           )),\n    OpInfo('linalg.norm',\n           op=torch.linalg.norm,\n           dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n           sample_inputs_func=sample_inputs_linalg_norm,\n           aten_name='linalg_norm',\n           skips=(\n               # Pre-existing condition; Needs to be fixed\n               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n               # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n           )),\n    OpInfo('linalg.matrix_norm',\n           aten_name='linalg_matrix_norm',\n           dtypes=floating_and_complex_types(),\n           check_batched_gradgrad=False,\n           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n           sample_inputs_func=sample_inputs_linalg_matrix_norm,\n           skips=(\n               # 
Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n )),\n OpInfo('linalg.qr',\n aten_name='linalg_qr',\n op=torch.linalg.qr,\n dtypes=floating_and_complex_types(),\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_linalg_qr_geqrf,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.slogdet',\n aten_name='linalg_slogdet',\n op=torch.linalg.slogdet,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_slogdet,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),\n OpInfo('linalg.vector_norm',\n op=torch.linalg.vector_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_vector_norm,\n aten_name='linalg_vector_norm'),\n UnaryUfuncInfo('log',\n ref=np.log,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n ),\n # log(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n UnaryUfuncInfo('log10',\n ref=np.log10,\n domain=(0, None),\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n ),\n # log10(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n UnaryUfuncInfo('log1p',\n ref=np.log1p,\n aliases=('special.log1p',),\n domain=(-1, None),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('log2',\n ref=np.log2,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n ),\n # log2(z)->-inf for |z|->0\n reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),\n BinaryUfuncInfo('ldexp',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_binary_pwise,\n promotes_int_to_float=True,\n supports_out=True,\n skips=(\n # RuntimeError: mul(): functions with out=... arguments don't support\n # automatic differentiation, but one of the arguments requires grad\n # https://github.com/pytorch/pytorch/issues/68966\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),\n DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),\n # FIXME: ldexp does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n ),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.complex64: tol(atol=1e-05, rtol=1e-05)\n }),\n 'TestCommon', device_type='cpu',\n ),\n ], ),\n OpInfo('logaddexp',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n OpInfo('logaddexp2',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n UnaryUfuncInfo('logical_not',\n ref=np.logical_not,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_autograd=False,\n skips=(\n # The function variant always returns BoolTensor\n # while the inplace variant preserves the input dtype.\n # >>> t = torch.randn(3)\n # >>> torch.logical_not(t)\n # tensor([False, False, False])\n # >>> torch.logical_not(t).dtype\n # torch.bool\n # >>> t.logical_not_().dtype\n # torch.float32\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_variant_consistency',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n )),\n BinaryUfuncInfo('lt',\n aliases=('less',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('linalg.lu_factor',\n aten_name='linalg_lu_factor',\n op=torch.linalg.lu_factor,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_lu_factor,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),\n OpInfo('linalg.lu_factor_ex',\n aten_name='linalg_lu_factor_ex',\n op=torch.linalg.lu_factor_ex,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_linalg_lu_factor,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),\n OpInfo('lu',\n op=torch.lu,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # need: lu_unpack\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_lu,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # we skip jit tests because `lu` is a torch function\n # RuntimeError:\n # 'Tensor (inferred)' object has no attribute or method 'lu'.:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return i0.lu(True, True)\n # ~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('lu_solve',\n op=torch.lu_solve,\n dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # need: lu_unpack\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_lu_solve,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # RuntimeError: lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950\n DecorateInfo(unittest.skip(\"Tests different backward implementations\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n ),\n OpInfo('lu_unpack',\n op=torch.lu_unpack,\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n # we use in-place operations which cannot be avoided.\n # This causes vmap failures, hence we skip batched gradient checks\n check_batched_grad=False,\n supports_out=True,\n sample_inputs_func=sample_inputs_lu_unpack,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # LU_pivots is expected to be a contiguous tensor\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n )),\n OpInfo('masked_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, 
torch.bfloat16),\n           sample_inputs_func=sample_inputs_masked_fill,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           check_batched_forward_grad=False,\n           supports_out=False),\n    OpInfo('masked_scatter',\n           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n           sample_inputs_func=sample_inputs_masked_scatter,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # https://github.com/pytorch/pytorch/issues/66357\n           check_batched_forward_grad=False,\n           supports_out=False),\n    OpInfo('masked_select',\n           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=sample_inputs_masked_select),\n    OpInfo('matrix_exp',\n           dtypes=floating_and_complex_types_and(torch.bfloat16),\n           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n           aliases=('linalg.matrix_exp',),\n           sample_inputs_func=sample_inputs_matrix_exp,\n           # Needs to construct a 2n x 2n matrix by copy_-ing into it\n           check_batched_grad=False,\n           check_batched_gradgrad=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # https://github.com/pytorch/pytorch/issues/66357\n           check_batched_forward_grad=False,\n           supports_out=False,\n           ),\n    OpInfo('matmul',\n           aliases=('linalg.matmul',),\n           dtypes=all_types_and_complex_and(torch.bfloat16),\n           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n           dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),\n           backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,\n                                                                *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),\n           assert_autodiffed=True,\n           assert_jit_shape_analysis=True,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           check_batched_forward_grad=False,\n           sample_inputs_func=sample_inputs_matmul,\n           decorators=[\n               # ROCm intermittently fails the test with standard atol/rtol\n               DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),\n                            'TestCommon', 'test_noncontiguous_samples',\n                            active_if=TEST_WITH_ROCM), ],\n           skips=(\n               # https://github.com/pytorch/pytorch/issues/67470\n               DecorateInfo(unittest.skip(\"67470!\"),\n                            'TestCommon', 'test_noncontiguous_samples',\n                            device_type='cpu', dtypes=(torch.long,)),\n               # AssertionError: False is not true : Tensors failed to compare as equal!\n               DecorateInfo(unittest.expectedFailure, 'TestOpInfo',\n                            device_type='xla', dtypes=(torch.long,)),\n           )),\n    OpInfo('max',\n           variant_test_name='reduction_with_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n           supports_fwgrad_bwgrad=True,\n           supports_forward_ad=True),\n    OpInfo('max',\n           variant_test_name='reduction_no_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           supports_out=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=sample_inputs_max_min_reduction_no_dim),\n    OpInfo('median',\n           dtypes=all_types_and(torch.bfloat16),\n           dtypesIfCUDA=all_types_and(torch.float16),\n           # TODO: some signatures of median do support out\n           supports_out=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),\n    OpInfo('nanmedian',\n           dtypes=all_types_and(torch.bfloat16),\n           dtypesIfCUDA=all_types_and(torch.float16),\n           # TODO: some signatures of nanmedian do support out\n           supports_out=False,\n           supports_forward_ad=True,\n           
supports_fwgrad_bwgrad=True,\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),\n OpInfo('var_mean',\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),\n backward_dtypes=floating_types_and(torch.half),\n backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.half),\n # TODO: some signatures of var_mean do support out\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: var_mean\n skips=(\n # https://github.com/pytorch/pytorch/issues/67539\n DecorateInfo(unittest.skip(\"67539\"), 'TestCommon', 'test_noncontiguous_samples',\n active_if=TEST_WITH_ASAN, device_type='cpu'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # TODO: review with var_mean tests in test_autograd.py\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'),\n # Division by zero, may be related to above?\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))),\n OpInfo('std_mean',\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),\n backward_dtypes=floating_types_and(torch.half),\n backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.half),\n # TODO: some signatures of std_mean do support out\n supports_out=False,\n supports_forward_ad=True, # Supports only certain variants?\n supports_fwgrad_bwgrad=False, # Need: std_mean\n skips=(\n # https://github.com/pytorch/pytorch/issues/67539\n DecorateInfo(unittest.skip(\"67539\"), 'TestCommon', 'test_noncontiguous_samples',\n active_if=TEST_WITH_ASAN, device_type='cpu'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # TODO: fix along with var_mean autograd tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD'),\n # Division by zero, may be related to above?\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))),\n OpInfo('meshgrid',\n variant_test_name='variadic_tensors',\n ref=np.meshgrid,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),\n sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),\n skips=[\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 
'TestJit', 'test_variant_consistency_jit'),\n               # meshgrid is defined in torch.functional to take a\n               # variadic list of tensors. Variadic parameters are not\n               # compatible with the normalize operator tests.\n               DecorateInfo(unittest.skip(\"Skipped!\"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n               # Skip operator schema test because this is a functional and not an operator\n               DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n           ],\n           supports_out=False,\n           supports_fwgrad_bwgrad=True,\n           supports_forward_ad=True),\n    OpInfo('meshgrid',\n           variant_test_name='list_of_tensors',\n           # Unlike the variant above, we do not use np.meshgrid as a\n           # ref since it does not officially support lists of numpy\n           # arrays.\n           dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),\n           sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),\n           skips=[\n               # meshgrid is defined in torch.functional to take a\n               # variadic list of tensors. Variadic parameters are not\n               # compatible with the normalize operator tests.\n               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n           ],\n           assert_autodiffed=True,\n           supports_out=False,\n           autodiff_nonfusible_nodes=[],\n           supports_fwgrad_bwgrad=True,\n           supports_forward_ad=True),\n    OpInfo('min',\n           variant_test_name='reduction_with_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n           supports_fwgrad_bwgrad=True,\n           supports_forward_ad=True),\n    OpInfo('min',\n           variant_test_name='reduction_no_dim',\n           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n           supports_out=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=sample_inputs_max_min_reduction_no_dim),\n    OpInfo('quantile',\n           dtypes=floating_types(),\n           sample_inputs_func=sample_inputs_reduction_quantile,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # See https://github.com/pytorch/pytorch/issues/66357\n           # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which\n           # does not have a batching rule in core\n           check_batched_forward_grad=False),\n    OpInfo('nanquantile',\n           dtypes=floating_types(),\n           sample_inputs_func=sample_inputs_reduction_quantile,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # See https://github.com/pytorch/pytorch/issues/66357\n           # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which\n           # does not have a batching rule in core\n           check_batched_forward_grad=False),\n    BinaryUfuncInfo(\n        'max',\n        aliases=('maximum',),\n        variant_test_name='binary',\n        dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n        sample_inputs_func=sample_inputs_max_min_binary,\n        supports_forward_ad=True,\n        supports_fwgrad_bwgrad=True,\n        assert_autodiffed=True,\n        ref=np.maximum,\n        skips=(\n            # FIXME: maximum does not accept scalar inputs\n            DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n            # TODO: FIXME: RuntimeError: \"max_elementwise_cuda\" not implemented for 'ComplexFloat'\n            DecorateInfo(unittest.expectedFailure,\n                         'TestBinaryUfuncs',\n                         'test_type_promotion',\n                         device_type='cuda'),\n        ),\n    ),\n    BinaryUfuncInfo(\n        'maximum',\n        dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n        supports_forward_ad=True,\n        supports_fwgrad_bwgrad=True,\n        sample_inputs_func=sample_inputs_max_min_binary,\n        ref=np.maximum,\n        skips=(\n            # 
FIXME: maximum does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"max_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo(\n 'min',\n aliases=('minimum',),\n variant_test_name='binary',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n ref=np.minimum,\n skips=(\n # FIXME: min does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"min_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo(\n 'minimum',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_max_min_binary,\n ref=np.minimum,\n skips=(\n # FIXME: minimum does not accept scalar inputs\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n # TODO: FIXME: RuntimeError: \"min_elementwise_cuda\" not implemented for 'ComplexFloat'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n ),\n ),\n BinaryUfuncInfo('logical_and',\n ref=np.logical_and,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # FIXME: logical_and does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('logical_or',\n ref=np.logical_or,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # FIXME: logical_or does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('logical_xor',\n ref=np.logical_xor,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n always_returns_bool=True,\n skips=(\n # FIXME: logical_xor does not accept scalar inputs\n DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n )),\n BinaryUfuncInfo('bitwise_or',\n ref=np.bitwise_or,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n # TODO: FIXME: RuntimeError: \"bitwise_or_cuda\" not implemented for 'Half'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n device_type='cuda'),\n )),\n BinaryUfuncInfo('bitwise_xor',\n ref=np.bitwise_xor,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_binary_pwise,\n supports_autograd=False,\n skips=(\n # TODO: FIXME: RuntimeError: \"bitwise_xor_cuda\" not implemented for 'Half'\n DecorateInfo(unittest.expectedFailure,\n 'TestBinaryUfuncs',\n 'test_type_promotion',\n 
device_type='cuda'),\n                    )),\n    BinaryUfuncInfo('heaviside',\n                    ref=lambda a, b: (\n                        # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64\n                        np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)\n                    ),\n                    dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n                    sample_inputs_func=sample_inputs_binary_pwise,\n                    supports_autograd=False,\n                    # FIXME: heaviside does not accept scalar inputs\n                    skips=(\n                        # RuntimeError: heaviside is not yet implemented for tensors with different dtypes.\n                        DecorateInfo(unittest.expectedFailure,\n                                     'TestBinaryUfuncs',\n                                     'test_type_promotion'),\n                        # PyTorch's heaviside does not appear to propagate NaNs\n                        DecorateInfo(unittest.skip(\"Skipped!\"),\n                                     'TestBinaryUfuncs',\n                                     'test_reference_numerics_extremal_values'),\n                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n                    )),\n    BinaryUfuncInfo('lcm',\n                    ref=np.lcm,\n                    dtypes=integral_types_and(),\n                    sample_inputs_func=sample_inputs_binary_pwise,\n                    supports_autograd=False,\n                    skips=(\n                        # TODO: FIXME: lcm doesn't support scalars\n                        DecorateInfo(unittest.expectedFailure,\n                                     'TestBinaryUfuncs',\n                                     'test_broadcast_python_scalar'),\n                    )),\n    BinaryUfuncInfo('gcd',\n                    ref=np.gcd,\n                    dtypes=integral_types_and(),\n                    sample_inputs_func=sample_inputs_binary_pwise,\n                    supports_autograd=False,\n                    skips=(\n                        DecorateInfo(unittest.expectedFailure,\n                                     'TestBinaryUfuncs',\n                                     'test_reference_numerics_small_values',\n                                     dtypes=(torch.int8,)),\n                        # TODO: FIXME: jiterator doesn't support non-tensor inputs\n                        DecorateInfo(unittest.expectedFailure,\n                                     'TestBinaryUfuncs',\n                                     'test_broadcast_python_scalar'),)),\n    BinaryUfuncInfo('isclose',\n                    ref=np.isclose,\n                    dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n                    sample_inputs_func=sample_inputs_isclose,\n                    supports_autograd=False,\n                    supports_out=False,\n                    skips=(\n                        # RuntimeError: Short did not match Int\n                        DecorateInfo(unittest.expectedFailure,\n                                     'TestBinaryUfuncs',\n                                     'test_type_promotion'),\n                        DecorateInfo(unittest.skip(\"Skipped!\"),\n                                     'TestBinaryUfuncs',\n                                     'test_reference_numerics_extremal_values'),\n                        # FIXME: isclose does not accept scalar inputs\n                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),\n                    )),\n    # `softmax` supports different dtypes based on whether the `dtype` argument\n    # is passed or not. Hence two OpInfo entries, one with dtype and other without.\n    # https://github.com/pytorch/pytorch/issues/68752\n    OpInfo('softmax',\n           aliases=('special.softmax', 'nn.functional.softmax',),\n           aten_name='softmax',\n           dtypes=floating_types_and(torch.bfloat16),\n           dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n           sample_inputs_func=sample_inputs_softmax_variant,\n           assert_jit_shape_analysis=True,\n           assert_autodiffed=True,\n           supports_out=False),\n    OpInfo('softmax',\n           aliases=('special.softmax', 'nn.functional.softmax',),\n           variant_test_name=\"with_dtype\",\n           aten_name='softmax',\n           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n           sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n           assert_autodiffed=True,\n           supports_out=False),\n    # `softmin` supports different dtypes based on whether the `dtype` argument\n    # is passed or not. 
Hence two OpInfo entries, one with dtype and other without.\n # https://github.com/pytorch/pytorch/issues/68752\n OpInfo('nn.functional.softmin',\n aten_name='softmin',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_softmax_variant,\n assert_jit_shape_analysis=False,\n assert_autodiffed=False,\n supports_out=False),\n OpInfo('nn.functional.softmin',\n variant_test_name=\"with_dtype\",\n aten_name='softmin',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n assert_autodiffed=False,\n supports_out=False),\n OpInfo(\n \"nn.functional.cross_entropy\",\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_cross_entropy,\n supports_out=False,\n decorators=(\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n device_type=\"cpu\",\n ),\n # FIXME Derivative wrt weights is not implemented\n DecorateInfo(unittest.expectedFailure, \"TestCommon\",\n \"test_floating_inputs_are_differentiable\")\n ),\n skips=(\n # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536\n # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked\n # 1536 bytes CUDA memory on device 0\n DecorateInfo(\n unittest.expectedFailure,\n \"TestJit\",\n \"test_variant_consistency_jit\",\n device_type=\"cuda\",\n ),\n )\n ),\n OpInfo('nn.functional.normalize',\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_normalize),\n OpInfo('aminmax',\n ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(onlyNativeDeviceTypes,),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_aminmax),\n OpInfo('as_strided',\n op=lambda x, size, stride, storage_offset=0:\n torch.as_strided(x, size, stride, storage_offset=storage_offset),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # vmap does not support inplace views\n check_inplace_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_as_strided,\n skips=(\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),\n # AssertionError: False is not true : Scalars failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),)),\n OpInfo('nn.functional.cosine_similarity',\n aten_name=\"cosine_similarity\",\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_cosine_similarity),\n OpInfo('nn.functional.adaptive_avg_pool1d',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n 
supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool1d),\n OpInfo('nn.functional.adaptive_avg_pool2d',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, int]'. :\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool2d),\n OpInfo('nn.functional.adaptive_avg_pool3d',\n dtypes=floating_types_and(torch.half),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, NoneType, NoneType]'. :\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n #\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_avg_pool3d),\n OpInfo('nn.functional.adaptive_max_pool1d',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool1d),\n OpInfo('nn.functional.adaptive_max_pool2d',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, int]'. 
:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool2d),\n OpInfo('nn.functional.adaptive_max_pool3d',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n decorators=(\n # RuntimeError:\n # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):\n # Expected a value of type 'List[int]' for argument 'output_size' but\n # instead found type 'Tuple[NoneType, NoneType, NoneType]'. :\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE\n #\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_adaptive_max_pool3d),\n OpInfo('nn.functional.avg_pool1d',\n aten_name='avg_pool1d',\n supports_autograd=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCPU=floating_types_and(torch.int64, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_avgpool1d),\n OpInfo('nn.functional.avg_pool3d',\n aten_name='avg_pool3d',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_avgpool3d),\n OpInfo('nn.functional.relu',\n aten_name=\"relu\",\n supports_autograd=True,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_nn_activation_relu,\n supports_out=False,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True),\n OpInfo('nn.functional.conv_transpose1d',\n aten_name='conv_transpose1d',\n aliases=('conv_transpose1d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose1d,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please report a bug to PyTorch.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv_transpose2d',\n 
aten_name='conv_transpose2d',\n aliases=('conv_transpose2d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose2d,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv_transpose3d',\n aten_name='conv_transpose3d',\n aliases=('conv_transpose3d',),\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv_transpose3d,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),\n 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')],\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":104, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n DecorateInfo(unittest.skip(\"Skipped! 
RuntimeError: bias tensor has to be contiguous\"), 'TestGradients',\n 'test_forward_mode_AD', device_type='cuda', active_if=TEST_WITH_ROCM),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv1d',\n aliases=('conv1d',),\n aten_name='conv1d',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_conv1d,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":103, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.conv2d',\n aliases=('conv2d',),\n aten_name='conv2d',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=partial(sample_inputs_conv2d),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":103, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False,),\n OpInfo('nn.functional.group_norm',\n aten_name='group_norm',\n aliases=('group_norm',),\n ref=reference_group_norm,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n sample_inputs_func=sample_inputs_group_norm,),\n OpInfo('nn.functional.instance_norm',\n # no ref because instance_norm will often have numerical instability (large numbers or nan)\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n decorators=[\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n skips=(\n DecorateInfo(unittest.skip(\"We don't want to differentiate wrt running mean / std\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n sample_inputs_func=sample_inputs_instance_norm,),\n OpInfo('nn.functional.layer_norm',\n aten_name='layer_norm',\n aliases=('layer_norm',),\n ref=reference_layer_norm,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),\n 'TestCommon', 'test_reference_testing'\n )\n ],\n sample_inputs_func=sample_inputs_layer_norm,),\n OpInfo('nn.functional.local_response_norm',\n dtypes=floating_types_and(torch.int64),\n dtypesIfCPU=floating_types_and(torch.int64, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, 
torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))\n ],\n sample_inputs_func=sample_inputs_local_response_norm,),\n OpInfo('nn.functional.pad',\n variant_test_name='constant',\n aten_name='constant_pad_nd',\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='reflect',\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='replicate',\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False),\n OpInfo('nn.functional.pad',\n variant_test_name='circular',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_grad=False,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Doesn't have a corresponding aten operator.\n # RuntimeError: falseINTERNAL ASSERT FAILED at\n # \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185, please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n ),\n supports_out=False),\n OpInfo('nn.functional.hardswish',\n aten_name=\"hardswish\",\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_hardswish,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: hardswish_backward\n supports_out=False,\n autodiff_nonfusible_nodes=[\"aten::hardswish\"]),\n OpInfo('nn.functional.unfold',\n aten_name='im2col',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_nn_unfold,\n supports_forward_ad=True,\n 
supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='nearest',\n supports_autograd=True,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n dtypes=floating_types_and(torch.uint8),\n dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),\n sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='linear',\n supports_autograd=True,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='bilinear',\n supports_fwgrad_bwgrad=True,\n supports_autograd=True,\n supports_forward_ad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='bicubic',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='trilinear',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n 
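# For illustration only (a hypothetical sketch, not part of the test suite): the\n    # `interpolate` variants above and below each exercise one `mode` of the same\n    # functional, e.g.\n    #   x = torch.randn(1, 3, 8, 8)\n    #   torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')\n    #   torch.nn.functional.interpolate(x, size=(4, 4), mode='bilinear', align_corners=False)\n    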
OpInfo('nn.functional.interpolate',\n aten_name=\"interpolate\",\n variant_test_name='area',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_interpolate, 'area'),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.upsample_bilinear',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.upsample_nearest',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_types_and(torch.uint8),\n dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),\n skips=(\n # RuntimeError: false\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":185,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n supports_out=False),\n OpInfo('nn.functional.leaky_relu',\n aliases=None,\n aten_name=\"leaky_relu\",\n sample_inputs_func=sample_inputs_leaky_relu,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=True,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: leaky_relu_backward\n autodiff_nonfusible_nodes=[\"aten::leaky_relu\"]),\n OpInfo('nn.functional.avg_pool2d',\n aten_name='avg_pool2d',\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n dtypes=floating_types_and(torch.int64),\n dtypesIfCPU=floating_types_and(torch.int64, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_avgpool2d),\n OpInfo('nn.functional.fractional_max_pool2d',\n supports_autograd=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs),\n # vmap does not support random operations\n check_batched_forward_grad=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n test_neg_view=False,\n sample_inputs_func=sample_inputs_fractional_max_pool2d,\n decorators=(\n # FIXME: AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT 
FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'))),\n OpInfo('nn.functional.fractional_max_pool3d',\n supports_autograd=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs),\n # vmap does not support random operations\n check_batched_forward_grad=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n test_neg_view=False,\n sample_inputs_func=sample_inputs_fractional_max_pool3d,\n decorators=(\n # FIXME: both derivatives are implemented incorrectly\n # https://github.com/pytorch/pytorch/issues/69322\n # RuntimeError: cannot reshape tensor of 0 elements into shape [0, 1, -1] because the\n # unspecified dimension size -1 can be any value and is ambiguous\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n # FIXME: AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),)),\n OpInfo('nn.functional.max_pool1d',\n aten_name='max_pool1d',\n supports_autograd=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n # TODO: add shape checks\n assert_jit_shape_analysis=False,\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.max_pool2d',\n aten_name='max_pool2d',\n supports_autograd=True,\n # Vmap is not happy with non-contiguous (channels_last) inputs\n check_batched_gradgrad=False,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n assert_jit_shape_analysis=True,\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.max_pool3d',\n aten_name='max_pool3d',\n supports_autograd=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # got: Batching rule not implemented for aten::flatten.using_ints\n check_batched_forward_grad=False,\n # TODO: add shape checks\n assert_jit_shape_analysis=False,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n # TODO: investigate nondeterminism\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_max_pool),\n OpInfo('nn.functional.linear',\n aten_name='linear',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_linear,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n 
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,\n *[torch.bfloat16] if CUDA11OrLater else []),\n # linear calls mm under the hood which is nondeterministic on CUDA\n # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False),\n OpInfo('nn.functional.bilinear',\n aten_name='bilinear',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_bilinear,\n dtypes=all_types_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n skips=(\n # FIXME: bfloat16 backward support likely depends on CUDA11+ and SM53+\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n ),\n supports_forward_ad=False,\n supports_out=False),\n OpInfo('nn.functional.glu',\n aten_name='glu',\n supports_autograd=True,\n sample_inputs_func=sample_inputs_glu,\n dtypes=floating_types(),\n dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_out=False),\n UnaryUfuncInfo(\n 'nn.functional.elu',\n ref=lambda x, alpha=1.0, inplace=False:\n np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: elu_backward\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'alpha': 0.8}, {'alpha': 0.8}),\n inplace_variant=lambda x, alpha=1.0:\n torch.nn.functional.elu(x, alpha, inplace=True),\n decorators=[\n # Not implemented yet\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'),\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n OpInfo(\n 'nn.functional.prelu',\n ref=lambda x, weight:\n np.maximum(0., x) + np.minimum(0., x) *\n (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_nn_functional_prelu,\n decorators=[\n # FIXME: second derivative is implemented but seems to be incorrect\n # https://github.com/pytorch/pytorch/issues/68760\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n # RuntimeError: Cannot insert a Tensor that requires grad as a constant.\n # Consider making it a parameter or input, or detaching the gradient\n # https://github.com/pytorch/pytorch/issues/68752\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.celu',\n ref=lambda x, alpha=1.0, 
inplace=False:\n np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: elu_backward\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'alpha': 0.8}, {'alpha': 0.8}),\n inplace_variant=lambda x, alpha=1.0:\n torch.nn.functional.celu(x, alpha, inplace=True),\n decorators=[\n # Not implemented yet\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'),\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.rrelu',\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=False,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_kwargs=lambda device, dtype, input:\n ({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}),\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-03, rtol=1.2e-03),\n torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ),\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n 'TestJit', 'test_variant_consistency_jit'\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.selu',\n ref=lambda x, inplace=False:\n 1.0507009873554804934193349852946 * (\n np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))\n ),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True, # depends on 'elu'\n supports_fwgrad_bwgrad=False, # Needs: elu_backward\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),\n decorators=[\n # Not implemented yet (depends on 'elu_')\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'),\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-2, rtol=1.8e-2),\n torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.silu',\n ref=lambda x, inplace=False:\n x / (1 + np.exp(-x)),\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=False,\n supports_autograd=False,\n assert_autodiffed=False,\n supports_out=False,\n inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({\n torch.float16: tol(atol=1e-3, rtol=1e-3),\n torch.bfloat16: tol(atol=1e-4, rtol=1e-4)\n }),\n 'TestUnaryUfuncs', device_type='cuda',\n ), ],\n skips=[\n # FIXME: numpy reference diverges: Comparing (nan+nanj) and 
(-0+0j)\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', dtypes=(torch.complex64,)), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.hardsigmoid',\n ref=reference_hardsigmoid,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=False,\n supports_forward_ad=True,\n supports_out=False,\n inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],\n skips=[\n # still want to test that first derivative works though second derivative isn't supported\n DecorateInfo(unittest.expectedFailure, 'TestGradients', \"test_inplace_gradgrad\"),\n # produces 0 instead of nan on ROCM\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_extremal\",\n dtypes=(torch.bfloat16, torch.float16, torch.float32,), device_type='cuda',\n active_if=(TEST_WITH_ROCM)), ]\n ),\n UnaryUfuncInfo(\n 'nn.functional.logsigmoid',\n aten_name=\"log_sigmoid\",\n ref=reference_logsigmoid,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n ),\n UnaryUfuncInfo(\n 'nn.functional.mish',\n ref=lambda x: x * np.tanh(reference_softplus(x)),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n inplace_variant=partial(torch.nn.functional.mish, inplace=True),\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ],\n ),\n UnaryUfuncInfo(\n 'nn.functional.softsign',\n ref=lambda x: x / (np.abs(x) + 1),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],\n skips=(\n DecorateInfo(unittest.expectedFailure, 'TestGradients',\n \"test_fn_fwgrad_bwgrad\", dtypes=(torch.complex128,)),\n # pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j)\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',\n \"test_reference_numerics_hard\", dtypes=(torch.complex64,)),),\n ),\n UnaryUfuncInfo(\n 'nn.functional.tanhshrink',\n ref=lambda x: x - np.tanh(x),\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_autograd=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n decorators=[\n DecorateInfo(\n toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), ],\n skips=(\n # in each case, pytorch will produce a nan while numpy will not\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_normal\",\n dtypes=(torch.complex64,), active_if=(IS_MACOS)),\n DecorateInfo(unittest.expectedFailure,\n 
'TestUnaryUfuncs', \"test_reference_numerics_hard\",\n dtypes=(torch.complex64,), active_if=(IS_MACOS)),\n DecorateInfo(unittest.expectedFailure,\n 'TestUnaryUfuncs', \"test_reference_numerics_extremal\",\n dtypes=(torch.complex64,), device_type='cpu',\n active_if=(IS_MACOS or IS_WINDOWS)),)\n ),\n OpInfo(\n 'nn.functional.threshold',\n ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype),\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=False,\n supports_gradgrad=True,\n supports_out=False,\n sample_inputs_func=sample_inputs_threshold,\n ),\n BinaryUfuncInfo('nextafter',\n dtypes=floating_types_and(torch.bfloat16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_nextafter,\n skips=(\n # TypeError: nextafter(): argument 'other' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n OpInfo('topk',\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_topk),\n # Multiple variants for batch_norm to test with and without cuDNN disabled\n # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details\n OpInfo('nn.functional.batch_norm',\n aten_name='batch_norm',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_batch_norm,\n skips=(\n DecorateInfo(unittest.skip(\"We don't want to differentiate wrt running mean / std\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),)\n ),\n # This variant tests batch_norm with cuDNN disabled only on CUDA devices\n OpInfo('nn.functional.batch_norm',\n variant_test_name='without_cudnn',\n aten_name='batch_norm',\n dtypes=empty_types(),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n decorators=[onlyCUDA, disablecuDNN],\n skips=(\n DecorateInfo(unittest.skip(\"We don't want to differentiate wrt running mean / std\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n sample_inputs_func=sample_inputs_batch_norm),\n # We have to add 2 OpInfo entry for `igamma` and `igammac`.First is the\n # standard entry, second is to run gradcheck tests on the second argument.\n BinaryUfuncInfo('igamma',\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n aliases=('torch.special.gammainc',),\n dtypesIfCUDA=floating_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_igamma_igammac,\n skips=(\n # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n BinaryUfuncInfo('igamma',\n variant_test_name='grad_other',\n # Since autograd formula is implemented only for other and\n # gradcheck test verifies the formula for input in SampleInput,\n # we permute the arguments.\n op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),\n inplace_variant=None,\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n backward_dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types(),\n backward_dtypesIfCUDA=floating_types(),\n supports_inplace_autograd=False,\n 
decorators=[\n                        # Derivative wrt first tensor not implemented\n                        DecorateInfo(unittest.expectedFailure, \"TestCommon\",\n                                     \"test_floating_inputs_are_differentiable\")\n                    ],\n                    skips=(\n                        # test does not work with passing lambda for op\n                        # AssertionError: False is not true : Tensors failed to compare as equal!\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n                        # test fails as we permute the arguments for the function variant\n                        # but not for inplace or method.\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n                        # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float\n                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n                    ),\n                    sample_inputs_func=sample_inputs_igamma_igammac),\n    BinaryUfuncInfo('igammac',\n                    dtypes=floating_types_and(torch.bfloat16, torch.float16),\n                    aliases=('torch.special.gammaincc',),\n                    dtypesIfCUDA=floating_types(),\n                    supports_autograd=False,\n                    sample_inputs_func=sample_inputs_igamma_igammac,\n                    skips=(\n                        # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float\n                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n                    )),\n    BinaryUfuncInfo('igammac',\n                    variant_test_name='grad_other',\n                    # Since autograd formula is implemented only for other and\n                    # gradcheck test verifies the formula for input in SampleInput,\n                    # we permute the arguments\n                    op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),\n                    inplace_variant=None,\n                    method_variant=None,\n                    dtypes=floating_types_and(torch.bfloat16, torch.float16),\n                    backward_dtypesIfCPU=floating_types_and(torch.bfloat16),\n                    dtypesIfCUDA=floating_types(),\n                    backward_dtypesIfCUDA=floating_types(),\n                    supports_inplace_autograd=False,\n                    decorators=[\n                        # Derivative wrt first tensor not implemented\n                        DecorateInfo(unittest.expectedFailure, \"TestCommon\",\n                                     \"test_floating_inputs_are_differentiable\"),\n                    ],\n                    skips=(\n                        # test does not work with passing lambda for op\n                        # AssertionError: False is not true : Tensors failed to compare as equal!\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n                        # test fails as we permute the arguments for the function variant\n                        # but not for inplace or method.\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n                        # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float\n                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n                    ),\n                    sample_inputs_func=sample_inputs_igamma_igammac),\n    OpInfo('nn.functional.softshrink',\n           aten_name=\"softshrink\",\n           dtypes=floating_types(),\n           dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n           supports_autograd=True,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           assert_autodiffed=False,\n           sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n           supports_gradgrad=True,\n           supports_out=False,\n           ),\n    OpInfo('nn.functional.hardshrink',\n           aten_name=\"hardshrink\",\n           dtypes=floating_types(),\n           dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n           supports_autograd=True,\n           assert_autodiffed=True,\n           sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n           supports_gradgrad=True,\n           supports_out=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           autodiff_nonfusible_nodes=[\"aten::hardshrink\"]),\n    OpInfo('nn.functional.hardtanh',\n           aten_name=\"hardtanh\",\n           dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),\n           
backward_dtypesIfCPU=all_types(),\n dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n autodiff_nonfusible_nodes=[\"aten::hardtanh\"],\n ),\n OpInfo('nn.functional.gelu',\n aten_name=\"gelu\",\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_gelu,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False,\n autodiff_nonfusible_nodes=[\"aten::gelu\"]),\n OpInfo('nn.functional.relu6',\n aten_name=\"relu6\",\n dtypes=all_types_and(torch.bfloat16),\n backward_dtypesIfCPU=floating_types(),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,\n supports_gradgrad=True,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n autodiff_nonfusible_nodes=[\"aten::relu6\"]),\n OpInfo('mm',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_mm),\n OpInfo('mode',\n op=torch.mode,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_mode,),\n MvlGammaInfo(variant_test_name='mvlgamma_p_1',\n domain=(1, None),\n skips=skips_mvlgamma(),\n sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_3',\n domain=(2, None),\n skips=skips_mvlgamma(skip_redundant=True) + (\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_5',\n domain=(3, None),\n skips=skips_mvlgamma(skip_redundant=True) + (\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),\n BinaryUfuncInfo('ne',\n aliases=('not_equal',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n always_returns_bool=True,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops,\n skips=(\n # AssertionError: False is not true : Tensors failed to compare as equal!\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'),\n )),\n OpInfo('narrow',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_narrow),\n UnaryUfuncInfo('neg',\n aliases=('negative', ),\n ref=np.negative,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n error_inputs_func=error_inputs_neg,\n 
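# NOTE (annotation, not in the upstream source): error_inputs_neg supplies cases that must\n                   # raise, e.g. torch.neg(torch.tensor([True])) errors because negating a bool tensor\n                   # with - is unsupported; masks are inverted with ~ or logical_not() instead.\n                   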
supports_forward_ad=True,\n                   supports_fwgrad_bwgrad=True,\n                   supports_sparse=True,\n                   supports_sparse_csr=True,\n                   assert_autodiffed=True,),\n    OpInfo('dist',\n           op=torch.dist,\n           dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n           supports_out=False,\n           sample_inputs_func=sample_inputs_dist),\n    OpInfo('outer',\n           op=torch.outer,\n           aliases=('ger', ),\n           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=sample_inputs_outer,),\n    OpInfo('ormqr',\n           op=torch.ormqr,\n           dtypes=floating_and_complex_types(),\n           supports_autograd=False,\n           sample_inputs_func=sample_inputs_ormqr,\n           decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),\n    OpInfo('permute',\n           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n           dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n           supports_out=False,\n           assert_autodiffed=True,\n           autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n           autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n           assert_jit_shape_analysis=True,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           sample_inputs_func=sample_inputs_permute),\n    BinaryUfuncInfo('pow',\n                    dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n                    # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled\n                    # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently\n                    # unsupported on CPU.\n                    backward_dtypes=floating_and_complex_types_and(torch.bfloat16),\n                    backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half),\n                    sample_inputs_func=sample_inputs_pow,\n                    supports_inplace_autograd=False,\n                    supports_forward_ad=True,\n                    supports_fwgrad_bwgrad=True,\n                    assert_autodiffed=True,\n                    skips=(\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n                        # RuntimeError: Integers to negative integer powers are not allowed.\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n                                     dtypes=(torch.int8, torch.int16, torch.int32, torch.int64)),\n                    )),\n    BinaryUfuncInfo('float_power',\n                    dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n                    promotes_int_to_float=True,\n                    sample_inputs_func=sample_inputs_pow,\n                    supports_forward_ad=True,\n                    supports_fwgrad_bwgrad=True,\n                    skips=(\n                        DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs', 'test_type_promotion'),\n                    )),\n    OpInfo('qr',\n           op=torch.qr,\n           dtypes=floating_and_complex_types(),\n           sample_inputs_func=sample_inputs_linalg_qr_geqrf,\n           # batched gradients do not work for empty inputs\n           # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n           check_batched_gradgrad=False,\n           supports_forward_ad=True,\n           supports_fwgrad_bwgrad=True,\n           # See https://github.com/pytorch/pytorch/issues/66357\n           check_batched_forward_grad=False,\n           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n    UnaryUfuncInfo('rad2deg',\n                   ref=np.degrees,\n                   decorators=(precisionOverride({torch.bfloat16: 7e-1,\n                                                  torch.float16: 7e-1}),),\n                   dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n                   skips=(\n                       # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n                                    dtypes=[torch.bfloat16]),\n                       DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n                                    dtypes=[torch.bfloat16]),\n                       
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n ),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('real',\n ref=np.real,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Skip since real and imag don't have out variants.\n DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('roll',\n ref=np.roll,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_roll),\n OpInfo('rot90',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_rot90),\n # To test reference numerics against multiple values of argument `decimals`,\n # we make multiple OpInfo entries with each entry corresponding to different value of decimals.\n UnaryUfuncInfo('round',\n ref=np.round,\n aliases=('special.round',),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True,),\n UnaryUfuncInfo('round',\n ref=np.round,\n variant_test_name='decimals_0',\n aliases=('special.round',),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}),\n sample_inputs_func=partial(sample_inputs_unary, op_kwargs={'decimals': 0}),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=False,\n supports_sparse_csr=False),\n UnaryUfuncInfo('round',\n ref=np.round,\n variant_test_name='decimals_3',\n aliases=('special.round',),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}),\n sample_inputs_func=partial(sample_inputs_unary, op_kwargs={'decimals': 3}),\n skips=(\n # test_ops already tested for this overload with `decimals_0` opinfo entry\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits'),\n ),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=False,\n supports_sparse_csr=False),\n UnaryUfuncInfo('round',\n ref=np.round,\n variant_test_name='decimals_neg_3',\n aliases=('special.round',),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}),\n sample_inputs_func=partial(sample_inputs_unary, op_kwargs={'decimals': -3}),\n skips=(\n # test_ops already tested for this overload with `decimals_0` opinfo entry\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 
'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits'),\n ),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=False,\n supports_sparse_csr=False),\n UnaryUfuncInfo('sin',\n ref=np.sin,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),\n UnaryUfuncInfo('sinc',\n ref=np_sinc_with_fp16_as_fp32,\n aliases=('special.sinc',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2,\n torch.float16: 1e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/49133\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat]),\n )),\n UnaryUfuncInfo('sinh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n decorators=(precisionOverride({torch.float16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('sign',\n ref=reference_sign,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n )),\n UnaryUfuncInfo('sgn',\n ref=reference_sgn,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n # Reference: https://github.com/pytorch/pytorch/issues/53958\n # Test fails in comparison on Nan as the `equal_nan` is True for\n # comparing the CPU tensors.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.complex64, torch.complex128]),\n # Reference: https://github.com/pytorch/pytorch/issues/48486\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.complex64]),\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=complex_types()),\n # Passes for float, but for complex - Need: _s_where\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',\n dtypes=complex_types()),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_inplace_forward_mode_AD',\n dtypes=complex_types()),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n OpInfo('split',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=partial(sample_inputs_split, list_args=False),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n assert_autodiffed=True),\n OpInfo('split',\n variant_test_name='list_args',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=partial(sample_inputs_split, list_args=True),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('split_with_sizes',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_split_with_sizes,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True),\n BinaryUfuncInfo('__radd__',\n op=torch.Tensor.__radd__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __radd__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__radd__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n autodiff_nonfusible_nodes=['aten::add'],),\n BinaryUfuncInfo('__rdiv__',\n op=torch.Tensor.__rdiv__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n promotes_int_to_float=True,\n lhs_make_tensor_kwargs={'exclude_zero': True},\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rdiv__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rdiv__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),\n BinaryUfuncInfo('__rmul__',\n op=torch.Tensor.__rmul__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rmul__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rmul__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n autodiff_nonfusible_nodes=['aten::mul'],),\n OpInfo('__rand__',\n op=torch.Tensor.__rand__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n BinaryUfuncInfo('__ror__',\n op=torch.Tensor.__ror__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n 
BinaryUfuncInfo('__rxor__',\n op=torch.Tensor.__rxor__,\n dtypes=integral_types_and(torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_autograd=False,\n supports_forward_ad=True,),\n OpInfo('__rmatmul__',\n op=torch.Tensor.__rmatmul__,\n dtypes=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],\n torch.complex64, torch.complex128),\n backward_dtypesIfCUDA=floating_types_and(torch.float16,\n *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],\n torch.complex64, torch.complex128),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_matmul,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_forward_grad=False,\n decorators=(\n # https://github.com/pytorch/pytorch/issues/67470\n DecorateInfo(unittest.skip(\"67470!\"),\n 'TestCommon', 'test_noncontiguous_samples',\n device_type='cpu', dtypes=(torch.long,)),\n DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestMathBits', 'test_conj_view'),\n # Fails on XLA.\n # AssertionError: False is not true : Tensors failed to compare as equal\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),\n DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}),\n 'TestCommon', 'test_noncontiguous_samples',\n device_type='cuda', active_if=TEST_WITH_ROCM)),\n skips=(\n # RuntimeError:\n # object has no attribute __rmatmul__:\n # File \"<string>\", line 3\n # def the_method(i0, i1):\n # return torch.__rmatmul__(i0, i1)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n )),\n BinaryUfuncInfo('__rmod__',\n op=torch.Tensor.__rmod__,\n dtypes=floating_types_and(torch.bfloat16, torch.half,),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rmod__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rmod__(i0, 3.14)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n # RuntimeError: \"remainder_cuda\" not implemented for 'Bool'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool,))\n ),\n # Support autograd after torch.remainder(Tensor, Tensor) supports\n # autograd of the second argument.\n # https://github.com/pytorch/pytorch/pull/58476/files#r637167630\n supports_autograd=False,\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::remainder'],),\n BinaryUfuncInfo('__rpow__',\n op=torch.Tensor.__rpow__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n # Reference: https://github.com/pytorch/pytorch/issues/54774\n # \"log2\" \"_vml_cpu\" not implemented for Half\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError:\n # object has no attribute __rpow__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rpow__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n # RuntimeError: \"pow_cuda\" not implemented for 'Bool'\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestBinaryUfuncs',\n dtypes=(torch.bool,)),\n ),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::pow'],),\n BinaryUfuncInfo('__rsub__',\n op=torch.Tensor.__rsub__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # RuntimeError:\n # object has no attribute __rsub__:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.__rsub__(i0, 3.14j)\n # ~~~~~~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit',),\n ),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::rsub'],),\n BinaryUfuncInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_tensor',\n supports_out=False,\n supports_inplace_autograd=False,\n sample_inputs_func=partial(sample_inputs_rsub, other_scalar=False),),\n BinaryUfuncInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_scalar',\n supports_out=False,\n supports_inplace_autograd=False,\n sample_inputs_func=partial(sample_inputs_rsub, other_scalar=True),\n assert_autodiffed=True,),\n OpInfo('select',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select,\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('select_scatter',\n dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select_scatter,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n OpInfo('slice_scatter',\n dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_slice_scatter,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False),\n UnaryUfuncInfo('signbit',\n ref=np.signbit,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False,),\n OpInfo('solve',\n op=torch.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n UnaryUfuncInfo('tan',\n ref=np.tan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.float64],\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # tan(pi/2 * odd_number) is nan\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),\n UnaryUfuncInfo('tanh',\n ref=np.tanh,\n aliases=('nn.functional.tanh',),\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"tanh_backward_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # alias, nn.functional.tanh, will produce (because of warning string saved):\n # \"RuntimeError: Expected to not find \"tanh\" but found it\"\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # tan(j * pi/2 * odd_number) is nan\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (close_to_int(x / (math.pi * 0.5j))\n if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),\n safe_val=0)),\n OpInfo('tensor_split',\n ref=np.array_split,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n sample_inputs_func=sample_inputs_tensor_split,),\n OpInfo('hsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_hsplit,\n error_inputs_func=error_inputs_hsplit,),\n OpInfo('vsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_vsplit,\n error_inputs_func=error_inputs_vsplit,),\n OpInfo('dsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_dsplit,\n error_inputs_func=error_inputs_dsplit,),\n OpInfo('triangular_solve',\n op=torch.triangular_solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n 
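# NOTE (annotation, not in the upstream source): a reading of the gradcheck_wrapper below --\n           # triangular_solve takes (b, A), so idx=1 points the triangular-input wrapper at A,\n           # re-applying the triangular projection before each call so that gradcheck only\n           # probes the triangular part of A.\n           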
supports_fwgrad_bwgrad=True,\n gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),\n decorators=[skipCUDAIfNoMagma],\n skips=(\n # Gradcheck fails\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',\n dtypes=floating_and_complex_types()),\n )),\n UnaryUfuncInfo('trunc',\n aliases=('fix', ),\n ref=np.trunc,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('exp2',\n aliases=('special.exp2', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('expm1',\n aliases=('special.expm1', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n safe_casts_outputs=True,\n assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped! sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n )),\n UnaryUfuncInfo('nan_to_num',\n ref=np.nan_to_num,\n dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse=True,\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n # Passing numpy_kwargs via sample_kwargs, as numpy does comparison\n # with BFloat16 in float, since it currently doesn't support BFloat16.\n # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556\n sample_kwargs=lambda device, dtype, input: ({},\n {'posinf': torch.finfo(torch.bfloat16).max,\n 'neginf': torch.finfo(torch.bfloat16).min})\n if dtype is torch.bfloat16 else ({}, {})),\n UnaryUfuncInfo('reciprocal',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/45690\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('rsqrt',\n ref=lambda x: np.reciprocal(np.sqrt(x)),\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.half: 5e-2}),),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('sqrt',\n ref=np.sqrt,\n supports_sparse=True,\n domain=(0, None),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_sparse_csr=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 7e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/47358\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_MACOS),\n # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n ),\n safe_casts_outputs=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('square',\n ref=np.square,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/52549\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble]),\n # >>> t = torch.tensor(complex(-0.01, float(\"inf\")))\n # >>> np.square(t.numpy())\n # (-inf-infj)\n # >>> t.square()\n # tensor(-inf-infj)\n # >>> t.cuda().square()\n # tensor(inf+nanj, device='cuda:0')\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),),\n OpInfo('lerp',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_lerp,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True),\n OpInfo('linalg.inv',\n aten_name='linalg_inv',\n op=torch.linalg.inv,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n OpInfo('linalg.inv_ex',\n aten_name='linalg_inv_ex',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n ),\n UnaryUfuncInfo('angle',\n ref=np.angle,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_sparse_csr=True,\n supports_complex_to_float=True,\n skips=(\n # The complex formula might be wrong\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD',\n dtypes=complex_types()),\n )),\n UnaryUfuncInfo('isfinite',\n ref=np.isfinite,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/66402\n DecorateInfo(unittest.expectedFailure, \"TestUnaryUfuncs\", \"test_reference_numerics_hard\",\n device_type='cpu', dtypes=(torch.complex64,), active_if=not (IS_MACOS or IS_WINDOWS)),\n )),\n UnaryUfuncInfo('isinf',\n ref=np.isinf,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n 
UnaryUfuncInfo('isposinf',\n ref=np.isposinf,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n UnaryUfuncInfo('isneginf',\n ref=np.isneginf,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n UnaryUfuncInfo('isreal',\n ref=np.isreal,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_autograd=False),\n UnaryUfuncInfo('isnan',\n ref=np.isnan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_autograd=False),\n OpInfo('linalg.solve',\n aten_name='linalg_solve',\n op=torch.linalg.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.solve_triangular',\n aten_name='linalg_solve_triangular',\n op=torch.linalg.solve_triangular,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve_triangular,\n supports_fwgrad_bwgrad=True,\n # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);\n supports_forward_ad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Tests different backward implementations\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n ),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n op=torch.linalg.pinv,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_linalg_pinv,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack,\n # Derivative wrt rcond tensor not implemented\n DecorateInfo(unittest.expectedFailure,\n \"TestCommon\", \"test_floating_inputs_are_differentiable\")],\n skips=(\n # errors with \"leaked XXXX bytes CUDA memory on device 0\"\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),)\n ),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='singular',\n # pinv is Frechet-differentiable in a rank-preserving neighborhood,\n # so we feed inputs that are the products of two full-rank factors,\n # to avoid any rank changes caused by the perturbations in the gradcheck\n op=lambda a, b: torch.linalg.pinv(a @ b.mT),\n dtypes=floating_and_complex_types(),\n 
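# Illustrative sketch (annotation, not in the upstream source): with a of shape (n, k)\n           # and b of shape (m, k), both generically full rank, a @ b.mT keeps rank k under the\n           # small perturbations gradcheck applies, so finite differences stay inside the\n           # rank-preserving neighborhood where pinv is differentiable.\n           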
supports_out=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_linalg_pinv_singular,\n # Only large tensors show issues with implicit backward used prior to\n # explicit backward implementation.\n decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # test does not work with passing lambda for op\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # CUDA runs out of memory\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.cdouble]),\n )),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n ),\n OpInfo('eig',\n op=torch.eig,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_eig,\n decorators=[\n skipCUDAIfNoMagma,\n skipCPUIfNoLapack,\n skipCUDAIfRocm\n ],),\n OpInfo('einsum',\n # we need this lambda because SampleInput expects tensor input as the first argument\n # TODO(@heitorschueroff) update SampleInput to handle such cases\n op=lambda tensors, equation: torch.einsum(equation, tensors),\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,\n *[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_forward_grad=False,\n # See https://github.com/pytorch/pytorch/issues/66357\n sample_inputs_func=sample_inputs_einsum,\n skips=(\n # test does not work with passing lambda for op\n # there's a test `test_einsum` in `test_jit.py` to handle this case\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('svd',\n op=torch.svd,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_svd,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_forward_grad=False,\n # We're using at::allclose, which does not have a batching rule\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # Fixme, forward over backward gives a numerical error\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=(torch.complex128,)),\n )),\n OpInfo('linalg.svd',\n op=torch.linalg.svd,\n aten_name='linalg_svd',\n dtypes=floating_and_complex_types(),\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n check_batched_forward_grad=False,\n # We're using at::allclose, which does not have a batching rule\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_svd,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # FIXME forward over backward gives a numerical error\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=(torch.complex128,)),\n )),\n 
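# NOTE (annotation, not in the upstream source): svd_lowrank and pca_lowrank further below\n    # wrap their randomized ops in wrapper_set_seed, the file's pattern for reseeding the RNG\n    # on every call so the paired invocations a test compares draw identical random sketches.\n    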
OpInfo('linalg.svdvals',\n op=torch.linalg.svdvals,\n aten_name='linalg_svdvals',\n dtypes=floating_and_complex_types(),\n check_batched_forward_grad=False,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n # We're using at::allclose, which does not have a batching rule\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_svdvals,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),\n OpInfo('svd_lowrank',\n op=lambda *args, **kwargs: wrapper_set_seed(\n lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs),\n *args, **kwargs\n ),\n dtypes=floating_types(),\n supports_out=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n check_batched_forward_grad=False,\n supports_fwgrad_bwgrad=True,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_svd_lowrank,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack,\n DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),\n 'TestCommon', 'test_noncontiguous_samples',\n device_type='cuda')],\n skips=(\n # test does not work with passing lambda for op\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('pca_lowrank',\n op=lambda *args, **kwargs: wrapper_set_seed(\n lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs),\n *args, **kwargs\n ),\n dtypes=floating_types(),\n supports_out=False,\n check_batched_forward_grad=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_pca_lowrank,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n skips=(\n # test does not work with passing lambda for op\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n BinaryUfuncInfo('polar',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_polar,\n skips=(\n # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'),\n # TypeError: polar(): argument 'angle' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.\n # To test reference numerics against multiple values of argument `n`,\n # we make multiple OpInfo entries with each entry corresponding to a different value of n (currently 0 to 4).\n # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),\n # A separate OpInfo entry for special.polygamma is needed to reorder the arguments\n # for the alias. 
See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939\n UnaryUfuncInfo('special.polygamma',\n op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),\n variant_test_name='special_polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_1',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_2',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_3',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, 
torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_4',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),\n # polygamma functions have multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n OpInfo('ravel',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_ravel,\n ),\n OpInfo('reshape',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_reshape,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('reshape_as',\n op=lambda x, other: x.reshape_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(unittest.skip(\"Second argument does not need gradient\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n ),\n OpInfo('view',\n op=lambda x, shape: x.view(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_jit_shape_analysis=True,\n sample_inputs_func=sample_inputs_view_reshape,\n ),\n OpInfo('view_as',\n op=lambda x, 
other: x.view_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n skips=(\n DecorateInfo(unittest.skip(\"Second argument does not need gradient\"),\n \"TestCommon\", \"test_floating_inputs_are_differentiable\"),),\n ),\n OpInfo('atleast_1d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n ),\n OpInfo('atleast_2d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n ),\n OpInfo('atleast_3d',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_atleast1d2d3d,\n ),\n OpInfo('flatten',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_flatten,\n ),\n OpInfo('column_stack',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out_warning'),),\n sample_inputs_func=sample_inputs_column_stack,),\n OpInfo('pinverse',\n op=torch.pinverse,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n 
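# The gradcheck_nondet_tol above tolerates the small nondeterminism of CUDA\n # linear-algebra kernels; pinverse is computed via SVD, so its gradients can differ\n # slightly between otherwise identical runs.\n 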
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),\n OpInfo('gather',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_gather,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n error_inputs_func=error_inputs_gather\n ),\n OpInfo('index_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index),\n OpInfo('index_copy',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_index,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_jit_shape_analysis=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n # An `out=` variant exists but is not exposed to the Python API\n # see: https://github.com/pytorch/pytorch/pull/65993#discussion_r737760723\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_index,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('__getitem__',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_inplace_autograd=False,\n supports_scripting=False,\n op=torch.Tensor.__getitem__,\n skips=(\n # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 104448\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),),\n assert_jit_shape_analysis=False, # TODO: support index.Tensor()\n sample_inputs_func=sample_inputs_getitem),\n OpInfo('index_put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n test_neg_view=False,\n sample_inputs_func=sample_inputs_index_put,\n skips=(\n # RuntimeError: The following operation failed in the TorchScript interpreter.\n # Traceback of TorchScript (most recent call last):\n # File \"<string>\", line 3, in forward\n # def the_method(i0, i1: List[torch.Tensor], i2):\n # return torch.index_put(i0, i1, i2, accumulate=False)\n # ~~~~~~~~~~~~~~~ <--- HERE\n # RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('sort',\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_sort,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # sort does not correctly warn when resizing out= inputs\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError not raised\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),\n )),\n OpInfo('unique',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16),\n sample_inputs_func=sample_inputs_unique,\n supports_out=False,\n supports_autograd=False,\n skips=(\n # RuntimeError:\n # 'Tensor (inferred)' object has no attribute or method 'unique'.:\n # File \"<string>\", line 3\n #\n # def the_method(i0):\n # return i0.unique(sorted=False, return_inverse=False, return_counts=False, dim=None)\n # ~~~~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('unique_consecutive',\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.float16),\n sample_inputs_func=sample_inputs_unique_consecutive,\n supports_out=False,\n supports_autograd=False,\n skips=(\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_put),\n OpInfo('take',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n check_batched_grad=False, # vmap complains of the sizes\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=False, # Need: put_\n sample_inputs_func=sample_inputs_take),\n OpInfo('scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_scatter,\n error_inputs_func=error_inputs_scatter_and_scatter_add),\n OpInfo('bfloat16',\n op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n 
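# Note: this entry and the conversion entries that follow ('bfloat16' through 'short',\n # each with a 'channels_last' variant) wrap the corresponding Tensor.<dtype>() method\n # in a lambda; autograd is disabled wherever the conversion changes dtype, since the\n # autograd test runner cannot handle dtype-changing functions.\n 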
sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bfloat16',\n op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bool',\n op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bool',\n op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('byte',\n op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('byte',\n op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('char',\n op=lambda x, *args, **kwargs: x.char(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('char',\n op=lambda x, *args, **kwargs: x.char(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change 
dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('double',\n op=lambda x, *args, **kwargs: x.double(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('double',\n op=lambda x, *args, **kwargs: x.double(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('float',\n op=lambda x, *args, **kwargs: x.float(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('float',\n op=lambda x, *args, **kwargs: x.float(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('half',\n op=lambda x, *args, **kwargs: x.half(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('half',\n op=lambda x, *args, **kwargs: x.half(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n # The autograd test runner cannot handle functions that change dtype\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('int',\n op=lambda x, *args, **kwargs: x.int(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 
'test_variant_consistency_jit'),\n )),\n OpInfo('int',\n op=lambda x, *args, **kwargs: x.int(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('long',\n op=lambda x, *args, **kwargs: x.long(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('long',\n op=lambda x, *args, **kwargs: x.long(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('short',\n op=lambda x, *args, **kwargs: x.short(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_conversion,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('short',\n op=lambda x, *args, **kwargs: x.short(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n variant_test_name='channels_last',\n sample_inputs_func=sample_inputs_conversion_channels_last,\n supports_autograd=False,\n skips=(\n # RuntimeError: attribute lookup is not defined on builtin\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('empty_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_conj_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_conj_view'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('zeros_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n 
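# Note: the *_like factory entries in this stretch (empty_like through full_like) set\n # supports_autograd=False, since they only need to propagate shape, dtype, device and\n # memory format; the random ones additionally wrap their callable in wrapper_set_seed\n # so results are reproducible within a test.\n 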
sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('ones_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('randn_like',\n dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n supports_sparse_csr=True,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('rand_like',\n dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.rand_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_like_fns,\n supports_autograd=False,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('randint_like',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),\n supports_out=False,\n sample_inputs_func=sample_inputs_randint_like,\n supports_autograd=False,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('full_like',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_full_like,\n supports_autograd=False,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('new_zeros',\n op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_ones',\n op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 
'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_empty',\n op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_fns,\n skips=(\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_variant_consistency_eager'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_conj_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view'),\n # Empty tensor data is garbage so it's hard to make comparisons with it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_conj_view'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('new_full',\n op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_new_full,\n skips=(\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n supports_autograd=False),\n OpInfo('multinomial',\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.multinomial, inp, *args, **kwargs),\n method_variant=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n supports_out=True,\n sample_inputs_func=sample_inputs_multinomial,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out_warning')),\n supports_autograd=False),\n OpInfo('normal',\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.normal, inp, *args, **kwargs),\n # The inplace variant (Tensor.normal_) is different from torch.normal\n inplace_variant=None,\n dtypes=floating_types_and(torch.bfloat16, torch.half),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),\n supports_out=True,\n sample_inputs_func=sample_inputs_normal_tensor_first,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out_warning'),),\n supports_autograd=False),\n OpInfo('normal',\n # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here\n variant_test_name='number_mean',\n op=lambda 
std, mean, *args, **kwargs:\n wrapper_set_seed(torch.normal, mean, std, *args, **kwargs),\n # The inplace variant (Tensor.normal_) is different from torch.normal\n inplace_variant=None,\n dtypes=floating_types_and(torch.bfloat16, torch.half),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),\n supports_out=True,\n sample_inputs_func=sample_inputs_normal_tensor_second,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Seems like a bug:\n # The size of tensor a (0) must match the size of tensor b (4) at non-singleton dimension 1\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out'),\n # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out_warning'),),\n supports_autograd=False),\n OpInfo('bernoulli',\n op=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs),\n # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli\n inplace_variant=None,\n method_variant=lambda inp, *args, **kwargs:\n wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),\n supports_out=True,\n sample_inputs_func=sample_inputs_bernoulli,\n skips=(\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Expected RuntimeError when doing an unsafe cast from a result of\n # dtype torch.float32 into an out= with dtype torch.long\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out'),\n # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_out_warning')),\n supports_autograd=False),\n OpInfo('scatter_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter_add,\n error_inputs_func=error_inputs_scatter_and_scatter_add,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('stack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_stack,\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo('hstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n )),\n BinaryUfuncInfo('hypot',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_hypot,\n skips=(\n # TypeError: hypot(): argument 'other' (position 2) must be Tensor, not float\n DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),\n )),\n OpInfo('histogram',\n dtypes=floating_types(),\n dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU\n sample_inputs_func=sample_inputs_histogram,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # 
https://github.com/pytorch/pytorch/issues/58507\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # Not Implemented on XLA.\n DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla'),\n )),\n OpInfo('histogramdd',\n dtypes=floating_types(),\n dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU\n sample_inputs_func=sample_inputs_histogramdd,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('histc',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),\n sample_inputs_func=sample_inputs_histc,\n supports_out=True,\n supports_autograd=False,\n skips=(\n # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor\n # \"AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast\n # from a result of dtype torch.float32 into an out= with dtype torch.long\"\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),\n )),\n OpInfo('bincount',\n dtypes=integral_types_and(),\n sample_inputs_func=sample_inputs_bincount,\n supports_out=False,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('bucketize',\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_bucketize,\n supports_autograd=False,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n DecorateInfo(unittest.skip(\"Expected failure!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('searchsorted',\n dtypes=all_types(),\n dtypesIfCPU=all_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_searchsorted,\n supports_autograd=False,\n ref=reference_searchsorted,\n skips=(\n # JIT tests don't work with Tensor keyword arguments\n # https://github.com/pytorch/pytorch/issues/58507\n DecorateInfo(unittest.skip(\"Expected failure!\"), 'TestJit', 'test_variant_consistency_jit'),\n )),\n OpInfo('cat',\n ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),\n aliases=('concat',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_cat_concat,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError: Arguments for call not valid.\n # Expected a value of type 'List[Tensor]' for argument\n # 'tensors' but instead found type 'Tensor (inferred)'.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),\n OpInfo('vstack',\n aliases=('row_stack',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n 
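# Note: a ref= field (e.g. np.concatenate for 'cat' above) supplies a NumPy reference\n # implementation that the OpInfo machinery can compare the PyTorch result against.\n 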
sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # RuntimeError: _fn() Expected a value of type\n # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),\n OpInfo('dstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # TODO: see https://github.com/pytorch/pytorch/issues/64709\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n )),\n OpInfo('unfold',\n op=lambda x, *args: x.unfold(*args),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n check_batched_gradgrad=False,\n # See https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n skips=(\n # Skip operator schema test because this is a functional and not an operator\n DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n sample_inputs_func=sample_inputs_unfold),\n OpInfo('msort',\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=all_types_and(torch.float16),\n check_batched_gradgrad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # msort does not correctly warn when resizing out= inputs.\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # Expected RuntimeError when doing an unsafe cast from a result of dtype\n # torch.float32 into an out= with dtype torch.long\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),\n ),\n sample_inputs_func=sample_inputs_msort),\n OpInfo('movedim',\n aliases=('moveaxis',),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_movedim_moveaxis),\n OpInfo('renorm',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_renorm),\n ShapeFuncInfo('repeat',\n op=lambda x, dims: x.repeat(dims),\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('squeeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n assert_jit_shape_analysis=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # vmap does not support inplace views\n check_inplace_batched_forward_grad=False,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_squeeze),\n OpInfo('fill_',\n op=lambda x, scalar: torch.fill_(x.clone(), scalar),\n method_variant=None,\n inplace_variant=torch.Tensor.fill_,\n supports_forward_ad=True,\n 
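# Note the pattern used for in-place ops such as fill_ above: the functional op clones\n # its input before mutating, so tests never clobber shared sample tensors, while\n # inplace_variant points at the real Tensor.fill_ method.\n 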
supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # JIT has issue when op is passed as lambda\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # Fails due to a limitation of gradgradcheck\n # https://github.com/pytorch/pytorch/issues/59137\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_gradgrad'),\n ),\n sample_inputs_func=sample_inputs_fill_),\n OpInfo('resize_',\n op=lambda x, shape: x.clone().resize_(shape),\n method_variant=None,\n inplace_variant=torch.Tensor.resize_,\n # the test fails because resize_ doesn't work with imag views as expected by the test\n # https://github.com/pytorch/pytorch/issues/65945\n test_neg_view=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # resize_ is raising an error on input that requires grad on purpose\n DecorateInfo(\n unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),\n 'TestGradients',\n 'test_nondifferentiable',\n ),\n DecorateInfo(unittest.skip(\"Allowed exception\"), 'TestCommon', 'test_composite_compliance'),\n ),\n sample_inputs_func=sample_inputs_resize_ops),\n OpInfo('resize_as_',\n op=lambda x, other: torch.resize_as_(x.clone(), other),\n method_variant=None,\n inplace_variant=torch.Tensor.resize_as_,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # resize_ is raising an error on input that requires grad on purpose\n DecorateInfo(\n unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),\n 'TestGradients',\n 'test_nondifferentiable',\n ),\n ),\n sample_inputs_func=sample_inputs_resize_ops),\n OpInfo('take_along_dim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_take_along_dim,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n ShapeFuncInfo('tile',\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_trapezoid,\n skips=(\n # Dispatch stub: unsupported device type: meta\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='meta'),\n # (ROCm) Memory access fault by GPU node-4 (Agent handle: 0x55860348e690) on address 0x7f0f4ddcb000\n DecorateInfo(unittest.skip(\"Skipped! 
ROCm memory exception\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.float64, torch.complex128], active_if=TEST_WITH_ROCM),\n )),\n OpInfo('trapezoid',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_trapezoid,\n skips=(\n # Dispatch stub: unsupported device type: meta\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='meta'),\n # (ROCm) Memory access fault by GPU node-4 (Agent handle: 0x55bbf53d5500) on address 0x7fe536eb5000\n DecorateInfo(unittest.skip(\"Skipped! ROCm memory exception\"), 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.float64, torch.complex128], active_if=TEST_WITH_ROCM),\n )),\n OpInfo('cumulative_trapezoid',\n dtypes=all_types_and_complex_and(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n sample_inputs_func=sample_cumulative_trapezoid,\n skips=(\n # Two failures:\n # 1. (CUDA) RuntimeError: Expected all tensors to be on the same device, but found at\n # least two devices, cuda:0 and cpu!\n # 2. (ROCm) Memory exception on virtual address 0x7f6a2216f000, node id 4: Page not present\n DecorateInfo(unittest.skip(\"Skipped! ROCm memory exception\"), 'TestGradients',\n 'test_fn_fwgrad_bwgrad', device_type='cuda'),\n )),\n OpInfo('unsqueeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # vmap does not support inplace views\n check_inplace_batched_forward_grad=False,\n assert_jit_shape_analysis=True,\n assert_autodiffed=True,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n sample_inputs_func=sample_unsqueeze),\n BinaryUfuncInfo('xlogy',\n aliases=('special.xlogy',),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n promotes_int_to_float=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_xlogy),\n OpInfo('zero_',\n op=lambda x: torch.zero_(x.clone()),\n method_variant=None,\n inplace_variant=torch.Tensor.zero_,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # JIT has issue when op is passed as lambda\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_zero_),\n BinaryUfuncInfo('special.xlog1py',\n aten_name='special_xlog1py',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),\n promotes_int_to_float=True,\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_xlog1py),\n BinaryUfuncInfo('special.zeta',\n aten_name='special_zeta',\n dtypes=all_types_and(torch.bool),\n promotes_int_to_float=True,\n supports_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_binary_pwise),\n # OpInfo entry to verify the gradient formula of `other`/`q`\n BinaryUfuncInfo('special.zeta',\n op=lambda q, x, **kwargs: torch.special.zeta(x, q, 
**kwargs),\n aten_name='special_zeta',\n variant_test_name='grad',\n dtypes=all_types_and(torch.bool),\n promotes_int_to_float=True,\n supports_autograd=True,\n safe_casts_outputs=True,\n decorators=[\n # Derivative wrt first tensor not implemented\n DecorateInfo(unittest.expectedFailure, \"TestCommon\",\n \"test_floating_inputs_are_differentiable\")\n ],\n skips=(\n # Lambda doesn't work in JIT test\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),\n ),\n sample_inputs_func=sample_inputs_zeta),\n OpInfo('logsumexp',\n aliases=('special.logsumexp',),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_logsumexp),\n OpInfo('trace',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_trace),\n OpInfo('transpose',\n aliases=('swapdims', 'swapaxes'),\n assert_jit_shape_analysis=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # vmap does not support inplace views\n check_inplace_batched_forward_grad=False,\n sample_inputs_func=sample_inputs_transpose_swapdims),\n OpInfo('T',\n op=lambda x: x.T,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_T),\n OpInfo('H',\n op=lambda x: x.H,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_T),\n OpInfo('mT',\n op=lambda x: x.mT,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_adjoint),\n OpInfo('mH',\n op=lambda x: x.mH,\n aliases=('adjoint',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=( # Lambda doesn't work in JIT test\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\"),),\n sample_inputs_func=sample_inputs_adjoint),\n OpInfo('tril',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n 
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_tril_triu),\n OpInfo('triu',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_tril_triu),\n OpInfo('kron',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_kron),\n OpInfo('inner',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_inner,\n ),\n OpInfo('tensordot',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_tensordot,\n skips=(\n # Skip operator schema test because this is a functional and not an operator.\n # Reference: https://github.com/pytorch/pytorch/issues/54574\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )\n ),\n OpInfo('to_sparse',\n op=lambda x, *args: x.to_sparse(*args),\n sample_inputs_func=sample_inputs_to_sparse,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n backward_dtypes=floating_types(),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n skips=(\n # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend\n DecorateInfo(unittest.skip(\"\"), 'TestCommon', 'test_noncontiguous_samples'),\n # TODO: FIXME: complex inputs requiring grad error in forward\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # JIT has issue when op is passed as lambda\n # NotImplementedError: Cannot access storage of SparseTensorImpl\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # Allowed exception: sparse tensors don't have strides\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n )\n ),\n OpInfo('logcumsumexp',\n dtypes=floating_types_and(),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n backward_dtypesIfCUDA=floating_types_and(),\n skips=(\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),\n ),\n sample_inputs_func=sample_inputs_logcumsumexp),\n UnaryUfuncInfo('sigmoid',\n aliases=('special.expit', 'nn.functional.sigmoid'),\n ref=reference_sigmoid if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.complex64: 1e-1,\n torch.bfloat16: 1e-2}),),\n skips=(\n # TODO: FIXME: 
sigmoid fails on complex inputs that require grad\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_dtypes'),\n # Reference: https://github.com/pytorch/pytorch/issues/56012\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n # alias, nn.functional.sigmoid, will produce (because of warning string saved):\n # \"RuntimeError: Expected to not find \"sigmoid\" but found it\"\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero\n reference_numerics_filter=NumericsFilter(\n condition=lambda x: (close_to_int(x / (math.pi * 1j))\n if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),\n safe_val=0)),\n UnaryUfuncInfo('digamma',\n ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,\n aliases=('special.psi', 'special.digamma',),\n decorators=(precisionOverride({torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.entr',\n ref=scipy.special.entr if TEST_SCIPY else _NOTHING,\n aten_name='special_entr',\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.float16: 1e-1,\n torch.bfloat16: 1e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16, torch.float16]),\n ),\n supports_inplace_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_entr),\n UnaryUfuncInfo('special.ndtri',\n ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aten_name='special_ndtri',\n dtypes=all_types_and(torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erf',\n ref=scipy.special.erf if TEST_SCIPY else _NOTHING,\n aliases=('special.erf', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped! 
sparse backward not supported\"),\n 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),\n\n ),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n assert_jit_shape_analysis=True,\n supports_sparse=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfc',\n ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,\n aliases=('special.erfc', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfinv',\n ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,\n aliases=('special.erfinv', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2,\n torch.float32: 1e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n supports_sparse_csr=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n domain=(-1, 1),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < \"1.4.0\"),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < \"1.4.0\"),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < \"1.4.0\"),\n )),\n UnaryUfuncInfo('lgamma',\n ref=reference_lgamma if TEST_SCIPY else _NOTHING,\n aliases=('special.gammaln', ),\n decorators=(precisionOverride({torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n ),\n safe_casts_outputs=True,\n # lgamma has multiple singularities at x <= 0\n reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),\n OpInfo(\n 'logdet',\n dtypes=floating_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_logdet,\n decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),\n # `log_softmax` supports different dtypes based 
on whether the `dtype` argument\n # is passed or not. Hence two OpInfo entries, one with dtype and the other without.\n OpInfo(\n 'log_softmax',\n aliases=('special.log_softmax', 'nn.functional.log_softmax'),\n supports_out=False,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_softmax_variant,\n assert_autodiffed=True),\n OpInfo(\n 'log_softmax',\n variant_test_name='dtype',\n aliases=('special.log_softmax', 'nn.functional.log_softmax'),\n supports_out=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),\n assert_autodiffed=True),\n UnaryUfuncInfo('logit',\n ref=scipy.special.logit if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aliases=('special.logit', ),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_logit,\n safe_casts_outputs=True),\n OpInfo('where',\n # Currently only the `input` is tested in gradcheck.\n # If we pass `condition` first, none of the input which supports\n # autograd will be tested. Hence the following lambda.\n op=lambda self, condition, other: torch.where(condition, self, other),\n ref=lambda self, condition, other: np.where(condition, self, other),\n sample_inputs_func=sample_inputs_where,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n # test does not work with passing lambda for op\n # AssertionError: False is not true :\n # Failure in testing nodes' autodifferentiation.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),\n OpInfo('nonzero',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_nonzero,\n supports_autograd=False,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67458\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # nonzero is not raising a warning when the out is resized\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),\n # Can't find schemas for this operator for some reason\n DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n # `torch.norm` has multiple code paths depending on the value of `p`.\n # These paths have different dtype support. Also JIT supports\n # most variants but not all of them. 
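(As an illustrative, non-exhaustive summary: p='fro' and p='nuc' take matrix-norm paths, with 'nuc' requiring LAPACK/MAGMA as the decorators on the 'nuc' variant below reflect, while numeric p goes through the vector-norm path.)\n # 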
So we split the OpInfo entries\n # for `norm` based on the code-paths and JIT support.\n OpInfo('norm',\n sample_inputs_func=sample_inputs_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16)),\n OpInfo('norm',\n variant_test_name='nuc',\n sample_inputs_func=sample_inputs_norm_nuc,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],\n check_batched_gradgrad=False,\n check_batched_forward_grad=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types(),\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # RuntimeError not raised :\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n # RuntimeError:\n # Arguments for call are not valid.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950\n )\n ),\n OpInfo('norm',\n variant_test_name='fro',\n sample_inputs_func=sample_inputs_norm_fro,\n dtypes=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n skips=(\n # Pre-existing condition; Needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n # Expected RuntimeError when calling with input.device=cpu and out.device=cuda\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),\n # Arguments for call are not valid.\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950\n )),\n OpInfo('norm',\n variant_test_name='inf',\n sample_inputs_func=sample_inputs_norm_inf,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n skips=(\n # https://github.com/pytorch/pytorch/issues/67517\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestCommon', 'test_noncontiguous_samples'),\n # following 2 tests failed intermittently\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_fn_gradgrad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950\n )\n ),\n OpInfo('t',\n sample_inputs_func=sample_inputs_t,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # vmap does not support inplace views\n check_inplace_batched_forward_grad=False,\n autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused\n autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n assert_autodiffed=True,),\n UnaryUfuncInfo('special.erfcx',\n ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,\n aten_name='special_erfcx',\n decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),\n dtypes=all_types_and(torch.bool),\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n safe_casts_outputs=True),\n OpInfo(\n \"nn.functional.dropout\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n 
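# Note: wrapper_set_seed reseeds the RNG before each invocation so this random op behaves deterministically under gradcheck and the comparison tests (descriptive note on the helper used in op= above).\n 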
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # inplace variant dispatches to dropout kernel, while on CUDA\n # the op dispatches to _fused_dropout (with a few more conditions)\n # hence, different values and this skip here\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view', device_type='cuda'),\n # On CUDA, the op is dispatched (and a few more conditions) to\n # _fused_dropout, which doesn't support forward AD\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda'),\n # NotImplementedError: Trying to use forward AD with native_dropout that does not support it\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',\n device_type='cuda', dtypes=[torch.float64]),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n # https://github.com/pytorch/pytorch/issues/66357\n check_batched_forward_grad=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_dropout,\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),\n OpInfo(\n \"nn.functional.dropout2d\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs),\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:\n # vmap: We do not yet support calling random operations inside of vmap.\n # Please perform random operations outside of vmap as a workaround\n DecorateInfo(unittest.expectedFailure, 'TestGradients', \"test_forward_mode_AD\"),\n DecorateInfo(unittest.expectedFailure, 'TestGradients', \"test_inplace_forward_mode_AD\"),\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n sample_inputs_func=partial(sample_inputs_dropout, min_input_dim=2),\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)),\n # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype\n # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases\n OpInfo(\n \"nn.functional.feature_alpha_dropout\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),\n variant_test_name=\"with_train\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:\n # vmap: We do not yet support calling random operations inside of vmap.\n # Please perform random operations outside of vmap as a workaround\n DecorateInfo(unittest.expectedFailure, 'TestGradients', \"test_forward_mode_AD\"),\n DecorateInfo(unittest.expectedFailure, 'TestGradients', 
\"test_inplace_forward_mode_AD\"),\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n sample_inputs_func=partial(sample_inputs_dropout, train=True, min_input_dim=2),\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),\n OpInfo(\n \"nn.functional.feature_alpha_dropout\",\n op=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),\n variant_test_name=\"without_train\",\n ref=_NOTHING,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # Probably because we have used lambda for the op here\n # AssertionError: JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),\n gradcheck_wrapper=wrapper_set_seed,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n supports_out=False,\n sample_inputs_func=partial(sample_inputs_dropout, train=False),\n inplace_variant=lambda input, *args, **kwargs:\n wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),\n OpInfo(\n \"nn.functional.one_hot\",\n ref=reference_one_hot,\n supports_out=False,\n dtypes=_dispatch_dtypes((torch.int64,)),\n sample_inputs_func=sample_inputs_one_hot,\n ),\n OpInfo(\n \"nn.functional.embedding\",\n # We use lambda to reshuffle the positional arguments.\n # This is because currently only the `input` field of SampleInput\n # is tested in gradient tests.\n op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_embedding,\n skips=(\n # Does not work with lambda\n # Raises : JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n # Reference: https://github.com/pytorch/pytorch/issues/67084\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMathBits', 'test_neg_view', device_type='cuda'),\n ),\n supports_out=False,\n ),\n OpInfo(\n \"nn.functional.embedding_bag\",\n # We use lambda to reshuffle the positional arguments.\n # This is because currently only the `input` field of SampleInput\n # is tested in gradient tests.\n op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n # backward is not supported for mode `max` and dtype `bfloat16`\n backward_dtypesIfCUDA=floating_types_and(torch.float16),\n sample_inputs_func=sample_inputs_embedding_bag,\n skips=(\n # Does not work with lambda\n # Raises : JIT Test does not execute any logic\n DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n supports_gradgrad=False,\n ),\n OpInfo(\n \"nn.functional.softplus\",\n ref=reference_softplus,\n sample_inputs_func=sample_inputs_softplus,\n supports_forward_ad=True,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n supports_out=False,\n ),\n OpInfo(\n \"linalg.tensorinv\",\n ref=np.linalg.tensorinv,\n 
dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_tensorinv,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],\n ),\n OpInfo(\n \"linalg.tensorsolve\",\n ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_tensorsolve,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],\n ),\n OpInfo(\n \"nn.functional.mse_loss\",\n ref=reference_mse_loss,\n sample_inputs_func=sample_inputs_mse_loss,\n supports_out=False,\n supports_forward_ad=True,\n dtypes=floating_types_and(torch.float16),\n backward_dtypesIfCPU=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),\n skips=(\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":252,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.grid_sample\",\n ref=_NOTHING,\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_grid_sample,\n supports_gradgrad=False,\n gradcheck_nondet_tol=1e-15),\n OpInfo(\n \"argwhere\",\n ref=np.argwhere,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_autograd=False,\n sample_inputs_func=sample_inputs_argwhere,\n ),\n ReductionOpInfo(\n 'all',\n identity=True,\n supports_multiple_dims=False,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.bool,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.all),\n skips=(\n # FIXME: does not support passing keepdim without dim\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: does not support dim=None\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: uint8 input returns uint8 instead of bool\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),\n ),\n ),\n ReductionOpInfo(\n 'any',\n identity=False,\n supports_multiple_dims=False,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.bool,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.any),\n skips=(\n # FIXME: does not support passing keepdim without dim\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: does not support dim=None\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: uint8 input returns uint8 instead of bool\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),\n ),\n ),\n ReductionOpInfo(\n 'amax',\n nan_policy='propagate',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n ref=reference_reduction_numpy(np.amax),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 
'test_dim_empty'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'amin',\n nan_policy='propagate',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n ref=reference_reduction_numpy(np.amin),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'argmax',\n supports_multiple_dims=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),\n skips=(\n # FIXME: keepdim parameter is ignored when dim=None\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'argmin',\n supports_multiple_dims=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),\n skips=(\n # FIXME: keepdim parameter is ignored when dim=None\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n ),\n ),\n ReductionOpInfo(\n 'count_nonzero',\n identity=0,\n supports_out=False,\n supports_autograd=False,\n result_dtype=torch.int64,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_reduction_count_nonzero,\n ref=reference_reduction_numpy(np.count_nonzero),\n skips=(\n # FIXME: count_nonzero does not accept keepdim kwarg\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_single_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_multi_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_offbounds_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n ),\n ),\n ReductionOpInfo(\n 'mean',\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n assert_jit_shape_analysis=True,\n promotes_int_to_float=True,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.mean),\n skips=(\n # FIXME: mean does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: mean reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: mean does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n 
DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_extremal_values',\n device_type='cuda', dtypes=[torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'nanmean',\n nan_policy='omit',\n assert_autodiffed=True,\n promotes_int_to_float=True,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),\n ref=reference_reduction_numpy(np.nanmean),\n skips=(\n # AssertionError: False is not true :\n # Failure in testing nodes' autodifferentiation.\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # FIXME: nanmean reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n device_type='cuda', dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_extremal_values',\n device_type='cuda', dtypes=[torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'std',\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n assert_autodiffed=True,\n promotes_int_to_float=True,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n ref=reference_std_var(np.std),\n generate_args_kwargs=generate_std_var_kwargs,\n skips=(\n # FIXME: cannot specify keepdim without dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: dim=None not supported\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # TODO(@heitorschueroff) std return float for complex types\n # need to find a better way to model result dtype\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_result_dtype'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values'),\n # NumPy is giving NaN for this\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_large_input'),\n ),\n ),\n ReductionOpInfo(\n 'var',\n nan_policy='propagate',\n supports_out=False,\n assert_autodiffed=True,\n promotes_int_to_float=True,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n ref=reference_std_var(np.var),\n generate_args_kwargs=generate_std_var_kwargs,\n skips=(\n # FIXME: cannot 
specify keepdim without dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: dim=None not supported\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: dim=[] reduces all dimensions\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # TODO(@heitorschueroff) std return float for complex types\n # need to find a better way to model result dtype\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_result_dtype'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values'),\n # NumPy is giving NaN for this\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_large_input'),\n ),\n ),\n ReductionOpInfo(\n 'prod',\n identity=1,\n nan_policy='propagate',\n supports_multiple_dims=False,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_int64=True,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_prod,\n ref=reference_reduction_numpy(np.prod),\n skips=(\n # FIXME: prod does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: prod reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: prod does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16, torch.complex64]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n dtypes=[torch.uint8, torch.float16, torch.complex64]),\n ),\n ),\n ReductionOpInfo(\n 'sum',\n identity=0,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_int64=True,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.sum),\n skips=(\n # FIXME: sum does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: sum does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 
'test_ref_duplicate_values',\n dtypes=[torch.float16]),\n ),\n ),\n ReductionOpInfo(\n 'nansum',\n identity=0,\n nan_policy='omit',\n supports_out=False,\n promotes_int_to_int64=True,\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.nansum),\n skips=(\n # FIXME: nansum does not support passing keepdim without passing dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_default_keepdim'),\n # FIXME: nansum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # FIXME: nansum does not support passing None to dim\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_none_keepdim'),\n # FIXME: improve precision\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_small_input',\n dtypes=[torch.float16]),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_ref_duplicate_values',\n dtypes=[torch.float16]),\n ),\n ),\n ReductionOpInfo(\n '_masked.sum',\n ref=reference_reduction_numpy(np.sum),\n method_variant=None,\n identity=0,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_int64=False,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),\n 'TestReductions', 'test_ref_small_input'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction\n ),\n ReductionOpInfo(\n '_masked.prod',\n ref=reference_reduction_numpy(np.prod),\n method_variant=None,\n identity=1,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_int64=True,\n # FIXME: \"prod_cpu\" not implemented for 'BFloat16'\n # FIXME: \"prod_cpu\" not implemented for 'Half'\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n skips=(\n # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_ref_duplicate_values'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction\n ),\n ReductionOpInfo(\n '_masked.amax',\n nan_policy='propagate',\n supports_out=False,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.amax),\n skips=(\n # FIXME: amax reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: Unknown builtin op: aten::iinfo\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.amin',\n nan_policy='propagate',\n supports_out=False,\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n ref=reference_reduction_numpy(np.amin),\n skips=(\n # FIXME: amin reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: Unknown builtin op: aten::iinfo\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.mean',\n ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_float=True,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),\n 'TestReductions', 'test_reference_masked'),\n ],\n sample_inputs_func=sample_inputs_masked_reduction,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.norm',\n identity=0,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n promotes_int_to_float=True,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n sample_inputs_func=sample_inputs_masked_norm,\n 
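# gradcheck_wrapper_masked_operation (below) zeroes out masked-out output elements before gradcheck so that undefined values do not contribute spurious gradients (descriptive note).\n 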
gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n ReductionOpInfo(\n '_masked.var',\n ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,\n method_variant=None,\n nan_policy='propagate',\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n promotes_int_to_float=True,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n skips=(\n # FIXME: sum reduces all dimensions when dim=[]\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty'),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestReductions', 'test_dim_empty_keepdim'),\n # RuntimeError: undefined value tensor\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestReductions', 'test_reference_masked'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestReductions', 'test_ref_small_input'),\n DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestMasked', 'test_reference_masked'),\n ],\n sample_inputs_func=sample_inputs_masked_var,\n gradcheck_wrapper=gradcheck_wrapper_masked_operation\n ),\n OpInfo(\n '_masked.softmax',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.log_softmax',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n decorators=[\n DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),\n 'TestMasked', 'test_reference_masked'),\n ],\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.softmin',\n method_variant=None,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_softmax,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n '_masked.normalize',\n method_variant=None,\n dtypes=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_normalize,\n skips=(\n # torch.jit.frontend.NotSupportedError: Compiled\n # functions can't take variable number of arguments or\n # use keyword-only arguments with defaults\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestJit', 'test_variant_consistency_jit'),\n # RuntimeError: \"clamp_min_cpu\" not implemented for 
'Half'\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestMasked', 'test_reference_masked',\n device_type='cpu', dtypes=[torch.half]),\n ),\n gradcheck_wrapper=gradcheck_wrapper_masked_operation,\n supports_out=False),\n OpInfo(\n \"nn.functional.ctc_loss\",\n ref=_NOTHING,\n dtypes=floating_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_ctc_loss,\n skips=(\n # https://github.com/pytorch/pytorch/issues/67462\n # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_grad\",\n dtypes=(torch.float64,),\n ),\n # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_gradgrad\",\n dtypes=(torch.float64,),\n ),\n # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32,),\n ),\n # Operation calls data_ptr() somewhere; needs to be fixed\n DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),\n ),\n ),\n OpInfo(\n \"nn.functional.cosine_embedding_loss\",\n ref=_NOTHING,\n dtypes=all_types_and(torch.bfloat16, torch.bool),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_cosine_embedding_loss,\n ),\n OpInfo(\n \"nn.functional.nll_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_nll_loss,\n decorators=[\n # FIXME: Derivative wrt. 
weight not implemented\n DecorateInfo(unittest.expectedFailure, \"TestCommon\",\n \"test_floating_inputs_are_differentiable\")],\n skips=(\n # RuntimeError:\n # undefined value tensor:\n # File \"<string>\", line 3\n # def the_method(i0, i1):\n # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))\n # ~~~~~~ <--- HERE\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.gaussian_nll_loss\",\n ref=_NOTHING,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_gaussian_nll_loss,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n ),\n ),\n OpInfo(\n \"nn.functional.hinge_embedding_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_hinge_embedding_loss,\n ),\n OpInfo(\n \"nn.functional.huber_loss\",\n ref=_NOTHING,\n dtypes=floating_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n sample_inputs_func=sample_inputs_huber_loss,\n skips=(\n # JIT does not support variadic tensors.\n # RuntimeError: input->type()->kind() == TypeKind::OptionalType\n # INTERNAL ASSERT FAILED at \"../torch/csrc/jit/passes/utils/check_alias_annotation.cpp\":270,\n # please report a bug to PyTorch.\n DecorateInfo(unittest.skip(\"Skipped!\"), \"TestJit\", \"test_variant_consistency_jit\", dtypes=(torch.float32,),),\n )\n ),\n OpInfo(\n \"nn.functional.poisson_nll_loss\",\n ref=_NOTHING,\n dtypes=all_types_and(torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n sample_inputs_func=sample_inputs_poisson_nll_loss,\n ),\n OpInfo(\n \"argsort\",\n dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_argsort,\n supports_out=False,\n supports_autograd=False,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32,),\n ),\n ),\n ),\n OpInfo(\n \"repeat_interleave\",\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_repeat_interleave,\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pairwise_distance\",\n ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (\n np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)\n ),\n sample_inputs_func=sample_inputs_pairwise_distance,\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n 
DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pixel_shuffle\",\n sample_inputs_func=sample_inputs_pixel_shuffle,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.pixel_unshuffle\",\n sample_inputs_func=sample_inputs_pixel_unshuffle,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n skips=(\n DecorateInfo(\n unittest.skip(\"Skipped!\"),\n \"TestJit\",\n \"test_variant_consistency_jit\",\n dtypes=(torch.float32, torch.complex64),\n ),\n ),\n ),\n OpInfo(\n \"nn.functional.kl_div\",\n sample_inputs_func=sample_inputs_kl_div,\n dtypes=floating_types_and(torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64),\n backward_dtypesIfCPU=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),\n dtypesIfCUDA=floating_types_and(\n torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64\n ),\n backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.int8, torch.int16, torch.int32, torch.int64),\n supports_out=False,\n check_batched_grad=False,\n supports_forward_ad=True,\n skips=(\n # See https://github.com/pytorch/pytorch/issues/65466\n DecorateInfo(\n unittest.expectedFailure,\n \"TestGradients\",\n \"test_fn_gradgrad\",\n ),\n # (ROCm) Memory access fault by GPU node-4 (Agent handle: 0x5642a3aa7b60) on address 0x5642bab40000\n DecorateInfo(unittest.skip(\"Skipped! 
ROCm memory exception\"), 'TestGradients', 'test_forward_mode_AD',\n device_type='cuda', dtypes=[torch.float64, torch.complex128], active_if=TEST_WITH_ROCM),\n ),\n ),\n OpInfo(\n \"diagflat\",\n ref=lambda input, offset=0: np.diagflat(input, k=offset),\n sample_inputs_func=sample_inputs_diagflat,\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_forward_ad=True,\n supports_fwgrad_bwgrad=True,\n ),\n OpInfo(\n 'scatter_reduce',\n dtypes=all_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter_reduce,\n supports_out=False,\n decorators=(onlyCPU,),\n skips=(\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive',\n active_if=IS_WINDOWS),\n DecorateInfo(unittest.skip(\"Skipped!\"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive',\n active_if=IS_WINDOWS),\n ),\n ),\n]\n\n# Common operator groupings\nunary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]\nbinary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]\nspectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]\nsparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]\nsparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]\nshape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]\nreduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]\nreference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)]\nreference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')]\n\n# TODO: review porting these to make_tensor\ndef index_variable(shape, max_indices, device=torch.device('cpu')):\n if not isinstance(shape, tuple):\n shape = (shape,)\n index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()\n return index\n\ndef gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):\n assert len(shape) == 2\n assert index_dim < 2\n batch_dim = 1 - index_dim\n index = torch.zeros(*shape, dtype=torch.long, device=device)\n for i in range(shape[index_dim]):\n index.select(index_dim, i).copy_(\n torch.randperm(max_indices, device=device)[:shape[batch_dim]])\n if duplicate:\n index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))\n return index\n\ndef bernoulli_scalar():\n return torch.tensor(0, dtype=torch.bool).bernoulli_()\n\ndef mask_not_all_zeros(shape):\n assert len(shape) > 0\n while True:\n result = torch.randn(shape).gt(0)\n if result.sum() > 0:\n return result\n\n\n# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove\n# these from here\ndef _compare_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n if row == 0 or col == 0:\n # have to handle this separately as tril and triu does not take\n # empty matrix as input\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n else:\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .tril(offset).nonzero().to(dtype).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .triu(offset).nonzero().to(dtype).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n\ndef _compare_large_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.tril_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \\\n .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n torch.cuda.empty_cache()\n\n r = torch.triu_indices(\n row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n self.assertEqual(l, r)\n torch.cuda.empty_cache()\n\n# (\n# row\n# col\n# offset (optional)\n# dtype (optional)\n# )\ntri_tests_args = [\n (1, 1),\n (3, 3),\n (3, 3, 1),\n (3, 3, 2),\n (3, 3, 200),\n (3, 3, -1),\n (3, 3, -2),\n (3, 3, -200),\n (0, 3, 0),\n (0, 3, 1),\n (0, 3, -1),\n (3, 0, 0),\n (3, 0, 1),\n (3, 0, -1),\n (0, 0, 0),\n (0, 0, 1),\n (0, 0, -1),\n (3, 6, 0),\n (3, 6, 1),\n (3, 6, 3),\n (3, 6, 9),\n (3, 6, -1),\n (3, 6, -3),\n (3, 6, -9),\n (6, 3, 0),\n (6, 3, 1),\n (6, 3, 3),\n (6, 3, 9),\n (6, 3, -1),\n (6, 3, -3),\n (6, 3, -9),\n (258, 253, 1, torch.float32),\n (257, 258, 1, torch.float64),\n (258, 258, 1, torch.short),\n (3, 513, 1, torch.long),\n (513, 3, 1, torch.int),\n (513, 0, 1, torch.double),\n (1024, 1024),\n (1024, 1024, 500, torch.float32),\n (1024, 1024, 1023),\n (1024, 1024, -500),\n (1023, 1025),\n (1025, 1023, 1022),\n (1024, 1024, -500),\n (3, 2028),\n (3, 2028, 1),\n (3, 2028, -1),\n (2028, 3),\n (2028, 1),\n (2028, 1, -1)\n]\n\ntri_large_tests_args: List[Tuple[int, ...]] = [\n # Large test cases below are deliberately commented out to speed up CI\n # tests and to avoid OOM error. 
When modifying implementations of\n # tril_indices and triu_indices, please enable these tests and make sure\n # they pass.\n #\n # (1, 268435455),\n # (5000, 5000),\n # (10000, 10000),\n # (268435455, 1),\n # (134217727, 2, 1),\n # (2, 134217727, 1),\n # (536870901, 1),\n # (1, 536870901),\n # (268435455, 2, 1),\n # (2, 268435455, 1)\n]\n\n\ndef run_additional_tri_tests(self, device):\n x = torch.ones(\n 3, 3, dtype=torch.long, device=device, layout=torch.strided)\n l = x.tril(0).nonzero().transpose(0, 1)\n u = x.triu(0).nonzero().transpose(0, 1)\n self.assertEqual(l, torch.tril_indices(3, 3, device=device))\n self.assertEqual(\n l, torch.tril_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertEqual(u, torch.triu_indices(3, 3, device=device))\n self.assertEqual(\n u, torch.triu_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.triu_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.tril_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n# TODO: move into common_utils.py or the test suite(s) that use this\ndef unpack_variables(args):\n if isinstance(args, tuple):\n return tuple(unpack_variables(elem) for elem in args)\n else:\n return args\n\n\nclass dont_convert(tuple):\n pass\n\n\nnon_differentiable = collections.namedtuple('non_differentiable', ['tensor'])\n\n\n# TODO: move into common_utils.py or the test suite(s) that use this\ndef create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):\n if not isinstance(call_args, tuple):\n call_args = (call_args,)\n\n def map_arg(arg):\n def maybe_non_contig(tensor):\n return tensor if not non_contiguous else make_non_contiguous(tensor)\n\n def conjugate(tensor):\n return tensor.conj()\n\n if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):\n return arg\n elif isinstance(arg, tuple) and len(arg) == 0:\n var = conjugate(torch.randn((), dtype=dtype, device=device))\n var.requires_grad = requires_grad\n return var\n elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):\n return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)\n # double check casting\n elif isinstance(arg, non_differentiable):\n if isinstance(arg.tensor, torch.Tensor):\n if arg.tensor.dtype == torch.float:\n return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))\n if arg.tensor.dtype == torch.cfloat:\n return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))\n return conjugate(maybe_non_contig(arg.tensor.to(device=device)))\n return conjugate(maybe_non_contig(arg.tensor.to(device=device)))\n elif isinstance(arg, torch.Tensor):\n if arg.dtype == torch.float:\n arg = arg.double()\n if arg.dtype == torch.cfloat:\n arg = arg.to(torch.cdouble)\n if arg.is_complex() != dtype.is_complex:\n raise RuntimeError(\"User provided tensor is real for a test that runs with complex dtype, \",\n \"which is not supported for now\")\n # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards\n v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()\n v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())\n return v\n elif callable(arg):\n return map_arg(arg(dtype=dtype, device=device))\n else:\n return arg\n args_out = tuple(map_arg(arg) for arg in call_args)\n kwargs_out = {k: 
map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}\n return args_out, kwargs_out\n"
]
| [
[
"torch.all",
"torch.pca_lowrank",
"torch.lu_unpack",
"torch.randint",
"numpy.sqrt",
"torch.zeros",
"numpy.minimum",
"torch.linalg.lu_factor",
"numpy.searchsorted",
"torch.where",
"torch.device",
"torch.topk",
"torch.testing._internal.common_utils.is_iterable_of_tensors",
"numpy.exp",
"numpy.where",
"torch.nn.functional.silu",
"numpy.sinc",
"torch.randn",
"numpy.matmul",
"numpy.diff",
"torch.testing._internal.common_dtype.floating_and_complex_types_and",
"torch._masked._output_mask",
"numpy.zeros",
"torch.full",
"numpy.random.choice",
"numpy.multiply",
"torch.tril_indices",
"numpy.tanh",
"numpy.sum",
"torch.testing._internal.common_dtype.empty_types",
"torch._masked._input_mask",
"numpy.add",
"torch.testing.make_tensor",
"torch.abs",
"numpy.expand_dims",
"torch.randperm",
"numpy.asarray",
"numpy.concatenate",
"torch.testing._internal.common_dtype.floating_types_and",
"torch.cuda.is_available",
"torch.igamma",
"torch.einsum",
"torch.tensor",
"torch.sort",
"torch.testing._internal.common_dtype._dispatch_dtypes",
"torch.rand",
"torch.nn.functional.elu",
"numpy.lib.NumpyVersion",
"torch.igammac",
"torch.LongTensor",
"torch.testing._internal.common_utils.noncontiguous_like",
"numpy.diagflat",
"numpy.amin",
"numpy.modf",
"torch.testing._internal.common_dtype.all_types_and_complex",
"torch.as_strided",
"torch.testing._internal.common_utils.make_fullrank_matrices_with_distinct_singular_values",
"torch.testing._internal.common_dtype.complex_types",
"numpy.maximum",
"torch.manual_seed",
"torch.testing._internal.common_utils.random_hermitian_pd_matrix",
"torch.polygamma",
"torch.testing._internal.common_utils.random_symmetric_pd_matrix",
"torch.testing._internal.common_utils.random_symmetric_matrix",
"numpy.product",
"torch.linalg.pinv",
"numpy.mean",
"torch.testing._internal.common_utils.random_square_matrix_of_rank",
"torch.finfo",
"numpy.random.randint",
"torch.ones",
"numpy.clip",
"torch.broadcast_shapes",
"torch.testing._internal.common_dtype.integral_types_and",
"torch.nn.functional.selu",
"torch.arange",
"torch.nn.functional.celu",
"torch.linalg.cholesky",
"torch.testing._internal.common_utils.freeze_rng_state",
"torch.cuda.empty_cache",
"torch.testing.make_non_contiguous",
"torch.nn.functional.embedding",
"torch.linalg.svd",
"torch.special.zeta",
"torch.testing._internal.common_dtype.all_types_and",
"torch.testing._internal.common_device_type.precisionOverride",
"numpy.amax",
"torch.testing._internal.common_dtype.all_types",
"torch.testing._internal.common_utils.random_symmetric_psd_matrix",
"torch.no_grad",
"numpy.cross",
"torch.linalg.lu_factor_ex",
"torch.Size",
"torch.frac",
"torch.nn.functional.embedding_bag",
"numpy.arange",
"numpy.linalg.tensorsolve",
"numpy.stack",
"torch.testing._internal.common_utils.random_well_conditioned_matrix",
"torch.triu_indices",
"torch.special.polygamma",
"torch.get_default_dtype",
"torch.linalg.lstsq",
"numpy.heaviside",
"torch.empty",
"torch.testing._internal.common_dtype.double_types",
"torch.svd_lowrank",
"torch.testing._internal.common_dtype.floating_and_complex_types",
"torch.view_as_real",
"torch.testing._internal.common_device_type.tol",
"numpy.abs",
"numpy.put",
"torch.testing._internal.common_dtype.floating_types",
"numpy.sign",
"numpy.prod",
"torch.testing._internal.common_device_type.has_cusolver",
"torch.testing._internal.common_dtype.all_types_and_complex_and"
]
]
|
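The tail of the test-suite file in the record above defines small helpers such as `index_variable` and `gather_variable` that build random integer index tensors for gradient tests. A minimal standalone sketch of the same idea, assuming only stock PyTorch (the function name here is illustrative, not the file's own API):

```python
import torch

def make_index(shape, max_indices):
    """Random long tensor of the given shape with values in [0, max_indices)."""
    if not isinstance(shape, tuple):
        shape = (shape,)
    # Same trick as index_variable above: uniform floats scaled, floored, cast.
    return torch.rand(*shape).mul_(max_indices).floor_().long()

idx = make_index((3, 4), max_indices=10)
assert idx.min() >= 0 and idx.max() < 10
print(idx)
```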
wps712/BitSpl | [
"ed8a8eee65ac54bd32f002c107ea55352a192012"
]
| [
"models/resnet.py"
]
| [
"import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = conv1x1(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.relu_last = nn.ReLU(inplace=False)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu_last(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n super(ResNet, self).__init__()\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual 
branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n"
]
| [
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
]
]
|
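A minimal usage sketch for the ResNet implementation in the record above, assuming the file is importable as `models.resnet` (an import path guessed from the repo's file layout, not confirmed by the record):

```python
import torch
from models.resnet import resnet18  # hypothetical import path

model = resnet18(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # NCHW ImageNet-sized input
print(logits.shape)  # torch.Size([1, 10])
```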
SeanLee97/clfzoo | [
"8c51ee316d51a4ec1d3e0c5c91a64248d6705214",
"8c51ee316d51a4ec1d3e0c5c91a64248d6705214"
]
| [
"clfzoo/libs/loss.py",
"clfzoo/base.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import array_ops\n\ndef spread_loss(labels, activations, margin):\n activations_shape = activations.get_shape().as_list()\n mask_t = tf.equal(labels, 1)\n mask_i = tf.equal(labels, 0) \n activations_t = tf.reshape(\n tf.boolean_mask(activations, mask_t), [activations_shape[0], 1]\n ) \n activations_i = tf.reshape(\n tf.boolean_mask(activations, mask_i), [activations_shape[0], activations_shape[1] - 1]\n ) \n gap_mit = tf.reduce_sum(tf.square(tf.nn.relu(margin - (activations_t - activations_i))))\n return gap_mit \n\ndef cross_entropy(y, logits): \n #y = tf.argmax(y, axis=1)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y) \n loss = tf.reduce_mean(loss) \n return loss\n\ndef margin_loss(y, logits): \n y = tf.cast(y,tf.float32)\n loss = y * tf.square(tf.maximum(0., 0.9 - logits)) + \\\n 0.25 * (1.0 - y) * tf.square(tf.maximum(0., logits - 0.1))\n loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))\n #loss = tf.reduce_mean(loss) \n return loss\n\n\"\"\"\nbinary label focal loss\n\"\"\"\ndef bin_focal_loss(y, logits, weights=None, alpha=0.5, gamma=2):\n sigmoid_p = tf.nn.sigmoid(logits)\n zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)\n pos_p_sub = array_ops.where(y >= sigmoid_p, y - sigmoid_p, zeros)\n neg_p_sub = array_ops.where(y > zeros, zeros, sigmoid_p)\n per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \\\n - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))\n return tf.reduce_mean(per_entry_cross_ent)\n\n\"\"\"\nMuti-label focal loss \n\"\"\"\ndef focal_loss(y, logits, gamma=2, epsilon=1e-10):\n y = tf.cast(tf.expand_dims(y, -1), tf.int32)\n\n predictions = tf.nn.softmax(logits)\n batch_idxs = tf.range(0, tf.shape(y)[0])\n batch_idxs = tf.expand_dims(batch_idxs, 1)\n\n idxs = tf.concat([batch_idxs, y], 1)\n y_true_pred = tf.gather_nd(predictions, idxs)\n\n y = tf.cast(tf.squeeze(y, axis=-1), tf.float32)\n losses = tf.log(y_true_pred+epsilon) * tf.pow(1-y_true_pred, gamma)\n\n return -tf.reduce_mean(losses)\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"Base class for general models.\n\"\"\"\n\nimport os\nimport tensorflow as tf\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n\n# clear session\ntf.keras.backend.clear_session()\n\nclass BaseModel(object):\n\n def __init__(self, config):\n self.config = config\n self.logger = config.logger\n self.sess = None\n self.saver = None\n\n self.lr = self.config.lr_rate\n\n def train(self, dataloader, pad_id=0):\n \"\"\"\n Args:\n train: training dataset that yield tuple (word_idx, label_idx)\n dev: develope dataset that yield tuple (word_idx, label_idx)\n \"\"\"\n best_score = 0\n no_improve_epoch = 0\n\n self.add_summary()\n\n for epoch in range(self.config.epochs):\n train = dataloader.next_batch('train', self.config.batch_size, pad_id, shuffle=True)\n dev = dataloader.next_batch('dev', self.config.batch_size, pad_id, shuffle=False)\n\n self.logger.info(\"Epoch {} / {}\".format(epoch+1, self.config.epochs)) \n metrics = self.run_epoch(train, dev, epoch)\n\n #print(\">>>>\", best_score, metrics[self.config.eval_metric])\n\n if best_score <= metrics[self.config.eval_metric]:\n best_score = metrics[self.config.eval_metric]\n no_improve_epoch = 0\n self.save()\n self.logger.info(\"new best score!\")\n else:\n no_improve_epoch += 1\n\n if self.config.lr_decay > 0:\n self.lr *= self.config.lr_decay\n\n if self.config.early_stop > 0 and no_improve_epoch > self.config.early_stop:\n self.logger.info(\"early stopping {} epochs without improvement\".format(no_improve_epoch))\n break \n \n def evaluate(self, test):\n \"\"\"\n Evaluate model on test set\n \"\"\"\n return self.run_evaluate(test)\n\n def calc_metrics(self, pred_labels, true_labels):\n return {\n 'accu': accuracy_score(true_labels, pred_labels),\n 'p': precision_score(true_labels, pred_labels, average='macro'),\n 'r': recall_score(true_labels, pred_labels, average='macro'),\n 'f1': f1_score(true_labels, pred_labels, average='macro')\n }\n \n\n def add_train_op(self, loss):\n optim_type = self.config.optimizer.lower()\n with tf.variable_scope(\"train_scope\"):\n if optim_type == 'adam':\n optimizer = tf.train.AdamOptimizer(self.lr)\n elif optim_type == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(self.lr)\n elif optim_type == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(self.lr)\n elif optim_type == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif optim_type == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(self.lr)\n else:\n raise NotImplementedError(\"Optimizer {} is not support\".format(optim_type))\n \n if self.config.clipper > 0:\n grads, vs = zip(*optimizer.compute_gradients(loss))\n grads, gnorm = tf.clip_by_global_norm(grads, self.config.clipper)\n self.train_op = optimizer.apply_gradients(zip(grads, vs))\n else:\n self.train_op = optimizer.minimize(loss)\n\n def init_session(self):\n \"\"\"\n Initialize session\n \"\"\"\n self.logger.info(\"Init session\")\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=sess_config)\n self.sess.run(tf.global_variables_initializer())\n\n self.saver = tf.train.Saver()\n\n def close_session(self):\n \"\"\"\n close session\n \"\"\"\n self.sess.close()\n\n def add_summary(self):\n self.merged = tf.summary.merge_all()\n self.summary_writer = tf.summary.FileWriter(self.config.graph_dir, self.sess.graph)\n \n def save(self):\n \"\"\"\n Saves the model into model_dir with model_name as the model indicator\n \"\"\"\n if not 
os.path.exists(self.config.model_dir):\n os.makedirs(self.config.model_dir)\n self.saver.save(self.sess, os.path.join(self.config.model_dir, self.config.model_name))\n self.logger.info('Model saved in {}, with name {}.'.format(self.config.model_dir, self.config.model_name))\n\n def restore(self):\n \"\"\"\n Restores the model into model_dir from model_name as the model indicator\n \"\"\"\n self.saver.restore(self.sess, os.path.join(self.config.model_dir, self.config.model_name))\n self.logger.info('Model restored from {}, with prefix {}'.format(self.config.model_dir, self.config.model_name))\n\n"
]
| [
[
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.boolean_mask",
"tensorflow.python.ops.array_ops.where",
"tensorflow.squeeze",
"tensorflow.nn.sigmoid",
"tensorflow.gather_nd",
"tensorflow.pow",
"tensorflow.shape",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.clip_by_value",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.expand_dims",
"tensorflow.log"
],
[
"tensorflow.train.AdagradOptimizer",
"tensorflow.summary.FileWriter",
"tensorflow.train.RMSPropOptimizer",
"sklearn.metrics.precision_score",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.clip_by_global_norm",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.keras.backend.clear_session",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"tensorflow.train.AdamOptimizer",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
]
|
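The `margin_loss` in the record above is the capsule-network margin loss: a squared hinge toward 0.9 for the true class plus a down-weighted squared hinge away from 0.1 for the others, summed per sample and averaged over the batch. A NumPy restatement of the same formula, as a sketch to check the math rather than part of the library:

```python
import numpy as np

def margin_loss_np(y, logits, m_plus=0.9, m_minus=0.1, lam=0.25):
    # y: one-hot labels; logits: per-class activations
    present = y * np.maximum(0.0, m_plus - logits) ** 2
    absent = lam * (1.0 - y) * np.maximum(0.0, logits - m_minus) ** 2
    return np.mean(np.sum(present + absent, axis=1))

y = np.eye(3)[[0, 2]]                     # two samples, three classes
logits = np.array([[0.8, 0.2, 0.1],
                   [0.3, 0.1, 0.7]])
print(margin_loss_np(y, logits))
```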
tgisaturday/pytorch-image-models | [
"bc25e17bed586148ff43008d0e2f9017297b258e"
]
| [
"timm/models/quantization/rexnet.py"
]
| [
"\"\"\" ReXNet\n\nA PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -\nhttps://arxiv.org/abs/2007.00992\n\nAdapted from original impl at https://github.com/clovaai/rexnet\nCopyright (c) 2020-present NAVER Corp. MIT license\n\nChanges for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman\nCopyright 2020 Ross Wightman\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom math import ceil\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom timm.models.helpers import build_model_with_cfg\nfrom .layers import ClassifierHead, create_act_layer, create_conv2d\nfrom timm.models.registry import register_model\nfrom .layers.activations import sigmoid, Swish, HardSwish, HardSigmoid\n\ndef _cfg(url=''):\n return {\n 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bicubic',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'stem.conv', 'classifier': 'head.fc',\n }\n\n\ndefault_cfgs = dict(\n rexnet_100=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'),\n rexnet_130=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'),\n rexnet_150=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'),\n rexnet_200=_cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'),\n rexnetr_100=_cfg(\n url=''),\n rexnetr_130=_cfg(\n url=''),\n rexnetr_150=_cfg(\n url=''),\n rexnetr_200=_cfg(\n url=''),\n)\n\n\ndef make_divisible(v, divisor=8, min_value=None):\n min_value = min_value or divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n return new_v\n\nclass ConvBn(nn.Module):\n def __init__(self, in_chs, out_chs, kernel_size,\n stride=1, dilation=1, pad_type='', \n norm_layer=nn.BatchNorm2d, groups = 1,norm_kwargs=None):\n super(ConvBn, self).__init__()\n norm_kwargs = norm_kwargs or {}\n self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, groups=groups,padding=pad_type)\n self.bn1 = norm_layer(out_chs, **norm_kwargs)\n\n def feature_info(self, location):\n if location == 'expansion': # output of conv after act, same as block coutput\n info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv.out_channels)\n return info\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn1(x)\n return x\n \n def fuse_module(self):\n modules_to_fuse = ['conv','bn1'] \n torch.quantization.fuse_modules(self, modules_to_fuse, inplace=True)\n \nclass ConvBnAct(nn.Module):\n def __init__(self, in_chs, out_chs, kernel_size,\n stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d, norm_kwargs=None):\n super(ConvBnAct, self).__init__()\n norm_kwargs = norm_kwargs or {}\n self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)\n self.bn1 = norm_layer(out_chs, **norm_kwargs)\n self.act1 = act_layer(inplace=True)\n self.out_channels = out_chs\n def feature_info(self, location):\n if location == 'expansion': # output of conv after act, same as block coutput\n info = dict(module='act1', hook_type='forward', 
num_chs=self.conv.out_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv.out_channels)\n return info\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn1(x)\n x = self.act1(x)\n return x\n def fuse_module(self):\n modules_to_fuse = ['conv','bn1'] \n if type(self.act1) == nn.ReLU:\n modules_to_fuse.append('act1') \n torch.quantization.fuse_modules(self, modules_to_fuse, inplace=True) \n \nclass SEWithNorm(nn.Module):\n\n def __init__(self, channels, reduction=16, act_layer=nn.ReLU, divisor=1, reduction_channels=None,\n gate_layer='sigmoid'):\n super(SEWithNorm, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n reduction_channels = reduction_channels or make_divisible(channels // reduction, divisor=divisor)\n self.fc1 = nn.Conv2d(\n channels, reduction_channels, kernel_size=1, padding=0, bias=True)\n self.bn = nn.BatchNorm2d(reduction_channels)\n self.act = act_layer(inplace=True)\n self.fc2 = nn.Conv2d(\n reduction_channels, channels, kernel_size=1, padding=0, bias=True)\n self.gate = create_act_layer(gate_layer)\n self.quant_mul = nn.quantized.FloatFunctional()\n def forward(self, x):\n x_se = self.avg_pool(x)\n x_se = self.fc1(x_se)\n x_se = self.bn(x_se)\n x_se = self.act(x_se)\n x_se = self.fc2(x_se)\n return self.quant_mul.mul(x, self.gate(x_se))\n \n def fuse_module(self): \n modules_to_fuse = ['fc1','bn','act'] \n torch.quantization.fuse_modules(self, modules_to_fuse, inplace=True)\n \n\nclass LinearBottleneck(nn.Module):\n def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, use_se=True, se_rd=12, ch_div=1):\n super(LinearBottleneck, self).__init__()\n self.use_shortcut = stride == 1 and in_chs <= out_chs\n self.in_channels = in_chs\n self.out_channels = out_chs\n\n if exp_ratio != 1.:\n dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)\n self.conv_exp = ConvBnAct(in_chs, dw_chs,1, act_layer=Swish)\n else:\n dw_chs = in_chs\n self.conv_exp = None\n\n self.conv_dw = ConvBn(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs)\n self.se = SEWithNorm(dw_chs, reduction=se_rd, divisor=ch_div) if use_se else None\n self.act_dw = nn.ReLU6()\n\n self.conv_pwl = ConvBn(dw_chs, out_chs, 1)\n if self.use_shortcut: \n self.skip_add = nn.quantized.FloatFunctional()\n \n def feat_channels(self, exp=False):\n return self.conv_dw.out_channels if exp else self.out_channels\n\n def forward(self, x):\n shortcut = x\n if self.conv_exp is not None:\n x = self.conv_exp(x)\n x = self.conv_dw(x)\n if self.se is not None:\n x = self.se(x)\n x = self.act_dw(x)\n x = self.conv_pwl(x)\n if self.use_shortcut:\n x[:, 0:self.in_channels]= self.skip_add.add(x[:, 0:self.in_channels], shortcut)\n return x\n\ndef _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, use_se=True, ch_div=1):\n layers = [1, 2, 2, 3, 3, 5]\n strides = [1, 2, 2, 2, 1, 2]\n layers = [ceil(element * depth_mult) for element in layers]\n strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])\n exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])\n depth = sum(layers[:]) * 3\n base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs\n\n # The following channel configuration is a simple instance to make each layer become an expand layer.\n out_chs_list = []\n for i in range(depth // 3):\n out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))\n base_chs += final_chs / (depth // 3 * 1.0)\n\n if use_se:\n use_ses = [False] * (layers[0] + 
layers[1]) + [True] * sum(layers[2:])\n else:\n use_ses = [False] * sum(layers[:])\n\n return zip(out_chs_list, exp_ratios, strides, use_ses)\n\n\ndef _build_blocks(block_cfg, prev_chs, width_mult, se_rd=12, ch_div=1, feature_location='bottleneck'):\n feat_exp = feature_location == 'expansion'\n feat_chs = [prev_chs]\n feature_info = []\n curr_stride = 2\n features = []\n for block_idx, (chs, exp_ratio, stride, se) in enumerate(block_cfg):\n if stride > 1:\n fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'\n if block_idx > 0 and feat_exp:\n fname += '.act_dw'\n feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]\n curr_stride *= stride\n features.append(LinearBottleneck(\n in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, use_se=se, se_rd=se_rd, ch_div=ch_div))\n prev_chs = chs\n feat_chs += [features[-1].feat_channels(feat_exp)]\n pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)\n feature_info += [dict(\n num_chs=pen_chs if feat_exp else feat_chs[-1], reduction=curr_stride,\n module=f'features.{len(features) - int(not feat_exp)}')]\n features.append(ConvBnAct(prev_chs, pen_chs,1, act_layer=Swish))\n return features, feature_info\n\n\nclass ReXNetV1(nn.Module):\n def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32,\n initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, use_se=True,\n se_rd=12, ch_div=1, drop_rate=0.2, feature_location='bottleneck'):\n super(ReXNetV1, self).__init__()\n self.drop_rate = drop_rate\n\n assert output_stride == 32 # FIXME support dilation\n stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32\n stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)\n self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer=Swish)\n\n block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, use_se, ch_div)\n features, self.feature_info = _build_blocks(\n block_cfg, stem_chs, width_mult, se_rd, ch_div, feature_location)\n self.num_features = features[-1].out_channels\n self.features = nn.Sequential(*features)\n\n self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)\n \n # Quantization Stubs\n self.quant = torch.quantization.QuantStub()\n self.dequant = torch.quantization.DeQuantStub() \n # FIXME weight init, the original appears to use PyTorch defaults\n\n def get_classifier(self):\n return self.head.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)\n\n def forward_features(self, x):\n x = self.stem(x)\n x = self.features(x)\n return x\n\n def forward(self, x):\n x = self.quant(x)\n x = self.forward_features(x)\n x = self.head(x)\n x = self.dequant(x)\n return x\n\n def fuse_model(self): \n for m in self.modules():\n if type(m) in [ConvBnAct, ConvBn, SEWithNorm]:\n m.fuse_module()\ndef _create_rexnet(variant, pretrained, **kwargs):\n feature_cfg = dict(flatten_sequential=True)\n if kwargs.get('feature_location', '') == 'expansion':\n feature_cfg['feature_cls'] = 'hook'\n return build_model_with_cfg(\n ReXNetV1, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=feature_cfg, **kwargs)\n\n\n@register_model\ndef quant_rexnet_100(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.0x\"\"\"\n return _create_rexnet('rexnet_100', pretrained, **kwargs)\n\n\n@register_model\ndef quant_rexnet_130(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.3x\"\"\"\n 
return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)\n\n\n@register_model\ndef quant_rexnet_150(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.5x\"\"\"\n return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)\n\n\n@register_model\ndef quant_rexnet_200(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 2.0x\"\"\"\n return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)\n\n\n@register_model\ndef quant_rexnetr_100(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.0x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)\n\n\n@register_model\ndef quant_rexnetr_130(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.3x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)\n\n\n@register_model\ndef quant_rexnetr_150(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 1.5x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)\n\n\n@register_model\ndef quant_rexnetr_200(pretrained=False, **kwargs):\n \"\"\"ReXNet V1 2.0x w/ rounded (mod 8) channels\"\"\"\n return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)\n"
]
| [
[
"torch.nn.Sequential",
"torch.nn.ReLU6",
"torch.quantization.fuse_modules",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.quantization.DeQuantStub",
"torch.nn.quantized.FloatFunctional",
"torch.quantization.QuantStub"
]
]
|
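`make_divisible` in the record above rounds channel counts to a multiple of `divisor` without dropping below `min_value`; the `rexnetr_*` variants pass `ch_div=8` so every layer width lands on a multiple of 8. A quick check of its behavior (pure Python, adapted from the definition above):

```python
def make_divisible(v, divisor=8, min_value=None):
    min_value = min_value or divisor
    # Round to the nearest multiple of `divisor`, but never below `min_value`.
    return max(min_value, int(v + divisor / 2) // divisor * divisor)

print(make_divisible(37))      # 40 (nearest multiple of 8, rounding half up)
print(make_divisible(3))       # 8  (floored at min_value == divisor)
print(make_divisible(100, 8))  # 104
```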
qingquansong/detext | [
"66df145e653ce05af094d3379e27b60d0d3c81b4"
]
| [
"src/detext/train/optimization.py"
]
| [
"import re\nimport tensorflow as tf\n\n\ndef create_optimizer(hparams, loss):\n \"\"\"\n Creates an optimizer training op.\n If the parameter lr_bert is specified, then use another adam for this learning rate.\n \"\"\"\n tvars = tf.trainable_variables()\n\n # Print trainable variables\n print(\"# Trainable variables\")\n total_param = 0\n for param in tvars:\n if param.name.startswith('bert'):\n psize = 1\n for s in param.get_shape():\n psize *= s\n total_param += psize\n print(\" %s, %s, %s\" % (param.name, str(param.get_shape()), param.op.device))\n print('total bert parameters:', total_param)\n\n # Define optimizer parameters\n init_lr = hparams.learning_rate\n num_train_steps = hparams.num_train_steps\n num_warmup_steps = hparams.num_warmup_steps\n lr_bert = hparams.lr_bert\n\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n if hparams.optimizer == \"bert_adam\":\n # Using optimizer with bert's implementation\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = ((1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if hparams.use_horovod:\n import horovod.tensorflow as hvd\n # Horovod's distributed optimizer handles allreduce calls, synchronous only\n optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True)\n grads_and_vars = optimizer.compute_gradients(loss, tvars)\n grads = [grad for grad, var in grads_and_vars]\n tvars = [var for grad, var in grads_and_vars]\n else:\n grads = tf.gradients(loss, tvars)\n\n grads, grad_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n if lr_bert is None:\n # If not a separate learning rate for bert (lr_bert) is specified,\n # all components use the same learning rate\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n else:\n # the BERT components will use another learning rate\n optimizer_bert = AdamWeightDecayOptimizer(\n learning_rate=learning_rate * lr_bert / init_lr,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n bert_grad, bert_tvars = [], []\n other_grad, other_tvars = [], []\n for grad, tvar in zip(grads, tvars):\n if tvar is not None and grad is not None:\n if tvar.name.startswith('bert'):\n bert_grad.append(grad)\n bert_tvars.append(tvar)\n print('****bert param:', tvar.name)\n else:\n other_grad.append(grad)\n other_tvars.append(tvar)\n print('****other param:', tvar.name)\n print('--------------\\n', '# of bert', len(bert_grad), '# of other', len(other_grad), '\\n--------------')\n bert_train_op = optimizer_bert.apply_gradients(\n zip(bert_grad, bert_tvars), global_step=global_step)\n other_train_op = optimizer.apply_gradients(\n zip(other_grad, other_tvars), global_step=global_step)\n\n new_global_step = global_step + 1\n train_op = tf.group(bert_train_op, other_train_op, [global_step.assign(new_global_step)])\n\n return train_op, grad_norm, learning_rate\n\n elif hparams.optimizer == \"sgd\":\n opt = tf.train.GradientDescentOptimizer(learning_rate)\n elif hparams.optimizer == \"adam\":\n opt = tf.train.AdamOptimizer(learning_rate)\n else:\n raise ValueError(\"Only support sgd/adam/bert_adam as optimizer option\")\n\n # Gradients\n gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n clipped_gradients, grad_norm = tf.clip_by_global_norm(gradients, hparams.max_gradient_norm)\n train_op = opt.apply_gradients(zip(clipped_gradients, tvars), global_step=global_step)\n\n return train_op, grad_norm, learning_rate\n\n\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 
regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n"
]
| [
[
"tensorflow.train.polynomial_decay",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.train.get_or_create_global_step",
"tensorflow.clip_by_global_norm",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.AdamOptimizer",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.sqrt",
"tensorflow.group"
]
]
|
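The `bert_adam` branch in the record above composes a linear warmup (`global_step / num_warmup_steps * init_lr`) with a power-1 polynomial decay to zero. A pure-Python sketch of the resulting schedule, using the same formulas:

```python
def bert_lr(step, init_lr=5e-5, num_train_steps=10000, num_warmup_steps=1000):
    """Linear warmup followed by linear decay to 0 (polynomial_decay, power=1)."""
    if num_warmup_steps and step < num_warmup_steps:
        return init_lr * step / num_warmup_steps          # warmup ramp
    frac = min(step, num_train_steps) / num_train_steps   # clipped progress
    return init_lr * (1.0 - frac)                         # linear decay to 0.0

for s in (0, 500, 1000, 5000, 10000):
    print(s, bert_lr(s))
```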
416104443/QUANTAXIS | [
"23907d5e1398bb57f3e8d9d50c21d9fb5bfe3e86"
]
| [
"QUANTAXIS/QABacktest/QABacktest_stock_day.py"
]
| [
"# coding=utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2017 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport csv\nimport datetime\nimport json\nimport os\nimport random\nimport re\nimport sys\nimport time\nimport apscheduler\nimport numpy as np\nimport pandas as pd\nimport pymongo\nfrom QUANTAXIS import (QA_Market, QA_Portfolio, QA_QAMarket_bid, QA_Risk,\n __version__)\nfrom QUANTAXIS.QAARP.QAAccount import QA_Account\nfrom QUANTAXIS.QABacktest.QAAnalysis import QA_backtest_analysis_start\nfrom QUANTAXIS.QAFetch.QAQuery import (QA_fetch_index_day, QA_fetch_stock_day,\n QA_fetch_stock_info,\n QA_fetch_stocklist_day,\n QA_fetch_trade_date)\nfrom QUANTAXIS.QASU.save_backtest import (QA_SU_save_account_message,\n QA_SU_save_account_to_csv)\nfrom QUANTAXIS.QAUtil import (QA_Setting, QA_util_get_real_date,\n QA_util_log_info, QA_util_log_expection)\n\nfrom QUANTAXIS.QATask import QA_Queue\nfrom tabulate import tabulate\n\nimport configparser\nimport queue\nfrom functools import wraps, update_wrapper, reduce\n\n\nclass QA_Backtest_stock_day():\n '最终目的还是实现一个通用的回测类'\n backtest_type = 'day'\n account = QA_Account()\n market = QA_Market()\n bid = QA_QAMarket_bid()\n setting = QA_Setting()\n clients = setting.client\n user = setting.QA_setting_user_name\n market_data = []\n now = ''\n today = ''\n\n def __init__(self):\n self.backtest_type = 'day'\n self.account = QA_Account()\n self.market = QA_Market()\n self.bid = QA_QAMarket_bid()\n self.setting = QA_Setting()\n self.clients = self.setting.client\n self.user = self.setting.QA_setting_user_name\n self.market_data = []\n self.now = ''\n self.today = ''\n\n def __QA_backtest_init(self):\n \"\"\"既然是被当做装饰器使用,就需要把变量设置放在装饰函数的前面,把函数放在装饰函数的后面\"\"\"\n\n # 设置回测的开始结束时间\n self.strategy_start_date = str('2017-01-05')\n self.strategy_end_date = str('2017-07-01')\n # 设置回测标的,是一个list对象,不过建议只用一个标的\n # gap是回测时,每日获取数据的前推日期(交易日)\n self.strategy_gap = int(60)\n # 设置全局的数据库地址,回测用户名,密码,并初始化\n self.setting.QA_util_sql_mongo_ip = str('127.0.0.1')\n self.setting.QA_setting_user_name = str('admin')\n self.setting.QA_setting_user_password = str('admin')\n self.setting.QA_setting_init()\n # 回测的名字\n self.strategy_name = str('example_min')\n # 股票的交易日历,真实回测的交易周期,和交易周期在交易日历中的id\n self.trade_list = QA_fetch_trade_date(\n self.setting.client.quantaxis.trade_date)\n self.benchmark_code = 'hs300'\n \"\"\"\n 这里会涉及一个区间的问题,开始时间是要向后推,而结束时间是要向前推,1代表向后推,-1代表向前推\n \"\"\"\n\n self.strategy_stock_list = ['000001', '000002', '000004']\n 
self.account.init_assest = 1000000\n self.backtest_bid_model = 'market_price'\n\n def __QA_backtest_prepare(self):\n \"\"\"\n 这是模型内部的 初始化,主要是初始化一些账户和市场资产\n 写成了私有函数\n @yutiansut\n 2017/7/20\n \"\"\"\n\n # 重新初始账户资产\n\n self.setting.QA_setting_init()\n self.account.init()\n self.start_real_date = QA_util_get_real_date(\n self.strategy_start_date, self.trade_list, 1)\n self.start_real_id = self.trade_list.index(self.start_real_date)\n self.end_real_date = QA_util_get_real_date(\n self.strategy_end_date, self.trade_list, -1)\n self.end_real_id = self.trade_list.index(self.end_real_date)\n # 重新初始化账户的cookie\n self.account.account_cookie = str(random.random())\n # 初始化股票池的市场数据\n\n if self.backtest_type in ['day', 'd', '0x00']:\n self.market_data = QA_fetch_stocklist_day(\n self.strategy_stock_list,\n [self.trade_list[self.start_real_id - int(self.strategy_gap)],\n self.trade_list[self.end_real_id]])\n elif self.backtest_type in ['min', 'm', '0x01']:\n self.market_data = QA_fetch_stocklist_min(\n self.strategy_stock_list, [self.trade_list[\n self.start_real_id - int(self.strategy_gap)],\n self.trade_list[self.end_real_id]])\n\n def __QA_backtest_start(self, *args, **kwargs):\n \"\"\"\n 这个是回测流程开始的入口\n \"\"\"\n assert len(self.strategy_stock_list) > 0\n assert len(self.trade_list) > 0\n assert isinstance(self.start_real_date, str)\n assert isinstance(self.end_real_date, str)\n assert len(self.market_data) == len(self.strategy_stock_list)\n\n QA_util_log_info('QUANTAXIS Backtest Engine Initial Successfully')\n QA_util_log_info('Basical Info: \\n' + tabulate(\n [[str(__version__), str(self.strategy_name)]], headers=('Version', 'Strategy_name')))\n QA_util_log_info('BACKTEST Cookie_ID is: ' +\n str(self.account.account_cookie))\n QA_util_log_info('Stock_List: \\n' +\n tabulate([self.strategy_stock_list]))\n\n # 初始化报价模式\n self.__QA_backtest_set_bid_model(self)\n self.__messages = []\n\n def __QA_backtest_set_bid_model(self):\n\n if self.backtest_bid_model == 'market_price':\n self.bid.price = 'market_price'\n self.bid.bid_model = 'auto'\n elif self.backtest_bid_model == 'close_price':\n self.bid.price = 'close_price'\n self.bid.bid_model = 'auto'\n elif self.backtest_bid_model == 'strategy':\n self.bid.price = 0\n self.bid.bid_model = 'strategy'\n else:\n QA_util_log_info('support bid model')\n sys.exit()\n\n def __check_state(self, bid_price, bid_amount):\n pass\n\n def __QA_bid_amount(self, __strategy_amount, __amount):\n if __strategy_amount == 'mean':\n return float(float(self.account.message['body']['account']['cash'][-1]) /\n len(self.strategy_stock_list)), 'price'\n elif __strategy_amount == 'half':\n return __amount * 0.5, 'amount'\n elif __strategy_amount == 'all':\n return __amount, 'amount'\n\n def __end_of_trading(self, *arg, **kwargs):\n # 在回测的最后一天,平掉所有仓位(回测的最后一天是不买入的)\n # 回测最后一天的交易处理\n\n while len(self.account.hold) > 1:\n __hold_list = self.account.hold[1::]\n pre_del_id = []\n for item_ in range(0, len(__hold_list)):\n if __hold_list[item_][3] > 0:\n __last_bid = self.bid\n __last_bid.amount = int(__hold_list[item_][3])\n __last_bid.order_id = str(random.random())\n __last_bid.price = 'close_price'\n __last_bid.code = str(__hold_list[item_][1])\n __last_bid.date = self.trade_list[self.end_real_id]\n __last_bid.towards = -1\n __last_bid.user = self.setting.QA_setting_user_name\n __last_bid.strategy = self.strategy_name\n __last_bid.bid_model = 'auto'\n __last_bid.type = '0x01'\n __last_bid.amount_model = 'amount'\n\n __message = self.market.receive_bid(\n __last_bid)\n _remains_day = 0\n 
while __message['header']['status'] == 500:\n # 停牌状态,这个时候按停牌的最后一天计算价值(假设平仓)\n\n __last_bid.date = self.trade_list[self.end_real_id - _remains_day]\n _remains_day += 1\n __message = self.market.receive_bid(\n __last_bid)\n\n # 直到市场不是为0状态位置,停止前推日期\n\n self.__messages = self.account.QA_account_receive_deal(\n __message)\n else:\n pre_del_id.append(item_)\n pre_del_id.sort()\n pre_del_id.reverse()\n for item_x in pre_del_id:\n __hold_list.pop(item_x)\n\n def __end_of_backtest(self, *arg, **kwargs):\n\n # 开始分析\n QA_util_log_info('start analysis====\\n' +\n str(self.strategy_stock_list))\n QA_util_log_info('=' * 10 + 'Trade History' + '=' * 10)\n QA_util_log_info('\\n' + tabulate(self.account.history,\n headers=('date', 'code', 'price', 'towards',\n 'amounts', 'order_id', 'trade_id', 'commission')))\n QA_util_log_info('\\n' + tabulate(self.account.detail,\n headers=('date', 'code', 'price', 'amounts', 'order_id',\n 'trade_id', 'sell_price', 'sell_order_id',\n 'sell_trade_id', 'sell_date', 'left_amount',\n 'commission')))\n __exist_time = int(self.end_real_id) - int(self.start_real_id) + 1\n self.__benchmark_data = QA_fetch_index_day(\n self.benchmark_code, self.start_real_date,\n self.end_real_date)\n if len(self.__messages) > 1:\n performace = QA_backtest_analysis_start(\n self.setting.client, self.strategy_stock_list, self.__messages,\n self.trade_list[self.start_real_id:self.end_real_id + 1],\n self.market_data, self.__benchmark_data)\n _backtest_mes = {\n 'user': self.setting.QA_setting_user_name,\n 'strategy': self.strategy_name,\n 'stock_list': performace['code'],\n 'start_time': self.strategy_start_date,\n 'end_time': self.strategy_end_date,\n 'account_cookie': self.account.account_cookie,\n 'annualized_returns': performace['annualized_returns'],\n 'benchmark_annualized_returns': performace['benchmark_annualized_returns'],\n 'assets': performace['assets'],\n 'benchmark_assets': performace['benchmark_assets'],\n 'trade_date': performace['trade_date'],\n 'total_date': performace['total_date'],\n 'win_rate': performace['win_rate'],\n 'alpha': performace['alpha'],\n 'beta': performace['beta'],\n 'sharpe': performace['sharpe'],\n 'vol': performace['vol'],\n 'benchmark_vol': performace['benchmark_vol'],\n 'max_drop': performace['max_drop'],\n 'exist': __exist_time,\n 'time': datetime.datetime.now()\n }\n QA_SU_save_backtest_message(_backtest_mes, self.setting.client)\n QA_SU_save_account_message(self.__messages, self.setting.client)\n QA_SU_save_account_to_csv(self.__messages)\n # QA.QA_SU_save_backtest_message(analysis_message, self.setting.client)\n\n def QA_backtest_get_market_data(self, code, date, type_='numpy'):\n '这个函数封装了关于获取的方式'\n index_of_code = self.strategy_stock_list.index(code)\n __res = self.market_data[index_of_code][:date].tail(self.strategy_gap)\n if type_ in ['l', 'list', 'L']:\n return np.asarray(__res).tolist()\n elif type_ in ['pd', 'pandas', 'p']:\n return __res\n else:\n return np.asarray(__res)\n\n def QA_backtest_hold_amount(self, __code):\n return sum(list(map(lambda item: item[3] if __code in item else 0, self.account.hold)))\n\n def QA_backtest_get_OHLCV(self, __data):\n '快速返回 OHLCV格式'\n return (__data.T[1].astype(float).tolist(), __data.T[2].astype(float).tolist(),\n __data.T[3].astype(float).tolist(\n ), __data.T[4].astype(float).tolist(),\n __data.T[5].astype(float).tolist())\n\n def QA_backtest_send_order(self, __code, __amount, __towards, __order):\n \"\"\"\n 2017/8/4\n 委托函数\n 在外部封装的一个报价接口,尽量满足和实盘一样的模式\n\n 输入\n =============\n 买入/卖出\n 股票代码\n 买入/卖出数量\n 委托模式*\n 0 
限价委托 LIMIT ORDER\n 1 市价委托 MARKET ORDER\n 2 严格模式(买入按最高价 卖出按最低价) STRICT ORDER\n\n\n 输出\n =============\n 返回: \n\n 委托状态/委托id\n\n 成交状态/成交id/成交量/成交价\n\n 错误/错误id/\n\n return bid_status,trade_status,error\n \"\"\"\n\n # 必须是100股的倍数\n __amount = int(__amount / 100) * 100\n\n # self.__QA_backtest_set_bid_model()\n if __order['bid_model'] in ['limit', 'Limit', 'Limited', 'limited', 'l', 'L', 0, '0']:\n # 限价委托模式\n __bid_price = __order['price']\n elif __order['bid_model'] in ['Market', 'market', 'MARKET', 'm', 'M', 1, '1']:\n __bid_price = 'market_price'\n elif __order['bid_model'] in ['strict', 'Strict', 's', 'S', '2', 2]:\n __bid_price = 'strict_price'\n elif __order['bid_model'] in ['close', 'close_price', 'c', 'C', '3', 3]:\n __bid_price = 'close_price'\n __bid = self.bid\n\n __bid.order_id = str(random.random())\n __bid.user = self.setting.QA_setting_user_name\n __bid.strategy = self.strategy_name\n __bid.code = __code\n __bid.date = self.running_date\n __bid.datetime = self.running_date\n __bid.sending_time = self.running_date\n __bid.price = __bid_price\n __bid.amount = __amount\n\n if __towards == 1:\n # 这是买入的情况 买入的时候主要考虑的是能不能/有没有足够的钱来买入\n\n __bid.towards = 1\n __message = self.market.receive_bid(\n __bid)\n\n # 先扔进去买入,再通过返回的值来判定是否成功\n\n if float(self.account.message['body']['account']['cash'][-1]) > \\\n float(__message['body']['bid']['price']) * \\\n float(__message['body']['bid']['amount']):\n # 这里是买入资金充足的情况\n # 不去考虑\n pass\n else:\n # 如果买入资金不充足,则按照可用资金去买入\n # 这里可以这样做的原因是在买入的时候 手续费为0\n __message['body']['bid']['amount'] = int(float(\n self.account.message['body']['account']['cash'][-1]) / float(\n float(str(__message['body']['bid']['price'])[0:5]) * 100)) * 100\n\n if __message['body']['bid']['amount'] > 0:\n # 这个判断是为了 如果买入资金不充足,所以买入报了一个0量单的情况\n #如果买入量>0, 才判断为成功交易\n self.messages=self.account.QA_account_receive_deal(__message)\n return __message\n\n # 下面是卖出操作,这里在卖出前需要考虑一个是否有仓位的问题:\n # 因为在股票中是不允许卖空操作的,所以这里是股票的交易引擎和期货的交易引擎的不同所在\n\n elif __towards == -1:\n # 如果是卖出操作 检查是否有持仓\n # 股票中不允许有卖空操作\n # 检查持仓面板\n __amount_hold = self.QA_backtest_hold_amount(self, __code)\n if __amount_hold > 0:\n __bid.towards = -1\n __bid.amount = __amount_hold if __amount_hold < __amount else __bid.amount\n __message = self.market.receive_bid(\n __bid)\n if __message['header']['status'] == 200:\n self.messages=self.account.QA_account_receive_deal(__message)\n return __message\n else:\n err_info = 'Error: Not Enough amount for code %s in hold list' % str(\n __code)\n QA_util_log_expection(err_info)\n return err_info\n\n else:\n return \"Error: No buy/sell towards\"\n\n def QA_backtest_check_order(self, order):\n '用于检查委托单的状态'\n \"\"\"\n 委托单被报入交易所会有一个回报,回报状态就是交易所返回的字段:\n 字段目前 2xx 是成功 4xx是失败 5xx是交易所无数据(停牌)\n\n 随着回测框架的不断升级,会有更多状态需要被管理:\n\n\n 200 委托成功,交易成功\n 203 委托成功,待成交\n 20\n \"\"\"\n pass\n\n def QA_backtest_sell_all(self):\n while len(self.account.hold) > 1:\n __hold_list = self.account.hold[1::]\n pre_del_id = []\n\n def __sell(id_):\n if __hold_list[id_][3] > 0:\n __last_bid = self.bid\n __last_bid.amount = int(__hold_list[id_][3])\n __last_bid.order_id = str(random.random())\n __last_bid.price = 'close_price'\n __last_bid.code = str(__hold_list[id_][1])\n __last_bid.date = self.now\n __last_bid.towards = -1\n __last_bid.user = self.setting.QA_setting_user_name\n __last_bid.strategy = self.strategy_name\n __last_bid.bid_model = 'auto'\n __last_bid.type = '0x01'\n __last_bid.amount_model = 'amount'\n\n __message = self.market.receive_bid(\n __last_bid)\n _remains_day = 0\n while __message['header']['status'] == 500:\n # 
停牌状态,这个时候按停牌的最后一天计算价值(假设平仓)\n\n __last_bid.date = self.trade_list[self.end_real_id - _remains_day]\n _remains_day += 1\n __message = self.market.receive_bid(\n __last_bid)\n\n # 直到市场不是为0状态位置,停止前推日期\n\n self.__messages = self.account.QA_account_receive_deal(\n __message)\n else:\n pre_del_id.append(id_)\n return pre_del_id\n\n pre_del_id = reduce(lambda _, x: __sell(x),\n range(len(__hold_list)))\n pre_del_id.sort()\n pre_del_id.reverse()\n for item_x in pre_del_id:\n __hold_list.pop(item_x)\n\n @classmethod\n def load_strategy(__backtest_cls, func, *arg, **kwargs):\n '策略加载函数'\n\n # 首先判断是否能满足回测的要求`\n __messages = {}\n __backtest_cls.__init_cash_per_stock = int(\n float(__backtest_cls.account.init_assest) / len(__backtest_cls.strategy_stock_list))\n\n # 策略的交易日循环\n for i in range(int(__backtest_cls.start_real_id), int(__backtest_cls.end_real_id) - 1, 1):\n __backtest_cls.running_date = __backtest_cls.trade_list[i]\n QA_util_log_info(\n '=================daily hold list====================')\n QA_util_log_info('in the begining of ' +\n __backtest_cls.running_date)\n QA_util_log_info(\n tabulate(__backtest_cls.account.message['body']['account']['hold']))\n __backtest_cls.now = __backtest_cls.running_date\n __backtest_cls.today = __backtest_cls.running_date\n func(*arg, **kwargs)\n\n # 最后一天\n __backtest_cls.__end_of_trading(__backtest_cls)\n\n @classmethod\n def backtest_init(__backtest_cls, func, *arg, **kwargs):\n def __init_backtest(__backtest_cls, *arg, **kwargs):\n __backtest_cls.__QA_backtest_init(__backtest_cls)\n func(*arg, **kwargs)\n __backtest_cls.__QA_backtest_prepare(__backtest_cls)\n return __init_backtest(__backtest_cls)\n\n @classmethod\n def before_backtest(__backtest_cls, func, *arg, **kwargs):\n func(*arg, **kwargs)\n __backtest_cls.__QA_backtest_start(__backtest_cls)\n\n @classmethod\n def end_backtest(__backtest_cls, func, *arg, **kwargs):\n # yield __backtest_cls.cash\n __backtest_cls.__end_of_backtest(__backtest_cls, func, *arg, **kwargs)\n return func(*arg, **kwargs)\n\n\nif __name__ == '__main__':\n\n pass\n"
]
| [
[
"numpy.asarray"
]
]
|
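The backtest engine in the code cell above floors every order to a 100-share board lot (`int(__amount / 100) * 100`) and, when cash is short, re-sizes the buy to the largest affordable lot-aligned amount. A minimal standalone restatement of that arithmetic, assuming the same lot size of 100; the helper names here are illustrative, not part of the framework above:

def board_lot(amount, lot=100):
    # Floor an order size to a whole number of board lots,
    # mirroring `int(__amount / 100) * 100` in the engine above.
    return int(amount / lot) * lot

def max_affordable(cash, price, lot=100):
    # Largest lot-aligned share count that `cash` can buy at `price` per share.
    return int(cash / (price * lot)) * lot

print(board_lot(257))              # 200
print(max_affordable(10000, 9.5))  # 1000 (10 lots of 100 shares at 950 per lot)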
Tarheel-Formal-Methods/kaa-optimize | [
"35fe7b580df3b5efe7de9314b821c257f68d74bf"
]
| [
"models/basic/basic.py"
]
| [
"import sympy as sp\nimport numpy as np\n\nfrom kaa.bundle import Bundle\nfrom kaa.model import Model\n\nclass Basic(Model):\n\n def __init__(self):\n\n x,y = sp.Symbol('x'), sp.Symbol('y')\n\n dx = x + 1\n dy = y + 1\n\n dyns = [dx, dy]\n vars = [x, y]\n\n #L = np.empty([4,2])\n #T = np.empty([2,2])\n L = np.empty([2,2])\n T = np.empty([1,2])\n\n\n L[0] = [1, 0]\n L[1] = [0, 1]\n #L[0] = [1, 1]\n #L[1] = [-1, 1]\n\n\n T[0][0] = 0\n T[0][1] = 1\n\n #T[1][0] = 2\n #T[1][1] = 3\n\n offu = np.empty(2)\n offl = np.empty(2)\n\n offu[0] = 1\n offu[1] = 1\n #offu[2] = 5\n #offu[3] = 5\n\n offl[0] = 1\n offl[1] = 1\n #offl[2] = 5\n #offl[3] = 5\n #\n super().__init__(dyns, vars, T, L, offu, offl, name=\"Basic\")\n"
]
| [
[
"numpy.empty"
]
]
|
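In the `Basic` model above, the rows of `L` are constraint directions and `offu`/`offl` are offsets along those directions and their negations. As a sketch of the geometry only (an assumed reading, not Kaa's `Bundle` API), the described set is `{x : L @ x <= offu and -(L @ x) <= offl}`, here the unit box:

import numpy as np

L = np.array([[1.0, 0.0],
              [0.0, 1.0]])   # same directions as the model above
offu = np.array([1.0, 1.0])
offl = np.array([1.0, 1.0])

def contains(x):
    # Membership test for the polytope {x : L@x <= offu, -(L@x) <= offl}.
    proj = L @ np.asarray(x, dtype=float)
    return bool(np.all(proj <= offu) and np.all(-proj <= offl))

print(contains([0.5, -0.5]))  # True: inside [-1, 1]^2
print(contains([1.5, 0.0]))   # False: violates x <= 1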
Landanjs/composer | [
"de75822706bc8a33e7d487b33a0b994944b9c806"
]
| [
"tests/common/datasets.py"
]
| [
"# Copyright 2022 MosaicML Composer authors\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nfrom typing import List, Optional, Sequence\n\nimport pytest\nimport torch\nimport torch.utils.data\nimport yahp as hp\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision.datasets import VisionDataset\n\nfrom composer.datasets import GLUEHparams, LMDatasetHparams\nfrom composer.datasets.dataloader import DataLoaderHparams\nfrom composer.datasets.hparams import DatasetHparams, SyntheticHparamsMixin\nfrom composer.models import ModelHparams\nfrom composer.models.transformer_hparams import TransformerHparams\nfrom tests.common.models import model_hparams_to_tokenizer_family\n\n\nclass RandomClassificationDataset(Dataset):\n \"\"\"Classification dataset drawn from a normal distribution.\n\n Args:\n shape (Sequence[int]): shape of features (default: (5, 1, 1))\n size (int): number of samples (default: 100)\n num_classes (int): number of classes (default: 2)\n \"\"\"\n\n def __init__(self, shape: Sequence[int] = (1, 1, 1), size: int = 100, num_classes: int = 2):\n self.size = size\n self.x = torch.randn(size, *shape)\n self.y = torch.randint(0, num_classes, size=(size,))\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, index: int):\n return self.x[index], self.y[index]\n\n\[email protected]\nclass RandomClassificationDatasetHparams(DatasetHparams, SyntheticHparamsMixin):\n\n data_shape: List[int] = hp.optional(\"data shape\", default_factory=lambda: [1, 1, 1])\n num_classes: int = hp.optional(\"num_classes\", default=2)\n\n def initialize_object(self, batch_size: int, dataloader_hparams: DataLoaderHparams):\n assert self.data_shape is not None\n assert self.num_classes is not None\n dataset = RandomClassificationDataset(\n size=self.synthetic_num_unique_samples,\n shape=self.data_shape,\n num_classes=self.num_classes,\n )\n if self.shuffle:\n sampler = torch.utils.data.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.SequentialSampler(dataset)\n return dataloader_hparams.initialize_object(\n dataset=dataset,\n batch_size=batch_size,\n sampler=sampler,\n drop_last=self.drop_last,\n )\n\n\nclass RandomImageDataset(VisionDataset):\n \"\"\" Image Classification dataset with values drawn from a normal distribution\n Args:\n shape (Sequence[int]): shape of features. 
Defaults to (32, 32, 3)\n size (int): number of samples (default: 100)\n num_classes (int): number of classes (default: 2)\n is_PIL (bool): if true, will emit image in PIL format (default: False)\n \"\"\"\n\n def __init__(self, shape: Sequence[int] = (3, 32, 32), size: int = 100, num_classes: int = 2, is_PIL: bool = False):\n self.is_PIL = is_PIL\n if is_PIL: # PIL expects HWC\n shape = (shape[1], shape[2], shape[0])\n\n self.size = size\n self.x = torch.randn(size, *shape)\n self.y = torch.randint(0, num_classes, size=(size,))\n\n super().__init__(root='')\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, index: int):\n x = self.x[index]\n y = self.y[index]\n\n if self.is_PIL:\n x = x.numpy()\n x = (x - x.min())\n x = (x * (255 / x.max())).astype(\"uint8\")\n x = Image.fromarray(x)\n\n if self.transform is not None:\n return self.transform(x), y\n else:\n return x, y\n\n\ndef configure_dataset_hparams_for_synthetic(\n dataset_hparams: DatasetHparams,\n model_hparams: Optional[ModelHparams] = None,\n) -> None:\n if not isinstance(dataset_hparams, SyntheticHparamsMixin):\n pytest.xfail(f\"{dataset_hparams.__class__.__name__} does not support synthetic data or num_total_batches\")\n\n assert isinstance(dataset_hparams, SyntheticHparamsMixin)\n\n dataset_hparams.use_synthetic = True\n\n if isinstance(model_hparams, TransformerHparams):\n if type(model_hparams) not in model_hparams_to_tokenizer_family:\n raise ValueError(f\"Model {type(model_hparams)} is currently not supported for synthetic testing!\")\n\n tokenizer_family = model_hparams_to_tokenizer_family[type(model_hparams)]\n assert isinstance(dataset_hparams, (GLUEHparams, LMDatasetHparams))\n dataset_hparams.tokenizer_name = tokenizer_family\n dataset_hparams.max_seq_length = 128\n"
]
| [
[
"torch.randn",
"torch.utils.data.SequentialSampler",
"torch.randint",
"torch.utils.data.RandomSampler"
]
]
|
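`RandomClassificationDataset` above is a plain `torch.utils.data.Dataset`, so it also works outside the hparams machinery. A self-contained usage sketch (batch size and shapes are arbitrary here):

import torch
from torch.utils.data import DataLoader, Dataset

class RandomClassificationDataset(Dataset):
    # Same idea as the dataset above, restated so this snippet runs standalone.
    def __init__(self, shape=(1, 1, 1), size=100, num_classes=2):
        self.x = torch.randn(size, *shape)
        self.y = torch.randint(0, num_classes, size=(size,))

    def __len__(self):
        return len(self.y)

    def __getitem__(self, i):
        return self.x[i], self.y[i]

loader = DataLoader(RandomClassificationDataset(), batch_size=16, shuffle=True)
xb, yb = next(iter(loader))
print(xb.shape, yb.shape)  # torch.Size([16, 1, 1, 1]) torch.Size([16])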
roosephu/boots | [
"2f4f500f54feb95cf36abd863f3de4510d6f4950"
]
| [
"boots/v_function/mlp_v_function.py"
]
| [
"from typing import List\nimport tensorflow as tf\nimport lunzi as lz\nfrom lunzi.typing import *\nimport lunzi.nn as nn\n\n\nclass MLPVFunction(nn.Module, BaseVFunction):\n def __init__(self, dim_state: int, hidden_sizes: List[int], normalizer: nn.Module = None):\n super().__init__()\n\n self.mlp = lz.MultiLayerPerceptron((dim_state, *hidden_sizes, 1), nn.ReLU, squeeze=True)\n self.normalizer = normalizer\n self.op_states = tf.placeholder(tf.float32, shape=[None, dim_state])\n self.op_values = self.forward(self.op_states)\n\n def forward(self, states):\n if self.normalizer is not None:\n states = self.normalizer(states)\n return self.mlp(states)\n\n @nn.make_method(fetch='values')\n def get_values(self, states): pass\n\n def copy(self):\n return MLPVFunction(self.mlp.blocks[0], self.mlp.blocks[1:-1], self.normalizer)\n"
]
| [
[
"tensorflow.placeholder"
]
]
|
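`MLPVFunction` above is TF1-style graph code: a `tf.placeholder` feeds a network that is built once at construction time and evaluated via `session.run`. A minimal sketch of that pattern, with a single `compat.v1` dense layer standing in for lunzi's `MultiLayerPerceptron` (the stand-in and the dimensions are assumptions made to keep the snippet runnable):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

dim_state = 4
states = tf.placeholder(tf.float32, shape=[None, dim_state])
values = tf.squeeze(tf.layers.dense(states, 1), axis=-1)  # stand-in for the MLP

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(values, feed_dict={states: np.zeros((3, dim_state), np.float32)})
    print(out.shape)  # (3,)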
iseong83/3d_object_reconstruction | [
"f104cdf9cf7c5a0a7d659073923b991b8ebc5b9f"
]
| [
"Reco3D/lib/vis.py"
]
| [
"import io\nimport os\nimport re\nimport json\nimport sys\nimport math\nimport shutil\nimport numpy as np\n#import tensorflow as tf\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage import exposure\nfrom PIL import Image\nfrom Reco3D.lib import utils, dataset, network\nfrom moviepy.video.io.bindings import mplfig_to_npimage\n\n\ndef save_im(im, f_name=None, ndarray=False):\n fig = plt.figure()\n if ndarray:\n fig.set_tight_layout(True)\n fig.canvas.draw()\n ret = np.array(fig.canvas.renderer._renderer)\n fig.clf()\n plt.close()\n return ret\n\n if f_name is not None:\n params = utils.read_params()\n f_name = os.path.join(params[\"DIRS\"][\"OUTPUT\"], f_name)\n utils.make_prev_dirs(f_name)\n plt.imsave(f_name, im)\n plt.clf()\n plt.close()\n\n return plt.imshow(im)\n\n\ndef voxel(vox, color=None, f_name=None, npimage=False, view=(30, 45)):\n assert(vox.ndim == 3)\n\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n #ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.voxels(vox, edgecolor='k')\n ax.view_init(view[0], view[1])\n\n if npimage:\n return mplfig_to_npimage(fig)\n\n if f_name is not None:\n params = utils.read_params()\n f_name = os.path.join(params[\"DIRS\"][\"OUTPUT\"], f_name)\n utils.make_prev_dirs(f_name)\n fig.savefig(f_name, bbox_inches='tight', dpi=2400)\n fig.clf()\n plt.close()\n return\n\n return fig.show()\n\n\ndef voxel_binary(y_hat, f_name=None, view=(30, 45)):\n vox = np.argmax(y_hat, axis=-1)\n color = y_hat[:, :, :, 1]\n return voxel(vox, color, f_name=f_name, view=view)\n\n\ndef voxel_npimage(y_hat, view=(30, 45)):\n vox = np.argmax(y_hat, axis=-1)\n color = y_hat[:, :, :, 1]\n return voxel(vox, color, npimage=True, view=view)\n\n\ndef label(y, f_name=None):\n return voxel(np.argmax(y, axis=-1), f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = flatten_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef img_sequence(im, f_name=None):\n sequence_montage = flatten_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef montage(packed_ims, axis):\n \"\"\"display as an Image the contents of packed_ims in a square gird along an aribitray axis\"\"\"\n if packed_ims.ndim == 2:\n return packed_ims\n\n # bring axis to the front\n packed_ims = np.rollaxis(packed_ims, axis)\n\n N = len(packed_ims)\n n_tile = math.ceil(math.sqrt(N))\n rows = []\n for i in range(n_tile):\n if i*n_tile > N: continue\n im = packed_ims[i * n_tile]\n for j in range(1, n_tile):\n ind = i * n_tile + j\n if ind < N:\n im = utils.hstack(im, packed_ims[ind])\n else:\n im = utils.hstack(im, np.zeros_like(packed_ims[0]))\n rows.append(im)\n\n matrix = rows[0]\n for i in range(1, len(rows)):\n matrix = utils.vstack(matrix, rows[i])\n return matrix\n\n\ndef flatten_multichannel(im):\n return montage(im, -1)\n\n\ndef flatten_sequence(im):\n return montage(im, 0)\n\n\ndef get_pylab_image(ax):\n im = Image.open(ax.get_array())\n return im\n # im.show()\n # buf.close()\n\n\ndef sample(X, y, yp, f_name=None):\n\n ax1 = plt.subplot(223)\n ax1.imshow(flatten_sequence(X))\n\n ax2 = plt.subplot(221, projection='3d')\n 
vox = (np.argmax(y, axis=-1)).transpose(2, 0, 1)\n color = (plt.get_cmap('coolwarm'))((y[:, :, :, 1]).transpose(2, 0, 1))\n ax2.voxels(vox, facecolors=color, edgecolor='k')\n ax2.view_init(30, 45)\n\n ax3 = plt.subplot(222, projection='3d')\n vox = (np.argmax(yp, axis=-1)).transpose(2, 0, 1)\n color = (plt.get_cmap('coolwarm'))((yp[:, :, :, 1]).transpose(2, 0, 1))\n ax3.voxels(vox, facecolors=color, edgecolor='k')\n ax3.view_init(30, 45)\n\n if f_name is not None:\n plt.savefig(f_name)\n plt.clf()\n plt.close()\n return\n\n return plt.show()\n\n\ndef create_video(obj_id=\"02691156_131db4a650873babad3ab188d086d4db\"):\n\n params = utils.read_params()\n out_dir = params[\"DIRS\"][\"OUTPUT\"]\n model_dir = params[\"SESSIONS\"][\"LONGEST\"]\n epoch_count = utils.get_latest_epoch_index(model_dir)+1\n\n x, _ = dataset.load_obj_id(obj_id)\n for i in range(epoch_count):\n net = network.Network_restored(\"{}/epoch_{}\".format(model_dir, i))\n yp = net.predict(x)\n voxel_binary(yp[0], f_name=\"{}/{}/frame_{}\".format(out_dir, obj_id, i))\n"
]
| [
[
"numpy.rollaxis",
"matplotlib.pyplot.imsave",
"matplotlib.pyplot.imshow",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.unique",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.zeros_like",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
]
|
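The `montage` helper above tiles N same-shaped images into a near-square grid using `hstack`/`vstack` utilities. The same layout can be stated compactly with index arithmetic (a sketch, not the Reco3D implementation):

import math
import numpy as np

def montage_grid(ims):
    # Tile a list of equal-shape 2-D arrays into a ceil(sqrt(N))-wide grid,
    # leaving empty cells as zeros.
    n = len(ims)
    side = math.ceil(math.sqrt(n))
    h, w = ims[0].shape
    grid = np.zeros((side * h, side * w), dtype=ims[0].dtype)
    for k, im in enumerate(ims):
        r, c = divmod(k, side)
        grid[r * h:(r + 1) * h, c * w:(c + 1) * w] = im
    return grid

tiles = [np.full((2, 2), i) for i in range(5)]
print(montage_grid(tiles).shape)  # (6, 6): a 3x3 grid of 2x2 tiles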
robmcmullen/vispy | [
"8d5092fdae4a24fc364ae51c7e34e12d3fd6d0a2"
]
| [
"examples/demo/gloo/shadertoy.py"
]
| [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vispy: gallery 2, testskip\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\nShadertoy demo. You can copy-paste shader code from an example on\nwww.shadertoy.com and get the demo.\n\nTODO: support cubes and videos as channel inputs (currently, only images\nare supported).\n\n\"\"\"\n\nimport sys\nfrom datetime import datetime, time\nimport numpy as np\nfrom vispy import gloo\nfrom vispy import app\n\n\nvertex = \"\"\"\n#version 120\n\nattribute vec2 position;\nvoid main()\n{\n gl_Position = vec4(position, 0.0, 1.0);\n}\n\"\"\"\n\nfragment = \"\"\"\n#version 120\n\nuniform vec3 iResolution; // viewport resolution (in pixels)\nuniform float iGlobalTime; // shader playback time (in seconds)\nuniform vec4 iMouse; // mouse pixel coords\nuniform vec4 iDate; // (year, month, day, time in seconds)\nuniform float iSampleRate; // sound sample rate (i.e., 44100)\nuniform sampler2D iChannel0; // input channel. XX = 2D/Cube\nuniform sampler2D iChannel1; // input channel. XX = 2D/Cube\nuniform sampler2D iChannel2; // input channel. XX = 2D/Cube\nuniform sampler2D iChannel3; // input channel. XX = 2D/Cube\nuniform vec3 iChannelResolution[4]; // channel resolution (in pixels)\nuniform float iChannelTime[4]; // channel playback time (in seconds)\n\n%s\n\"\"\"\n\n\ndef get_idate():\n now = datetime.now()\n utcnow = datetime.utcnow()\n midnight_utc = datetime.combine(utcnow.date(), time(0))\n delta = utcnow - midnight_utc\n return (now.year, now.month, now.day, delta.seconds)\n\n\ndef noise(resolution=64, nchannels=1):\n # Random texture.\n return np.random.randint(low=0, high=256, \n size=(resolution, resolution, nchannels)\n ).astype(np.uint8)\n\n\nclass Canvas(app.Canvas):\n\n def __init__(self, shadertoy=None):\n app.Canvas.__init__(self, keys='interactive')\n if shadertoy is None:\n shadertoy = \"\"\"\n void main(void)\n {\n vec2 uv = gl_FragCoord.xy / iResolution.xy;\n gl_FragColor = vec4(uv,0.5+0.5*sin(iGlobalTime),1.0);\n }\"\"\"\n self.program = gloo.Program(vertex, fragment % shadertoy)\n\n self.program[\"position\"] = [(-1, -1), (-1, 1), (1, 1),\n (-1, -1), (1, 1), (1, -1)]\n\n self.program['iSampleRate'] = 44100.\n for i in range(4):\n self.program['iChannelTime[%d]' % i] = 0.\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n\n def set_channel_input(self, img, i=0):\n tex = gloo.Texture2D(img)\n tex.interpolation = 'linear'\n tex.wrapping = 'repeat'\n self.program['iChannel%d' % i] = tex\n self.program['iChannelResolution[%d]' % i] = img.shape\n \n def on_draw(self, event):\n self.program.draw()\n\n def on_mouse_click(self, event):\n # BUG: DOES NOT WORK YET, NO CLICK EVENT IN VISPY FOR NOW...\n imouse = event.pos + event.pos\n self.program['iMouse'] = imouse\n\n def on_mouse_move(self, event):\n if event.is_dragging:\n x, y = event.pos\n px, py = event.press_event.pos\n imouse = (x, self.size[1] - y, px, self.size[1] - py)\n self.program['iMouse'] = imouse\n \n def on_timer(self, event):\n self.program['iGlobalTime'] = event.elapsed\n self.program['iDate'] = get_idate() # used in some shadertoy examples\n self.update()\n \n def on_resize(self, event):\n width, height = event.size\n gloo.set_viewport(0, 0, width, height)\n self.program['iResolution'] = (width, height, 0.)\n \n# -------------------------------------------------------------------------\n# COPY-PASTE SHADERTOY CODE BELOW\n# 
-------------------------------------------------------------------------\nSHADERTOY = \"\"\"\n// From: https://www.shadertoy.com/view/MdX3Rr\n\n// Created by inigo quilez - iq/2013\n// License Creative Commons Attribution-NonCommercial-ShareAlike 3.0 \n// Unported License.\n\n//stereo thanks to Croqueteer\n//#define STEREO\n\n// value noise, and its analytical derivatives\nvec3 noised( in vec2 x )\n{\n vec2 p = floor(x);\n vec2 f = fract(x);\n\n vec2 u = f*f*(3.0-2.0*f);\n\n float a = texture2D(iChannel0,(p+vec2(0.5,0.5))/256.0,-100.0).x;\n float b = texture2D(iChannel0,(p+vec2(1.5,0.5))/256.0,-100.0).x;\n float c = texture2D(iChannel0,(p+vec2(0.5,1.5))/256.0,-100.0).x;\n float d = texture2D(iChannel0,(p+vec2(1.5,1.5))/256.0,-100.0).x;\n \n return vec3(a+(b-a)*u.x+(c-a)*u.y+(a-b-c+d)*u.x*u.y,\n 6.0*f*(1.0-f)*(vec2(b-a,c-a)+(a-b-c+d)*u.yx));\n}\n\nconst mat2 m2 = mat2(0.8,-0.6,0.6,0.8);\n\nfloat terrain( in vec2 x )\n{\n vec2 p = x*0.003;\n float a = 0.0;\n float b = 1.0;\n vec2 d = vec2(0.0);\n for( int i=0; i<6; i++ )\n {\n vec3 n = noised(p);\n d += n.yz;\n a += b*n.x/(1.0+dot(d,d));\n b *= 0.5;\n p = m2*p*2.0;\n }\n\n return 140.0*a;\n}\n\nfloat terrain2( in vec2 x )\n{\n vec2 p = x*0.003;\n float a = 0.0;\n float b = 1.0;\n vec2 d = vec2(0.0);\n for( int i=0; i<14; i++ )\n {\n vec3 n = noised(p);\n d += n.yz;\n a += b*n.x/(1.0+dot(d,d));\n b *= 0.5;\n p=m2*p*2.0;\n }\n\n return 140.0*a;\n}\n\nfloat terrain3( in vec2 x )\n{\n vec2 p = x*0.003;\n float a = 0.0;\n float b = 1.0;\n vec2 d = vec2(0.0);\n for( int i=0; i<4; i++ )\n {\n vec3 n = noised(p);\n d += n.yz;\n a += b*n.x/(1.0+dot(d,d));\n b *= 0.5;\n p = m2*p*2.0;\n }\n\n return 140.0*a;\n}\n\nfloat map( in vec3 p )\n{\n float h = terrain(p.xz);\n return p.y - h;\n}\n\nfloat map2( in vec3 p )\n{\n float h = terrain2(p.xz);\n return p.y - h;\n}\n\nfloat interesct( in vec3 ro, in vec3 rd )\n{\n float h = 1.0;\n float t = 1.0;\n for( int i=0; i<120; i++ )\n {\n if( h<0.01 || t>2000.0 ) break;\n t += 0.5*h;\n h = map( ro + t*rd );\n }\n\n if( t>2000.0 ) t = -1.0;\n return t;\n}\n\nfloat sinteresct(in vec3 ro, in vec3 rd )\n{\n#if 0\n // no shadows\t\n return 1.0;\n#endif\n \n#if 0\n // fake shadows\n vec3 nor;\n vec3 eps = vec3(20.0,0.0,0.0);\n nor.x = terrain3(ro.xz-eps.xy) - terrain3(ro.xz+eps.xy);\n nor.y = 1.0*eps.x;\n nor.z = terrain3(ro.xz-eps.yx) - terrain3(ro.xz+eps.yx);\n nor = normalize(nor);\n return clamp( 4.0*dot(nor,rd), 0.0, 1.0 );\n#endif\n \n#if 1\n // real shadows\t\n float res = 1.0;\n float t = 0.0;\n for( int j=0; j<48; j++ )\n {\n vec3 p = ro + t*rd;\n float h = map( p );\n res = min( res, 16.0*h/t );\n t += h;\n if( res<0.001 ||p.y>300.0 ) break;\n }\n\n return clamp( res, 0.0, 1.0 );\n#endif\n}\n\nvec3 calcNormal( in vec3 pos, float t )\n{\n float e = 0.001;\n e = 0.001*t;\n vec3 eps = vec3(e,0.0,0.0);\n vec3 nor;\n#if 0\n nor.x = map2(pos+eps.xyy) - map2(pos-eps.xyy);\n nor.y = map2(pos+eps.yxy) - map2(pos-eps.yxy);\n nor.z = map2(pos+eps.yyx) - map2(pos-eps.yyx);\n#else\n nor.x = terrain2(pos.xz-eps.xy) - terrain2(pos.xz+eps.xy);\n nor.y = 2.0*e;\n nor.z = terrain2(pos.xz-eps.yx) - terrain2(pos.xz+eps.yx);\n#endif\t\n return normalize(nor);\n}\n\nvec3 camPath( float time )\n{\n vec2 p = 1100.0*vec2( cos(0.0+0.23*time), cos(1.5+0.21*time) );\n\n return vec3( p.x, 0.0, p.y );\n}\n\n \nfloat fbm( vec2 p )\n{\n float f = 0.0;\n\n f += 0.5000*texture2D( iChannel0, p/256.0 ).x; p = m2*p*2.02;\n f += 0.2500*texture2D( iChannel0, p/256.0 ).x; p = m2*p*2.03;\n f += 0.1250*texture2D( iChannel0, p/256.0 ).x; p = 
m2*p*2.01;\n f += 0.0625*texture2D( iChannel0, p/256.0 ).x;\n\n return f/0.9375;\n}\n\n\nvoid main(void)\n{\n vec2 xy = -1.0 + 2.0*gl_FragCoord.xy / iResolution.xy;\n\n vec2 s = xy*vec2(iResolution.x/iResolution.y,1.0);\n\n #ifdef STEREO\n float isCyan = mod(gl_FragCoord.x + mod(gl_FragCoord.y,2.0),2.0);\n #endif\n \n float time = iGlobalTime*0.15 + 0.3 + 4.0*iMouse.x/iResolution.x;\n \n vec3 light1 = normalize( vec3(-0.8,0.4,-0.3) );\n \n\n\n vec3 ro = camPath( time );\n vec3 ta = camPath( time + 3.0 );\n ro.y = terrain3( ro.xz ) + 11.0;\n ta.y = ro.y - 20.0;\n\n float cr = 0.2*cos(0.1*time);\n vec3 cw = normalize(ta-ro);\n vec3 cp = vec3(sin(cr), cos(cr),0.0);\n vec3 cu = normalize( cross(cw,cp) );\n vec3 cv = normalize( cross(cu,cw) );\n vec3 rd = normalize( s.x*cu + s.y*cv + 2.0*cw );\n\n #ifdef STEREO\n ro += 2.0*cu*isCyan;\n #endif\n\n float sundot = clamp(dot(rd,light1),0.0,1.0);\n vec3 col;\n float t = interesct( ro, rd );\n if( t<0.0 )\n {\n // sky\t\t\n col = vec3(0.3,.55,0.8)*(1.0-0.8*rd.y);\n col += 0.25*vec3(1.0,0.7,0.4)*pow( sundot,5.0 );\n col += 0.25*vec3(1.0,0.8,0.6)*pow( sundot,64.0 );\n col += 0.2*vec3(1.0,0.8,0.6)*pow( sundot,512.0 );\n vec2 sc = ro.xz + rd.xz*(1000.0-ro.y)/rd.y;\n col = mix( col, vec3(1.0,0.95,1.0), \n 0.5*smoothstep(0.5,0.8,fbm(0.0005*sc)) );\n }\n else\n {\n // mountains\t\t\n vec3 pos = ro + t*rd;\n\n vec3 nor = calcNormal( pos, t );\n\n float r = texture2D( iChannel0, 7.0*pos.xz/256.0 ).x;\n\n col = (r*0.25+0.75)*0.9*mix( vec3(0.08,0.05,0.03), \n vec3(0.10,0.09,0.08), texture2D(iChannel0,0.00007*vec2(\n pos.x,pos.y*48.0)).x );\n col = mix( col, 0.20*vec3(0.45,.30,0.15)*(0.50+0.50*r),\n smoothstep(0.70,0.9,nor.y) );\n col = mix( col, 0.15*vec3(0.30,.30,0.10)*(0.25+0.75*r),\n smoothstep(0.95,1.0,nor.y) );\n\n // snow\n float h = smoothstep(55.0,80.0,pos.y + 25.0*fbm(0.01*pos.xz) );\n float e = smoothstep(1.0-0.5*h,1.0-0.1*h,nor.y);\n float o = 0.3 + 0.7*smoothstep(0.0,0.1,nor.x+h*h);\n float s = h*e*o;\n col = mix( col, 0.29*vec3(0.62,0.65,0.7), smoothstep( \n 0.1, 0.9, s ) );\n \n // lighting\t\t\n float amb = clamp(0.5+0.5*nor.y,0.0,1.0);\n float dif = clamp( dot( light1, nor ), 0.0, 1.0 );\n float bac = clamp( 0.2 + 0.8*dot( normalize( \n vec3(-light1.x, 0.0, light1.z ) ), nor ), 0.0, 1.0 );\n float sh = 1.0; if( dif>=0.0001 ) sh = sinteresct(\n pos+light1*20.0,light1);\n \n vec3 lin = vec3(0.0);\n lin += dif*vec3(7.00,5.00,3.00)*vec3( sh, sh*sh*0.5+0.5*sh, \n sh*sh*0.8+0.2*sh );\n lin += amb*vec3(0.40,0.60,0.80)*1.5;\n lin += bac*vec3(0.40,0.50,0.60);\n col *= lin;\n\n \n float fo = 1.0-exp(-0.0005*t);\n vec3 fco = 0.55*vec3(0.55,0.65,0.75) + 0.1*vec3(1.0,0.8,0.5)*pow( \n sundot, 4.0 );\n col = mix( col, fco, fo );\n\n col += 0.3*vec3(1.0,0.8,0.4)*pow( sundot, \n 8.0 )*(1.0-exp(-0.002*t));\n }\n\n col = pow(col,vec3(0.4545));\n\n // vignetting\t\n col *= 0.5 + 0.5*pow( (xy.x+1.0)*(xy.y+1.0)*(xy.x-1.0)*(xy.y-1.0), \n 0.1 );\n \n #ifdef STEREO\t\n col *= vec3( isCyan, 1.0-isCyan, 1.0-isCyan );\t\n #endif\n \n//\tcol *= smoothstep( 0.0, 2.0, iGlobalTime );\n\n gl_FragColor=vec4(col,1.0);\n}\n\"\"\"\n# -------------------------------------------------------------------------\n\ncanvas = Canvas(SHADERTOY)\n# Input data.\ncanvas.set_channel_input(noise(resolution=256, nchannels=1), i=0)\n \nif __name__ == '__main__':\n \n canvas.show()\n if sys.flags.interactive == 0:\n canvas.app.run()\n"
]
| [
[
"numpy.random.randint"
]
]
|
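The shadertoy canvas above samples `iChannel0` as a repeating noise texture, and the input is just a uint8 array. A quick standalone check of the `noise` helper's contract:

import numpy as np

def noise(resolution=64, nchannels=1):
    # Random uint8 texture, as fed to gloo.Texture2D above.
    return np.random.randint(low=0, high=256,
                             size=(resolution, resolution, nchannels)).astype(np.uint8)

tex = noise(256, 1)
print(tex.shape, tex.dtype, tex.min() >= 0, tex.max() <= 255)  # (256, 256, 1) uint8 True True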
Maniues/Wav2Lip | [
"b2b5351f30efb5b580b1d006ddb53413b9fec3d6"
]
| [
"audio.py"
]
| [
"import librosa\nimport librosa.filters\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom hparams import hparams as hp\n\ndef load_wav(path, sr):\n return librosa.core.load(path, sr=sr)[0]\n\ndef save_wav(wav, path, sr):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n #proposed by @dsmiller\n wavfile.write(path, sr, wav.astype(np.int16))\n\ndef save_wavenet_wav(wav, path, sr):\n librosa.output.write_wav(path, wav, sr=sr)\n\ndef preemphasis(wav, k, preemphasize=True):\n if preemphasize:\n return signal.lfilter([1, -k], [1], wav)\n return wav\n\ndef inv_preemphasis(wav, k, inv_preemphasize=True):\n if inv_preemphasize:\n return signal.lfilter([1], [1, -k], wav)\n return wav\n\ndef get_hop_size():\n hop_size = hp.hop_size\n if hop_size is None:\n assert hp.frame_shift_ms is not None\n hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)\n return hop_size\n\ndef linearspectrogram(wav):\n D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))\n S = _amp_to_db(np.abs(D)) - hp.ref_level_db\n \n if hp.signal_normalization:\n return _normalize(S)\n return S\n\ndef melspectrogram(wav):\n D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))\n S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db\n \n if hp.signal_normalization:\n return _normalize(S)\n return S\n\ndef _lws_processor():\n import lws\n return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode=\"speech\")\n\ndef _stft(y):\n if hp.use_lws:\n return _lws_processor(hp).stft(y).T\n else:\n return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)\n\n##########################################################\n#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)\ndef num_frames(length, fsize, fshift):\n \"\"\"Compute number of time frames of spectrogram\n \"\"\"\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M\n\n\ndef pad_lr(x, fsize, fshift):\n \"\"\"Compute left and right padding\n \"\"\"\n M = num_frames(len(x), fsize, fshift)\n pad = (fsize - fshift)\n T = len(x) + 2 * pad\n r = (M - 1) * fshift + fsize - T\n return pad, pad + r\n##########################################################\n#Librosa correct padding\ndef librosa_pad_lr(x, fsize, fshift):\n return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]\n\n# Conversions\n_mel_basis = None\n\ndef _linear_to_mel(spectogram):\n global _mel_basis\n if _mel_basis is None:\n _mel_basis = _build_mel_basis()\n return np.dot(_mel_basis, spectogram)\n\ndef _build_mel_basis():\n assert hp.fmax <= hp.sample_rate // 2\n return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels,\n fmin=hp.fmin, fmax=hp.fmax)\n\ndef _amp_to_db(x):\n min_level = np.exp(hp.min_level_db / 20 * np.log(10))\n return 20 * np.log10(np.maximum(min_level, x))\n\ndef _db_to_amp(x):\n return np.power(10.0, (x) * 0.05)\n\ndef _normalize(S):\n if hp.allow_clipping_in_normalization:\n if hp.symmetric_mels:\n return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,\n -hp.max_abs_value, hp.max_abs_value)\n else:\n return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)\n \n assert S.max() <= 0 and S.min() - hp.min_level_db >= 0\n if hp.symmetric_mels:\n return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - 
hp.max_abs_value\n else:\n return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))\n\ndef _denormalize(D):\n if hp.allow_clipping_in_normalization:\n if hp.symmetric_mels:\n return (((np.clip(D, -hp.max_abs_value,\n hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))\n + hp.min_level_db)\n else:\n return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)\n \n if hp.symmetric_mels:\n return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)\n else:\n return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)\n"
]
| [
[
"numpy.dot",
"numpy.log",
"numpy.maximum",
"numpy.abs",
"numpy.power",
"numpy.clip",
"scipy.signal.lfilter"
]
]
|
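`_amp_to_db` and `_db_to_amp` above are inverses for amplitudes above the floor set by `hp.min_level_db`. A standalone round-trip check, assuming `min_level_db = -100` (the `hparams` module is not shown, so that value is an assumption):

import numpy as np

min_level_db = -100.0                                  # assumed hparams value
min_level = np.exp(min_level_db / 20 * np.log(10))     # = 1e-5

def amp_to_db(x):
    return 20 * np.log10(np.maximum(min_level, x))

def db_to_amp(x):
    return np.power(10.0, x * 0.05)

x = np.array([1e-3, 0.1, 1.0])                         # all above min_level
print(np.allclose(db_to_amp(amp_to_db(x)), x))         # True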
ibusko/glue-jupyter | [
"af67da018152a4fa29c54048a9277a8a43fa06b9"
]
| [
"glue_jupyter/bqplot/image/state.py"
]
| [
"import numpy as np\n\nfrom echo import CallbackProperty\nfrom glue.viewers.matplotlib.state import (DeferredDrawCallbackProperty as DDCProperty,\n DeferredDrawSelectionCallbackProperty as DDSCProperty)\n\nfrom glue.viewers.image.state import ImageLayerState\nfrom glue.core.state_objects import StateAttributeLimitsHelper\n\n\nclass BqplotImageLayerState(ImageLayerState):\n c_min = DDCProperty(docstring='The lower level used for the contours')\n c_max = DDCProperty(docstring='The upper level used for the contours')\n level_mode = DDSCProperty(0, docstring='How to distribute the contour levels')\n n_levels = DDCProperty(5, docstring='The number of levels, in Linear mode')\n levels = CallbackProperty(docstring='List of values where to create the contour lines')\n labels = CallbackProperty(docstring='List of labels for each contour')\n contour_percentile = DDSCProperty(docstring='The percentile value used to '\n 'automatically calculate levels for '\n 'the contour')\n contour_colors = CallbackProperty([\"red\", \"orange\", \"yellow\", \"green\", \"blue\"])\n bitmap_visible = CallbackProperty(True, 'whether to show the image as a bitmap')\n contour_visible = CallbackProperty(False, 'whether to show the image as contours')\n\n def __init__(self, *args, **kwargs):\n super(BqplotImageLayerState, self).__init__(*args, **kwargs)\n\n BqplotImageLayerState.level_mode.set_choices(self, ['Linear', 'Custom'])\n percentile_display = {100: 'Min/Max',\n 99.5: '99.5%',\n 99: '99%',\n 95: '95%',\n 90: '90%',\n 'Custom': 'Custom'}\n\n BqplotImageLayerState.contour_percentile.set_choices(self, [100, 99.5, 99, 95, 90,\n 'Custom'])\n BqplotImageLayerState.contour_percentile.set_display_func(self, percentile_display.get)\n self.contour_lim_helper = StateAttributeLimitsHelper(self, attribute='attribute',\n percentile='contour_percentile',\n lower='c_min', upper='c_max')\n\n self.add_callback('n_levels', self._update_levels)\n self.add_callback('c_min', self._update_levels)\n self.add_callback('c_max', self._update_levels)\n self.add_callback('level_mode', self._update_levels)\n self.add_callback('levels', self._update_labels)\n self._update_levels()\n\n def _update_priority(self, name):\n # if levels and level_mode get modified at the same time\n # make sure externally 'levels' is set first, so we then\n # can overwrite levels when we switch to Linear mode\n # this is tested in test_contour_state\n if name == 'levels':\n return 10\n return 0\n\n def _update_levels(self, ignore=None):\n if self.level_mode == \"Linear\":\n # TODO: this is exclusive begin/end point, is that a good choise?\n self.levels = np.linspace(self.c_min, self.c_max, self.n_levels+2)[1:-1].tolist()\n\n def _update_labels(self, ignore=None):\n # TODO: we may want to have ways to configure this in the future\n self.labels = [\"{0:.4g}\".format(level) for level in self.levels]\n"
]
| [
[
"numpy.linspace"
]
]
|
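`_update_levels` above spaces `n_levels` contour values strictly between `c_min` and `c_max` by sampling `n_levels + 2` points and dropping both endpoints. The same rule in isolation:

import numpy as np

def linear_levels(c_min, c_max, n_levels):
    # n_levels interior values, endpoints excluded, as in _update_levels above.
    return np.linspace(c_min, c_max, n_levels + 2)[1:-1].tolist()

print(linear_levels(0.0, 1.0, 3))  # [0.25, 0.5, 0.75]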
mzhao035/models | [
"1ac9639950ad81d7fde607a4f75e14c6f1b6d993"
]
| [
"research/deeplab/eval_bk.py"
]
| [
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluation script for the DeepLab model.\n\nSee model.py for more details and usage.\n\"\"\"\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow.contrib import metrics as contrib_metrics\nfrom tensorflow.contrib import quantize as contrib_quantize\nfrom tensorflow.contrib import tfprof as contrib_tfprof\nfrom tensorflow.contrib import training as contrib_training\nfrom deeplab import common\nfrom deeplab import model\nfrom deeplab.datasets import data_generator\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('master', '', 'BNS name of the tensorflow server')\n\n# Settings for log directories.\n\nflags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.')\n\nflags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')\n\n# Settings for evaluating the model.\n\nflags.DEFINE_integer('eval_batch_size', 1,\n 'The number of images in each batch during evaluation.')\n\nflags.DEFINE_list('eval_crop_size', '513,513',\n 'Image crop size [height, width] for evaluation.')\n\nflags.DEFINE_integer('eval_interval_secs', 60 * 5,\n 'How often (in seconds) to run evaluation.')\n\n# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or\n# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note\n# one could use different atrous_rates/output_stride during training/evaluation.\nflags.DEFINE_multi_integer('atrous_rates', None,\n 'Atrous rates for atrous spatial pyramid pooling.')\n\nflags.DEFINE_integer('output_stride', 16,\n 'The ratio of input to output spatial resolution.')\n\n# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.\nflags.DEFINE_multi_float('eval_scales', [1.0],\n 'The scales to resize images for evaluation.')\n\n# Change to True for adding flipped images during test.\nflags.DEFINE_bool('add_flipped_images', False,\n 'Add flipped images for evaluation or not.')\n\nflags.DEFINE_integer(\n 'quantize_delay_step', -1,\n 'Steps to start quantized training. If < 0, will not quantize model.')\n\n# Dataset settings.\n\nflags.DEFINE_string('dataset', 'pascal_voc_seg',\n 'Name of the segmentation dataset.')\n\nflags.DEFINE_string('eval_split', 'val',\n 'Which split of the dataset used for evaluation')\n\nflags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')\n\nflags.DEFINE_integer('max_number_of_evaluations', 0,\n 'Maximum number of eval iterations. 
Will loop '\n 'indefinitely upon nonpositive values.')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n dataset = data_generator.Dataset(\n dataset_name=FLAGS.dataset,\n split_name=FLAGS.eval_split,\n dataset_dir=FLAGS.dataset_dir,\n batch_size=FLAGS.eval_batch_size,\n crop_size=[int(sz) for sz in FLAGS.eval_crop_size],\n min_resize_value=FLAGS.min_resize_value,\n max_resize_value=FLAGS.max_resize_value,\n resize_factor=FLAGS.resize_factor,\n model_variant=FLAGS.model_variant,\n num_readers=2,\n is_training=False, # 设置不是在training\n should_shuffle=False,\n should_repeat=False)\n\n #关于eval的保存路径\n tf.gfile.MakeDirs(FLAGS.eval_logdir)\n tf.logging.info('Evaluating on %s set', FLAGS.eval_split)\n\n\n\n # 图结构开始定义的地方\n with tf.Graph().as_default():\n samples = dataset.get_one_shot_iterator().get_next()\n\n model_options = common.ModelOptions(\n outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},\n crop_size=[int(sz) for sz in FLAGS.eval_crop_size],\n atrous_rates=FLAGS.atrous_rates,\n output_stride=FLAGS.output_stride)\n\n # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.\n samples[common.IMAGE].set_shape(\n [FLAGS.eval_batch_size,\n int(FLAGS.eval_crop_size[0]),\n int(FLAGS.eval_crop_size[1]),\n 3])\n if tuple(FLAGS.eval_scales) == (1.0,):\n tf.logging.info('Performing single-scale test.')\n\n ### 重点,单尺度计算分割结果image_pyramid : None\n predictions = model.predict_labels(samples[common.IMAGE], model_options,\n image_pyramid=FLAGS.image_pyramid)\n else:\n tf.logging.info('Performing multi-scale test.')\n if FLAGS.quantize_delay_step >= 0:\n raise ValueError(\n 'Quantize mode is not supported with multi-scale test.')\n\n predictions = model.predict_labels_multi_scale(\n samples[common.IMAGE],\n model_options=model_options,\n eval_scales=FLAGS.eval_scales,\n add_flipped_images=FLAGS.add_flipped_images)\n\n summary_ops = []\n\n predictions = predictions[common.OUTPUT_TYPE]\n predictions = tf.reshape(predictions, shape=[-1])\n\n labels = tf.reshape(samples[common.LABEL], shape=[-1])\n weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))\n\n # Set ignore_label regions to label 0, because metrics.mean_iou requires\n # range of labels = [0, dataset.num_classes). 
Note the ignore_label regions\n # are not evaluated since the corresponding regions contain weights = 0.\n labels = tf.where(\n tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels)\n\n predictions_tag = 'miou'\n for eval_scale in FLAGS.eval_scales:\n predictions_tag += '_' + str(eval_scale)\n if FLAGS.add_flipped_images:\n predictions_tag += '_flipped'\n\n # Define the evaluation metric.\n metric_map = {}\n num_classes = dataset.num_of_classes\n metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou(\n labels=labels, predictions=predictions, num_classes=num_classes,\n weights=weights)\n\n # IoU for each class.\n\n one_hot_predictions = tf.one_hot(predictions, num_classes)\n one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes])\n one_hot_labels = tf.one_hot(labels, num_classes)\n one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])\n\n for c in range(num_classes):\n predictions_tag_c = '%s_class_%d' % (predictions_tag, c)\n tp, tp_op = tf.metrics.true_positives(\n labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],\n weights=weights)\n fp, fp_op = tf.metrics.false_positives(\n labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],\n weights=weights)\n fn, fn_op = tf.metrics.false_negatives(\n labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],\n weights=weights)\n tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)\n\n iou = tf.where(tf.greater(tp + fn, 0.0),\n tp / (tp + fn + fp),\n tf.constant(np.NaN))\n metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op)\n\n (metrics_to_values, metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map)\n\n\n # summary_ops = []\n for metric_name, metric_value in six.iteritems(metrics_to_values):\n op = tf.summary.scalar(metric_name, metric_value)\n op = tf.Print(op, [metric_value], metric_name)\n summary_ops.append(op)\n\n confusion_matrix = tf.get_default_graph().get_tensor_by_name('mean_iou/total_confusion_matrix:0')\n print(confusion_matrix)\n cm_op = tf.summary.text(\"Confusion matrix\", tf.dtypes.as_string(confusion_matrix, precision=4))\n summary_ops.append(cm_op)\n\n # predictions_softmax = tf.get_default_graph().get_tensor_by_name('Softmax:0')\n # predictions_softmax = tf.squeeze(predictions_softmax)\n # softmax_op = tf.summary.text(\"softmax\", tf.dtypes.as_string(predictions_softmax[:, :, 0]))\n # summary_ops.append(softmax_op)\n\n summary_op = tf.summary.merge(summary_ops)\n\n summary_hook = contrib_training.SummaryAtEndHook(\n log_dir=FLAGS.eval_logdir, summary_op=summary_op)\n\n hooks = [summary_hook]\n\n num_eval_iters = None\n if FLAGS.max_number_of_evaluations > 0:\n num_eval_iters = FLAGS.max_number_of_evaluations\n\n if FLAGS.quantize_delay_step >= 0:\n contrib_quantize.create_eval_graph()\n\n # contrib_tfprof.model_analyzer.print_model_analysis(\n # tf.get_default_graph(),\n # tfprof_options=contrib_tfprof.model_analyzer\n # .TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n # contrib_tfprof.model_analyzer.print_model_analysis(\n # tf.get_default_graph(),\n # tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)\n\n eval_ops_list = []\n eval_ops_list += list(metrics_to_updates.values())\n # eval_ops_list.append(predict_prob_mean_op)\n print(eval_ops_list)\n contrib_training.evaluate_repeatedly(\n checkpoint_dir=FLAGS.checkpoint_dir,\n master=FLAGS.master,\n eval_ops=eval_ops_list,\n max_number_of_evaluations=num_eval_iters,\n hooks=hooks,\n eval_interval_secs=FLAGS.eval_interval_secs)\n\n\nif __name__ == 
'__main__':\n\n flags.mark_flag_as_required('checkpoint_dir')\n flags.mark_flag_as_required('eval_logdir')\n flags.mark_flag_as_required('dataset_dir')\n tf.app.run()\n"
]
| [
[
"tensorflow.equal",
"tensorflow.contrib.metrics.aggregate_metric_map",
"tensorflow.gfile.MakeDirs",
"tensorflow.get_default_graph",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.contrib.training.evaluate_repeatedly",
"tensorflow.greater",
"tensorflow.metrics.false_positives",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.training.SummaryAtEndHook",
"tensorflow.app.run",
"tensorflow.Print",
"tensorflow.zeros_like",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.quantize.create_eval_graph",
"tensorflow.metrics.true_positives",
"tensorflow.summary.merge",
"tensorflow.metrics.mean_iou",
"tensorflow.not_equal",
"tensorflow.constant",
"tensorflow.metrics.false_negatives",
"tensorflow.reshape",
"tensorflow.dtypes.as_string"
]
]
|
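The per-class metric assembled above is `tp / (tp + fn + fp)`, guarded so that classes absent from the labels yield NaN. The same computation in NumPy on toy binary masks (a restatement for clarity, not the TF metric ops):

import numpy as np

def class_iou(pred, label):
    # IoU = tp / (tp + fn + fp); NaN when the class never occurs in the labels,
    # matching the tf.where(tf.greater(tp + fn, 0.0), ...) guard above.
    tp = np.sum((pred == 1) & (label == 1))
    fp = np.sum((pred == 1) & (label == 0))
    fn = np.sum((pred == 0) & (label == 1))
    return tp / (tp + fn + fp) if (tp + fn) > 0 else float("nan")

pred = np.array([1, 1, 0, 0])
label = np.array([1, 0, 1, 0])
print(class_iou(pred, label))  # 0.3333... (tp=1, fp=1, fn=1)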
arthurdouillard/continuum | [
"83e7437944486cabd5e4c149d41816dbef49b7bf"
]
| [
"continuum/datasets/base.py"
]
| [
"import abc\nimport os\nimport warnings\nfrom typing import Callable, List, Optional, Tuple, Union\n\nimport numpy as np\nimport h5py\nfrom torchvision import datasets as torchdata\nfrom torchvision import transforms\n\nfrom continuum.tasks import TaskSet, TaskType\nfrom continuum.transforms import segmentation as transforms_seg\nfrom continuum import utils\n\n\nclass _ContinuumDataset(abc.ABC):\n\n def __init__(self, data_path: str = \"\", train: bool = True, download: bool = True) -> None:\n self.data_path = os.path.expanduser(data_path) if data_path is not None else None\n self.download = download\n self.train = train\n\n if self.data_path is not None and self.data_path != \"\" and not os.path.exists(self.data_path):\n os.makedirs(self.data_path)\n\n if self.download:\n self._download()\n\n if not isinstance(self.data_type, TaskType):\n raise NotImplementedError(\n f\"Dataset's data_type ({self.data_type}) is not supported.\"\n \" It must be a member of the enum TaskType.\"\n )\n\n # Initialization of the default properties\n if self.data_type == TaskType.SEGMENTATION:\n self._trsf = [transforms_seg.ToTensor()]\n else:\n self._trsf = [transforms.ToTensor()]\n self._bboxes = None\n self._attributes = None\n\n def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Returns the loaded data under the form of x, y, and t.\"\"\"\n raise NotImplementedError(\"This method should be implemented!\")\n\n def _download(self):\n pass\n\n def slice(\n self,\n keep_classes: Optional[List[int]] = None,\n discard_classes: Optional[List[int]] = None,\n keep_tasks: Optional[List[int]] = None,\n discard_tasks: Optional[List[int]] = None\n ):\n \"\"\"Slice dataset to keep/discard some classes/task-ids.\n\n Note that keep_* and and discard_* are mutually exclusive.\n Note also that if a selection (keep or discard) is being made on the classes\n and on the task ids, the resulting intersection will be taken.\n\n :param keep_classes: Only keep samples with these classes.\n :param discard_classes: Discard samples with these classes.\n :param keep_tasks: Only keep samples with these task ids.\n :param discard_tasks: Discard samples with these task ids.\n :return: A new Continuum dataset ready to be given to a scenario.\n \"\"\"\n if self.data_type == TaskType.SEGMENTATION:\n raise NotImplementedError(\"It's not possible yet to slice Segmentation datasets.\")\n\n x, y, t = self.get_data()\n\n indexes = utils._slice(\n y, t,\n keep_classes, discard_classes,\n keep_tasks, discard_tasks\n )\n\n new_x, new_y, new_t = x[indexes], y[indexes], t[indexes]\n sliced_dataset = InMemoryDataset(\n new_x, new_y, new_t,\n data_type=self.data_type\n )\n sliced_dataset.attributes = self.attributes\n sliced_dataset.bounding_boxes = self.bounding_boxes\n sliced_dataset.transformations = self.transformations\n\n return sliced_dataset\n\n @property\n def nb_classes(self) -> List[int]:\n return None\n\n @property\n def class_order(self) -> Union[None, List[int]]:\n return None\n\n @property\n def need_class_remapping(self) -> bool:\n \"\"\"Flag for method `class_remapping`.\"\"\"\n return False\n\n def class_remapping(self, class_ids: np.ndarray) -> np.ndarray:\n \"\"\"Optional class remapping.\n\n Used for example in PermutedMNIST, cf transformed.py;\n\n :param class_ids: Original class_ids.\n :return: A remapping of the class ids.\n \"\"\"\n return class_ids\n\n def to_taskset(\n self,\n trsf: Optional[List[Callable]] = None,\n target_trsf: Optional[List[Callable]] = None\n ) -> TaskSet:\n \"\"\"Returns a TaskSet 
that can be directly given to a torch's DataLoader.\n\n You can use this method if you don't care about the continual aspect and\n simply want to use the datasets in a classical supervised setting.\n\n :param trsf: List of transformations to be applied on x.\n :param target_trsf: List of transformations to be applied on y.\n :return taskset: A taskset which implement the interface of torch's Dataset.\n \"\"\"\n if trsf is None and self.data_type == TaskType.SEGMENTATION:\n trsf = transforms_seg.Compose(self.transformations)\n elif trsf is None:\n trsf = transforms.Compose(self.transformations)\n\n return TaskSet(\n *self.get_data(),\n trsf=trsf,\n target_trsf=target_trsf,\n data_type=self.data_type,\n bounding_boxes=self.bounding_boxes\n )\n\n @property\n def class_order(self) -> Union[None, List[int]]:\n return None\n\n @property\n def need_class_remapping(self) -> bool:\n \"\"\"Flag for method `class_remapping`.\"\"\"\n return False\n\n @property\n def data_type(self) -> TaskType:\n return TaskType.IMAGE_ARRAY\n\n @property\n def transformations(self):\n \"\"\"Default transformations if nothing is provided to the scenario.\"\"\"\n return self._trsf\n\n @transformations.setter\n def transformations(self, trsf: List[Callable]):\n self._trsf = trsf\n\n @property\n def bounding_boxes(self) -> List:\n \"\"\"Returns a bounding box (x1, y1, x2, y2) per sample if they need to be cropped.\"\"\"\n return self._bboxes\n\n @bounding_boxes.setter\n def bounding_boxes(self, bboxes: List):\n self._bboxes = bboxes\n\n @property\n def attributes(self) -> np.ndarray:\n \"\"\"Returns normalized attributes for all class if available.\n\n Those attributes can often be found in dataset used for Zeroshot such as\n CUB200, or AwA. The matrix shape is (nb_classes, nb_attributes), and it\n has been L2 normalized along side its attributes dimension.\n \"\"\"\n return self._attributes\n\n @attributes.setter\n def attributes(self, attributes: np.ndarray):\n self._attributes = attributes\n\n\nclass PyTorchDataset(_ContinuumDataset):\n \"\"\"Continuum version of torchvision datasets.\n :param dataset_type: A Torchvision dataset, like MNIST or CIFAR100.\n :param train: train flag\n :param download: download\n \"\"\"\n\n # TODO: some datasets have a different structure, like SVHN for ex. Handle it.\n def __init__(\n self, data_path: str = \"\", dataset_type=None, train: bool = True, download: bool = True, **kwargs):\n\n if \"transform\" in kwargs:\n raise ValueError(\n \"Don't provide `transform` to the dataset. 
\"\n \"You should give those to the scenario.\"\n )\n\n super().__init__(data_path=data_path, train=train, download=download)\n\n self.dataset_type = dataset_type\n self.dataset = self.dataset_type(self.data_path, download=self.download, train=self.train, **kwargs)\n\n def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n x, y = np.array(self.dataset.data), np.array(self.dataset.targets)\n\n if 0 not in y:\n # This case can happen when the first class id is 1 and not 0.\n # For example in EMNIST with 'letters' split (WTF right).\n # TODO: We should handle this case in a more generic fashion later.\n warnings.warn(\"Converting 1-based class ids to 0-based class ids.\")\n y -= 1\n\n return x, y, None\n\n\nclass InMemoryDataset(_ContinuumDataset):\n \"\"\"Continuum dataset for in-memory data.\n\n :param x_train: Numpy array of images or paths to images for the train set.\n :param y_train: Targets for the train set.\n :param data_type: Format of the data.\n :param t_train: Optional task ids for the train set.\n \"\"\"\n\n def __init__(\n self,\n x: np.ndarray,\n y: np.ndarray,\n t: Union[None, np.ndarray] = None,\n data_type: TaskType = TaskType.IMAGE_ARRAY,\n train: bool = True,\n download: bool = True,\n ):\n self._data_type = data_type\n super().__init__(train=train, download=download)\n\n if len(x) != len(y):\n raise ValueError(f\"Number of datapoints ({len(x)}) != number of labels ({len(y)})!\")\n if t is not None and len(t) != len(x):\n raise ValueError(f\"Number of datapoints ({len(x)}) != number of task ids ({len(t)})!\")\n\n self.data = (x, y, t)\n self._nb_classes = len(np.unique(y))\n\n def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n return self.data\n\n @property\n def nb_classes(self) -> List[int]:\n return self._nb_classes\n\n @property\n def data_type(self) -> TaskType:\n return self._data_type\n\n @data_type.setter\n def data_type(self, data_type: TaskType) -> None:\n self._data_type = data_type\n\n\nclass H5Dataset(_ContinuumDataset):\n \"\"\"Continuum dataset for in-memory data with h5 file.\n This class either creates a h5 dataset or reload an existing one.\n\n :param x_train: Numpy array of images or paths to images for the train set.\n :param y_train: Targets for the train set.\n :param data_type: Format of the data.\n :param t_train: Optional task ids for the train set.\n \"\"\"\n\n def __init__(\n self,\n x: np.ndarray = None,\n y: np.ndarray = None,\n t: Union[None, np.ndarray] = None,\n data_path: str = \"h5_dataset.h5\",\n train: bool = True,\n download: bool = True,\n ):\n self._data_type = TaskType.H5\n super().__init__(data_path=None, train=train, download=download)\n\n if x is None:\n # Load an existing h5_dataset\n self._check_existing_file(data_path)\n else:\n if len(x) != len(y):\n raise ValueError(f\"Number of datapoints ({len(x)}) != number of labels ({len(y)})!\")\n self.no_task_index = False\n if t is None:\n self.no_task_index = True\n else:\n if len(t) != len(x):\n raise ValueError(f\"Number of datapoints ({len(x)}) != number of task ids ({len(t)})!\")\n\n self.data_path = data_path\n\n if x is not None:\n self.create_file(x, y, t, self.data_path)\n\n @property\n def data_type(self) -> TaskType:\n return TaskType.H5\n\n def __len__(self):\n return len(self.get_class_vector())\n\n def _check_existing_file(self, filename):\n if not os.path.exists(filename):\n raise IOError(f\"You can not load unexisting file : {filename}\")\n\n with h5py.File(filename, 'r') as hf:\n data_vector = hf['x'][:]\n classes_vector = hf['y'][:]\n if 
't' in hf.keys():\n self.no_task_index = False\n task_index_vector = hf['t'][:]\n if task_index_vector is None:\n self.no_task_index = True\n else:\n self.no_task_index = True\n\n assert len(classes_vector) == len(data_vector)\n if not self.no_task_index:\n assert len(classes_vector) == len(task_index_vector)\n\n self.data_path = filename\n\n def slice(\n self,\n new_h5_path: str,\n keep_classes: Optional[List[int]] = None,\n discard_classes: Optional[List[int]] = None,\n keep_tasks: Optional[List[int]] = None,\n discard_tasks: Optional[List[int]] = None\n ):\n \"\"\"Slice dataset to keep/discard some classes/task-ids.\n\n Note that keep_* and and discard_* are mutually exclusive.\n Note also that if a selection (keep or discard) is being made on the classes\n and on the task ids, the resulting intersection will be taken.\n\n :param new_h5_path: A path where to store the sliced dataset as H5.\n :param keep_classes: Only keep samples with these classes.\n :param discard_classes: Discard samples with these classes.\n :param keep_tasks: Only keep samples with these task ids.\n :param discard_tasks: Discard samples with these task ids.\n :return: A new Continuum dataset ready to be given to a scenario.\n \"\"\"\n _, y, t = self.get_data()\n\n indexes = utils._slice(\n y, t,\n keep_classes, discard_classes,\n keep_tasks, discard_tasks\n )\n\n with h5py.File(self.data_path, 'r') as hf:\n new_x = hf['x'][indexes]\n\n new_y, new_t = y[indexes], t[indexes]\n sliced_dataset = H5Dataset(\n new_x, new_y, new_t,\n data_path=new_h5_path\n )\n\n return sliced_dataset\n\n def create_file(self, x, y, t, data_path):\n \"\"\"\"Create and initiate h5 file with data, labels and task index (if not none)\"\"\"\n\n assert not os.path.exists(data_path), print(f\"You can not replace file : {data_path}\")\n\n with h5py.File(data_path, 'w') as hf:\n hf.create_dataset('x', data=x, chunks=True, maxshape=([None] + list(x[0].shape)))\n hf.create_dataset('y', data=y, chunks=True, maxshape=([None]))\n if not self.no_task_index:\n hf.create_dataset('t', data=t, chunks=True, maxshape=([None]))\n\n def get_task_indexes(self):\n \"\"\"\"Return the whole vector of task index\"\"\"\n task_indexe_vector = None\n if not self.no_task_index:\n with h5py.File(self.data_path, 'r') as hf:\n task_indexe_vector = hf['t'][:]\n return task_indexe_vector\n\n def get_task_index(self, index):\n \"\"\"\"Return one task index value value for a given index\"\"\"\n task_indexes_value = None\n if not self.no_task_index:\n with h5py.File(self.data_path, 'r') as hf:\n task_indexes_value = hf['t'][index]\n return task_indexes_value\n\n def get_class_vector(self):\n \"\"\"\"Return the whole vector of classes\"\"\"\n classes_vector = None\n with h5py.File(self.data_path, 'r') as hf:\n classes_vector = hf['y'][:]\n return classes_vector\n\n def get_class(self, index):\n \"\"\"\"Return one class value for a given index\"\"\"\n class_value = None\n with h5py.File(self.data_path, 'r') as hf:\n class_value = hf['y'][index]\n return class_value\n\n def add_data(self, x, y, t):\n \"\"\"\"This method is here to be able to build the h5 by part\"\"\"\n if not (self.no_task_index == (t is None)):\n raise AssertionError(\"You can not add data with task index to h5 without task index or the opposite\")\n\n with h5py.File(self.data_path, 'a') as hf:\n reshape_size = hf[\"t\"].shape[0] + t.shape[0]\n hf['x'].resize(reshape_size, axis=0)\n hf[\"x\"][-x.shape[0]:] = x\n hf['y'].resize(reshape_size, axis=0)\n hf[\"y\"][-x.shape[0]:] = y\n if not self.no_task_index:\n 
hf['t'].resize(reshape_size, axis=0)\n hf[\"t\"][-x.shape[0]:] = t\n\n def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n return self.data_path, self.get_class_vector(), self.get_task_indexes()\n\n\nclass ImageFolderDataset(_ContinuumDataset):\n \"\"\"Continuum dataset for datasets with tree-like structure.\n\n :param train_folder: The folder of the train data.\n :param test_folder: The folder of the test data.\n :param download: Dummy parameter.\n \"\"\"\n\n def __init__(\n self,\n data_path: str,\n train: bool = True,\n download: bool = True,\n data_type: TaskType = TaskType.IMAGE_PATH\n ):\n self.data_path = data_path\n self._data_type = data_type\n super().__init__(data_path=data_path, train=train, download=download)\n\n allowed_data_types = (TaskType.IMAGE_PATH, TaskType.SEGMENTATION)\n if data_type not in allowed_data_types:\n raise ValueError(f\"Invalid data_type={data_type}, allowed={allowed_data_types}.\")\n\n @property\n def data_type(self) -> TaskType:\n return self._data_type\n\n def get_data(self) -> Tuple[np.ndarray, np.ndarray, Union[None, np.ndarray]]:\n self.dataset = torchdata.ImageFolder(self.data_path)\n x, y, t = self._format(self.dataset.imgs)\n self.list_classes = np.unique(y)\n return x, y, t\n\n @staticmethod\n def _format(raw_data: List[Tuple[str, int]]) -> Tuple[np.ndarray, np.ndarray, None]:\n x = np.empty(len(raw_data), dtype=\"S255\")\n y = np.empty(len(raw_data), dtype=np.int16)\n\n for i, (path, target) in enumerate(raw_data):\n x[i] = path\n y[i] = target\n\n return x, y, None\n\n\nclass _AudioDataset(_ContinuumDataset):\n @property\n def data_type(self) -> TaskType:\n return TaskType.AUDIO\n\n @property\n def transformations(self):\n \"\"\"Default transformations if nothing is provided to the scenario.\"\"\"\n def noop(x):\n return x\n return [noop]\n"
]
| [
[
"numpy.array",
"numpy.unique"
]
]
|
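`H5Dataset.add_data` above relies on h5py's chunked, resizable datasets: create with `maxshape=(None, ...)`, then `resize` and write into the tail. The pattern in isolation (the file name is illustrative):

import h5py
import numpy as np

x = np.random.rand(4, 2).astype(np.float32)
with h5py.File("demo.h5", "w") as hf:
    # chunks=True plus an unbounded first axis makes the dataset appendable.
    hf.create_dataset("x", data=x, chunks=True, maxshape=(None, 2))

more = np.random.rand(3, 2).astype(np.float32)
with h5py.File("demo.h5", "a") as hf:
    new_len = hf["x"].shape[0] + more.shape[0]
    hf["x"].resize(new_len, axis=0)
    hf["x"][-more.shape[0]:] = more
    print(hf["x"].shape)  # (7, 2)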
Elektriman/fractal_curves | [
"35acaea4fc53d2bcfe0006189b40a92f5fd10fdd"
]
| [
"sierpinski_triangle.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 31 22:04:01 2021\r\n\r\n@author: Julien\r\n\r\nhttps://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle\r\n\"\"\"\r\n\r\n# _____ _ \r\n# |_ _| | | \r\n# | | _ __ ___ _ __ ___ _ __| |_ ___ \r\n# | | | '_ ` _ \\| '_ \\ / _ \\| '__| __/ __|\r\n# _| |_| | | | | | |_) | (_) | | | |_\\__ \\\r\n# |_____|_| |_| |_| .__/ \\___/|_| \\__|___/\r\n# | | \r\n# |_|\r\n\r\nimport cmath\r\nimport numpy as np\r\n\r\n# _____ _ _ \r\n# / ____| | | | | \r\n# | | ___ _ __ ___| |_ __ _ _ __ | |_ ___ \r\n# | | / _ \\| '_ \\/ __| __/ _` | '_ \\| __/ __|\r\n# | |___| (_) | | | \\__ \\ || (_| | | | | |_\\__ \\\r\n# \\_____\\___/|_| |_|___/\\__\\__,_|_| |_|\\__|___/\r\n\r\nX_LIMITS = (0, 1)\r\nY_LIMITS = (-0.1, np.sin(np.pi/3)+0.1)\r\n\r\n# ______ _ _ \r\n# | ____| | | (_) \r\n# | |__ _ _ _ __ ___| |_ _ ___ _ __ ___ \r\n# | __| | | | '_ \\ / __| __| |/ _ \\| '_ \\/ __|\r\n# | | | |_| | | | | (__| |_| | (_) | | | \\__ \\\r\n# |_| \\__,_|_| |_|\\___|\\__|_|\\___/|_| |_|___/\r\n\r\ndef compute_new(z1, z2, up):\r\n '''\r\n uses complex numbers to compute the next points to add for each edge.\r\n\r\n Parameters\r\n ----------\r\n z1 : complex\r\n first vertice.\r\n z2 : complex\r\n second vertice.\r\n up : boolean\r\n wether or not add the point in a clockwise or counterclockwise fashion.\r\n\r\n Returns\r\n -------\r\n a, b : complex, complex\r\n the new point(s) generated from the given edge.\r\n up : boolean\r\n returning \"up\" value for computing purposes\r\n '''\r\n v = (z2-z1)/2 #v is the 'vector' from z1 to the middle of the segment [z1, z2]\r\n \r\n #rotating the vectors by 60 degrees clokwise or counterclockwise depending on the up value\r\n #and on the starting point\r\n if up :\r\n a = z1 + v*1j**(2/3)\r\n b = z2 + v*1j**(4/3)\r\n else :\r\n a = z1 - v*1j**(4/3)\r\n b = z2 - v*1j**(2/3)\r\n \r\n up = not(up) #flipping the up value\r\n \r\n return a, b, up\r\n \r\n\r\ndef partial_sierpinski_triangle(T):\r\n '''\r\n Generates the next generation of the fractal curve algorithm. For each pair of consecutive points of T\r\n that make a vertice, we add between those two points the new point(s) generated by the \"compute_new\"\r\n method. 
\r\n\r\n Parameters\r\n ----------\r\n T : list[complex]\r\n the current state of the algorithm.\r\n\r\n Returns\r\n -------\r\n res : list[complex]\r\n The next step of the algorithm.\r\n '''\r\n \r\n up = len(T)%10<3\r\n res = []\r\n for i in range(len(T)-1):\r\n res.append(T[i])\r\n a, b, up = compute_new(T[i], T[i+1], up)\r\n res.append(a)\r\n res.append(b)\r\n res.append(T[-1])\r\n return res\r\n\r\ndef sierpinski_triangle(n):\r\n '''\r\n Recursive method to build the final fractal curve of degree n.\r\n\r\n Parameters\r\n ----------\r\n n : int\r\n The degree of the fractal curve to build.\r\n\r\n Returns\r\n -------\r\n list[complex]\r\n The list of complex numbers that represent the n-th degree fractal curve.\r\n '''\r\n \r\n if n==0 :\r\n return [complex(0,0), complex(1,0)]\r\n else :\r\n return partial_sierpinski_triangle(sierpinski_triangle(n-1))\r\n\r\ndef pts_num(n):\r\n '''\r\n Optional function that returns the number of points in the n-th generation of the algorithm.\r\n\r\n Parameters\r\n ----------\r\n n : int\r\n The degree of the fractal curve.\r\n\r\n Returns\r\n -------\r\n int\r\n The number of vertices needed to build the n-th degree fractal curve.\r\n '''\r\n \r\n n0 = 2\r\n return (3**n)*(n0-1)+1\r\n\r\n# _ _ \r\n# | | | | \r\n# | |_ ___ ___| |_ ___ \r\n# | __/ _ \\/ __| __/ __|\r\n# | || __/\\__ \\ |_\\__ \\\r\n# \\__\\___||___/\\__|___/\r\n\r\nif __name__ == '__main__' :\r\n import matplotlib.pyplot as plt\r\n \r\n N = 12\r\n T = sierpinski_triangle(N)\r\n Tr = [el.real for el in T]\r\n Ti = [el.imag for el in T]\r\n plt.plot(Tr, Ti, lw=np.exp(-N/10))\r\n plt.axis('equal')\r\n plt.show()\r\n"
]
| [
[
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.sin",
"matplotlib.pyplot.axis"
]
]
|
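A quick consistency check for the record above (a sketch assuming the file is importable as `sierpinski_triangle`): each generation turns a list of L points into 3*L - 2 points (two new points per edge), so starting from 2 points the n-th generation has 3**n + 1 points, which is exactly what `pts_num` returns:

from sierpinski_triangle import sierpinski_triangle, pts_num

# Two points are inserted on each of the L-1 edges, so L -> 3*L - 2;
# iterating from L0 = 2 gives 3**n + 1 points at generation n.
for n in range(6):
    assert len(sierpinski_triangle(n)) == pts_num(n) == 3**n + 1
print("pts_num matches the recursion for n = 0..5")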
jenshenriksson/pytorch-cifar | [
"9e5d370ddd233eef445cef7c525ed23acfae8953"
]
| [
"main.py"
]
| [
"'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom models import *\nfrom pytorch_eval import pytorch_train, pytorch_test\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--type', '-t', default=0, type=int, help='0: No preprocess, 1: Augment, 2: Aug+Cosine')\nparser.add_argument('--batch', '-b', default=128, type=int, help='Batch size')\nparser.add_argument('--model', default='densenet', help='Which model to train?')\nparser.add_argument('--name', '-n', default=None, type=str, help='Use a specific name for saving.')\nparser.add_argument('--epochs', '-e', default=100, type=int, help='Number of epochs to run')\nargs = parser.parse_args()\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\n\n\nif args.type == 0:\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n ])\nelif args.type >= 1:\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(10),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\nelse:\n raise RuntimeError('Wrong type {}'.format(args.type))\n\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Model\nprint('==> Building model..')\n\nif args.model == 'vgg': net = VGG('VGG16')\nif args.model == 'resnet': net = ResNet18()\nif args.model == 'preact': net = PreActResNet18()\nif args.model == 'googlenet': net = GoogLeNet()\nif args.model == 'densenet': net = DenseNet121()\nif args.model == 'resnetx29': net = ResNeXt29_2x64d()\nif args.model == 'mobile': net = MobileNet()\nif args.model == 'mobilev2': net = MobileNetV2()\nif args.model == 'dpn': net = DPN92()\nif args.model == 'shuffle': net = ShuffleNetG2()\nif args.model == 'senet': net = SENet18()\nif args.model == 'shufflev2': net = ShuffleNetV2(1)\nif args.model == 'wrn28': net = Wide_ResNet(28, 10, 0.3, 10)\nif args.model == 'wrn40': net = Wide_ResNet(40, 10, 0.3, 10)\n\nif args.name is not None:\n args.model = args.name\n\nnet = net.to(device)\nif device == 'cuda':\n if args.model == 'vgg19': net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\n\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = 
torch.load('./checkpoint/{}.t7'.format(args.model))\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)\n\nif args.type >= 2:\n steps = 100\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, steps)\n\nif not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\nif not os.path.isdir('results'):\n os.mkdir('results')\n\nprint('Using {} with settings of type: {}. '.format(args.model, args.type))\n\nfor epoch in range(start_epoch+1, start_epoch+1+args.epochs):\n\n if not os.path.isfile('results/{}-results.txt'.format(args.model)):\n with open('results/{}-results.txt'.format(args.model), 'a') as f:\n f.write(\"epoch accuracy testloss trainloss\\n\")\n\n loss = pytorch_train(epoch, net, trainloader, device, optimizer, criterion, testloader, args)\n acc, loss_test, best_acc = pytorch_test(epoch, net, trainloader, device, optimizer, criterion, testloader, args, best_acc)\n\n if args.type >= 2:\n scheduler.step()\n print(scheduler.get_lr())\n\n if epoch % steps == 0:\n print('Resetting scheduler.')\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, steps)\n\n with open('results/{}-results.txt'.format(args.model), 'a') as f:\n f.write(\"{} {:2.2f} {:.5f} {:.5f}\\n\".format(epoch, acc, loss_test, loss))\n\n\n\n\nstate = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n}\n\ntorch.save(state, './checkpoint/{}.t7'.format(args.model))\n"
]
| [
[
"torch.optim.lr_scheduler.CosineAnnealingLR"
]
]
|
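A minimal sketch of the scheduler pattern from the `main.py` record above (the `args.type >= 2` branch, matching the listed `torch.optim.lr_scheduler.CosineAnnealingLR` API); the linear model is a stand-in for the CIFAR network:

import torch.nn as nn
import torch.optim as optim

net = nn.Linear(10, 2)  # stand-in for the CIFAR model
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

steps = 100  # length of one cosine cycle, as in main.py
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, steps)

for epoch in range(1, 201):
    # ... training and evaluation would go here ...
    scheduler.step()
    if epoch % steps == 0:
        # main.py restarts the annealing by constructing a fresh scheduler
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, steps)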
zhnagchulan/vectorbt | [
"6b199f6cc8c32fc5eeaa10f88bf8aa81774969c9"
]
| [
"vectorbt/portfolio/base.py"
]
| [
"\"\"\"Base class for modeling portfolio and measuring its performance.\n\nProvides the class `vectorbt.portfolio.base.Portfolio` for modeling portfolio performance\nand calculating various risk and performance metrics. It uses Numba-compiled\nfunctions from `vectorbt.portfolio.nb` for most computations and record classes based on\n`vectorbt.records.base.Records` for evaluating events such as orders, logs, trades, positions and drawdowns.\n\nThe job of the `Portfolio` class is to create a series of positions allocated \nagainst a cash component, produce an equity curve, incorporate basic transaction costs\nand produce a set of statistics about its performance. In particular it outputs\nposition/profit metrics and drawdown information.\n\n## Workflow\n\nThe workflow of `Portfolio` is simple:\n\n1. Receives a set of inputs, such as entry and exit signals\n2. Uses them to generate and fill orders in form of records (simulation part)\n3. Calculates a broad range of risk & performance metrics based on these records (analysis part)\n\nIt basically builds upon the `vectorbt.portfolio.orders.Orders` class. To simplify creation of order\nrecords and keep track of balances, it exposes several convenience methods with prefix `from_`.\nFor example, you can use `Portfolio.from_signals` method to generate orders from entry and exit signals.\nAlternatively, you can use `Portfolio.from_order_func` to run a custom order function on each tick.\nThe results are then automatically passed to the constructor method of `Portfolio` and you will\nreceive a portfolio instance ready to be used for performance analysis.\n\nThis way, one can simulate and analyze his/her strategy in a couple of lines.\n\n### Example\n\nThe following example does something crazy: it checks candlestick data of 6 major cryptocurrencies\nin 2020 against every single pattern found in TA-Lib, and translates them into signals:\n\n```python-repl\n>>> import numpy as np\n>>> import pandas as pd\n>>> from datetime import datetime\n>>> import talib\n>>> import vectorbt as vbt\n\n>>> # Fetch price history\n>>> symbols = ['BTC-USD', 'ETH-USD', 'XRP-USD', 'BNB-USD', 'BCH-USD', 'LTC-USD']\n>>> start = '2020-01-01 UTC' # crypto is UTC\n>>> end = '2020-09-01 UTC'\n>>> # OHLCV by column\n>>> ohlcv = vbt.YFData.download(symbols, start=start, end=end).concat()\n>>> ohlcv['Open']\n\nsymbol BTC-USD ETH-USD XRP-USD BNB-USD \\\nDate\n2020-01-01 00:00:00+00:00 7194.892090 129.630661 0.192912 13.730962\n2020-01-02 00:00:00+00:00 7202.551270 130.820038 0.192708 13.698126\n2020-01-03 00:00:00+00:00 6984.428711 127.411263 0.187948 13.035329\n... ... ... ... ...\n2020-08-30 00:00:00+00:00 11508.713867 399.616699 0.274568 23.009060\n2020-08-31 00:00:00+00:00 11713.306641 428.509003 0.283065 23.647858\n2020-09-01 00:00:00+00:00 11679.316406 434.874451 0.281612 23.185047\n\nsymbol BCH-USD LTC-USD\nDate\n2020-01-01 00:00:00+00:00 204.671295 41.326534\n2020-01-02 00:00:00+00:00 204.354538 42.018085\n2020-01-03 00:00:00+00:00 196.007690 39.863129\n... ... ...\n2020-08-30 00:00:00+00:00 268.842865 57.207737\n2020-08-31 00:00:00+00:00 279.280426 62.844059\n2020-09-01 00:00:00+00:00 274.480865 61.105076\n\n[244 rows x 6 columns]\n\n>>> # Run every single pattern recognition indicator and combine results\n>>> result = pd.DataFrame.vbt.empty_like(ohlcv['Open'], fill_value=0.)\n>>> for pattern in talib.get_function_groups()['Pattern Recognition']:\n... PRecognizer = vbt.IndicatorFactory.from_talib(pattern)\n... 
pr = PRecognizer.run(ohlcv['Open'], ohlcv['High'], ohlcv['Low'], ohlcv['Close'])\n... result = result + pr.integer\n\n>>> # Don't look into future\n>>> result = result.vbt.fshift(1)\n\n>>> # Treat each number as order value in USD\n>>> size = result / ohlcv['Open']\n\n>>> # Simulate portfolio\n>>> pf = vbt.Portfolio.from_orders(\n... ohlcv['Close'], size, price=ohlcv['Open'],\n... init_cash='autoalign', fees=0.001, slippage=0.001)\n\n>>> # Visualize portfolio value\n>>> pf.value().vbt.plot()\n```\n\n\n\n## Broadcasting\n\n`Portfolio` is very flexible towards inputs:\n\n* Accepts both Series and DataFrames as inputs\n* Broadcasts inputs to the same shape using vectorbt's own broadcasting rules\n* Many inputs (such as `fees`) can be passed as a single value, value per column/row, or as a matrix\n* Implements flexible indexing wherever possible to save memory\n\n## Grouping\n\nOne of the key features of `Portfolio` is the ability to group columns. Groups can be specified by\n`group_by`, which can be anything from positions or names of column levels, to a NumPy array with\nactual groups. Groups can be formed to share capital between columns or to compute metrics\nfor a combined portfolio of multiple independent columns.\n\nFor example, let's divide our portfolio into two groups sharing the same cash balance:\n\n```python-repl\n>>> # Simulate combined portfolio\n>>> group_by = pd.Index([\n... 'first', 'first', 'first',\n... 'second', 'second', 'second'\n... ], name='group')\n>>> comb_pf = vbt.Portfolio.from_orders(\n... ohlcv['Close'], size, price=ohlcv['Open'],\n... init_cash='autoalign', fees=0.001, slippage=0.001,\n... group_by=group_by, cash_sharing=True)\n\n>>> # Get total profit per group\n>>> comb_pf.total_profit()\ngroup\nfirst 26221.571200\nsecond 10141.952674\nName: total_profit, dtype: float64\n```\n\nNot only can you analyze each group, but also each column in the group:\n\n```python-repl\n>>> # Get total profit per column\n>>> comb_pf.total_profit(group_by=False)\nsymbol\nBTC-USD 5792.120252\nETH-USD 16380.039692\nXRP-USD 4049.411256\nBNB-USD 6081.253551\nBCH-USD 400.573418\nLTC-USD 3660.125705\nName: total_profit, dtype: float64\n```\n\nIn the same way, you can introduce new grouping to the method itself:\n\n```python-repl\n>>> # Get total profit per group\n>>> pf.total_profit(group_by=group_by)\ngroup\nfirst 26221.571200\nsecond 10141.952674\nName: total_profit, dtype: float64\n```\n\n!!! note\n If cash sharing is enabled, grouping can be disabled but cannot be modified.\n\n## Indexing\n\nLike any other class subclassing `vectorbt.base.array_wrapper.Wrapping`, we can do pandas indexing\non a `Portfolio` instance, which forwards indexing operation to each object with columns:\n\n```python-repl\n>>> pf['BTC-USD']\n<vectorbt.portfolio.base.Portfolio at 0x7fac7517ac88>\n\n>>> pf['BTC-USD'].total_profit()\n5792.120252189081\n```\n\nCombined portfolio is indexed by group:\n\n```python-repl\n>>> comb_pf['first']\n<vectorbt.portfolio.base.Portfolio at 0x7fac5756b828>\n\n>>> comb_pf['first'].total_profit()\n26221.57120014546\n```\n\n!!! note\n Changing index (time axis) is not supported. The object should be treated as a Series\n rather than a DataFrame; for example, use `pf.iloc[0]` instead of `pf.iloc[:, 0]`.\n\n Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.\n For example, if `group_select` is enabled indexing will be performed on groups,\n otherwise on single columns. 
You can pass wrapper arguments with `wrapper_kwargs`.\n\n## Logging\n\nTo collect more information on how a specific order was processed or to be able to track the whole\nsimulation from the beginning to the end, you can turn on logging.\n\n```python-repl\n>>> # Simulate portfolio with logging\n>>> pf = vbt.Portfolio.from_orders(\n... ohlcv['Close'], size, price=ohlcv['Open'],\n... init_cash='autoalign', fees=0.001, slippage=0.001, log=True)\n\n>>> pf.logs.records\n id idx col group cash position debt free_cash val_price \\\\\n0 0 0 0 0 inf 0.000000 0.0 inf 7194.892090\n1 1 1 0 0 inf 0.000000 0.0 inf 7202.551270\n2 2 2 0 0 inf 0.000000 0.0 inf 6984.428711\n... ... ... ... ... ... ... ... ... ...\n1461 1461 241 5 5 inf 272.389644 0.0 inf 57.207737\n1462 1462 242 5 5 inf 274.137659 0.0 inf 62.844059\n1463 1463 243 5 5 inf 282.093860 0.0 inf 61.105076\n\n value ... new_free_cash new_val_price new_value res_size \\\\\n0 inf ... inf 7194.892090 inf NaN\n1 inf ... inf 7202.551270 inf NaN\n2 inf ... inf 6984.428711 inf NaN\n... ... ... ... ... ... ...\n1461 inf ... inf 57.207737 inf 1.748015\n1462 inf ... inf 62.844059 inf 7.956202\n1463 inf ... inf 61.105076 inf 1.636525\n\n res_price res_fees res_side res_status res_status_info order_id\n0 NaN NaN -1 1 0 -1\n1 NaN NaN -1 1 5 -1\n2 NaN NaN -1 1 5 -1\n... ... ... ... ... ... ...\n1461 57.264945 0.1001 0 0 -1 1070\n1462 62.906903 0.5005 0 0 -1 1071\n1463 61.043971 0.0999 1 0 -1 1072\n\n[1464 rows x 37 columns]\n```\n\nJust as orders, logs are also records and thus can be easily analyzed:\n\n```python-repl\n>>> from vectorbt.portfolio.enums import OrderStatus\n\n>>> pf.logs.map_field('res_status', mapping=OrderStatus).value_counts()\nsymbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD LTC-USD\nIgnored 60 72 67 66 67 59\nFilled 184 172 177 178 177 185\n```\n\nLogging can also be turned on just for one order, row, or column, since as many other\nvariables it's specified per order and can broadcast automatically.\n\n!!! note\n Logging can slow down simulation.\n\n## Caching\n\n`Portfolio` heavily relies upon caching. If a method or a property requires heavy computation,\nit's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`\nrespectively. Caching can be disabled globally via `caching` in `vectorbt._settings.settings`.\n\n!!! note\n Because of caching, class is meant to be immutable and all properties are read-only.\n To change any attribute, use the `copy` method and pass the attribute as keyword argument.\n\nIf you're running out of memory when working with large arrays, make sure to disable caching\nand then store most important time series manually. For example, if you're interested in Sharpe\nratio or other metrics based on returns, run and save `Portfolio.returns` and then use the\n`vectorbt.returns.accessors.ReturnsAccessor` to analyze them. Do not use methods akin to\n`Portfolio.sharpe_ratio` because they will re-calculate returns each time.\n\nAlternatively, you can precisely point at attributes and methods that should or shouldn't\nbe cached. For example, you can blacklist the entire `Portfolio` class except a few most called\nmethods such as `Portfolio.cash_flow` and `Portfolio.asset_flow`:\n\n```python-repl\n>>> vbt.settings.caching['blacklist'].append(\n... vbt.CacheCondition(base_cls='Portfolio')\n... )\n>>> vbt.settings.caching['whitelist'].extend([\n... vbt.CacheCondition(base_cls='Portfolio', func='cash_flow'),\n... vbt.CacheCondition(base_cls='Portfolio', func='asset_flow')\n... 
])\n```\n\nDefine rules for one instance of `Portfolio`:\n\n```python-repl\n>>> vbt.settings.caching['blacklist'].append(\n... vbt.CacheCondition(instance=pf)\n... )\n>>> vbt.settings.caching['whitelist'].extend([\n... vbt.CacheCondition(instance=pf, func='cash_flow'),\n... vbt.CacheCondition(instance=pf, func='asset_flow')\n... ])\n```\n\nSee `vectorbt.utils.decorators.should_cache` for caching rules.\n\nTo reset caching:\n\n```python-repl\n>>> vbt.settings.caching.reset()\n```\n\n## Saving and loading\n\nLike any other class subclassing `vectorbt.utils.config.Pickleable`, we can save a `Portfolio`\ninstance to the disk with `Portfolio.save` and load it with `Portfolio.load`:\n\n```python-repl\n>>> pf = vbt.Portfolio.from_orders(\n... ohlcv['Close'], size, price=ohlcv['Open'],\n... init_cash='autoalign', fees=0.001, slippage=0.001, freq='1D')\n>>> pf.sharpe_ratio()\nsymbol\nBTC-USD 1.743437\nETH-USD 2.800903\nXRP-USD 1.607904\nBNB-USD 1.805373\nBCH-USD 0.269392\nLTC-USD 1.040494\nName: sharpe_ratio, dtype: float64\n\n>>> pf.save('my_pf')\n>>> pf = vbt.Portfolio.load('my_pf')\n>>> pf.sharpe_ratio()\nsymbol\nBTC-USD 1.743437\nETH-USD 2.800903\nXRP-USD 1.607904\nBNB-USD 1.805373\nBCH-USD 0.269392\nLTC-USD 1.040494\nName: sharpe_ratio, dtype: float64\n```\n\n!!! note\n Save files include neither cached results nor global defaults. For example,\n passing `fillna_close` as None will also use None when the portfolio is loaded from disk.\n Make sure to either pass all arguments explicitly or to also save the `vectorbt._settings.settings` config.\n\n## Stats\n\n!!! hint\n See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Portfolio.metrics`.\n\nLet's simulate a portfolio with two columns:\n\n```python-repl\n>>> close = vbt.YFData.download(\n... \"BTC-USD\",\n... start='2020-01-01 UTC',\n... end='2020-09-01 UTC'\n... 
).get('Close')\n\n>>> pf = vbt.Portfolio.from_random_signals(close, n=[10, 20], seed=42)\n>>> pf.wrapper.columns\nInt64Index([10, 20], dtype='int64', name='rand_n')\n```\n\n### Column, group, and tag selection\n\nTo return the statistics for a particular column/group, use the `column` argument:\n\n```python-repl\n>>> pf.stats(column=10)\nUserWarning: Metric 'sharpe_ratio' requires frequency to be set\nUserWarning: Metric 'calmar_ratio' requires frequency to be set\nUserWarning: Metric 'omega_ratio' requires frequency to be set\nUserWarning: Metric 'sortino_ratio' requires frequency to be set\n\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244\nStart Value 100.0\nEnd Value 106.721585\nTotal Return [%] 6.721585\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 22.190944\nMax Drawdown Duration 101.0\nTotal Trades 10\nTotal Closed Trades 10\nTotal Open Trades 0\nOpen Trade P&L 0.0\nWin Rate [%] 60.0\nBest Trade [%] 15.31962\nWorst Trade [%] -9.904223\nAvg Winning Trade [%] 4.671959\nAvg Losing Trade [%] -4.851205\nAvg Winning Trade Duration 11.333333\nAvg Losing Trade Duration 14.25\nProfit Factor 1.347457\nExpectancy 0.672158\nName: 10, dtype: object\n```\n\nIf vectorbt couldn't parse the frequency of `close`:\n\n1) it won't return any duration in time units,\n2) it won't return any metric that requires annualization, and\n3) it will throw a bunch of warnings (you can silence those by passing `silence_warnings=True`)\n\nWe can provide the frequency as part of the settings dict:\n\n```python-repl\n>>> pf.stats(column=10, settings=dict(freq='d'))\nUserWarning: Changing the frequency will create a copy of this object.\nConsider setting the frequency upon object creation to re-use existing cache.\n\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 106.721585\nTotal Return [%] 6.721585\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 22.190944\nMax Drawdown Duration 101 days 00:00:00\nTotal Trades 10\nTotal Closed Trades 10\nTotal Open Trades 0\nOpen Trade P&L 0.0\nWin Rate [%] 60.0\nBest Trade [%] 15.31962\nWorst Trade [%] -9.904223\nAvg Winning Trade [%] 4.671959\nAvg Losing Trade [%] -4.851205\nAvg Winning Trade Duration 11 days 08:00:00\nAvg Losing Trade Duration 14 days 06:00:00\nProfit Factor 1.347457\nExpectancy 0.672158\nSharpe Ratio 0.445231\nCalmar Ratio 0.460573\nOmega Ratio 1.099192\nSortino Ratio 0.706986\nName: 10, dtype: object\n```\n\nBut in this case, our portfolio will be copied to set the new frequency and we wouldn't be\nable to re-use its cached attributes. Let's define the frequency upon the simulation instead:\n\n```python-repl\n>>> pf = vbt.Portfolio.from_random_signals(close, n=[10, 20], seed=42, freq='d')\n```\n\nWe can change the grouping of the portfolio on the fly. 
Let's form a single group:\n\n```python-repl\n>>> pf.stats(group_by=True)\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 200.0\nEnd Value 277.49299\nTotal Return [%] 38.746495\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 14.219327\nMax Drawdown Duration 86 days 00:00:00\nTotal Trades 30\nTotal Closed Trades 30\nTotal Open Trades 0\nOpen Trade P&L 0.0\nWin Rate [%] 66.666667\nBest Trade [%] 18.332559\nWorst Trade [%] -9.904223\nAvg Winning Trade [%] 5.754788\nAvg Losing Trade [%] -4.718907\nAvg Winning Trade Duration 7 days 19:12:00\nAvg Losing Trade Duration 8 days 07:12:00\nProfit Factor 2.427948\nExpectancy 2.5831\nSharpe Ratio 1.57907\nCalmar Ratio 4.445448\nOmega Ratio 1.334032\nSortino Ratio 2.59669\nName: group, dtype: object\n```\n\nWe can see how the initial cash has changed from $100 to $200, indicating that both columns now\ncontribute to the performance.\n\n### Aggregation\n\nIf the portfolio consists of multiple columns/groups and no column/group has been selected,\neach metric is aggregated across all columns/groups based on `agg_func`, which is `np.mean` by default.\n\n```python-repl\n>>> pf.stats()\nUserWarning: Object has multiple columns. Aggregating using <function mean at 0x7fc77152bb70>.\nPass column to select a single column/group.\n\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 138.746495\nTotal Return [%] 38.746495\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 20.35869\nMax Drawdown Duration 93 days 00:00:00\nTotal Trades 15.0\nTotal Closed Trades 15.0\nTotal Open Trades 0.0\nOpen Trade P&L 0.0\nWin Rate [%] 65.0\nBest Trade [%] 16.82609\nWorst Trade [%] -9.701273\nAvg Winning Trade [%] 5.445408\nAvg Losing Trade [%] -4.740956\nAvg Winning Trade Duration 8 days 19:25:42.857142857\nAvg Losing Trade Duration 9 days 07:00:00\nProfit Factor 2.186957\nExpectancy 2.105364\nSharpe Ratio 1.165695\nCalmar Ratio 3.541079\nOmega Ratio 1.331624\nSortino Ratio 2.084565\nName: agg_func_mean, dtype: object\n```\n\nHere, the Sharpe ratios of 0.445231 (column=10) and 1.88616 (column=20) lead to the average of 1.16569.\n\nWe can also return a DataFrame with statistics per column/group by passing `agg_func=None`:\n\n```python-repl\n>>> pf.stats(agg_func=None)\n Start End Period ... Sortino Ratio\nrand_n ...\n10 2020-01-01 00:00:00+00:00 2020-09-01 00:00:00+00:00 244 days ... 0.706986\n20 2020-01-01 00:00:00+00:00 2020-09-01 00:00:00+00:00 244 days ... 
3.462144\n\n[2 rows x 25 columns]\n```\n\n### Metric selection\n\nTo select metrics, use the `metrics` argument (see `Portfolio.metrics` for supported metrics):\n\n```python-repl\n>>> pf.stats(metrics=['sharpe_ratio', 'sortino_ratio'], column=10)\nSharpe Ratio 0.445231\nSortino Ratio 0.706986\nName: 10, dtype: float64\n```\n\nWe can also select specific tags (see any metric from `Portfolio.metrics` that has the `tag` key):\n\n```python-repl\n>>> pf.stats(column=10, tags=['trades'])\nTotal Trades 10\nTotal Open Trades 0\nOpen Trade P&L 0\nLong Trades [%] 100\nWin Rate [%] 60\nBest Trade [%] 15.3196\nWorst Trade [%] -9.90422\nAvg Winning Trade [%] 4.67196\nAvg Winning Trade Duration 11 days 08:00:00\nAvg Losing Trade [%] -4.8512\nAvg Losing Trade Duration 14 days 06:00:00\nProfit Factor 1.34746\nExpectancy 0.672158\nName: 10, dtype: object\n```\n\nOr provide a boolean expression:\n\n```python-repl\n>>> pf.stats(column=10, tags='trades and open and not closed')\nTotal Open Trades 0.0\nOpen Trade P&L 0.0\nName: 10, dtype: float64\n```\n\nThe reason why we included \"not closed\" along with \"open\" is because some metrics such as the win rate\nhave both tags attached since they are based upon both open and closed trades/positions\n(to see this, pass `settings=dict(incl_open=True)` and `tags='trades and open'`).\n\n### Passing parameters\n\nWe can use `settings` to pass parameters used across multiple metrics.\nFor example, let's pass required and risk-free return to all return metrics:\n\n```python-repl\n>>> pf.stats(column=10, settings=dict(required_return=0.1, risk_free=0.01))\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 106.721585\nTotal Return [%] 6.721585\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 22.190944\nMax Drawdown Duration 101 days 00:00:00\nTotal Trades 10\nTotal Closed Trades 10\nTotal Open Trades 0\nOpen Trade P&L 0.0\nWin Rate [%] 60.0\nBest Trade [%] 15.31962\nWorst Trade [%] -9.904223\nAvg Winning Trade [%] 4.671959\nAvg Losing Trade [%] -4.851205\nAvg Winning Trade Duration 11 days 08:00:00\nAvg Losing Trade Duration 14 days 06:00:00\nProfit Factor 1.347457\nExpectancy 0.672158\nSharpe Ratio -9.504742 << here\nCalmar Ratio 0.460573 << here\nOmega Ratio 0.233279 << here\nSortino Ratio -18.763407 << here\nName: 10, dtype: object\n```\n\nPassing any argument inside of `settings` either overrides an existing default, or acts as\nan optional argument that is passed to the calculation function upon resolution (see below).\nBoth `required_return` and `risk_free` can be found in the signature of the 4 ratio methods,\nso vectorbt knows exactly it has to pass them.\n\nLet's imagine that the signature of `vectorbt.returns.accessors.ReturnsAccessor.sharpe_ratio`\ndoesn't list those arguments: vectorbt would simply call this method without passing those two arguments.\nIn such case, we have two options:\n\n1) Set parameters globally using `settings` and set `pass_{arg}=True` individually using `metric_settings`:\n\n```python-repl\n>>> pf.stats(\n... column=10,\n... settings=dict(required_return=0.1, risk_free=0.01),\n... metric_settings=dict(\n... sharpe_ratio=dict(pass_risk_free=True),\n... omega_ratio=dict(pass_required_return=True, pass_risk_free=True),\n... sortino_ratio=dict(pass_required_return=True)\n... )\n... )\n```\n\n2) Set parameters individually using `metric_settings`:\n\n```python-repl\n>>> pf.stats(\n... column=10,\n... 
metric_settings=dict(\n... sharpe_ratio=dict(risk_free=0.01),\n... omega_ratio=dict(required_return=0.1, risk_free=0.01),\n... sortino_ratio=dict(required_return=0.1)\n... )\n... )\n```\n\n### Custom metrics\n\nTo calculate a custom metric, we need to provide at least two things: short name and a settings\ndict with the title and calculation function (see arguments in `vectorbt.generic.stats_builder.StatsBuilderMixin`):\n\n```python-repl\n>>> max_winning_streak = (\n... 'max_winning_streak',\n... dict(\n... title='Max Winning Streak',\n... calc_func=lambda trades: trades.winning_streak.max(),\n... resolve_trades=True\n... )\n... )\n>>> pf.stats(metrics=max_winning_streak, column=10)\nMax Winning Streak 3.0\nName: 10, dtype: float64\n```\n\nYou might wonder how vectorbt knows which arguments to pass to `calc_func`?\nIn the example above, the calculation function expects two arguments: `trades` and `group_by`.\nTo automatically pass any of the them, vectorbt searches for each in the current settings.\nAs `trades` cannot be found, it either throws an error or tries to resolve this argument if\n`resolve_{arg}=True` was passed. Argument resolution is the process of searching for property/method with\nthe same name (also with prefix `get_`) in the attributes of the current portfolio, automatically passing the\ncurrent settings such as `group_by` if they are present in the method's signature\n(a similar resolution procedure), and calling the method/property. The result of the resolution\nprocess is then passed as `arg` (or `trades` in our example).\n\nHere's an example without resolution of arguments:\n\n```python-repl\n>>> max_winning_streak = (\n... 'max_winning_streak',\n... dict(\n... title='Max Winning Streak',\n... calc_func=lambda self, group_by:\n... self.get_trades(group_by=group_by).winning_streak.max()\n... )\n... )\n>>> pf.stats(metrics=max_winning_streak, column=10)\nMax Winning Streak 3.0\nName: 10, dtype: float64\n```\n\nAnd here's an example without resolution of the calculation function:\n\n```python-repl\n>>> max_winning_streak = (\n... 'max_winning_streak',\n... dict(\n... title='Max Winning Streak',\n... calc_func=lambda self, settings:\n... self.get_trades(group_by=settings['group_by']).winning_streak.max(),\n... resolve_calc_func=False\n... )\n... )\n>>> pf.stats(metrics=max_winning_streak, column=10)\nMax Winning Streak 3.0\nName: 10, dtype: float64\n```\n\nSince `max_winning_streak` method can be expressed as a path from this portfolio, we can simply write:\n\n```python-repl\n>>> max_winning_streak = (\n... 'max_winning_streak',\n... dict(\n... title='Max Winning Streak',\n... calc_func='trades.winning_streak.max'\n... )\n... 
)\n```\n\nIn this case, we don't have to pass `resolve_trades=True` any more as vectorbt does it automatically.\nAnother advantage is that vectorbt can access the signature of the last method in the path\n(`vectorbt.records.mapped_array.MappedArray.max` in our case) and resolve its arguments.\n\nSince `trades` and `positions` are very similar concepts (positions are aggregations of trades),\nyou can substitute a trade with a position by passing `use_positions=True`.\nAdditionally, you can pass `incl_open=True` to also include open trades/positions.\n\n```python-repl\n>>> pf.stats(column=10, settings=dict(use_positions=True, incl_open=True))\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 106.721585\nTotal Return [%] 6.721585\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 22.190944\nMax Drawdown Duration 101 days 00:00:00\nTotal Positions 10\nTotal Closed Positions 10\nTotal Open Positions 0\nOpen Position P&L 0.0\nWin Rate [%] 60.0\nBest Position [%] 15.31962\nWorst Position [%] -9.904223\nAvg Winning Position [%] 4.671959\nAvg Losing Position [%] -4.851205\nAvg Winning Position Duration 11 days 08:00:00\nAvg Losing Position Duration 14 days 06:00:00\nProfit Factor 1.347457\nExpectancy 0.672158\nSharpe Ratio 0.445231\nCalmar Ratio 0.460573\nOmega Ratio 1.099192\nSortino Ratio 0.706986\nName: 10, dtype: object\n```\n\nNotice how vectorbt changed each 'Trade' to 'Position' thanks to evaluation templates\ndefined in `Portfolio.metrics`. We can use the same feature in any custom metric.\n\nAny default metric setting or even global setting can be overridden by the user using metric-specific\nkeyword arguments. Here, we override the global aggregation function for `max_dd_duration`:\n\n```python-repl\n>>> pf.stats(agg_func=lambda sr: sr.mean(),\n... metric_settings=dict(\n... max_dd_duration=dict(agg_func=lambda sr: sr.max())\n... )\n... )\nUserWarning: Object has multiple columns. 
Aggregating using <function <lambda> at 0x7fbf6e77b268>.\nPass column to select a single column/group.\n\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 138.746495\nTotal Return [%] 38.746495\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 20.35869\nMax Drawdown Duration 101 days 00:00:00 << here\nTotal Trades 15.0\nTotal Closed Trades 15.0\nTotal Open Trades 0.0\nOpen Trade P&L 0.0\nWin Rate [%] 65.0\nBest Trade [%] 16.82609\nWorst Trade [%] -9.701273\nAvg Winning Trade [%] 5.445408\nAvg Losing Trade [%] -4.740956\nAvg Winning Trade Duration 8 days 19:25:42.857142857\nAvg Losing Trade Duration 9 days 07:00:00\nProfit Factor 2.186957\nExpectancy 2.105364\nSharpe Ratio 1.165695\nCalmar Ratio 3.541079\nOmega Ratio 1.331624\nSortino Ratio 2.084565\nName: agg_func_<lambda>, dtype: object\n```\n\nLet's create a simple metric that returns a passed value to demonstrate how vectorbt overrides settings,\nfrom least to most important:\n\n```python-repl\n>>> # vbt.settings.portfolio.stats\n>>> vbt.settings.portfolio.stats['settings']['my_arg'] = 100\n>>> my_arg_metric = ('my_arg_metric', dict(title='My Arg', calc_func=lambda my_arg: my_arg))\n>>> pf.stats(my_arg_metric, column=10)\nMy Arg 100\nName: 10, dtype: int64\n\n>>> # settings >>> vbt.settings.portfolio.stats\n>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200))\nMy Arg 200\nName: 10, dtype: int64\n\n>>> # metric settings >>> settings\n>>> my_arg_metric = ('my_arg_metric', dict(title='My Arg', my_arg=300, calc_func=lambda my_arg: my_arg))\n>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200))\nMy Arg 300\nName: 10, dtype: int64\n\n>>> # metric_settings >>> metric settings\n>>> pf.stats(my_arg_metric, column=10, settings=dict(my_arg=200),\n... metric_settings=dict(my_arg_metric=dict(my_arg=400)))\nMy Arg 400\nName: 10, dtype: int64\n```\n\nHere's an example of a parametrized metric. Let's get the number of trades with P&L over some amount:\n\n```python-repl\n>>> trade_min_pnl_cnt = (\n... 'trade_min_pnl_cnt',\n... dict(\n... title=vbt.Sub('Trades with P&L over $$${min_pnl}'),\n... calc_func=lambda trades, min_pnl: trades.filter_by_mask(\n... trades.pnl.values >= min_pnl).count(),\n... resolve_trades=True\n... )\n... )\n>>> pf.stats(\n... metrics=trade_min_pnl_cnt, column=10,\n... metric_settings=dict(trade_min_pnl_cnt=dict(min_pnl=0)))\nTrades with P&L over $0 6\nName: stats, dtype: int64\n\n>>> pf.stats(\n... metrics=trade_min_pnl_cnt, column=10,\n... metric_settings=dict(trade_min_pnl_cnt=dict(min_pnl=10)))\nTrades with P&L over $10 1\nName: stats, dtype: int64\n```\n\nIf the same metric name was encountered more than once, vectorbt automatically appends an\nunderscore and its position, so we can pass keyword arguments to each metric separately:\n\n```python-repl\n>>> pf.stats(\n... metrics=[\n... trade_min_pnl_cnt,\n... trade_min_pnl_cnt,\n... trade_min_pnl_cnt\n... ],\n... column=10,\n... metric_settings=dict(\n... trade_min_pnl_cnt_0=dict(min_pnl=0),\n... trade_min_pnl_cnt_1=dict(min_pnl=10),\n... trade_min_pnl_cnt_2=dict(min_pnl=20))\n... 
)\nTrades with P&L over $0 6\nTrades with P&L over $10 1\nTrades with P&L over $20 0\nName: stats, dtype: int64\n```\n\nTo add a custom metric to the list of all metrics, we have three options.\n\nThe first option is to change the `Portfolio.metrics` dict in-place (this will append to the end):\n\n```python-repl\n>>> pf.metrics['max_winning_streak'] = max_winning_streak[1]\n>>> pf.stats(column=10)\nStart 2020-01-01 00:00:00+00:00\nEnd 2020-09-01 00:00:00+00:00\nPeriod 244 days 00:00:00\nStart Value 100.0\nEnd Value 106.721585\nTotal Return [%] 6.721585\nBenchmark Return [%] 66.252621\nMax Gross Exposure [%] 100.0\nTotal Fees Paid 0.0\nMax Drawdown [%] 22.190944\nMax Drawdown Duration 101 days 00:00:00\nTotal Trades 10\nTotal Closed Trades 10\nTotal Open Trades 0\nOpen Trade P&L 0.0\nWin Rate [%] 60.0\nBest Trade [%] 15.31962\nWorst Trade [%] -9.904223\nAvg Winning Trade [%] 4.671959\nAvg Losing Trade [%] -4.851205\nAvg Winning Trade Duration 11 days 08:00:00\nAvg Losing Trade Duration 14 days 06:00:00\nProfit Factor 1.347457\nExpectancy 0.672158\nSharpe Ratio 0.445231\nCalmar Ratio 0.460573\nOmega Ratio 1.099192\nSortino Ratio 0.706986\nMax Winning Streak 3.0 << here\nName: 10, dtype: object\n```\n\nSince `Portfolio.metrics` is of type `vectorbt.utils.config.Config`, we can reset it at any time\nto get default metrics:\n\n```python-repl\n>>> pf.metrics.reset()\n```\n\nThe second option is to copy `Portfolio.metrics`, append our metric, and pass as `metrics` argument:\n\n```python-repl\n>>> my_metrics = list(pf.metrics.items()) + [max_winning_streak]\n>>> pf.stats(metrics=my_metrics, column=10)\n```\n\nThe third option is to set `metrics` globally under `portfolio.stats` in `vectorbt._settings.settings`.\n\n```python-repl\n>>> vbt.settings.portfolio['stats']['metrics'] = my_metrics\n>>> pf.stats(column=10)\n```\n\n## Plotting\n\n!!! hint\n See `vectorbt.generic.plot_builder.PlotBuilderMixin.plot`.\n\n The features implemented in this method are very similar to `Portfolio.stats`.\n See also the examples under `Portfolio.stats`.\n\nPlot portfolio of a random strategy:\n\n```python-repl\n>>> pf.plot(column=10)\n```\n\n\n\nYou can choose any of the subplots in `Portfolio.subplots`, in any order, and\ncontrol their appearance using keyword arguments:\n\n```python-repl\n>>> from vectorbt.utils.colors import adjust_opacity\n\n>>> pf.plot(\n... subplots=['drawdowns', 'underwater'],\n... column=10,\n... subplot_settings=dict(\n... drawdowns=dict(top_n=3),\n... underwater=dict(\n... trace_kwargs=dict(\n... line=dict(color='#FF6F00'),\n... fillcolor=adjust_opacity('#FF6F00', 0.3)\n... )\n... )\n... )\n... )\n```\n\n\n\nTo create a new subplot, a preferred way is to pass a plotting function:\n\n```python-repl\n>>> def plot_order_size(pf, size, column=None, add_trace_kwargs=None, fig=None):\n... size = pf.select_one_from_obj(size, pf.wrapper.regroup(False), column=column)\n... size.rename('Order Size').vbt.barplot(\n... add_trace_kwargs=add_trace_kwargs, fig=fig)\n\n>>> order_size = pf.orders.size.to_pd(fill_value=0.)\n>>> pf.plot(subplots=[\n... 'orders',\n... ('order_size', dict(\n... title='Order Size',\n... yaxis_title='Order size',\n... check_is_not_grouped=True,\n... plot_func=plot_order_size\n... ))\n... ],\n... column=10,\n... subplot_settings=dict(\n... order_size=dict(\n... size=order_size\n... )\n... )\n... )\n```\n\nAlternatively, you can create a placeholder and overwrite it manually later:\n\n```python-repl\n>>> fig = pf.plot(subplots=[\n... 'orders',\n... ('order_size', dict(\n... 
title='Order Size',\n... yaxis_title='Order size',\n... check_is_not_grouped=True\n... )) # placeholder\n... ], column=10)\n>>> order_size[10].rename('Order Size').vbt.barplot(\n... add_trace_kwargs=dict(row=2, col=1),\n... fig=fig\n... )\n```\n\n\n\nIf a plotting function can in any way be accessed from the current portfolio, you can pass\nthe path to this function (see `vectorbt.utils.attr.deep_getattr` for the path format).\nYou can additionally use templates to make some parameters to depend upon passed keyword arguments:\n\n```python-repl\n>>> subplots = [\n... ('cumulative_returns', dict(\n... title='Cumulative Returns',\n... yaxis_title='Cumulative returns',\n... plot_func='returns.vbt.returns.cumulative.vbt.plot',\n... pass_add_trace_kwargs=True\n... )),\n... ('rolling_drawdown', dict(\n... title='Rolling Drawdown',\n... yaxis_title='Rolling drawdown',\n... plot_func=[\n... 'returns.vbt.returns', # returns accessor\n... (\n... 'rolling_max_drawdown', # function name\n... (vbt.Rep('window'),)), # positional arguments\n... 'vbt.plot' # plotting function\n... ],\n... pass_add_trace_kwargs=True,\n... trace_names=[vbt.Sub('rolling_drawdown(${window})')], # add window to the trace name\n... ))\n... ]\n>>> pf.plot(\n... subplots,\n... column=10,\n... subplot_settings=dict(\n... rolling_drawdown=dict(\n... template_mapping=dict(\n... window=10\n... )\n... )\n... )\n... )\n```\n\nYou can also replace templates across all subplots by using the global template mapping:\n\n```python-repl\n>>> pf.plot(subplots, column=10, template_mapping=dict(window=10))\n```\n\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom vectorbt import _typing as tp\nfrom vectorbt.utils import checks\nfrom vectorbt.utils.decorators import cached_property, cached_method\nfrom vectorbt.utils.enum import map_enum_fields\nfrom vectorbt.utils.config import merge_dicts, Config\nfrom vectorbt.utils.template import RepEval, Rep\nfrom vectorbt.utils.random import set_seed\nfrom vectorbt.utils.colors import adjust_opacity\nfrom vectorbt.utils.figure import get_domain\nfrom vectorbt.base.reshape_fns import to_1d_array, to_2d_array, broadcast, broadcast_to, to_pd_array\nfrom vectorbt.base.array_wrapper import ArrayWrapper, Wrapping\nfrom vectorbt.generic.stats_builder import StatsBuilderMixin\nfrom vectorbt.generic.plot_builder import PlotBuilderMixin\nfrom vectorbt.generic.drawdowns import Drawdowns\nfrom vectorbt.signals.generators import RANDNX, RPROBNX\nfrom vectorbt.returns.accessors import ReturnsAccessor\nfrom vectorbt.returns import nb as returns_nb\nfrom vectorbt.portfolio import nb\nfrom vectorbt.portfolio.orders import Orders\nfrom vectorbt.portfolio.trades import Trades, Positions\nfrom vectorbt.portfolio.logs import Logs\nfrom vectorbt.portfolio.enums import *\nfrom vectorbt.portfolio.decorators import add_returns_acc_methods\n\n__pdoc__ = {}\n\nreturns_acc_config = Config(\n {\n 'daily_returns': dict(source_name='daily'),\n 'annual_returns': dict(source_name='annual'),\n 'cumulative_returns': dict(source_name='cumulative'),\n 'annualized_return': dict(source_name='annualized'),\n 'annualized_volatility': dict(),\n 'calmar_ratio': dict(),\n 'omega_ratio': dict(),\n 'sharpe_ratio': dict(),\n 'deflated_sharpe_ratio': dict(),\n 'downside_risk': dict(),\n 'sortino_ratio': dict(),\n 'information_ratio': dict(),\n 'beta': dict(),\n 'alpha': dict(),\n 'tail_ratio': dict(),\n 'value_at_risk': dict(),\n 'cond_value_at_risk': dict(),\n 'capture': dict(),\n 'up_capture': dict(),\n 'down_capture': dict(),\n 'drawdown': 
dict(),\n 'max_drawdown': dict()\n },\n as_attrs=False,\n readonly=True\n)\n\"\"\"_\"\"\"\n\n__pdoc__['returns_acc_config'] = f\"\"\"Config of returns accessor methods to be added to `Portfolio`.\n\n```json\n{returns_acc_config.to_doc()}\n```\n\"\"\"\n\nPortfolioT = tp.TypeVar(\"PortfolioT\", bound=\"Portfolio\")\n\n\nclass MetaPortfolio(type(StatsBuilderMixin), type(PlotBuilderMixin)):\n pass\n\n\n@add_returns_acc_methods(returns_acc_config)\nclass Portfolio(Wrapping, StatsBuilderMixin, PlotBuilderMixin, metaclass=MetaPortfolio):\n \"\"\"Class for modeling portfolio and measuring its performance.\n\n Args:\n wrapper (ArrayWrapper): Array wrapper.\n\n See `vectorbt.base.array_wrapper.ArrayWrapper`.\n close (array_like): Last asset price at each time step.\n order_records (array_like): A structured NumPy array of order records.\n log_records (array_like): A structured NumPy array of log records.\n init_cash (InitCashMode, float or array_like of float): Initial capital.\n cash_sharing (bool): Whether to share cash within the same group.\n call_seq (array_like of int): Sequence of calls per row and group.\n fillna_close (bool): Whether to forward and backward fill NaN values in `close`.\n\n Applied after the simulation to avoid NaNs in asset value.\n\n See `Portfolio.get_filled_close`.\n\n !!! note\n Use class methods with `from_` prefix to build a portfolio.\n The `__init__` method is reserved for indexing purposes.\n\n !!! note\n This class is meant to be immutable. To change any attribute, use `Portfolio.copy`.\"\"\"\n\n def __init__(self,\n wrapper: ArrayWrapper,\n close: tp.ArrayLike,\n order_records: tp.RecordArray,\n log_records: tp.RecordArray,\n init_cash: tp.ArrayLike,\n cash_sharing: bool,\n call_seq: tp.Array2d,\n fillna_close: tp.Optional[bool] = None) -> None:\n Wrapping.__init__(\n self,\n wrapper,\n close=close,\n order_records=order_records,\n log_records=log_records,\n init_cash=init_cash,\n cash_sharing=cash_sharing,\n call_seq=call_seq,\n fillna_close=fillna_close\n )\n StatsBuilderMixin.__init__(self)\n PlotBuilderMixin.__init__(self)\n\n # Get defaults\n from vectorbt._settings import settings\n portfolio_cfg = settings['portfolio']\n\n if fillna_close is None:\n fillna_close = portfolio_cfg['fillna_close']\n\n # Store passed arguments\n self._close = broadcast_to(close, wrapper.dummy(group_by=False))\n self._order_records = order_records\n self._log_records = log_records\n self._init_cash = init_cash\n self._cash_sharing = cash_sharing\n self._call_seq = call_seq\n self._fillna_close = fillna_close\n\n def indexing_func(self: PortfolioT, pd_indexing_func: tp.PandasIndexingFunc, **kwargs) -> PortfolioT:\n \"\"\"Perform indexing on `Portfolio`.\"\"\"\n new_wrapper, _, group_idxs, col_idxs = \\\n self.wrapper.indexing_func_meta(pd_indexing_func, column_only_select=True, **kwargs)\n new_close = new_wrapper.wrap(to_2d_array(self.close)[:, col_idxs], group_by=False)\n new_order_records = self.orders.get_by_col_idxs(col_idxs)\n new_log_records = self.logs.get_by_col_idxs(col_idxs)\n if isinstance(self._init_cash, int):\n new_init_cash = self._init_cash\n else:\n new_init_cash = to_1d_array(self._init_cash)[group_idxs if self.cash_sharing else col_idxs]\n new_call_seq = self.call_seq.values[:, col_idxs]\n\n return self.copy(\n wrapper=new_wrapper,\n close=new_close,\n order_records=new_order_records,\n log_records=new_log_records,\n init_cash=new_init_cash,\n call_seq=new_call_seq\n )\n\n # ############# Class methods ############# #\n\n @classmethod\n def from_holding(cls: 
tp.Type[PortfolioT], close: tp.ArrayLike, **kwargs) -> PortfolioT:\n \"\"\"Simulate portfolio from holding.\n\n Based on `Portfolio.from_orders`.\"\"\"\n size = pd.DataFrame.vbt.empty_like(close, fill_value=np.nan)\n size.iloc[0] = np.inf\n return cls.from_orders(close, size, **kwargs)\n\n @classmethod\n def from_random_signals(cls: tp.Type[PortfolioT],\n close: tp.ArrayLike,\n n: tp.Optional[tp.ArrayLike] = None,\n prob: tp.Optional[tp.ArrayLike] = None,\n entry_prob: tp.Optional[tp.ArrayLike] = None,\n exit_prob: tp.Optional[tp.ArrayLike] = None,\n param_product: bool = False,\n seed: tp.Optional[int] = None,\n run_kwargs: tp.KwargsLike = None,\n **kwargs) -> PortfolioT:\n \"\"\"Simulate portfolio from random entry and exit signals.\n\n Generates signals based either on the number of signals `n` or the probability\n of encountering a signal `prob`.\n\n If `n` is set, see `vectorbt.signals.generators.RANDNX`.\n If `prob` is set, see `vectorbt.signals.generators.RPROBNX`.\n\n Based on `Portfolio.from_signals`.\"\"\"\n from vectorbt._settings import settings\n portfolio_cfg = settings['portfolio']\n\n close = to_pd_array(close)\n close_wrapper = ArrayWrapper.from_obj(close)\n if entry_prob is None:\n entry_prob = prob\n if exit_prob is None:\n exit_prob = prob\n if seed is None:\n seed = portfolio_cfg['seed']\n if run_kwargs is None:\n run_kwargs = {}\n\n if n is not None and (entry_prob is not None or exit_prob is not None):\n raise ValueError(\"Either n or entry_prob and exit_prob should be set\")\n if n is not None:\n rand = RANDNX.run(\n n=n,\n input_shape=close.shape,\n input_index=close_wrapper.index,\n input_columns=close_wrapper.columns,\n seed=seed,\n **run_kwargs\n )\n entries = rand.entries\n exits = rand.exits\n elif entry_prob is not None and exit_prob is not None:\n rprobnx = RPROBNX.run(\n entry_prob=entry_prob,\n exit_prob=exit_prob,\n param_product=param_product,\n input_shape=close.shape,\n input_index=close_wrapper.index,\n input_columns=close_wrapper.columns,\n seed=seed,\n **run_kwargs\n )\n entries = rprobnx.entries\n exits = rprobnx.exits\n else:\n raise ValueError(\"At least n or entry_prob and exit_prob should be set\")\n\n return cls.from_signals(close, entries, exits, seed=seed, **kwargs)\n\n @classmethod\n def from_signals(cls: tp.Type[PortfolioT],\n close: tp.ArrayLike,\n entries: tp.Optional[tp.ArrayLike] = None,\n exits: tp.Optional[tp.ArrayLike] = None,\n size: tp.Optional[tp.ArrayLike] = None,\n size_type: tp.Optional[tp.ArrayLike] = None,\n direction: tp.Optional[tp.ArrayLike] = None,\n price: tp.Optional[tp.ArrayLike] = None,\n fees: tp.Optional[tp.ArrayLike] = None,\n fixed_fees: tp.Optional[tp.ArrayLike] = None,\n slippage: tp.Optional[tp.ArrayLike] = None,\n min_size: tp.Optional[tp.ArrayLike] = None,\n max_size: tp.Optional[tp.ArrayLike] = None,\n reject_prob: tp.Optional[tp.ArrayLike] = None,\n lock_cash: tp.Optional[tp.ArrayLike] = None,\n allow_partial: tp.Optional[tp.ArrayLike] = None,\n raise_reject: tp.Optional[tp.ArrayLike] = None,\n log: tp.Optional[tp.ArrayLike] = None,\n accumulate: tp.Optional[tp.ArrayLike] = None,\n conflict_mode: tp.Optional[tp.ArrayLike] = None,\n close_first: tp.Optional[tp.ArrayLike] = None,\n val_price: tp.Optional[tp.ArrayLike] = None,\n open: tp.Optional[tp.ArrayLike] = None,\n high: tp.Optional[tp.ArrayLike] = None,\n low: tp.Optional[tp.ArrayLike] = None,\n sl_stop: tp.Optional[tp.ArrayLike] = None,\n sl_trail: tp.Optional[tp.ArrayLike] = None,\n tp_stop: tp.Optional[tp.ArrayLike] = None,\n stop_entry_price: 
tp.Optional[tp.ArrayLike] = None,\n stop_exit_price: tp.Optional[tp.ArrayLike] = None,\n stop_conflict_mode: tp.Optional[tp.ArrayLike] = None,\n stop_exit_mode: tp.Optional[tp.ArrayLike] = None,\n stop_update_mode: tp.Optional[tp.ArrayLike] = None,\n adjust_sl_func_nb: nb.AdjustSLFuncT = nb.no_adjust_sl_func_nb,\n adjust_sl_args: tp.Args = (),\n adjust_tp_func_nb: nb.AdjustTPFuncT = nb.no_adjust_tp_func_nb,\n adjust_tp_args: tp.Args = (),\n use_stops: tp.Optional[bool] = None,\n init_cash: tp.Optional[tp.ArrayLike] = None,\n cash_sharing: tp.Optional[bool] = None,\n call_seq: tp.Optional[tp.ArrayLike] = None,\n ffill_val_price: tp.Optional[bool] = None,\n update_value: tp.Optional[bool] = None,\n max_orders: tp.Optional[int] = None,\n max_logs: tp.Optional[int] = None,\n seed: tp.Optional[int] = None,\n group_by: tp.GroupByLike = None,\n broadcast_kwargs: tp.KwargsLike = None,\n wrapper_kwargs: tp.KwargsLike = None,\n freq: tp.Optional[tp.FrequencyLike] = None,\n **kwargs) -> PortfolioT:\n \"\"\"Simulate portfolio from entry and exit signals.\n\n Args:\n close (array_like): See `Portfolio.from_orders`.\n entries (array_like of bool): Boolean array of entry signals.\n Defaults to True. Will broadcast.\n\n Becomes a long signal if `direction` is `all` or `longonly`, otherwise short.\n exits (array_like of bool): Boolean array of exit signals.\n Defaults to False. Will broadcast.\n\n Becomes a short signal if `direction` is `all` or `longonly`, otherwise long.\n size (float or array_like): See `Portfolio.from_orders`.\n\n !!! note\n Negative size is not allowed. You should express direction using signals.\n size_type (SizeType or array_like): See `Portfolio.from_orders`.\n\n Only `SizeType.Amount`, `SizeType.Value`, and `SizeType.Percent` are supported.\n Other modes such as target percentage are not compatible with signals since\n their logic may contradict the direction of the signal.\n\n !!! note\n `SizeType.Percent` does not support position reversal. Switch to a single\n direction or use `close_first`.\n\n See warning in `Portfolio.from_orders`.\n direction (Direction or array_like): See `Portfolio.from_orders`.\n price (array_like of float): See `Portfolio.from_orders`.\n fees (float or array_like): See `Portfolio.from_orders`.\n fixed_fees (float or array_like): See `Portfolio.from_orders`.\n slippage (float or array_like): See `Portfolio.from_orders`.\n min_size (float or array_like): See `Portfolio.from_orders`.\n max_size (float or array_like): See `Portfolio.from_orders`.\n\n Will be partially filled if exceeded. 
You might not be able to properly close\n the position if accumulation is enabled and `max_size` is too low.\n reject_prob (float or array_like): See `Portfolio.from_orders`.\n lock_cash (bool or array_like): See `Portfolio.from_orders`.\n allow_partial (bool or array_like): See `Portfolio.from_orders`.\n raise_reject (bool or array_like): See `Portfolio.from_orders`.\n log (bool or array_like): See `Portfolio.from_orders`.\n accumulate (bool or array_like): Whether to accumulate signals.\n Will broadcast.\n\n Allows gradually increasing and decreasing positions using `size`.\n When enabled, `Portfolio.from_signals` behaves like `Portfolio.from_orders`.\n conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.\n Will broadcast.\n close_first (bool or array_like): Whether to close the position first before reversal.\n Will broadcast.\n\n Otherwise reverses the position with a single order and within the same tick.\n Only takes effect under `Direction.All`. Requires a second signal to enter\n the opposite position. This allows defining parameters such as `fixed_fees` for long\n and short positions separately.\n val_price (array_like of float): See `Portfolio.from_orders`.\n open (array_like of float): First asset price at each time step.\n Defaults to `np.nan`, which gets replaced by `close`. Will broadcast.\n\n Used solely for stop signals.\n high (array_like of float): Highest asset price at each time step.\n Defaults to `np.nan`, which gets replaced by the maximum out of `open` and `close`. Will broadcast.\n\n Used solely for stop signals.\n low (array_like of float): Lowest asset price at each time step.\n Defaults to `np.nan`, which gets replaced by the minimum out of `open` and `close`. Will broadcast.\n\n Used solely for stop signals.\n sl_stop (array_like of float): Stop loss.\n Will broadcast.\n\n A percentage below/above the acquisition price for long/short position.\n Note that 0.01 = 1%.\n sl_trail (array_like of bool): Whether `sl_stop` should be trailing.\n Will broadcast.\n tp_stop (array_like of float): Take profit.\n Will broadcast.\n\n A percentage above/below the acquisition price for long/short position.\n Note that 0.01 = 1%.\n stop_entry_price (StopEntryPrice or array_like): See `vectorbt.portfolio.enums.StopEntryPrice`.\n Will broadcast.\n\n If provided on per-element basis, gets applied upon entry.\n stop_exit_price (StopExitPrice or array_like): See `vectorbt.portfolio.enums.StopExitPrice`.\n Will broadcast.\n\n If provided on per-element basis, gets applied upon exit.\n stop_conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.\n Will broadcast.\n\n If provided on per-element basis, gets applied upon exit.\n stop_exit_mode (StopExitMode or array_like): See `vectorbt.portfolio.enums.StopExitMode`.\n Will broadcast.\n\n If provided on per-element basis, gets applied upon exit.\n stop_update_mode (StopUpdateMode or array_like): See `vectorbt.portfolio.enums.StopUpdateMode`.\n Will broadcast.\n\n Only has effect if `accumulate` is True.\n\n If provided on per-element basis, gets applied upon repeated entry.\n adjust_sl_func_nb (callable): Function to adjust stop loss.\n Defaults to `vectorbt.portfolio.nb.no_adjust_sl_func_nb`.\n\n Called for each element before each row.\n\n Should accept `vectorbt.portfolio.enums.AdjustSLContext` and `*adjust_sl_args`.\n Should return a tuple of a new stop value and trailing flag.\n adjust_sl_args (tuple): Packed arguments passed to `adjust_sl_func_nb`.\n Defaults to 
`()`.\n adjust_tp_func_nb (callable): Function to adjust take profit.\n Defaults to `vectorbt.portfolio.nb.no_adjust_tp_func_nb`.\n\n Called for each element before each row.\n\n Should accept `vectorbt.portfolio.enums.AdjustTPContext` and `*adjust_tp_args`.\n Should return a new stop value.\n adjust_tp_args (tuple): Packed arguments passed to `adjust_tp_func_nb`.\n Defaults to `()`.\n use_stops (bool): Whether to use stops.\n Defaults to None, which becomes True if any of the stops are not NaN or\n any of the adjustment functions are custom.\n\n Disable this to make simulation a bit faster for simple use cases.\n init_cash (InitCashMode, float or array_like of float): See `Portfolio.from_orders`.\n cash_sharing (bool): See `Portfolio.from_orders`.\n call_seq (CallSeqType or array_like): See `Portfolio.from_orders`.\n ffill_val_price (bool): See `Portfolio.from_orders`.\n update_value (bool): See `Portfolio.from_orders`.\n max_orders (int): See `Portfolio.from_orders`.\n max_logs (int): See `Portfolio.from_orders`.\n seed (int): See `Portfolio.from_orders`.\n group_by (any): See `Portfolio.from_orders`.\n broadcast_kwargs (dict): See `Portfolio.from_orders`.\n wrapper_kwargs (dict): See `Portfolio.from_orders`.\n freq (any): See `Portfolio.from_orders`.\n **kwargs: Keyword arguments passed to the `__init__` method.\n\n All broadcastable arguments will broadcast using `vectorbt.base.reshape_fns.broadcast`\n but keep original shape to utilize flexible indexing and to save memory.\n\n For defaults, see `portfolio` in `vectorbt._settings.settings`.\n\n !!! hint\n If you generated signals using close price, don't forget to shift your signals by one tick\n forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price\n that comes after the signal.\n\n Also see notes and hints for `Portfolio.from_orders`.\n\n ## Example\n\n Entry opens long, exit closes long:\n\n ```python-repl\n >>> import pandas as pd\n >>> import vectorbt as vbt\n\n >>> close = pd.Series([1, 2, 3, 4, 5])\n >>> entries = pd.Series([True, True, True, False, False])\n >>> exits = pd.Series([False, False, True, True, True])\n\n >>> pf = vbt.Portfolio.from_signals(\n ... close, entries, exits, size=1., direction='longonly')\n >>> pf.asset_flow()\n 0 1.0\n 1 0.0\n 2 0.0\n 3 -1.0\n 4 0.0\n dtype: float64\n ```\n\n Entry opens short, exit closes short:\n\n ```python-repl\n >>> pf = vbt.Portfolio.from_signals(\n ... close, entries, exits, size=1., direction='shortonly')\n >>> pf.asset_flow()\n 0 -1.0\n 1 0.0\n 2 0.0\n 3 1.0\n 4 0.0\n dtype: float64\n ```\n\n Reversal within one tick. Entry opens long and closes short, exit closes long and opens short:\n\n ```python-repl\n >>> pf = vbt.Portfolio.from_signals(\n ... close, entries, exits, size=1., direction='all')\n >>> pf.asset_flow()\n 0 1.0\n 1 0.0\n 2 0.0\n 3 -2.0\n 4 0.0\n dtype: float64\n ```\n\n Reversal within two ticks. First signal closes position, second signal opens the opposite one:\n\n ```python-repl\n >>> pf = vbt.Portfolio.from_signals(\n ... close, entries, exits, size=1., direction='all',\n ... close_first=True)\n >>> pf.asset_flow()\n 0 1.0\n 1 0.0\n 2 0.0\n 3 -1.0\n 4 -1.0\n dtype: float64\n ```\n\n If entry and exit, chooses exit:\n\n ```python-repl\n >>> pf = vbt.Portfolio.from_signals(\n ... close, entries, exits, size=1., direction='all',\n ... 
We can implement our own stop loss or take profit, or adjust the existing one at each time step.\n        Let's implement [stepped stop-loss](https://www.freqtrade.io/en/stable/strategy-advanced/#stepped-stoploss):\n\n        ```python-repl\n        >>> from numba import njit\n\n        >>> @njit\n        ... def adjust_sl_func_nb(c):\n        ...     current_profit = (c.val_price_now - c.init_price) / c.init_price\n        ...     if current_profit >= 0.40:\n        ...         return 0.25, True\n        ...     elif current_profit >= 0.25:\n        ...         return 0.15, True\n        ...     elif current_profit >= 0.20:\n        ...         return 0.07, True\n        ...     return c.curr_stop, c.curr_trail\n\n        >>> close = pd.Series([10, 11, 12, 11, 10])\n        >>> pf = vbt.Portfolio.from_signals(close, adjust_sl_func_nb=adjust_sl_func_nb)\n        >>> pf.asset_flow()\n        0    10.0\n        1     0.0\n        2     0.0\n        3   -10.0  # 7% from 12 hit\n        4    11.0\n        dtype: float64\n        ```\n\n        
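`adjust_tp_func_nb` can be used in the same manner (a minimal sketch along the same\n        lines, added for illustration; assumes `AdjustTPContext` exposes `curr_stop` like its\n        SL counterpart does):\n\n        ```python-repl\n        >>> @njit\n        ... def adjust_tp_func_nb(c):\n        ...     return c.curr_stop  # leave take profit unchanged\n\n        >>> pf = vbt.Portfolio.from_signals(close, adjust_tp_func_nb=adjust_tp_func_nb)\n        ```\n\n        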
Combine multiple exit conditions. Exit early if the price hits some threshold before an actual exit:\n\n        ```python-repl\n        >>> close = pd.Series([10, 11, 12, 13, 14, 15])\n        >>> entries = pd.Series([True, True, True, False, False, False])\n        >>> exits = pd.Series([False, False, False, True, True, True])\n\n        >>> # 1. Remove adjacent entries and exits\n        >>> # since stop condition refers only to the first signal\n        >>> entries, exits = entries.vbt.signals.clean(exits)\n        >>> entries\n        0     True\n        1    False\n        2    False\n        3    False\n        4    False\n        5    False\n        dtype: bool\n        >>> exits\n        0    False\n        1    False\n        2    False\n        3     True\n        4    False\n        5    False\n        dtype: bool\n\n        >>> # 2. Find stop exits\n        >>> stop_exits = entries.vbt.signals.generate_stop_exits(close, 0.1)\n        >>> stop_exits\n        0    False\n        1     True\n        2    False\n        3    False\n        4    False\n        5    False\n        dtype: bool\n\n        >>> # 3. Combine exits\n        >>> exits = exits | stop_exits\n        >>> exits\n        0    False\n        1     True\n        2    False\n        3     True\n        4    False\n        5    False\n        dtype: bool\n\n        >>> # 4. Pick the first exit after each entry\n        >>> exits = exits.vbt.signals.first(reset_by=entries, allow_gaps=True)\n        >>> exits\n        0    False\n        1     True\n        2    False\n        3    False\n        4    False\n        5    False\n        dtype: bool\n\n        >>> # 5. Simulate portfolio\n        >>> pf = vbt.Portfolio.from_signals(close, entries, exits)\n        >>> pf.asset_flow()\n        0    10.0\n        1   -10.0\n        2     0.0\n        3     0.0\n        4     0.0\n        5     0.0\n        dtype: float64\n        ```\n\n        !!! note\n            By cleaning signals, we lose information. Moreover, this automatically assumes\n            that each entry/exit signal succeeds (= order gets filled). Use this with caution,\n            and consider rewriting your strategy with `Portfolio.from_order_func`, which is the\n            preferred way of defining complex logic in vectorbt.\n        \"\"\"\n        # Get defaults\n        from vectorbt._settings import settings\n        portfolio_cfg = settings['portfolio']\n\n        if entries is None:\n            entries = True\n        if exits is None:\n            exits = False\n        if size is None:\n            size = portfolio_cfg['size']\n        if size_type is None:\n            size_type = portfolio_cfg['size_type']\n        size_type = map_enum_fields(size_type, SizeType)\n        if direction is None:\n            direction = portfolio_cfg['signal_direction']\n        direction = map_enum_fields(direction, Direction)\n        if price is None:\n            price = np.inf\n        if fees is None:\n            fees = portfolio_cfg['fees']\n        if fixed_fees is None:\n            fixed_fees = portfolio_cfg['fixed_fees']\n        if slippage is None:\n            slippage = portfolio_cfg['slippage']\n        if min_size is None:\n            min_size = portfolio_cfg['min_size']\n        if max_size is None:\n            max_size = portfolio_cfg['max_size']\n        if reject_prob is None:\n            reject_prob = portfolio_cfg['reject_prob']\n        if lock_cash is None:\n            lock_cash = portfolio_cfg['lock_cash']\n        if allow_partial is None:\n            allow_partial = portfolio_cfg['allow_partial']\n        if raise_reject is None:\n            raise_reject = portfolio_cfg['raise_reject']\n        if log is None:\n            log = portfolio_cfg['log']\n        if accumulate is None:\n            accumulate = portfolio_cfg['accumulate']\n        if conflict_mode is None:\n            conflict_mode = portfolio_cfg['conflict_mode']\n        conflict_mode = map_enum_fields(conflict_mode, ConflictMode)\n        if close_first is None:\n            close_first = portfolio_cfg['close_first']\n        if val_price is None:\n            val_price = portfolio_cfg['val_price']\n        if open is None:\n            open = np.nan\n        if high is None:\n            high = np.nan\n        if low is None:\n            low = np.nan\n        if sl_stop is None:\n            sl_stop = portfolio_cfg['sl_stop']\n        if sl_trail is None:\n            sl_trail = portfolio_cfg['sl_trail']\n        if tp_stop is None:\n            tp_stop = portfolio_cfg['tp_stop']\n        if stop_entry_price is None:\n            stop_entry_price = portfolio_cfg['stop_entry_price']\n        stop_entry_price = 
map_enum_fields(stop_entry_price, StopEntryPrice)\n if stop_exit_price is None:\n stop_exit_price = portfolio_cfg['stop_exit_price']\n stop_exit_price = map_enum_fields(stop_exit_price, StopExitPrice)\n if stop_conflict_mode is None:\n stop_conflict_mode = portfolio_cfg['stop_conflict_mode']\n stop_conflict_mode = map_enum_fields(stop_conflict_mode, ConflictMode)\n if stop_exit_mode is None:\n stop_exit_mode = portfolio_cfg['stop_exit_mode']\n stop_exit_mode = map_enum_fields(stop_exit_mode, StopExitMode)\n if stop_update_mode is None:\n stop_update_mode = portfolio_cfg['stop_update_mode']\n stop_update_mode = map_enum_fields(stop_update_mode, StopUpdateMode)\n if use_stops is None:\n use_stops = portfolio_cfg['use_stops']\n if use_stops is None:\n if isinstance(sl_stop, float) and \\\n np.isnan(sl_stop) and \\\n isinstance(tp_stop, float) and \\\n np.isnan(tp_stop) and \\\n adjust_sl_func_nb == nb.no_adjust_sl_func_nb and \\\n adjust_tp_func_nb == nb.no_adjust_tp_func_nb:\n use_stops = False\n else:\n use_stops = True\n\n if init_cash is None:\n init_cash = portfolio_cfg['init_cash']\n if isinstance(init_cash, str):\n init_cash = map_enum_fields(init_cash, InitCashMode)\n if isinstance(init_cash, int) and init_cash in InitCashMode:\n init_cash_mode = init_cash\n init_cash = np.inf\n else:\n init_cash_mode = None\n if cash_sharing is None:\n cash_sharing = portfolio_cfg['cash_sharing']\n if cash_sharing and group_by is None:\n group_by = True\n if call_seq is None:\n call_seq = portfolio_cfg['call_seq']\n auto_call_seq = False\n if isinstance(call_seq, str):\n call_seq = map_enum_fields(call_seq, CallSeqType)\n if isinstance(call_seq, int):\n if call_seq == CallSeqType.Auto:\n call_seq = CallSeqType.Default\n auto_call_seq = True\n if ffill_val_price is None:\n ffill_val_price = portfolio_cfg['ffill_val_price']\n if update_value is None:\n update_value = portfolio_cfg['update_value']\n if seed is None:\n seed = portfolio_cfg['seed']\n if seed is not None:\n set_seed(seed)\n if freq is None:\n freq = portfolio_cfg['freq']\n if broadcast_kwargs is None:\n broadcast_kwargs = {}\n if wrapper_kwargs is None:\n wrapper_kwargs = {}\n if not wrapper_kwargs.get('group_select', True) and cash_sharing:\n raise ValueError(\"group_select cannot be disabled if cash_sharing=True\")\n\n # Broadcast inputs\n # Only close is broadcast, others can remain unchanged thanks to flexible indexing\n broadcastable_args = (\n close,\n entries,\n exits,\n size,\n price,\n size_type,\n direction,\n fees,\n fixed_fees,\n slippage,\n min_size,\n max_size,\n reject_prob,\n lock_cash,\n allow_partial,\n raise_reject,\n log,\n accumulate,\n conflict_mode,\n close_first,\n val_price,\n open,\n high,\n low,\n sl_stop,\n sl_trail,\n tp_stop,\n stop_entry_price,\n stop_exit_price,\n stop_conflict_mode,\n stop_exit_mode,\n stop_update_mode\n )\n broadcast_kwargs = merge_dicts(dict(\n keep_raw=[False] + [True] * (len(broadcastable_args) - 1),\n require_kwargs=dict(requirements='W')\n ), broadcast_kwargs)\n broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs)\n close = broadcasted_args[0]\n if not checks.is_pandas(close):\n close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)\n target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)\n wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)\n cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)\n init_cash = np.require(np.broadcast_to(init_cash, 
(len(cs_group_lens),)), dtype=np.float_)\n group_lens = wrapper.grouper.get_group_lens(group_by=group_by)\n if checks.is_any_array(call_seq):\n call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))\n else:\n call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)\n if max_orders is None:\n max_orders = target_shape_2d[0] * target_shape_2d[1]\n if max_logs is None:\n max_logs = target_shape_2d[0] * target_shape_2d[1]\n if not np.any(log):\n max_logs = 1\n\n # Perform calculation\n order_records, log_records = nb.simulate_from_signals_nb(\n target_shape_2d,\n to_2d_array(close),\n cs_group_lens, # group only if cash sharing is enabled to speed up\n init_cash,\n call_seq,\n *map(np.asarray, broadcasted_args[1:]),\n adjust_sl_func_nb,\n adjust_sl_args,\n adjust_tp_func_nb,\n adjust_tp_args,\n use_stops,\n auto_call_seq,\n ffill_val_price,\n update_value,\n max_orders,\n max_logs,\n close.ndim == 2\n )\n\n # Create an instance\n return cls(\n wrapper,\n close,\n order_records,\n log_records,\n init_cash if init_cash_mode is None else init_cash_mode,\n cash_sharing,\n call_seq,\n **kwargs\n )\n\n @classmethod\n def from_orders(cls: tp.Type[PortfolioT],\n close: tp.ArrayLike,\n size: tp.Optional[tp.ArrayLike] = None,\n size_type: tp.Optional[tp.ArrayLike] = None,\n direction: tp.Optional[tp.ArrayLike] = None,\n price: tp.Optional[tp.ArrayLike] = None,\n fees: tp.Optional[tp.ArrayLike] = None,\n fixed_fees: tp.Optional[tp.ArrayLike] = None,\n slippage: tp.Optional[tp.ArrayLike] = None,\n min_size: tp.Optional[tp.ArrayLike] = None,\n max_size: tp.Optional[tp.ArrayLike] = None,\n reject_prob: tp.Optional[tp.ArrayLike] = None,\n lock_cash: tp.Optional[tp.ArrayLike] = None,\n allow_partial: tp.Optional[tp.ArrayLike] = None,\n raise_reject: tp.Optional[tp.ArrayLike] = None,\n log: tp.Optional[tp.ArrayLike] = None,\n val_price: tp.Optional[tp.ArrayLike] = None,\n init_cash: tp.Optional[tp.ArrayLike] = None,\n cash_sharing: tp.Optional[bool] = None,\n call_seq: tp.Optional[tp.ArrayLike] = None,\n ffill_val_price: tp.Optional[bool] = None,\n update_value: tp.Optional[bool] = None,\n max_orders: tp.Optional[int] = None,\n max_logs: tp.Optional[int] = None,\n seed: tp.Optional[int] = None,\n group_by: tp.GroupByLike = None,\n broadcast_kwargs: tp.KwargsLike = None,\n wrapper_kwargs: tp.KwargsLike = None,\n freq: tp.Optional[tp.FrequencyLike] = None,\n **kwargs) -> PortfolioT:\n \"\"\"Simulate portfolio from orders.\n\n Args:\n close (array_like): Last asset price at each time step.\n Will broadcast.\n\n Used for calculating unrealized P&L and portfolio value.\n size (float or array_like): Size to order.\n See `vectorbt.portfolio.enums.Order.size`. Will broadcast.\n size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.\n See `vectorbt.portfolio.enums.Order.size_type`. Will broadcast.\n\n !!! note\n `SizeType.Percent` does not support position reversal. Switch to a single direction.\n\n !!! warning\n Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.\n To execute sell orders before buy orders, the value of each order in the group\n needs to be approximated in advance. But since `SizeType.Percent` depends\n upon the cash balance, which cannot be calculated in advance since it may change\n after each order, this can yield a non-optimal call sequence.\n direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.\n See `vectorbt.portfolio.enums.Order.direction`. 
Will broadcast.\n price (array_like of float): Order price.\n See `vectorbt.portfolio.enums.Order.price`. Defaults to `np.inf`. Will broadcast.\n\n !!! note\n Make sure to use the same timestamp for all order prices in the group with cash sharing\n and `call_seq` set to `CallSeqType.Auto`.\n fees (float or array_like): Fees in percentage of the order value.\n See `vectorbt.portfolio.enums.Order.fees`. Will broadcast.\n fixed_fees (float or array_like): Fixed amount of fees to pay per order.\n See `vectorbt.portfolio.enums.Order.fixed_fees`. Will broadcast.\n slippage (float or array_like): Slippage in percentage of price.\n See `vectorbt.portfolio.enums.Order.slippage`. Will broadcast.\n min_size (float or array_like): Minimum size for an order to be accepted.\n See `vectorbt.portfolio.enums.Order.min_size`. Will broadcast.\n max_size (float or array_like): Maximum size for an order.\n See `vectorbt.portfolio.enums.Order.max_size`. Will broadcast.\n\n Will be partially filled if exceeded.\n reject_prob (float or array_like): Order rejection probability.\n See `vectorbt.portfolio.enums.Order.reject_prob`. Will broadcast.\n lock_cash (bool or array_like): Whether to lock cash when shorting.\n See `vectorbt.portfolio.enums.Order.lock_cash`. Will broadcast.\n allow_partial (bool or array_like): Whether to allow partial fills.\n See `vectorbt.portfolio.enums.Order.allow_partial`. Will broadcast.\n\n Does not apply when size is `np.inf`.\n raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.\n See `vectorbt.portfolio.enums.Order.raise_reject`. Will broadcast.\n log (bool or array_like): Whether to log orders.\n See `vectorbt.portfolio.enums.Order.log`. Will broadcast.\n val_price (array_like of float): Asset valuation price.\n Will broadcast.\n\n * Any `-np.inf` element is replaced by the latest valuation price (the previous `close` or\n the latest known valuation price if `ffill_val_price`).\n * Any `np.inf` element is replaced by the current order price.\n\n Used at the time of decision making to calculate value of each asset in the group,\n for example, to convert target value into target amount.\n\n !!! note\n In contrast to `Portfolio.from_order_func`, order price is known beforehand (kind of),\n thus `val_price` is set to the current order price (using `np.inf`) by default.\n To valuate using previous close, set it in the settings to `-np.inf`.\n\n !!! note\n Make sure to use timestamp for `val_price` that comes before timestamps of\n all orders in the group with cash sharing (previous `close` for example),\n otherwise you're cheating yourself.\n init_cash (InitCashMode, float or array_like of float): Initial capital.\n\n By default, will broadcast to the number of columns.\n If cash sharing is enabled, will broadcast to the number of groups.\n See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.\n\n !!! note\n Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized\n to set the same initial cash for all columns/groups. Changing grouping\n will change the initial cash, so be aware when indexing.\n cash_sharing (bool): Whether to share cash within the same group.\n\n If `group_by` is None, `group_by` becomes True to form a single group with cash sharing.\n\n !!! 
warning\n Introduces cross-asset dependencies.\n\n This method presumes that in a group of assets that share the same capital all\n orders will be executed within the same tick and retain their price regardless\n of their position in the queue, even though they depend upon each other and thus\n cannot be executed in parallel.\n call_seq (CallSeqType or array_like): Default sequence of calls per row and group.\n\n Each value in this sequence should indicate the position of column in the group to\n call next. Processing of `call_seq` goes always from left to right.\n For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.\n\n * Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.\n * Set to array to specify custom sequence. Will not broadcast.\n\n If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.\n Calculates value of all orders per row and group, and sorts them by this value.\n Sell orders will be executed first to release funds for buy orders.\n\n !!! warning\n `CallSeqType.Auto` should be used with caution:\n\n * It not only presumes that order prices are known beforehand, but also that\n orders can be executed in arbitrary order and still retain their price.\n In reality, this is hardly the case: after processing one asset, some time\n has passed and the price for other assets might have already changed.\n * Even if you're able to specify a slippage large enough to compensate for\n this behavior, slippage itself should depend upon execution order.\n This method doesn't let you do that.\n * If one order is rejected, it still may execute next orders and possibly\n leave them without required funds.\n\n For more control, use `Portfolio.from_order_func`.\n ffill_val_price (bool): Whether to track valuation price only if it's known.\n\n Otherwise, unknown `close` will lead to NaN in valuation price at the next timestamp.\n update_value (bool): Whether to update group value after each filled order.\n max_orders (int): Size of the order records array.\n Defaults to the number of elements in the broadcasted shape.\n\n Set to a lower number if you run out of memory.\n max_logs (int): Size of the log records array.\n Defaults to the number of elements in the broadcasted shape if any of the `log` is True,\n otherwise to 1.\n\n Set to a lower number if you run out of memory.\n seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.\n group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.\n wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.\n freq (any): Index frequency in case it cannot be parsed from `close`.\n **kwargs: Keyword arguments passed to the `__init__` method.\n\n All broadcastable arguments will broadcast using `vectorbt.base.reshape_fns.broadcast`\n but keep original shape to utilize flexible indexing and to save memory.\n\n For defaults, see `portfolio` in `vectorbt._settings.settings`.\n\n !!! note\n When `call_seq` is not `CallSeqType.Auto`, at each timestamp, processing of the assets in\n a group goes strictly in order defined in `call_seq`. This order can't be changed dynamically.\n\n This has one big implication for this particular method: the last asset in the call stack\n cannot be processed until other assets are processed. 
This is the reason why rebalancing\n cannot work properly in this setting: one has to specify percentages for all assets beforehand\n and then tweak the processing order to sell to-be-sold assets first in order to release funds\n for to-be-bought assets. This can be automatically done by using `CallSeqType.Auto`.\n\n !!! hint\n All broadcastable arguments can be set per frame, series, row, column, or element.\n\n ## Example\n\n Buy 10 units each tick:\n\n ```python-repl\n >>> import pandas as pd\n >>> import vectorbt as vbt\n\n >>> close = pd.Series([1, 2, 3, 4, 5])\n >>> pf = vbt.Portfolio.from_orders(close, 10)\n\n >>> pf.assets()\n 0 10.0\n 1 20.0\n 2 30.0\n 3 40.0\n 4 40.0\n dtype: float64\n >>> pf.cash()\n 0 90.0\n 1 70.0\n 2 40.0\n 3 0.0\n 4 0.0\n dtype: float64\n ```\n\n Reverse each position by first closing it:\n\n ```python-repl\n >>> size = [1, 0, -1, 0, 1]\n >>> pf = vbt.Portfolio.from_orders(close, size, size_type='targetpercent')\n\n >>> pf.assets()\n 0 100.000000\n 1 0.000000\n 2 -66.666667\n 3 0.000000\n 4 26.666667\n dtype: float64\n >>> pf.cash()\n 0 0.000000\n 1 200.000000\n 2 400.000000\n 3 133.333333\n 4 0.000000\n dtype: float64\n ```\n\n Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example:\n It's more compact but has less control over execution:\n\n ```python-repl\n >>> import numpy as np\n\n >>> np.random.seed(42)\n >>> close = pd.DataFrame(np.random.uniform(1, 10, size=(5, 3)))\n >>> size = pd.Series(np.full(5, 1/3)) # each column 33.3%\n >>> size[1::2] = np.nan # skip every second tick\n\n >>> pf = vbt.Portfolio.from_orders(\n ... close, # acts both as reference and order price here\n ... size,\n ... size_type='targetpercent',\n ... call_seq='auto', # first sell then buy\n ... group_by=True, # one group\n ... cash_sharing=True, # assets share the same cash\n ... fees=0.001, fixed_fees=1., slippage=0.001 # costs\n ... 
)\n\n        >>> pf.asset_value(group_by=False).vbt.plot()\n        ```\n\n        \n        \"\"\"\n        # Get defaults\n        from vectorbt._settings import settings\n        portfolio_cfg = settings['portfolio']\n\n        if size is None:\n            size = portfolio_cfg['size']\n        if size_type is None:\n            size_type = portfolio_cfg['size_type']\n        size_type = map_enum_fields(size_type, SizeType)\n        if direction is None:\n            direction = portfolio_cfg['order_direction']\n        direction = map_enum_fields(direction, Direction)\n        if price is None:\n            price = np.inf\n        if fees is None:\n            fees = portfolio_cfg['fees']\n        if fixed_fees is None:\n            fixed_fees = portfolio_cfg['fixed_fees']\n        if slippage is None:\n            slippage = portfolio_cfg['slippage']\n        if min_size is None:\n            min_size = portfolio_cfg['min_size']\n        if max_size is None:\n            max_size = portfolio_cfg['max_size']\n        if reject_prob is None:\n            reject_prob = portfolio_cfg['reject_prob']\n        if lock_cash is None:\n            lock_cash = portfolio_cfg['lock_cash']\n        if allow_partial is None:\n            allow_partial = portfolio_cfg['allow_partial']\n        if raise_reject is None:\n            raise_reject = portfolio_cfg['raise_reject']\n        if log is None:\n            log = portfolio_cfg['log']\n        if val_price is None:\n            val_price = portfolio_cfg['val_price']\n        if init_cash is None:\n            init_cash = portfolio_cfg['init_cash']\n        if isinstance(init_cash, str):\n            init_cash = map_enum_fields(init_cash, InitCashMode)\n        if isinstance(init_cash, int) and init_cash in InitCashMode:\n            init_cash_mode = init_cash\n            init_cash = np.inf\n        else:\n            init_cash_mode = None\n        if cash_sharing is None:\n            cash_sharing = portfolio_cfg['cash_sharing']\n        if cash_sharing and group_by is None:\n            group_by = True\n        if call_seq is None:\n            call_seq = portfolio_cfg['call_seq']\n        auto_call_seq = False\n        if isinstance(call_seq, str):\n            call_seq = map_enum_fields(call_seq, CallSeqType)\n        if isinstance(call_seq, int):\n            if call_seq == CallSeqType.Auto:\n                call_seq = CallSeqType.Default\n                auto_call_seq = True\n        if ffill_val_price is None:\n            ffill_val_price = portfolio_cfg['ffill_val_price']\n        if update_value is None:\n            update_value = portfolio_cfg['update_value']\n        if seed is None:\n            seed = portfolio_cfg['seed']\n        if seed is not None:\n            set_seed(seed)\n        if freq is None:\n            freq = portfolio_cfg['freq']\n        if broadcast_kwargs is None:\n            broadcast_kwargs = {}\n        if wrapper_kwargs is None:\n            wrapper_kwargs = {}\n        if not wrapper_kwargs.get('group_select', True) and cash_sharing:\n            raise ValueError(\"group_select cannot be disabled if cash_sharing=True\")\n\n        # Broadcast inputs\n        # Only close is broadcast, others can remain unchanged thanks to flexible indexing\n        broadcastable_args = (\n            close,\n            size,\n            price,\n            size_type,\n            direction,\n            fees,\n            fixed_fees,\n            slippage,\n            min_size,\n            max_size,\n            reject_prob,\n            lock_cash,\n            allow_partial,\n            raise_reject,\n            log,\n            val_price\n        )\n        broadcast_kwargs = merge_dicts(dict(\n            keep_raw=[False] + [True] * (len(broadcastable_args) - 1),\n            require_kwargs=dict(requirements='W')\n        ), broadcast_kwargs)\n        broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs)\n        close = broadcasted_args[0]\n        if not checks.is_pandas(close):\n            close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)\n        target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)\n        wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)\n        cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)\n        init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), 
dtype=np.float_)\n group_lens = wrapper.grouper.get_group_lens(group_by=group_by)\n if checks.is_any_array(call_seq):\n call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))\n else:\n call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)\n if max_orders is None:\n max_orders = target_shape_2d[0] * target_shape_2d[1]\n if max_logs is None:\n max_logs = target_shape_2d[0] * target_shape_2d[1]\n if not np.any(log):\n max_logs = 1\n\n # Perform calculation\n order_records, log_records = nb.simulate_from_orders_nb(\n target_shape_2d,\n to_2d_array(close),\n cs_group_lens, # group only if cash sharing is enabled to speed up\n init_cash,\n call_seq,\n *map(np.asarray, broadcasted_args[1:]),\n auto_call_seq,\n ffill_val_price,\n update_value,\n max_orders,\n max_logs,\n close.ndim == 2\n )\n\n # Create an instance\n return cls(\n wrapper,\n close,\n order_records,\n log_records,\n init_cash if init_cash_mode is None else init_cash_mode,\n cash_sharing,\n call_seq,\n **kwargs\n )\n\n @classmethod\n def from_order_func(cls: tp.Type[PortfolioT],\n close: tp.ArrayLike,\n order_func_nb: nb.OrderFuncT,\n *order_args,\n target_shape: tp.Optional[tp.RelaxedShape] = None,\n keys: tp.Optional[tp.IndexLike] = None,\n init_cash: tp.Optional[tp.ArrayLike] = None,\n cash_sharing: tp.Optional[bool] = None,\n call_seq: tp.Optional[tp.ArrayLike] = None,\n segment_mask: tp.Optional[tp.ArrayLike] = None,\n pre_sim_func_nb: nb.PreSimFuncT = nb.no_pre_func_nb,\n pre_sim_args: tp.Args = (),\n post_sim_func_nb: nb.PostSimFuncT = nb.no_post_func_nb,\n post_sim_args: tp.Args = (),\n pre_group_func_nb: nb.PreGroupFuncT = nb.no_pre_func_nb,\n pre_group_args: tp.Args = (),\n post_group_func_nb: nb.PostGroupFuncT = nb.no_post_func_nb,\n post_group_args: tp.Args = (),\n pre_row_func_nb: nb.PreRowFuncT = nb.no_pre_func_nb,\n pre_row_args: tp.Args = (),\n post_row_func_nb: nb.PostRowFuncT = nb.no_post_func_nb,\n post_row_args: tp.Args = (),\n pre_segment_func_nb: nb.PreSegmentFuncT = nb.no_pre_func_nb,\n pre_segment_args: tp.Args = (),\n post_segment_func_nb: nb.PostSegmentFuncT = nb.no_post_func_nb,\n post_segment_args: tp.Args = (),\n post_order_func_nb: nb.PostOrderFuncT = nb.no_post_func_nb,\n post_order_args: tp.Args = (),\n call_pre_segment: tp.Optional[bool] = None,\n call_post_segment: tp.Optional[bool] = None,\n ffill_val_price: tp.Optional[bool] = None,\n update_value: tp.Optional[bool] = None,\n fill_pos_record: tp.Optional[bool] = None,\n row_wise: tp.Optional[bool] = None,\n use_numba: tp.Optional[bool] = None,\n max_orders: tp.Optional[int] = None,\n max_logs: tp.Optional[int] = None,\n seed: tp.Optional[int] = None,\n group_by: tp.GroupByLike = None,\n broadcast_kwargs: tp.KwargsLike = None,\n wrapper_kwargs: tp.KwargsLike = None,\n freq: tp.Optional[tp.FrequencyLike] = None,\n **kwargs) -> PortfolioT:\n \"\"\"Build portfolio from a custom order function.\n\n For details, see `vectorbt.portfolio.nb.simulate_nb`.\n\n if `row_wise` is True, also see `vectorbt.portfolio.nb.simulate_row_wise_nb`.\n\n Args:\n close (array_like): Last asset price at each time step.\n Will broadcast to `target_shape`.\n\n Used for calculating unrealized P&L and portfolio value.\n order_func_nb (callable): Order generation function.\n *order_args: Arguments passed to `order_func_nb`.\n target_shape (tuple): Target shape to iterate over. 
Defaults to `close.shape`.\n keys (sequence): Outermost column level.\n\n Each element should correspond to one iteration over columns in `close`.\n Should be set only if `target_shape` is bigger than `close.shape`.\n init_cash (InitCashMode, float or array_like of float): Initial capital.\n\n See `init_cash` in `Portfolio.from_orders`.\n cash_sharing (bool): Whether to share cash within the same group.\n\n If `group_by` is None, `group_by` becomes True to form a single group with cash sharing.\n\n !!! warning\n Introduces cross-asset dependencies.\n call_seq (CallSeqType or array_like): Default sequence of calls per row and group.\n\n * Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.\n * Set to array to specify custom sequence. Will not broadcast.\n\n !!! note\n CallSeqType.Auto should be implemented manually.\n Use `sort_call_seq_nb` in `pre_segment_func_nb`.\n segment_mask (int or array_like of bool): Mask of whether a particular segment should be executed.\n\n Supplying an integer will activate every n-th row (just for convenience).\n Supplying a boolean will broadcast to the number of rows and groups.\n pre_sim_func_nb (callable): Function called before simulation.\n Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.\n pre_sim_args (tuple): Packed arguments passed to `pre_sim_func_nb`.\n Defaults to `()`.\n post_sim_func_nb (callable): Function called after simulation.\n Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.\n post_sim_args (tuple): Packed arguments passed to `post_sim_func_nb`.\n Defaults to `()`.\n pre_group_func_nb (callable): Function called before each group.\n Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.\n\n Called only if `row_wise` is False.\n pre_group_args (tuple): Packed arguments passed to `pre_group_func_nb`.\n Defaults to `()`.\n post_group_func_nb (callable): Function called after each group.\n Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.\n\n Called only if `row_wise` is False.\n post_group_args (tuple): Packed arguments passed to `post_group_func_nb`.\n Defaults to `()`.\n pre_row_func_nb (callable): Function called before each row.\n Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.\n\n Called only if `row_wise` is True.\n pre_row_args (tuple): Packed arguments passed to `pre_row_func_nb`.\n Defaults to `()`.\n post_row_func_nb (callable): Function called after each row.\n Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.\n\n Called only if `row_wise` is True.\n post_row_args (tuple): Packed arguments passed to `post_row_func_nb`.\n Defaults to `()`.\n pre_segment_func_nb (callable): Function called before each segment.\n Defaults to `vectorbt.portfolio.nb.no_pre_func_nb`.\n pre_segment_args (tuple): Packed arguments passed to `pre_segment_func_nb`.\n Defaults to `()`.\n post_segment_func_nb (callable): Function called after each segment.\n Defaults to `vectorbt.portfolio.nb.no_post_func_nb`.\n post_segment_args (tuple): Packed arguments passed to `post_segment_func_nb`.\n Defaults to `()`.\n post_order_func_nb (callable): Callback that is called after the order has been processed.\n post_order_args (tuple): Packed arguments passed to `post_order_func_nb`.\n Defaults to `()`.\n call_pre_segment (bool): Whether to call `pre_segment_func_nb` regardless of `segment_mask`.\n call_post_segment (bool): Whether to call `post_segment_func_nb` regardless of `segment_mask`.\n ffill_val_price (bool): Whether to track valuation price only if it's known.\n\n Otherwise, unknown `close` will lead to NaN in valuation price at 
the next timestamp.\n            update_value (bool): Whether to update group value after each filled order.\n            fill_pos_record (bool): Whether to fill position record.\n\n                Disable this to make simulation a bit faster for simple use cases.\n            row_wise (bool): Whether to iterate over rows rather than columns/groups.\n\n                See `vectorbt.portfolio.nb.simulate_row_wise_nb`.\n            use_numba (bool): Whether to run the main simulation function using Numba.\n\n                !!! note\n                    Disabling it does not disable Numba for other functions.\n                    If necessary, you should ensure that every other function does not use Numba as well.\n                    You can do this by using the `py_func` attribute of that function.\n                    Or, you could disable Numba globally by doing `os.environ['NUMBA_DISABLE_JIT'] = '1'`.\n            max_orders (int): Size of the order records array.\n                Defaults to the number of elements in the broadcasted shape.\n\n                Set to a lower number if you run out of memory.\n            max_logs (int): Size of the log records array.\n                Defaults to the number of elements in the broadcasted shape.\n\n                Set to a lower number if you run out of memory.\n            seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.\n            group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n            broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.\n            wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.\n            freq (any): Index frequency in case it cannot be parsed from `close`.\n            **kwargs: Keyword arguments passed to the `__init__` method.\n\n        For defaults, see `portfolio` in `vectorbt._settings.settings`.\n\n        !!! note\n            All passed functions should be Numba-compiled.\n\n            Objects passed as arguments to both functions will not broadcast to `target_shape`\n            as their purpose is unknown. You should broadcast manually or use flexible indexing.\n\n            Also see notes on `Portfolio.from_orders`.\n\n        !!! note\n            In contrast to other methods, the valuation price is previous `close`\n            instead of order price, since the price of an order is unknown before call.\n            You can still set valuation price explicitly in `pre_segment_func_nb`.\n\n        ## Example\n\n        Buy 10 units each tick using closing price:\n\n        ```python-repl\n        >>> import pandas as pd\n        >>> from numba import njit\n        >>> import vectorbt as vbt\n        >>> from vectorbt.portfolio.nb import order_nb\n\n        >>> @njit\n        ... def order_func_nb(c, size):\n        ...     return order_nb(size=size)\n\n        >>> close = pd.Series([1, 2, 3, 4, 5])\n        >>> pf = vbt.Portfolio.from_order_func(close, order_func_nb, 10)\n\n        >>> pf.assets()\n        0    10.0\n        1    20.0\n        2    30.0\n        3    40.0\n        4    40.0\n        dtype: float64\n        >>> pf.cash()\n        0    90.0\n        1    70.0\n        2    40.0\n        3     0.0\n        4     0.0\n        dtype: float64\n        ```\n\n        Reverse each position by first closing it. Keep state of last position to determine\n        which position to open next (just as an example, there are easier ways to do this):\n\n        ```python-repl\n        >>> import numpy as np\n        >>> from vectorbt.portfolio.nb import close_position_nb\n\n        >>> @njit\n        ... def pre_group_func_nb(c):\n        ...     last_pos_state = np.array([-1])\n        ...     return (last_pos_state,)\n\n        >>> @njit\n        ... def order_func_nb(c, last_pos_state):\n        ...     if c.position_now != 0:\n        ...         return close_position_nb()\n        ...\n        ...     if last_pos_state[0] == 1:\n        ...         size = -np.inf  # open short\n        ...         last_pos_state[0] = -1\n        ...     else:\n        ...         size = np.inf  # open long\n        ...         last_pos_state[0] = 1\n        ...     return order_nb(size=size)\n\n        >>> pf = vbt.Portfolio.from_order_func(\n        ...     
close, order_func_nb, pre_group_func_nb=pre_group_func_nb)\n\n >>> pf.assets()\n 0 100.000000\n 1 0.000000\n 2 -66.666667\n 3 0.000000\n 4 26.666667\n dtype: float64\n >>> pf.cash()\n 0 0.000000\n 1 200.000000\n 2 400.000000\n 3 133.333333\n 4 0.000000\n dtype: float64\n ```\n\n Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example:\n\n ```python-repl\n >>> from vectorbt.portfolio.nb import sort_call_seq_nb\n >>> from vectorbt.portfolio.enums import SizeType, Direction\n\n >>> @njit\n ... def pre_group_func_nb(c):\n ... '''Define empty arrays for each group.'''\n ... order_value_out = np.empty(c.group_len, dtype=np.float_)\n ... return (order_value_out,)\n\n >>> @njit\n ... def pre_segment_func_nb(c, order_value_out):\n ... '''Perform rebalancing at each segment.'''\n ... for col in range(c.from_col, c.to_col):\n ... # Here we use order price for group valuation\n ... c.last_val_price[col] = c.close[c.i, col]\n ... # Reorder call sequence such that selling orders come first and buying last\n ... size = 1 / c.group_len\n ... size_type = SizeType.TargetPercent\n ... direction = Direction.LongOnly # long positions only\n ... sort_call_seq_nb(c, size, size_type, direction, order_value_out)\n ... return (size, size_type, direction)\n\n >>> @njit\n ... def order_func_nb(c, size, size_type, direction, fees, fixed_fees, slippage):\n ... '''Place an order.'''\n ... return order_nb(\n ... size=size,\n ... size_type=size_type,\n ... direction=direction,\n ... fees=fees,\n ... fixed_fees=fixed_fees,\n ... slippage=slippage\n ... )\n\n >>> np.random.seed(42)\n >>> close = np.random.uniform(1, 10, size=(5, 3))\n >>> fees = 0.001\n >>> fixed_fees = 1.\n >>> slippage = 0.001\n\n >>> pf = vbt.Portfolio.from_order_func(\n ... close, # acts both as reference and order price here\n ... order_func_nb, fees, fixed_fees, slippage, # order_args as *args\n ... segment_mask=2, # rebalance every second tick\n ... pre_group_func_nb=pre_group_func_nb,\n ... pre_segment_func_nb=pre_segment_func_nb,\n ... cash_sharing=True, group_by=True, # one group with cash sharing\n ... )\n\n >>> pf.asset_value(group_by=False).vbt.plot()\n ```\n\n \n\n Combine multiple exit conditions. Exit early if the price hits some threshold before an actual exit\n (similar to the example under `Portfolio.from_signals`, but doesn't remove any information):\n\n ```python-repl\n >>> from vectorbt.base.reshape_fns import flex_select_auto_nb, to_2d_array\n >>> from vectorbt.portfolio.enums import NoOrder, OrderStatus, OrderSide\n\n >>> @njit\n ... def pre_sim_func_nb(c):\n ... # We need to define stop price per column once\n ... stop_price = np.full(c.target_shape[1], np.nan, dtype=np.float_)\n ... return (stop_price,)\n\n >>> @njit\n ... def order_func_nb(c, stop_price, entries, exits, size, flex_2d):\n ... # Select info related to this order\n ... # flex_select_auto_nb allows us to pass size as single number, 1-dim or 2-dim array\n ... # If flex_2d is True, 1-dim array will be per column, otherwise per row\n ... size_now = flex_select_auto_nb(c.i, c.col, np.asarray(size), flex_2d)\n ... price_now = c.close[c.i, c.col] # close is always 2-dim array\n ... stop_price_now = stop_price[c.col]\n ...\n ... # Our logic\n ... if entries[c.i, c.col]:\n ... if c.position_now == 0:\n ... return order_nb(\n ... size=size_now,\n ... price=price_now,\n ... direction=Direction.LongOnly)\n ... elif exits[c.i, c.col] or price_now >= stop_price_now:\n ... if c.position_now > 0:\n ... return order_nb(\n ... size=-size_now,\n ... 
price=price_now,\n ... direction=Direction.LongOnly)\n ... return NoOrder\n\n >>> @njit\n ... def post_order_func_nb(c, stop_price, stop, flex_2d):\n ... # Same broadcasting as for size\n ... stop_now = flex_select_auto_nb(c.i, c.col, np.asarray(stop), flex_2d)\n ...\n ... if c.order_result.status == OrderStatus.Filled:\n ... if c.order_result.side == OrderSide.Buy:\n ... # Position entered: Set stop condition\n ... stop_price[c.col] = (1 + stop_now) * c.order_result.price\n ... else:\n ... # Position exited: Remove stop condition\n ... stop_price[c.col] = np.nan\n\n >>> def simulate(close, entries, exits, threshold):\n ... return vbt.Portfolio.from_order_func(\n ... close,\n ... order_func_nb,\n ... to_2d_array(entries), # 2-dim array\n ... to_2d_array(exits), # 2-dim array\n ... np.inf, # will broadcast\n ... True,\n ... pre_sim_func_nb=pre_sim_func_nb,\n ... post_order_func_nb=post_order_func_nb,\n ... post_order_args=(\n ... threshold, # will broadcast\n ... True\n ... )\n ... )\n\n >>> close = pd.Series([10, 11, 12, 13, 14])\n >>> entries = pd.Series([True, True, False, False, False])\n >>> exits = pd.Series([False, False, False, True, True])\n >>> simulate(close, entries, exits, 0.1).asset_flow()\n 0 10.0\n 1 0.0\n 2 -10.0\n 3 0.0\n 4 0.0\n dtype: float64\n >>> simulate(close, entries, exits, 0.2).asset_flow()\n 0 10.0\n 1 0.0\n 2 -10.0\n 3 0.0\n 4 0.0\n dtype: float64\n >>> simulate(close, entries, exits, np.nan).asset_flow()\n 0 10.0\n 1 0.0\n 2 0.0\n 3 -10.0\n 4 0.0\n dtype: float64\n ```\n\n The reason why stop of 10% does not result in an order at the second time step is because\n it comes at the same time as entry, so it must wait until no entry is present.\n This can be changed by replacing the statement \"elif\" with \"if\", which would execute\n an exit regardless if an entry is present (similar to using `ConflictMode.Opposite` in\n `Portfolio.from_signals`).\n \"\"\"\n # Get defaults\n from vectorbt._settings import settings\n portfolio_cfg = settings['portfolio']\n\n close = to_pd_array(close)\n if target_shape is None:\n target_shape = close.shape\n if init_cash is None:\n init_cash = portfolio_cfg['init_cash']\n if isinstance(init_cash, str):\n init_cash = map_enum_fields(init_cash, InitCashMode)\n if isinstance(init_cash, int) and init_cash in InitCashMode:\n init_cash_mode = init_cash\n init_cash = np.inf\n else:\n init_cash_mode = None\n if cash_sharing is None:\n cash_sharing = portfolio_cfg['cash_sharing']\n if cash_sharing and group_by is None:\n group_by = True\n if call_seq is None:\n call_seq = portfolio_cfg['call_seq']\n call_seq = map_enum_fields(call_seq, CallSeqType)\n if isinstance(call_seq, int):\n if call_seq == CallSeqType.Auto:\n raise ValueError(\"CallSeqType.Auto should be implemented manually. 
\"\n \"Use sort_call_seq_nb in pre_segment_func_nb.\")\n if segment_mask is None:\n segment_mask = True\n if call_pre_segment is None:\n call_pre_segment = portfolio_cfg['call_pre_segment']\n if call_post_segment is None:\n call_post_segment = portfolio_cfg['call_post_segment']\n if ffill_val_price is None:\n ffill_val_price = portfolio_cfg['ffill_val_price']\n if update_value is None:\n update_value = portfolio_cfg['update_value']\n if fill_pos_record is None:\n fill_pos_record = portfolio_cfg['fill_pos_record']\n if row_wise is None:\n row_wise = portfolio_cfg['row_wise']\n if use_numba is None:\n use_numba = portfolio_cfg['use_numba']\n if seed is None:\n seed = portfolio_cfg['seed']\n if seed is not None:\n set_seed(seed)\n if freq is None:\n freq = portfolio_cfg['freq']\n if broadcast_kwargs is None:\n broadcast_kwargs = {}\n require_kwargs = dict(require_kwargs=dict(requirements='W'))\n broadcast_kwargs = merge_dicts(require_kwargs, broadcast_kwargs)\n if wrapper_kwargs is None:\n wrapper_kwargs = {}\n if not wrapper_kwargs.get('group_select', True) and cash_sharing:\n raise ValueError(\"group_select cannot be disabled if cash_sharing=True\")\n\n # Broadcast inputs\n if isinstance(target_shape, int):\n target_shape = (target_shape,)\n target_shape_2d = (target_shape[0], target_shape[1] if len(target_shape) > 1 else 1)\n if close.shape != target_shape:\n close_wrapper = ArrayWrapper.from_obj(close)\n if len(close_wrapper.columns) <= target_shape_2d[1]:\n if target_shape_2d[1] % len(close_wrapper.columns) != 0:\n raise ValueError(\"Cannot broadcast close to target_shape\")\n if keys is None:\n keys = pd.Index(np.arange(target_shape_2d[1]), name='iteration_idx')\n tile_times = target_shape_2d[1] // len(close_wrapper.columns)\n close = close.vbt.tile(tile_times, keys=keys)\n close = broadcast(close, to_shape=target_shape, **broadcast_kwargs)\n wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)\n cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)\n init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)\n group_lens = wrapper.grouper.get_group_lens(group_by=group_by)\n if isinstance(segment_mask, int):\n _segment_mask = np.full((target_shape_2d[0], len(group_lens)), False)\n _segment_mask[0::segment_mask] = True\n segment_mask = _segment_mask\n else:\n segment_mask = broadcast(\n segment_mask,\n to_shape=(target_shape_2d[0], len(group_lens)),\n to_pd=False,\n **require_kwargs\n )\n if checks.is_any_array(call_seq):\n call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))\n else:\n call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)\n if max_orders is None:\n max_orders = target_shape_2d[0] * target_shape_2d[1]\n if max_logs is None:\n max_logs = target_shape_2d[0] * target_shape_2d[1]\n\n # Perform calculation\n if row_wise:\n simulate_func = nb.simulate_row_wise_nb\n if not use_numba and hasattr(simulate_func, 'py_func'):\n simulate_func = simulate_func.py_func\n order_records, log_records = simulate_func(\n target_shape=target_shape_2d,\n close=to_2d_array(close),\n group_lens=group_lens,\n init_cash=init_cash,\n cash_sharing=cash_sharing,\n call_seq=call_seq,\n segment_mask=segment_mask,\n pre_sim_func_nb=pre_sim_func_nb,\n pre_sim_args=pre_sim_args,\n post_sim_func_nb=post_sim_func_nb,\n post_sim_args=post_sim_args,\n pre_row_func_nb=pre_row_func_nb,\n pre_row_args=pre_row_args,\n 
post_row_func_nb=post_row_func_nb,\n post_row_args=post_row_args,\n pre_segment_func_nb=pre_segment_func_nb,\n pre_segment_args=pre_segment_args,\n post_segment_func_nb=post_segment_func_nb,\n post_segment_args=post_segment_args,\n order_func_nb=order_func_nb,\n order_args=order_args,\n post_order_func_nb=post_order_func_nb,\n post_order_args=post_order_args,\n call_pre_segment=call_pre_segment,\n call_post_segment=call_post_segment,\n ffill_val_price=ffill_val_price,\n update_value=update_value,\n fill_pos_record=fill_pos_record,\n max_orders=max_orders,\n max_logs=max_logs\n )\n else:\n simulate_func = nb.simulate_nb\n if not use_numba and hasattr(simulate_func, 'py_func'):\n simulate_func = simulate_func.py_func\n order_records, log_records = simulate_func(\n target_shape=target_shape_2d,\n close=to_2d_array(close),\n group_lens=group_lens,\n init_cash=init_cash,\n cash_sharing=cash_sharing,\n call_seq=call_seq,\n segment_mask=segment_mask,\n pre_sim_func_nb=pre_sim_func_nb,\n pre_sim_args=pre_sim_args,\n post_sim_func_nb=post_sim_func_nb,\n post_sim_args=post_sim_args,\n pre_group_func_nb=pre_group_func_nb,\n pre_group_args=pre_group_args,\n post_group_func_nb=post_group_func_nb,\n post_group_args=post_group_args,\n pre_segment_func_nb=pre_segment_func_nb,\n pre_segment_args=pre_segment_args,\n post_segment_func_nb=post_segment_func_nb,\n post_segment_args=post_segment_args,\n order_func_nb=order_func_nb,\n order_args=order_args,\n post_order_func_nb=post_order_func_nb,\n post_order_args=post_order_args,\n call_pre_segment=call_pre_segment,\n call_post_segment=call_post_segment,\n ffill_val_price=ffill_val_price,\n update_value=update_value,\n fill_pos_record=fill_pos_record,\n max_orders=max_orders,\n max_logs=max_logs\n )\n\n # Create an instance\n return cls(\n wrapper,\n close,\n order_records,\n log_records,\n init_cash if init_cash_mode is None else init_cash_mode,\n cash_sharing,\n call_seq,\n **kwargs\n )\n\n # ############# Properties ############# #\n\n @property\n def wrapper(self) -> ArrayWrapper:\n \"\"\"Array wrapper.\"\"\"\n if self.cash_sharing:\n # Allow only disabling grouping when needed (but not globally, see regroup)\n return self._wrapper.copy(\n allow_enable=False,\n allow_modify=False\n )\n return self._wrapper\n\n def regroup(self: PortfolioT, group_by: tp.GroupByLike, **kwargs) -> PortfolioT:\n \"\"\"Regroup this object.\n\n See `vectorbt.base.array_wrapper.Wrapping.regroup`.\n\n !!! 
note\n All cached objects will be lost.\"\"\"\n if self.cash_sharing:\n if self.wrapper.grouper.is_grouping_modified(group_by=group_by):\n raise ValueError(\"Cannot modify grouping globally when cash_sharing=True\")\n return Wrapping.regroup(self, group_by, **kwargs)\n\n @property\n def cash_sharing(self) -> bool:\n \"\"\"Whether to share cash within the same group.\"\"\"\n return self._cash_sharing\n\n @property\n def call_seq(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Sequence of calls per row and group.\"\"\"\n return self.wrapper.wrap(self._call_seq, group_by=False, **merge_dicts({}, wrap_kwargs))\n\n @property\n def fillna_close(self) -> bool:\n \"\"\"Whether to forward-backward fill NaN values in `Portfolio.close`.\"\"\"\n return self._fillna_close\n\n # ############# Reference price ############# #\n\n @property\n def close(self) -> tp.SeriesFrame:\n \"\"\"Price per unit series.\"\"\"\n return self._close\n\n @cached_method\n def get_filled_close(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Forward-backward-fill NaN values in `Portfolio.close`\"\"\"\n close = to_2d_array(self.close.ffill().bfill())\n return self.wrapper.wrap(close, group_by=False, **merge_dicts({}, wrap_kwargs))\n\n # ############# Records ############# #\n\n @property\n def order_records(self) -> tp.RecordArray:\n \"\"\"A structured NumPy array of order records.\"\"\"\n return self._order_records\n\n @cached_property\n def orders(self) -> Orders:\n \"\"\"`Portfolio.get_orders` with default arguments.\"\"\"\n return Orders(self.wrapper, self.order_records, self.close)\n\n @cached_method\n def get_orders(self, group_by: tp.GroupByLike = None) -> Orders:\n \"\"\"Get order records.\n\n See `vectorbt.portfolio.orders.Orders`.\"\"\"\n return self.orders.regroup(group_by)\n\n @property\n def log_records(self) -> tp.RecordArray:\n \"\"\"A structured NumPy array of log records.\"\"\"\n return self._log_records\n\n @cached_property\n def logs(self) -> Logs:\n \"\"\"`Portfolio.get_logs` with default arguments.\"\"\"\n return Logs(self.wrapper, self.log_records)\n\n @cached_method\n def get_logs(self, group_by: tp.GroupByLike = None) -> Logs:\n \"\"\"Get log records.\n\n See `vectorbt.portfolio.logs.Logs`.\"\"\"\n return self.logs.regroup(group_by)\n\n @cached_property\n def trades(self) -> Trades:\n \"\"\"`Portfolio.get_trades` with default arguments.\"\"\"\n return Trades.from_orders(self.orders)\n\n @cached_method\n def get_trades(self, group_by: tp.GroupByLike = None) -> Trades:\n \"\"\"Get trade records.\n\n See `vectorbt.portfolio.trades.Trades`.\"\"\"\n return self.trades.regroup(group_by)\n\n @cached_property\n def positions(self) -> Positions:\n \"\"\"`Portfolio.get_positions` with default arguments.\"\"\"\n return Positions.from_trades(self.trades)\n\n @cached_method\n def get_positions(self, group_by: tp.GroupByLike = None) -> Positions:\n \"\"\"Get position records.\n\n See `vectorbt.portfolio.trades.Positions`.\"\"\"\n return self.positions.regroup(group_by)\n\n @cached_property\n def drawdowns(self) -> Drawdowns:\n \"\"\"`Portfolio.get_drawdowns` with default arguments.\"\"\"\n return self.get_drawdowns()\n\n @cached_method\n def get_drawdowns(self, group_by: tp.GroupByLike = None, **kwargs) -> Drawdowns:\n \"\"\"Get drawdown records from `Portfolio.value`.\n\n See `vectorbt.generic.drawdowns.Drawdowns`.\n\n `**kwargs` are passed to `Portfolio.value`.\"\"\"\n return Drawdowns.from_ts(self.value(group_by=group_by, **kwargs), freq=self.wrapper.freq)\n\n # 
############# Assets ############# #\n\n @cached_method\n def asset_flow(self, direction: str = 'all', wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get asset flow series per column.\n\n Returns the total transacted amount of assets at each time step.\"\"\"\n direction = map_enum_fields(direction, Direction)\n asset_flow = nb.asset_flow_nb(\n self.wrapper.shape_2d,\n self.orders.values,\n self.orders.col_mapper.col_map,\n direction\n )\n return self.wrapper.wrap(asset_flow, group_by=False, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def assets(self, direction: str = 'all', wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get asset series per column.\n\n Returns the current position at each time step.\"\"\"\n direction = map_enum_fields(direction, Direction)\n asset_flow = to_2d_array(self.asset_flow(direction='all'))\n assets = nb.assets_nb(asset_flow)\n if direction == Direction.LongOnly:\n assets = np.where(assets > 0, assets, 0.)\n if direction == Direction.ShortOnly:\n assets = np.where(assets < 0, -assets, 0.)\n return self.wrapper.wrap(assets, group_by=False, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def position_mask(self, direction: str = 'all', group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get position mask per column/group.\n\n An element is True if the asset is in the market at this tick.\"\"\"\n direction = map_enum_fields(direction, Direction)\n assets = to_2d_array(self.assets(direction=direction))\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n position_mask = to_2d_array(self.position_mask(direction=direction, group_by=False))\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n position_mask = nb.position_mask_grouped_nb(position_mask, group_lens)\n else:\n position_mask = assets != 0\n return self.wrapper.wrap(position_mask, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def position_coverage(self, direction: str = 'all', group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get position coverage per column/group.\"\"\"\n direction = map_enum_fields(direction, Direction)\n assets = to_2d_array(self.assets(direction=direction))\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n position_mask = to_2d_array(self.position_mask(direction=direction, group_by=False))\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n position_coverage = nb.position_coverage_grouped_nb(position_mask, group_lens)\n else:\n position_coverage = np.mean(assets != 0, axis=0)\n wrap_kwargs = merge_dicts(dict(name_or_index='position_coverage'), wrap_kwargs)\n return self.wrapper.wrap_reduced(position_coverage, group_by=group_by, **wrap_kwargs)\n\n # ############# Cash ############# #\n\n @cached_method\n def cash_flow(self, group_by: tp.GroupByLike = None, free: bool = False,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get cash flow series per column/group.\n\n Use `free` to return the flow of the free cash, which never goes above the initial level,\n because an operation always costs money.\"\"\"\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n cash_flow = to_2d_array(self.cash_flow(group_by=False, free=free))\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n cash_flow = nb.cash_flow_grouped_nb(cash_flow, group_lens)\n else:\n cash_flow = nb.cash_flow_nb(\n self.wrapper.shape_2d,\n self.orders.values,\n 
self.orders.col_mapper.col_map,\n free\n )\n return self.wrapper.wrap(cash_flow, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_property\n def init_cash(self) -> tp.MaybeSeries:\n \"\"\"`Portfolio.get_init_cash` with default arguments.\"\"\"\n return self.get_init_cash()\n\n @cached_method\n def get_init_cash(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:\n \"\"\"Initial amount of cash per column/group with default arguments.\n\n !!! note\n If the initial cash balance was found automatically and no own cash is used throughout\n the simulation (for example, when shorting), it will be set to 1 instead of 0 to enable\n smooth calculation of returns.\"\"\"\n if isinstance(self._init_cash, int):\n cash_flow = to_2d_array(self.cash_flow(group_by=group_by))\n cash_min = np.min(np.cumsum(cash_flow, axis=0), axis=0)\n init_cash = np.where(cash_min < 0, np.abs(cash_min), 1.)\n if self._init_cash == InitCashMode.AutoAlign:\n init_cash = np.full(init_cash.shape, np.max(init_cash))\n else:\n init_cash = to_1d_array(self._init_cash)\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n init_cash = nb.init_cash_grouped_nb(init_cash, group_lens, self.cash_sharing)\n else:\n group_lens = self.wrapper.grouper.get_group_lens()\n init_cash = nb.init_cash_nb(init_cash, group_lens, self.cash_sharing)\n wrap_kwargs = merge_dicts(dict(name_or_index='init_cash'), wrap_kwargs)\n return self.wrapper.wrap_reduced(init_cash, group_by=group_by, **wrap_kwargs)\n\n @cached_method\n def cash(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False, free: bool = False,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get cash balance series per column/group.\n\n See the explanation on `in_sim_order` in `Portfolio.value`.\n For `free`, see `Portfolio.cash_flow`.\"\"\"\n if in_sim_order and not self.cash_sharing:\n raise ValueError(\"Cash sharing must be enabled for in_sim_order=True\")\n\n cash_flow = to_2d_array(self.cash_flow(group_by=group_by, free=free))\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n init_cash = to_1d_array(self.get_init_cash(group_by=group_by))\n cash = nb.cash_grouped_nb(\n self.wrapper.shape_2d,\n cash_flow,\n group_lens,\n init_cash\n )\n else:\n if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:\n group_lens = self.wrapper.grouper.get_group_lens()\n init_cash = to_1d_array(self.init_cash)\n call_seq = to_2d_array(self.call_seq)\n cash = nb.cash_in_sim_order_nb(cash_flow, group_lens, init_cash, call_seq)\n else:\n init_cash = to_1d_array(self.get_init_cash(group_by=False))\n cash = nb.cash_nb(cash_flow, init_cash)\n return self.wrapper.wrap(cash, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n # ############# Performance ############# #\n\n @cached_method\n def asset_value(self, direction: str = 'all', group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get asset value series per column/group.\"\"\"\n direction = map_enum_fields(direction, Direction)\n if self.fillna_close:\n close = to_2d_array(self.get_filled_close()).copy()\n else:\n close = to_2d_array(self.close).copy()\n assets = to_2d_array(self.assets(direction=direction))\n close[assets == 0] = 0. 
# for price being NaN\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n asset_value = to_2d_array(self.asset_value(direction=direction, group_by=False))\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n asset_value = nb.asset_value_grouped_nb(asset_value, group_lens)\n else:\n asset_value = nb.asset_value_nb(close, assets)\n return self.wrapper.wrap(asset_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def gross_exposure(self, direction: str = 'all', group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get gross exposure.\"\"\"\n asset_value = to_2d_array(self.asset_value(group_by=group_by, direction=direction))\n cash = to_2d_array(self.cash(group_by=group_by, free=True))\n gross_exposure = nb.gross_exposure_nb(asset_value, cash)\n return self.wrapper.wrap(gross_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def net_exposure(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get net exposure.\"\"\"\n long_exposure = to_2d_array(self.gross_exposure(direction='longonly', group_by=group_by))\n short_exposure = to_2d_array(self.gross_exposure(direction='shortonly', group_by=group_by))\n net_exposure = long_exposure - short_exposure\n return self.wrapper.wrap(net_exposure, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def value(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get portfolio value series per column/group.\n\n By default, will generate portfolio value for each asset based on cash flows and thus\n independent from other assets, with the initial cash balance and position being that of the\n entire group. Useful for generating returns and comparing assets within the same group.\n\n When `group_by` is False and `in_sim_order` is True, returns value generated in\n simulation order (see [row-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order)).\n This value cannot be used for generating returns as-is.
Useful to analyze how value\n evolved throughout simulation.\"\"\"\n cash = to_2d_array(self.cash(group_by=group_by, in_sim_order=in_sim_order))\n asset_value = to_2d_array(self.asset_value(group_by=group_by))\n if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:\n group_lens = self.wrapper.grouper.get_group_lens()\n call_seq = to_2d_array(self.call_seq)\n value = nb.value_in_sim_order_nb(cash, asset_value, group_lens, call_seq)\n # price of NaN is already addressed by ungrouped_value_nb\n else:\n value = nb.value_nb(cash, asset_value)\n return self.wrapper.wrap(value, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def total_profit(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:\n \"\"\"Get total profit per column/group.\n\n Calculated directly from order records (fast).\"\"\"\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n total_profit = to_1d_array(self.total_profit(group_by=False))\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n total_profit = nb.total_profit_grouped_nb(\n total_profit,\n group_lens\n )\n else:\n if self.fillna_close:\n close = to_2d_array(self.get_filled_close())\n else:\n close = to_2d_array(self.close)\n total_profit = nb.total_profit_nb(\n self.wrapper.shape_2d,\n close,\n self.orders.values,\n self.orders.col_mapper.col_map\n )\n wrap_kwargs = merge_dicts(dict(name_or_index='total_profit'), wrap_kwargs)\n return self.wrapper.wrap_reduced(total_profit, group_by=group_by, **wrap_kwargs)\n\n @cached_method\n def final_value(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:\n \"\"\"Get final portfolio value per column/group.\"\"\"\n init_cash = to_1d_array(self.get_init_cash(group_by=group_by))\n total_profit = to_1d_array(self.total_profit(group_by=group_by))\n final_value = nb.final_value_nb(total_profit, init_cash)\n wrap_kwargs = merge_dicts(dict(name_or_index='final_value'), wrap_kwargs)\n return self.wrapper.wrap_reduced(final_value, group_by=group_by, **wrap_kwargs)\n\n @cached_method\n def total_return(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:\n \"\"\"Get total return per column/group.\"\"\"\n init_cash = to_1d_array(self.get_init_cash(group_by=group_by))\n total_profit = to_1d_array(self.total_profit(group_by=group_by))\n total_return = nb.total_return_nb(total_profit, init_cash)\n wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)\n return self.wrapper.wrap_reduced(total_return, group_by=group_by, **wrap_kwargs)\n\n @cached_method\n def returns(self, group_by: tp.GroupByLike = None, in_sim_order=False,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get return series per column/group based on portfolio value.\"\"\"\n value = to_2d_array(self.value(group_by=group_by, in_sim_order=in_sim_order))\n if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:\n group_lens = self.wrapper.grouper.get_group_lens()\n init_cash_grouped = to_1d_array(self.init_cash)\n call_seq = to_2d_array(self.call_seq)\n returns = nb.returns_in_sim_order_nb(value, group_lens, init_cash_grouped, call_seq)\n else:\n init_cash = to_1d_array(self.get_init_cash(group_by=group_by))\n returns = returns_nb.returns_nb(value, init_cash)\n return self.wrapper.wrap(returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def asset_returns(self, group_by: tp.GroupByLike = None,\n
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get asset return series per column/group.\n\n This type of returns is based solely on cash flows and asset value rather than portfolio\n value. It ignores passive cash and thus it will return the same numbers irrespective of the amount of\n cash currently available, even `np.inf`. The scale of returns is comparable to that of going\n all in and keeping available cash at zero.\"\"\"\n cash_flow = to_2d_array(self.cash_flow(group_by=group_by))\n asset_value = to_2d_array(self.asset_value(group_by=group_by))\n asset_returns = nb.asset_returns_nb(cash_flow, asset_value)\n return self.wrapper.wrap(asset_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def returns_acc(self,\n group_by: tp.GroupByLike = None,\n freq: tp.Optional[tp.FrequencyLike] = None,\n year_freq: tp.Optional[tp.FrequencyLike] = None,\n use_asset_returns: bool = False,\n defaults: tp.KwargsLike = None,\n **kwargs) -> ReturnsAccessor:\n \"\"\"Get returns accessor of type `vectorbt.returns.accessors.ReturnsAccessor`.\n\n !!! hint\n You can find most methods of this accessor as (cacheable) attributes of this portfolio.\"\"\"\n if freq is None:\n freq = self.wrapper.freq\n if use_asset_returns:\n returns = self.asset_returns(group_by=group_by)\n else:\n returns = self.returns(group_by=group_by)\n return returns.vbt.returns(freq=freq, year_freq=year_freq, defaults=defaults, **kwargs)\n\n @cached_method\n def benchmark_value(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get market benchmark value series per column/group.\n\n If grouped, evenly distributes the initial cash among assets in the group.\n\n !!! note\n Does not take into account fees and slippage. 
For this, create a separate portfolio.\"\"\"\n if self.fillna_close:\n close = to_2d_array(self.get_filled_close())\n else:\n close = to_2d_array(self.close)\n if self.wrapper.grouper.is_grouped(group_by=group_by):\n group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)\n init_cash_grouped = to_1d_array(self.get_init_cash(group_by=group_by))\n benchmark_value = nb.benchmark_value_grouped_nb(close, group_lens, init_cash_grouped)\n else:\n init_cash = to_1d_array(self.get_init_cash(group_by=False))\n benchmark_value = nb.benchmark_value_nb(close, init_cash)\n return self.wrapper.wrap(benchmark_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def benchmark_returns(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:\n \"\"\"Get return series per column/group based on benchmark value.\"\"\"\n benchmark_value = to_2d_array(self.benchmark_value(group_by=group_by))\n init_cash = to_1d_array(self.get_init_cash(group_by=group_by))\n benchmark_returns = returns_nb.returns_nb(benchmark_value, init_cash)\n return self.wrapper.wrap(benchmark_returns, group_by=group_by, **merge_dicts({}, wrap_kwargs))\n\n @cached_method\n def total_benchmark_return(self, group_by: tp.GroupByLike = None,\n wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:\n \"\"\"Get total benchmark return.\"\"\"\n benchmark_value = to_2d_array(self.benchmark_value(group_by=group_by))\n total_benchmark_return = nb.total_benchmark_return_nb(benchmark_value)\n wrap_kwargs = merge_dicts(dict(name_or_index='total_benchmark_return'), wrap_kwargs)\n return self.wrapper.wrap_reduced(total_benchmark_return, group_by=group_by, **wrap_kwargs)\n\n # ############# Resolution ############# #\n\n @property\n def self_aliases(self) -> tp.Set[str]:\n \"\"\"Names to associate with this object.\"\"\"\n return {'self', 'portfolio', 'pf'}\n\n def pre_resolve_attr(self, attr: str, final_kwargs: tp.KwargsLike = None) -> str:\n \"\"\"Pre-process an attribute before resolution.\n\n Uses the following keys:\n\n * `use_asset_returns`: Whether to use `Portfolio.asset_returns` when resolving `returns` argument.\n * `use_positions`: Whether to use `Portfolio.positions` when resolving `trades` argument.\"\"\"\n if 'use_asset_returns' in final_kwargs:\n if attr == 'returns' and final_kwargs['use_asset_returns']:\n attr = 'asset_returns'\n if 'use_positions' in final_kwargs:\n if attr == 'trades' and final_kwargs['use_positions']:\n attr = 'positions'\n return attr\n\n def post_resolve_attr(self, attr: str, out: tp.Any, final_kwargs: tp.KwargsLike = None) -> str:\n \"\"\"Post-process an object after resolution.\n\n Uses the following keys:\n\n * `incl_open`: Whether to include open trades/positions when resolving `trades`/`positions` argument.\"\"\"\n if attr in ['trades', 'positions'] and not final_kwargs['incl_open']:\n out = out.closed\n return out\n\n # ############# Stats ############# #\n\n @property\n def stats_defaults(self) -> tp.Kwargs:\n \"\"\"Defaults for `Portfolio.stats`.\n\n Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and\n `portfolio.stats` in `vectorbt._settings.settings`.\"\"\"\n from vectorbt._settings import settings\n returns_cfg = settings['returns']\n portfolio_stats_cfg = settings['portfolio']['stats']\n\n return merge_dicts(\n StatsBuilderMixin.stats_defaults.__get__(self),\n dict(\n settings=dict(\n year_freq=returns_cfg['year_freq'],\n benchmark_rets=None\n )\n ),\n portfolio_stats_cfg\n )\n\n _metrics: tp.ClassVar[Config] = 
Config(\n dict(\n start=dict(\n title='Start',\n calc_func=lambda self: self.wrapper.index[0],\n agg_func=None,\n tags='wrapper'\n ),\n end=dict(\n title='End',\n calc_func=lambda self: self.wrapper.index[-1],\n agg_func=None,\n tags='wrapper'\n ),\n period=dict(\n title='Period',\n calc_func=lambda self: len(self.wrapper.index),\n apply_to_timedelta=True,\n agg_func=None,\n tags='wrapper'\n ),\n start_value=dict(\n title='Start Value',\n calc_func='get_init_cash',\n tags='portfolio'\n ),\n end_value=dict(\n title='End Value',\n calc_func='final_value',\n tags='portfolio'\n ),\n total_return=dict(\n title='Total Return [%]',\n calc_func='total_return',\n post_calc_func=lambda self, out, settings: out * 100,\n tags='portfolio'\n ),\n benchmark_return=dict(\n title='Benchmark Return [%]',\n calc_func=RepEval(\"'total_benchmark_return' if benchmark_rets is None else \"\n \"benchmark_rets.vbt.returns.total()\"),\n post_calc_func=lambda self, out, settings: out * 100,\n tags='portfolio'\n ),\n max_gross_exposure=dict(\n title='Max Gross Exposure [%]',\n calc_func='gross_exposure.vbt.max',\n post_calc_func=lambda self, out, settings: out * 100,\n tags='portfolio'\n ),\n total_fees_paid=dict(\n title='Total Fees Paid',\n calc_func='orders.fees.sum',\n tags=['portfolio', 'orders']\n ),\n max_dd=dict(\n title='Max Drawdown [%]',\n calc_func='drawdowns.max_drawdown',\n post_calc_func=lambda self, out, settings: -out * 100,\n tags=['portfolio', 'drawdowns']\n ),\n max_dd_duration=dict(\n title='Max Drawdown Duration',\n calc_func='drawdowns.max_duration',\n fill_wrap_kwargs=True,\n tags=['portfolio', 'drawdowns', 'duration']\n ),\n total_trades=dict(\n title=RepEval(\"'Total Positions' if use_positions else 'Total Trades'\"),\n calc_func='trades.count',\n incl_open=True,\n tags=['portfolio', Rep(\"trades_tag\")]\n ),\n total_closed_trades=dict(\n title=RepEval(\"'Total Closed Positions' if use_positions else 'Total Closed Trades'\"),\n calc_func='trades.closed.count',\n tags=['portfolio', Rep(\"trades_tag\"), 'closed']\n ),\n total_open_trades=dict(\n title=RepEval(\"'Total Open Positions' if use_positions else 'Total Open Trades'\"),\n calc_func='trades.open.count',\n incl_open=True,\n tags=['portfolio', Rep(\"trades_tag\"), 'open']\n ),\n open_trade_pnl=dict(\n title=RepEval(\"'Open Position P&L' if use_positions else 'Open Trade P&L'\"),\n calc_func='trades.open.pnl.sum',\n incl_open=True,\n tags=['portfolio', Rep(\"trades_tag\"), 'open']\n ),\n win_rate=dict(\n title='Win Rate [%]',\n calc_func='trades.win_rate',\n post_calc_func=lambda self, out, settings: out * 100,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n best_trade=dict(\n title=RepEval(\"'Best Position [%]' if use_positions else 'Best Trade [%]'\"),\n calc_func='trades.returns.max',\n post_calc_func=lambda self, out, settings: out * 100,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n worst_trade=dict(\n title=RepEval(\"'Worst Position [%]' if use_positions else 'Worst Trade [%]'\"),\n calc_func='trades.returns.min',\n post_calc_func=lambda self, out, settings: out * 100,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n avg_winning_trade=dict(\n title=RepEval(\"'Avg Winning Position [%]' if use_positions else 'Avg Winning Trade [%]'\"),\n calc_func='trades.winning.returns.mean',\n post_calc_func=lambda self, out, settings: out * 100,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags, 'winning']\")\n ),\n avg_losing_trade=dict(\n title=RepEval(\"'Avg Losing 
Position [%]' if use_positions else 'Avg Losing Trade [%]'\"),\n calc_func='trades.losing.returns.mean',\n post_calc_func=lambda self, out, settings: out * 100,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags, 'losing']\")\n ),\n avg_winning_trade_duration=dict(\n title=RepEval(\"'Avg Winning Position Duration' if use_positions else 'Avg Winning Trade Duration'\"),\n calc_func='trades.winning.duration.mean',\n apply_to_timedelta=True,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags, 'winning', 'duration']\")\n ),\n avg_losing_trade_duration=dict(\n title=RepEval(\"'Avg Losing Position Duration' if use_positions else 'Avg Losing Trade Duration'\"),\n calc_func='trades.losing.duration.mean',\n apply_to_timedelta=True,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags, 'losing', 'duration']\")\n ),\n profit_factor=dict(\n title='Profit Factor',\n calc_func='trades.profit_factor',\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n expectancy=dict(\n title='Expectancy',\n calc_func='trades.expectancy',\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n sharpe_ratio=dict(\n title='Sharpe Ratio',\n calc_func='returns_acc.sharpe_ratio',\n check_has_freq=True,\n check_has_year_freq=True,\n tags=['portfolio', 'returns']\n ),\n calmar_ratio=dict(\n title='Calmar Ratio',\n calc_func='returns_acc.calmar_ratio',\n check_has_freq=True,\n check_has_year_freq=True,\n tags=['portfolio', 'returns']\n ),\n omega_ratio=dict(\n title='Omega Ratio',\n calc_func='returns_acc.omega_ratio',\n check_has_freq=True,\n check_has_year_freq=True,\n tags=['portfolio', 'returns']\n ),\n sortino_ratio=dict(\n title='Sortino Ratio',\n calc_func='returns_acc.sortino_ratio',\n check_has_freq=True,\n check_has_year_freq=True,\n tags=['portfolio', 'returns']\n )\n ),\n copy_kwargs=dict(copy_mode='deep')\n )\n\n @property\n def metrics(self) -> Config:\n return self._metrics\n\n def returns_stats(self,\n group_by: tp.GroupByLike = None,\n freq: tp.Optional[tp.FrequencyLike] = None,\n year_freq: tp.Optional[tp.FrequencyLike] = None,\n use_asset_returns: bool = False,\n defaults: tp.KwargsLike = None,\n benchmark_rets: tp.Optional[tp.ArrayLike] = None,\n **kwargs) -> tp.SeriesFrame:\n \"\"\"Compute various statistics on returns of this portfolio.\n\n See `Portfolio.returns_acc` and `vectorbt.returns.accessors.ReturnsAccessor.metrics`.\n\n `kwargs` will be passed to `vectorbt.returns.accessors.ReturnsAccessor.stats` method.\n If `benchmark_rets` is not set, uses `Portfolio.benchmark_returns`.\"\"\"\n returns_acc = self.returns_acc(\n group_by=group_by,\n freq=freq,\n year_freq=year_freq,\n use_asset_returns=use_asset_returns,\n defaults=defaults\n )\n if benchmark_rets is None:\n benchmark_rets = self.benchmark_returns(group_by=group_by)\n settings = dict(benchmark_rets=benchmark_rets)\n return getattr(returns_acc, 'stats')(settings=settings, **kwargs)\n\n # ############# Plotting ############# #\n\n def plot_asset_flow(self,\n column: tp.Optional[tp.Label] = None,\n direction: str = 'all',\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column of asset flow.\n\n Args:\n column (str): Name of the column to plot.\n direction (Direction): See `vectorbt.portfolio.enums.Direction`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments 
passed to `vectorbt.generic.accessors.GenericAccessor.plot`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['brown']\n ),\n name='Assets'\n )\n ), kwargs)\n asset_flow = self.asset_flow(direction=direction)\n asset_flow = self.select_one_from_obj(asset_flow, self.wrapper.regroup(False), column=column)\n fig = asset_flow.vbt.plot(**kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0,\n x1=x_domain[1],\n y1=0\n ), hline_shape_kwargs))\n return fig\n\n def plot_cash_flow(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n free: bool = False,\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of cash flow.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n free (bool): Whether to plot the flow of the free cash.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['green']\n ),\n name='Cash'\n )\n ), kwargs)\n cash_flow = self.cash_flow(group_by=group_by, free=free)\n cash_flow = self.select_one_from_obj(cash_flow, self.wrapper.regroup(group_by), column=column)\n fig = cash_flow.vbt.plot(**kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0.,\n x1=x_domain[1],\n y1=0.\n ), hline_shape_kwargs))\n return fig\n\n def plot_assets(self,\n column: tp.Optional[tp.Label] = None,\n direction: str = 'all',\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column of assets.\n\n Args:\n column (str): Name of the column to plot.\n direction (Direction): See `vectorbt.portfolio.enums.Direction`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['brown']\n ),\n name='Assets'\n ),\n pos_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['brown'], 0.3)\n ),\n neg_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n assets = self.assets(direction=direction)\n assets = self.select_one_from_obj(assets, self.wrapper.regroup(False), column=column)\n fig = assets.vbt.plot_against(0, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n 
type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0.,\n x1=x_domain[1],\n y1=0.\n ), hline_shape_kwargs))\n return fig\n\n def plot_cash(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n free: bool = False,\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of cash balance.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n free (bool): Whether to plot the flow of the free cash.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['green']\n ),\n name='Cash'\n ),\n pos_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['green'], 0.3)\n ),\n neg_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['red'], 0.3)\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n init_cash = self.get_init_cash(group_by=group_by)\n init_cash = self.select_one_from_obj(init_cash, self.wrapper.regroup(group_by), column=column)\n cash = self.cash(group_by=group_by, free=free)\n cash = self.select_one_from_obj(cash, self.wrapper.regroup(group_by), column=column)\n fig = cash.vbt.plot_against(init_cash, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=init_cash,\n x1=x_domain[1],\n y1=init_cash\n ), hline_shape_kwargs))\n return fig\n\n def plot_asset_value(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n direction: str = 'all',\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of asset value.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. 
See `vectorbt.base.column_grouper.ColumnGrouper`.\n direction (Direction): See `vectorbt.portfolio.enums.Direction`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['cyan']\n ),\n name='Asset Value'\n ),\n pos_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['cyan'], 0.3)\n ),\n neg_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n asset_value = self.asset_value(direction=direction, group_by=group_by)\n asset_value = self.select_one_from_obj(asset_value, self.wrapper.regroup(group_by), column=column)\n fig = asset_value.vbt.plot_against(0, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0.,\n x1=x_domain[1],\n y1=0.\n ), hline_shape_kwargs))\n return fig\n\n def plot_value(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of value.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n free (bool): Whether to plot free cash flow.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['purple']\n ),\n name='Value'\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n init_cash = self.get_init_cash(group_by=group_by)\n init_cash = self.select_one_from_obj(init_cash, self.wrapper.regroup(group_by), column=column)\n value = self.value(group_by=group_by)\n value = self.select_one_from_obj(value, self.wrapper.regroup(group_by), column=column)\n fig = value.vbt.plot_against(init_cash, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=init_cash,\n x1=x_domain[1],\n y1=init_cash\n ), hline_shape_kwargs))\n return fig\n\n def plot_cum_returns(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n benchmark_rets: tp.Optional[tp.ArrayLike] = None,\n use_asset_returns: bool = False,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of cumulative returns.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. 
See `vectorbt.base.column_grouper.ColumnGrouper`.\n benchmark_rets (array_like): Benchmark returns.\n\n If None, will use `Portfolio.benchmark_returns`.\n use_asset_returns (bool): Whether to plot asset returns.\n **kwargs: Keyword arguments passed to `vectorbt.returns.accessors.ReturnsSRAccessor.plot_cumulative`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n if benchmark_rets is None:\n benchmark_rets = self.benchmark_returns(group_by=group_by)\n else:\n benchmark_rets = broadcast_to(benchmark_rets, self.obj)\n benchmark_rets = self.select_one_from_obj(benchmark_rets, self.wrapper.regroup(group_by), column=column)\n kwargs = merge_dicts(dict(\n benchmark_rets=benchmark_rets,\n main_kwargs=dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['purple']\n ),\n name='Value'\n )\n ),\n hline_shape_kwargs=dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n )\n )\n ), kwargs)\n if use_asset_returns:\n returns = self.asset_returns(group_by=group_by)\n else:\n returns = self.returns(group_by=group_by)\n returns = self.select_one_from_obj(returns, self.wrapper.regroup(group_by), column=column)\n return returns.vbt.returns.plot_cumulative(**kwargs)\n\n def plot_drawdowns(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of drawdowns.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n **kwargs: Keyword arguments passed to `vectorbt.generic.drawdowns.Drawdowns.plot`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n ts_trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['purple']\n ),\n name='Value'\n )\n ), kwargs)\n return self.get_drawdowns(group_by=group_by).plot(column=column, **kwargs)\n\n def plot_underwater(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of underwater.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. 
See `vectorbt.base.column_grouper.ColumnGrouper`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['red']\n ),\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['red'], 0.3),\n fill='tozeroy',\n name='Drawdown'\n )\n ), kwargs)\n drawdown = self.drawdown(group_by=group_by)\n drawdown = self.select_one_from_obj(drawdown, self.wrapper.regroup(group_by), column=column)\n fig = drawdown.vbt.plot(**kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0,\n x1=x_domain[1],\n y1=0\n ), hline_shape_kwargs))\n yaxis = 'yaxis' + yref[1:]\n fig.layout[yaxis]['tickformat'] = '%'\n return fig\n\n def plot_gross_exposure(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n direction: str = 'all',\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of gross exposure.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. See `vectorbt.base.column_grouper.ColumnGrouper`.\n direction (Direction): See `vectorbt.portfolio.enums.Direction`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['pink']\n ),\n name='Exposure'\n ),\n pos_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)\n ),\n neg_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['pink'], 0.3)\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n gross_exposure = self.gross_exposure(direction=direction, group_by=group_by)\n gross_exposure = self.select_one_from_obj(gross_exposure, self.wrapper.regroup(group_by), column=column)\n fig = gross_exposure.vbt.plot_against(1, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=1,\n x1=x_domain[1],\n y1=1\n ), hline_shape_kwargs))\n return fig\n\n def plot_net_exposure(self,\n column: tp.Optional[tp.Label] = None,\n group_by: tp.GroupByLike = None,\n xref: str = 'x',\n yref: str = 'y',\n hline_shape_kwargs: tp.KwargsLike = None,\n **kwargs) -> tp.BaseFigure:\n \"\"\"Plot one column/group of net exposure.\n\n Args:\n column (str): Name of the column/group to plot.\n group_by (any): Group or ungroup columns. 
See `vectorbt.base.column_grouper.ColumnGrouper`.\n xref (str): X coordinate axis.\n yref (str): Y coordinate axis.\n hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.\n **kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot_against`.\n \"\"\"\n from vectorbt._settings import settings\n plotting_cfg = settings['plotting']\n\n kwargs = merge_dicts(dict(\n trace_kwargs=dict(\n line=dict(\n color=plotting_cfg['color_schema']['pink']\n ),\n name='Exposure'\n ),\n pos_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['pink'], 0.3)\n ),\n neg_trace_kwargs=dict(\n fillcolor=adjust_opacity(plotting_cfg['color_schema']['orange'], 0.3)\n ),\n other_trace_kwargs='hidden'\n ), kwargs)\n net_exposure = self.net_exposure(group_by=group_by)\n net_exposure = self.select_one_from_obj(net_exposure, self.wrapper.regroup(group_by), column=column)\n fig = net_exposure.vbt.plot_against(0, **kwargs)\n x_domain = get_domain(xref, fig)\n fig.add_shape(**merge_dicts(dict(\n type='line',\n line=dict(\n color='gray',\n dash=\"dash\",\n ),\n xref=\"paper\",\n yref=yref,\n x0=x_domain[0],\n y0=0,\n x1=x_domain[1],\n y1=0\n ), hline_shape_kwargs))\n return fig\n\n @property\n def plot_defaults(self) -> tp.Kwargs:\n \"\"\"Defaults for `Portfolio.plot`.\n\n Merges `vectorbt.generic.plot_builder.PlotBuilderMixin.plot_defaults` and\n `portfolio.plot` in `vectorbt._settings.settings`.\"\"\"\n from vectorbt._settings import settings\n returns_cfg = settings['returns']\n portfolio_plot_cfg = settings['portfolio']['plot']\n\n return merge_dicts(\n PlotBuilderMixin.plot_defaults.__get__(self),\n dict(\n settings=dict(\n year_freq=returns_cfg['year_freq'],\n benchmark_rets=None\n )\n ),\n portfolio_plot_cfg\n )\n\n _subplots: tp.ClassVar[Config] = Config(\n dict(\n orders=dict(\n title=\"Orders\",\n yaxis_title=\"Price\",\n check_is_not_grouped=True,\n plot_func='orders.plot',\n tags=['portfolio', 'orders']\n ),\n trades=dict(\n title=RepEval(\"'Positions' if use_positions else 'Trades'\"),\n yaxis_title=\"Price\",\n check_is_not_grouped=True,\n plot_func='trades.plot',\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n trade_pnl=dict(\n title=RepEval(\"'Position P&L' if use_positions else 'Trade P&L'\"),\n yaxis_title=RepEval(\"'Position P&L' if use_positions else 'Trade P&L'\"),\n check_is_not_grouped=True,\n plot_func='trades.plot_pnl',\n pass_column=True,\n pass_hline_shape_kwargs=True,\n pass_add_trace_kwargs=True,\n pass_xref=True,\n pass_yref=True,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n trade_returns=dict(\n title=RepEval(\"'Position Returns' if use_positions else 'Trade Returns'\"),\n yaxis_title=RepEval(\"'Position returns' if use_positions else 'Trade returns'\"),\n check_is_not_grouped=True,\n plot_func='trades.plot_returns',\n pass_column=True,\n pass_hline_shape_kwargs=True,\n pass_add_trace_kwargs=True,\n pass_xref=True,\n pass_yref=True,\n tags=RepEval(\"['portfolio', trades_tag, *incl_open_tags]\")\n ),\n asset_flow=dict(\n title=\"Asset Flow\",\n yaxis_title=\"Asset flow\",\n check_is_not_grouped=True,\n plot_func='plot_asset_flow',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'assets']\n ),\n cash_flow=dict(\n title=\"Cash Flow\",\n yaxis_title=\"Cash flow\",\n plot_func='plot_cash_flow',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'cash']\n ),\n assets=dict(\n title=\"Assets\",\n yaxis_title=\"Assets\",\n check_is_not_grouped=True,\n 
plot_func='plot_assets',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'assets']\n ),\n cash=dict(\n title=\"Cash\",\n yaxis_title=\"Cash\",\n plot_func='plot_cash',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'cash']\n ),\n asset_value=dict(\n title=\"Asset Value\",\n yaxis_title=\"Asset value\",\n plot_func='plot_asset_value',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'assets', 'value']\n ),\n value=dict(\n title=\"Value\",\n yaxis_title=\"Value\",\n plot_func='plot_value',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'value']\n ),\n cum_returns=dict(\n title=\"Cumulative Returns\",\n yaxis_title=\"Cumulative returns\",\n plot_func='plot_cum_returns',\n pass_hline_shape_kwargs=True,\n pass_add_trace_kwargs=True,\n pass_xref=True,\n pass_yref=True,\n tags=['portfolio', 'returns']\n ),\n drawdowns=dict(\n title=\"Drawdowns\",\n yaxis_title=\"Value\",\n plot_func='plot_drawdowns',\n pass_add_trace_kwargs=True,\n pass_xref=True,\n pass_yref=True,\n tags=['portfolio', 'value', 'drawdowns']\n ),\n underwater=dict(\n title=\"Underwater\",\n yaxis_title=\"Drawdown\",\n plot_func='plot_underwater',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'value', 'drawdowns']\n ),\n gross_exposure=dict(\n title=\"Gross Exposure\",\n yaxis_title=\"Gross exposure\",\n plot_func='plot_gross_exposure',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'exposure']\n ),\n net_exposure=dict(\n title=\"Net Exposure\",\n yaxis_title=\"Net exposure\",\n plot_func='plot_net_exposure',\n pass_add_trace_kwargs=True,\n tags=['portfolio', 'exposure']\n )\n ),\n copy_kwargs=dict(copy_mode='deep')\n )\n\n @property\n def subplots(self) -> Config:\n return self._subplots\n\n\nPortfolio.override_metrics_doc(__pdoc__)\nPortfolio.override_subplots_doc(__pdoc__)\n"
]
| [
[
"pandas.DataFrame.vbt.empty_like",
"pandas.Series",
"numpy.abs",
"numpy.isnan",
"numpy.arange",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.where"
]
]
|
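A minimal standalone sketch of the automatic init-cash rule in `Portfolio.get_init_cash` above, built from the numpy calls listed in this row's API column (`numpy.cumsum`, `numpy.where`, `numpy.abs`): the initial cash is the deepest cumulative cash deficit, floored at 1 so returns stay well-defined. The cash-flow array is an assumed example, not data from the row.

import numpy as np

cash_flow = np.array([[-10., 5.], [-20., 10.], [15., 2.]])  # assumed example flows, shape (time, column)
cash_min = np.min(np.cumsum(cash_flow, axis=0), axis=0)     # deepest cumulative deficit per column
init_cash = np.where(cash_min < 0, np.abs(cash_min), 1.)    # cover the deficit, else default to 1.
print(init_cash)  # [30.  1.]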
JoungheeKim/emotion_recognition | [
"96dbcf432a23553b9d573e8388ecc22195f1d85e"
]
| [
"src/process/label_preprocess.py"
]
| [
"import io\nimport os\nimport pandas as pd\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport pandas as pd\nfrom .base_preprocess import BaseProcessor, BasicProcessConfig\nimport numpy as np\nimport logging\nfrom dataclasses import _MISSING_TYPE, dataclass, field\n\n@dataclass\nclass LabelPreprocessConfig(BasicProcessConfig):\n name:str='labels'\n column_name: str = 'labels'\n selected_columns:str = \"['1번 감정', '2번 감정', '3번 감정', '4번 감정', '5번 감정']\"\n selected_class:str = \"['Angry', 'Disgust', 'Fear', 'Happiness', 'Neutral', 'Sadness', 'Surprise']\"\n loss_type:str = 'bce'\n\n@dataclass\nclass FiveLabelPreprocessConfig(LabelPreprocessConfig):\n name = '5label'\n selected_class:str = \"['Angry', 'Disgust', 'Fear', 'Neutral', 'Sadness']\"\n\n@dataclass\nclass SevenLabelPreprocessConfig(LabelPreprocessConfig):\n name = '7label'\n selected_class:str = \"['Angry', 'Disgust', 'Fear', 'Happiness', 'Neutral', 'Sadness', 'Surprise']\"\n\n\n@dataclass\nclass Multimodal8PreprocessConfig(LabelPreprocessConfig):\n name = 'multimodal8'\n selected_columns='[]'\n selected_class:str = \"['dislike', 'happy', 'surprise', 'neutral', 'sad', 'angry', 'fear', 'contempt']\"\n\n@dataclass\nclass Multimodal7PreprocessConfig(LabelPreprocessConfig):\n name = 'multimodal7'\n selected_columns = '[]'\n selected_class:str = \"['dislike', 'happy', 'surprise', 'neutral', 'sad', 'angry', 'fear']\"\n\n@dataclass\nclass Multimodal6PreprocessConfig(LabelPreprocessConfig):\n name = 'multimodal6'\n selected_columns = '[]'\n selected_class:str = \"['dislike', 'happy', 'surprise', 'neutral', 'sad', 'angry']\"\n\nclass Multimodal5PreprocessConfig(LabelPreprocessConfig):\n name = 'multimodal5'\n selected_columns = '[]'\n selected_class:str = \"['dislike', 'happy', 'surprise', 'neutral', 'sad']\"\n\n@dataclass\nclass Multimodal4PreprocessConfig(LabelPreprocessConfig):\n name = 'multimodal4'\n selected_columns = '[]'\n selected_class:str = \"['dislike', 'happy', 'surprise', 'neutral']\"\n\n@dataclass\nclass Synthesis7PreprocessConfig(LabelPreprocessConfig):\n name = 'synthesis7'\n selected_columns = '[]'\n selected_class:str = \"['ang', 'dis', 'fea', 'hap', 'neu', 'sad', 'sur']\"\n\n\nclass LabelProcessor(BaseProcessor):\n def __init__(self, cfg:LabelPreprocessConfig):\n super(LabelProcessor, self).__init__(cfg)\n self.selected_columns=eval(cfg.selected_columns)\n self.selected_class=eval(cfg.selected_class)\n self.column_name=cfg.column_name\n self.stratify_name = cfg.stratify_name\n self.loss_type = cfg.loss_type.lower()\n assert self.loss_type in ['bce', 'cross']\n\n def convert_data(self, script_df):\n if len(self.selected_columns) > 0:\n ## get Emotions\n emotion_lists = script_df[self.selected_columns].apply(pd.value_counts).T.columns\n\n before_num = len(script_df)\n ## get major emotion\n mask = script_df[self.selected_columns].apply(pd.value_counts, axis=1).max(axis=1) > 2\n script_df = script_df[mask]\n after_num = len(script_df)\n logging.info('befor [{}] after [{}]'.format(before_num, after_num))\n\n major_label_index = np.nanargmax(\n script_df[self.selected_columns].apply(pd.value_counts, axis=1)[emotion_lists].values, axis=1)\n\n ## set label_name\n script_df.loc[:, self.stratify_name] = emotion_lists[major_label_index]\n\n ## 추가부분\n mask = script_df[self.stratify_name].isin(self.selected_class)\n script_df = script_df[mask]\n\n logging.info(\"convert label to selected class\")\n selected_dict = {name: idx for idx, name in enumerate(self.selected_class)}\n script_df[self.column_name] = 
script_df[self.stratify_name].map(selected_dict)\n\n return script_df\n\n def get_data(self, script_df):\n ## extract selected class data\n\n labels = script_df[self.column_name].values.tolist()\n if self.loss_type == 'bce':\n logging.info('load Binary Cross Entropy Labels')\n targets = np.array(labels)\n labels = np.eye(len(self.selected_class))[targets]\n\n return labels\n\n\n"
]
| [
[
"numpy.array"
]
]
|
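A short sketch of the `loss_type == 'bce'` branch in `LabelProcessor.get_data` above, where integer class ids become one-hot rows by indexing an identity matrix; the class list and labels below are assumed examples, not values from the repository.

import numpy as np

selected_class = ['dislike', 'happy', 'surprise', 'neutral']  # assumed class list
labels = [1, 3, 0]                                            # assumed integer class ids
targets = np.array(labels)
one_hot = np.eye(len(selected_class))[targets]  # row k is the one-hot vector for labels[k]
print(one_hot.shape)  # (3, 4)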
rtajan/eirballoon | [
"0eded8f86174a9e5ed38297fa26c7f5a53b5ea53"
]
| [
"tests/python/synchro_freq_fine.py"
]
| [
"from numba import jit\nfrom py_aff3ct.module.py_module import Py_Module\nfrom math import *\nimport numpy as np\nimport sys\nsys.path.insert(0, '../../build/lib')\n\n\nclass synchro_freq_fine(Py_Module):\n @jit(nopython=True, nogil=True, cache = True)\n def func_(s_in, s_out, previousSample, phase, loopFiltState, DDSPreviousInp, IntegFiltState, gI, gP):\n for k in range(len(s_in[0, :])//2):\n phErr = np.sign(np.real(previousSample))*np.imag(previousSample) - \\\n np.sign(np.imag(previousSample))*np.real(previousSample)\n c = cos(phase)\n s = sin(phase)\n t = s_in[0, 2*k:2*k+2]\n s_out[0, 2*k] = t[0]*c-t[1]*s\n s_out[0, 2*k+1] = t[0]*s+t[1]*c\n\n loopFiltOut = phErr*gI + loopFiltState\n loopFiltState = loopFiltOut\n\n DDSout = DDSPreviousInp + IntegFiltState\n IntegFiltState = DDSout\n DDSPreviousInp = phErr * gP+loopFiltOut\n\n phase = -DDSout\n previousSample = s_out[0, 2*k] + 1j * s_out[0, 2*k+1]\n return previousSample, phase, loopFiltState, DDSPreviousInp, IntegFiltState\n \n\n def func(self, s_in, s_out):\n self.previousSample, self.phase, self.loopFiltState, self.DDSPreviousInp, self.IntegFiltState = synchro_freq_fine.func_(\n s_in, s_out, self.previousSample, self.phase, self.loopFiltState, self.DDSPreviousInp, self.IntegFiltState, self.gI, self.gP)\n return 0\n\n def __init__(self, fse, N):\n Py_Module.__init__(self)\n self.name = \"py_sync_freq_fine\"\n self.N = N\n self.Bn = 0.01\n self.zeta = sqrt(2)/2\n self.Kp = 2\n self.SPS = fse\n self.teta = (self.Bn * self.SPS) / \\\n ((self.zeta + 0.25/self.zeta)*self.SPS)\n self.d = 1+2*self.zeta*self.teta+self.teta**2\n self.gI = (4*(self.teta**2)/self.d)/(self.Kp*self.SPS)\n self.gP = (4*(self.teta*self.zeta)/self.d)/(self.Kp*self.SPS)\n self.previousSample = 1\n self.phase = 0\n self.loopFiltState = 0\n self.DDSPreviousInp = 0\n self.IntegFiltState = 0\n\n t_source = self.create_task('synchronize')\n s_in = self.create_socket_in(t_source, 'sync_in', N, np.float32)\n s_out = self.create_socket_out(t_source, 'sync_out', N, np.float32)\n\n self.create_codelet(t_source, lambda slf, lsk,\n fid: slf.func(lsk[s_in], lsk[s_out]))\n"
]
| [
[
"numpy.real",
"numpy.imag"
]
]
|
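A hedged sketch of the decision-directed phase-error detector computed inside `synchro_freq_fine.func_` above, which is where this row's `numpy.real`/`numpy.imag` calls come from; the sample value is an assumed example.

import numpy as np

sample = 0.8 - 0.3j  # assumed previous output sample
# Phase error used by the loop filter above: sign(Re) * Im - sign(Im) * Re
ph_err = np.sign(np.real(sample)) * np.imag(sample) - np.sign(np.imag(sample)) * np.real(sample)
print(ph_err)  # 0.5 for this sample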
sulabh-shr/pytorch-deeplab-xception | [
"9135e104a7a51ea9effa9c6676a2fcffe6a6a2e6"
]
| [
"train.py"
]
| [
"import argparse\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom mypath import Path\nfrom dataloaders import make_data_loader\nfrom modeling.sync_batchnorm.replicate import patch_replication_callback\nfrom modeling.deeplab import *\nfrom utils.loss import SegmentationLosses\nfrom utils.calculate_weights import calculate_weigths_labels\nfrom utils.lr_scheduler import LR_Scheduler\nfrom utils.saver import Saver\nfrom utils.summaries import TensorboardSummary\nfrom utils.metrics import Evaluator\n\nclass Trainer(object):\n def __init__(self, args):\n self.args = args\n\n # Define Saver\n self.saver = Saver(args)\n self.saver.save_experiment_config()\n # Define Tensorboard Summary\n self.summary = TensorboardSummary(self.saver.experiment_dir)\n self.writer = self.summary.create_summary()\n \n # Define Dataloader\n kwargs = {'num_workers': args.workers, 'pin_memory': True}\n self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)\n\n # Define network\n model = DeepLab(num_classes=self.nclass,\n backbone=args.backbone,\n output_stride=args.out_stride,\n sync_bn=args.sync_bn,\n freeze_bn=args.freeze_bn)\n\n train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},\n {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]\n\n # Define Optimizer\n optimizer = torch.optim.SGD(train_params, momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=args.nesterov)\n\n # Define Criterion\n # whether to use class balanced weights\n if args.use_balanced_weights:\n classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')\n if os.path.isfile(classes_weights_path):\n weight = np.load(classes_weights_path)\n else:\n weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)\n weight = torch.from_numpy(weight.astype(np.float32))\n else:\n weight = None\n self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)\n self.model, self.optimizer = model, optimizer\n \n # Define Evaluator\n self.evaluator = Evaluator(self.nclass)\n # Define lr scheduler\n self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,\n args.epochs, len(self.train_loader))\n\n # Using cuda\n if args.cuda:\n self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)\n patch_replication_callback(self.model)\n self.model = self.model.cuda()\n\n # Resuming checkpoint\n self.best_pred = 0.0\n if args.resume is not None:\n if not os.path.isfile(args.resume):\n raise RuntimeError(\"=> no checkpoint found at '{}'\" .format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n if args.cuda:\n self.model.module.load_state_dict(checkpoint['state_dict'])\n else:\n self.model.load_state_dict(checkpoint['state_dict'])\n if not args.ft:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.best_pred = checkpoint['best_pred']\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n\n # Clear start epoch if fine-tuning\n if args.ft:\n args.start_epoch = 0\n\n def training(self, epoch):\n train_loss = 0.0\n self.model.train()\n tbar = tqdm(self.train_loader)\n num_img_tr = len(self.train_loader)\n for i, sample in enumerate(tbar):\n image, target = sample['image'], sample['label']\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n self.scheduler(self.optimizer, i, epoch, self.best_pred)\n self.optimizer.zero_grad()\n output = 
self.model(image)\n loss = self.criterion(output, target)\n loss.backward()\n self.optimizer.step()\n train_loss += loss.item()\n tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))\n self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)\n\n # Show 10 * 3 inference results each epoch\n if i % (num_img_tr // 10) == 0:\n global_step = i + num_img_tr * epoch\n self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)\n\n self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)\n print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))\n print('Loss: %.3f' % train_loss)\n\n if self.args.no_val:\n # save checkpoint every epoch\n is_best = False\n self.saver.save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': self.model.module.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred,\n }, is_best)\n\n\n def validation(self, epoch):\n self.model.eval()\n self.evaluator.reset()\n tbar = tqdm(self.val_loader, desc='\\r')\n test_loss = 0.0\n for i, sample in enumerate(tbar):\n image, target = sample['image'], sample['label']\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n with torch.no_grad():\n output = self.model(image)\n loss = self.criterion(output, target)\n test_loss += loss.item()\n tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))\n pred = output.data.cpu().numpy()\n target = target.cpu().numpy()\n pred = np.argmax(pred, axis=1)\n # Add batch sample into evaluator\n self.evaluator.add_batch(target, pred)\n\n # Fast test during the training\n Acc = self.evaluator.Pixel_Accuracy()\n Acc_class = self.evaluator.Pixel_Accuracy_Class()\n mIoU = self.evaluator.Mean_Intersection_over_Union()\n FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()\n self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)\n self.writer.add_scalar('val/mIoU', mIoU, epoch)\n self.writer.add_scalar('val/Acc', Acc, epoch)\n self.writer.add_scalar('val/Acc_class', Acc_class, epoch)\n self.writer.add_scalar('val/fwIoU', FWIoU, epoch)\n print('Validation:')\n print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))\n print(\"Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, FWIoU))\n print('Loss: %.3f' % test_loss)\n\n new_pred = mIoU\n if new_pred > self.best_pred:\n is_best = True\n self.best_pred = new_pred\n self.saver.save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': self.model.module.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred,\n }, is_best)\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch DeeplabV3Plus Training\")\n parser.add_argument('--backbone', type=str, default='resnet',\n choices=['resnet', 'xception', 'drn', 'mobilenet'],\n help='backbone name (default: resnet)')\n parser.add_argument('--out-stride', type=int, default=16,\n help='network output stride (default: 8)')\n parser.add_argument('--dataset', type=str, default='pascal',\n choices=['pascal', 'coco', 'cityscapes'],\n help='dataset name (default: pascal)')\n parser.add_argument('--use-sbd', action='store_true', default=True,\n help='whether to use SBD dataset (default: True)')\n parser.add_argument('--workers', type=int, default=4,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=513,\n help='base image size')\n parser.add_argument('--crop-size', type=int, 
default=513,\n help='crop image size')\n parser.add_argument('--sync-bn', type=bool, default=None,\n help='whether to use sync bn (default: auto)')\n parser.add_argument('--freeze-bn', type=bool, default=False,\n help='whether to freeze bn parameters (default: False)')\n parser.add_argument('--loss-type', type=str, default='ce',\n choices=['ce', 'focal'],\n help='loss func type (default: ce)')\n # training hyper params\n parser.add_argument('--epochs', type=int, default=None, metavar='N',\n help='number of epochs to train (default: auto)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n training (default: auto)')\n parser.add_argument('--test-batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n testing (default: auto)')\n parser.add_argument('--use-balanced-weights', action='store_true', default=False,\n help='whether to use balanced weights (default: False)')\n # optimizer params\n parser.add_argument('--lr', type=float, default=None, metavar='LR',\n help='learning rate (default: auto)')\n parser.add_argument('--lr-scheduler', type=str, default='poly',\n choices=['poly', 'step', 'cos'],\n help='lr scheduler mode: (default: poly)')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=5e-4,\n metavar='M', help='w-decay (default: 5e-4)')\n parser.add_argument('--nesterov', action='store_true', default=False,\n help='whether to use nesterov (default: False)')\n # cuda, seed and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--gpu-ids', type=str, default='0',\n help='use which gpu to train, must be a \\\n comma-separated list of integers only (default=0)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n # checkpoint\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default=None,\n help='set the checkpoint name')\n # finetuning pre-trained models\n parser.add_argument('--ft', action='store_true', default=False,\n help='finetuning on a different dataset')\n # evaluation option\n parser.add_argument('--eval-interval', type=int, default=1,\n help='evaluation interval (default: 1)')\n parser.add_argument('--no-val', action='store_true', default=False,\n help='skip validation during training')\n\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n if args.cuda:\n try:\n args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]\n except ValueError:\n raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')\n\n if args.sync_bn is None:\n if args.cuda and len(args.gpu_ids) > 1:\n args.sync_bn = True\n else:\n args.sync_bn = False\n\n # default settings for epochs, batch_size and lr\n if args.epochs is None:\n epoches = {\n 'coco': 30,\n 'cityscapes': 200,\n 'pascal': 50,\n }\n args.epochs = epoches[args.dataset.lower()]\n\n if args.batch_size is None:\n args.batch_size = 4 * len(args.gpu_ids)\n\n if args.test_batch_size is None:\n args.test_batch_size = args.batch_size\n\n if args.lr is None:\n lrs = {\n 'coco': 0.1,\n 'cityscapes': 0.01,\n 'pascal': 0.007,\n }\n args.lr = 
lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size\n\n\n if args.checkname is None:\n args.checkname = 'deeplab-'+str(args.backbone)\n print(args)\n torch.manual_seed(args.seed)\n trainer = Trainer(args)\n print('Starting Epoch:', trainer.args.start_epoch)\n print('Total Epochs:', trainer.args.epochs)\n for epoch in range(trainer.args.start_epoch, trainer.args.epochs):\n trainer.training(epoch)\n if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):\n trainer.validation(epoch)\n\n trainer.writer.close()\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.load",
"numpy.argmax"
]
]
|
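A minimal sketch of how `Trainer.validation` above reduces network output to per-pixel class ids with `numpy.argmax` (one of this row's listed APIs); the logits shape is an assumed example in (batch, classes, height, width) layout.

import numpy as np

logits = np.random.rand(2, 21, 4, 4)  # assumed segmentation output: (batch, classes, H, W)
pred = np.argmax(logits, axis=1)      # per-pixel class id, shape (2, 4, 4)
print(pred.shape)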
wegamekinglc/incubator-airflow | [
"fc174635b0729253a86e8c877d6d8551a815a2cb"
]
| [
"airflow/providers/oracle/hooks/oracle.py"
]
| [
"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom datetime import datetime\nfrom typing import List, Optional\n\nimport cx_Oracle\nimport numpy\n\nfrom airflow.hooks.dbapi import DbApiHook\n\n\nclass OracleHook(DbApiHook):\n \"\"\"Interact with Oracle SQL.\"\"\"\n\n conn_name_attr = 'oracle_conn_id'\n default_conn_name = 'oracle_default'\n conn_type = 'oracle'\n hook_name = 'Oracle'\n\n supports_autocommit = False\n\n # pylint: disable=c-extension-no-member\n def get_conn(self) -> 'OracleHook':\n \"\"\"\n Returns a oracle connection object\n Optional parameters for using a custom DSN connection\n (instead of using a server alias from tnsnames.ora)\n The dsn (data source name) is the TNS entry\n (from the Oracle names server or tnsnames.ora file)\n or is a string like the one returned from makedsn().\n\n :param dsn: the host address for the Oracle server\n :param service_name: the db_unique_name of the database\n that you are connecting to (CONNECT_DATA part of TNS)\n\n You can set these parameters in the extra fields of your connection\n as in ``{ \"dsn\":\"some.host.address\" , \"service_name\":\"some.service.name\" }``\n see more param detail in\n `cx_Oracle.connect <https://cx-oracle.readthedocs.io/en/latest/module.html#cx_Oracle.connect>`_\n \"\"\"\n conn = self.get_connection(\n self.oracle_conn_id # type: ignore[attr-defined] # pylint: disable=no-member\n )\n conn_config = {'user': conn.login, 'password': conn.password}\n dsn = conn.extra_dejson.get('dsn')\n sid = conn.extra_dejson.get('sid')\n mod = conn.extra_dejson.get('module')\n\n service_name = conn.extra_dejson.get('service_name')\n port = conn.port if conn.port else 1521\n if dsn and sid and not service_name:\n conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, sid)\n elif dsn and service_name and not sid:\n conn_config['dsn'] = cx_Oracle.makedsn(dsn, port, service_name=service_name)\n else:\n conn_config['dsn'] = conn.host\n\n if 'encoding' in conn.extra_dejson:\n conn_config['encoding'] = conn.extra_dejson.get('encoding')\n # if `encoding` is specific but `nencoding` is not\n # `nencoding` should use same values as `encoding` to set encoding, inspired by\n # https://github.com/oracle/python-cx_Oracle/issues/157#issuecomment-371877993\n if 'nencoding' not in conn.extra_dejson:\n conn_config['nencoding'] = conn.extra_dejson.get('encoding')\n if 'nencoding' in conn.extra_dejson:\n conn_config['nencoding'] = conn.extra_dejson.get('nencoding')\n if 'threaded' in conn.extra_dejson:\n conn_config['threaded'] = conn.extra_dejson.get('threaded')\n if 'events' in conn.extra_dejson:\n conn_config['events'] = conn.extra_dejson.get('events')\n\n mode = conn.extra_dejson.get('mode', '').lower()\n if mode == 'sysdba':\n conn_config['mode'] = cx_Oracle.SYSDBA\n elif mode == 'sysasm':\n 
conn_config['mode'] = cx_Oracle.SYSASM\n elif mode == 'sysoper':\n conn_config['mode'] = cx_Oracle.SYSOPER\n elif mode == 'sysbkp':\n conn_config['mode'] = cx_Oracle.SYSBKP\n elif mode == 'sysdgd':\n conn_config['mode'] = cx_Oracle.SYSDGD\n elif mode == 'syskmt':\n conn_config['mode'] = cx_Oracle.SYSKMT\n elif mode == 'sysrac':\n conn_config['mode'] = cx_Oracle.SYSRAC\n\n purity = conn.extra_dejson.get('purity', '').lower()\n if purity == 'new':\n conn_config['purity'] = cx_Oracle.ATTR_PURITY_NEW\n elif purity == 'self':\n conn_config['purity'] = cx_Oracle.ATTR_PURITY_SELF\n elif purity == 'default':\n conn_config['purity'] = cx_Oracle.ATTR_PURITY_DEFAULT\n\n conn = cx_Oracle.connect(**conn_config)\n if mod is not None:\n conn.module = mod\n\n return conn\n\n def insert_rows(\n self,\n table: str,\n rows: List[tuple],\n target_fields=None,\n commit_every: int = 1000,\n replace: Optional[bool] = False,\n **kwargs,\n ) -> None:\n \"\"\"\n A generic way to insert a set of tuples into a table,\n the whole set of inserts is treated as one transaction\n Changes from standard DbApiHook implementation:\n\n - Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)\n - Replace NaN values with NULL using `numpy.nan_to_num` (not using\n `is_nan()` because of input types error for strings)\n - Coerce datetime cells to Oracle DATETIME format during insert\n\n :param table: target Oracle table, use dot notation to target a\n specific database\n :type table: str\n :param rows: the rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: the names of the columns to fill in the table\n :type target_fields: iterable of str\n :param commit_every: the maximum number of rows to insert in one transaction\n Default 1000, Set greater than 0.\n Set 1 to insert each row in each single transaction\n :type commit_every: int\n :param replace: Whether to replace instead of insert\n :type replace: bool\n \"\"\"\n if target_fields:\n target_fields = ', '.join(target_fields)\n target_fields = f'({target_fields})'\n else:\n target_fields = ''\n conn = self.get_conn()\n cur = conn.cursor() # type: ignore[attr-defined]\n if self.supports_autocommit:\n cur.execute('SET autocommit = 0')\n conn.commit() # type: ignore[attr-defined]\n i = 0\n for row in rows:\n i += 1\n lst = []\n for cell in row:\n if isinstance(cell, str):\n lst.append(\"'\" + str(cell).replace(\"'\", \"''\") + \"'\")\n elif cell is None:\n lst.append('NULL')\n elif isinstance(cell, float) and numpy.isnan(cell): # coerce numpy NaN to NULL\n lst.append('NULL')\n elif isinstance(cell, numpy.datetime64):\n lst.append(\"'\" + str(cell) + \"'\")\n elif isinstance(cell, datetime):\n lst.append(\n \"to_date('\" + cell.strftime('%Y-%m-%d %H:%M:%S') + \"','YYYY-MM-DD HH24:MI:SS')\"\n )\n else:\n lst.append(str(cell))\n values = tuple(lst)\n sql = f\"INSERT /*+ APPEND */ INTO {table} {target_fields} VALUES ({','.join(values)})\"\n cur.execute(sql)\n if i % commit_every == 0:\n conn.commit() # type: ignore[attr-defined]\n self.log.info('Loaded %s into %s rows so far', i, table)\n conn.commit() # type: ignore[attr-defined]\n cur.close()\n conn.close() # type: ignore[attr-defined]\n self.log.info('Done loading. 
Loaded a total of %s rows', i)\n\n def bulk_insert_rows(\n self,\n table: str,\n rows: List[tuple],\n target_fields: Optional[List[str]] = None,\n commit_every: int = 5000,\n ):\n \"\"\"\n A performant bulk insert for cx_Oracle\n that uses prepared statements via `executemany()`.\n For best performance, pass in `rows` as an iterator.\n\n :param table: target Oracle table, use dot notation to target a\n specific database\n :type table: str\n :param rows: the rows to insert into the table\n :type rows: iterable of tuples\n :param target_fields: the names of the columns to fill in the table, default None.\n If None, each rows should have some order as table columns name\n :type target_fields: iterable of str Or None\n :param commit_every: the maximum number of rows to insert in one transaction\n Default 5000. Set greater than 0. Set 1 to insert each row in each transaction\n :type commit_every: int\n \"\"\"\n if not rows:\n raise ValueError(\"parameter rows could not be None or empty iterable\")\n conn = self.get_conn()\n cursor = conn.cursor() # type: ignore[attr-defined]\n values_base = target_fields if target_fields else rows[0]\n prepared_stm = 'insert into {tablename} {columns} values ({values})'.format(\n tablename=table,\n columns='({})'.format(', '.join(target_fields)) if target_fields else '',\n values=', '.join(':%s' % i for i in range(1, len(values_base) + 1)),\n )\n row_count = 0\n # Chunk the rows\n row_chunk = []\n for row in rows:\n row_chunk.append(row)\n row_count += 1\n if row_count % commit_every == 0:\n cursor.prepare(prepared_stm)\n cursor.executemany(None, row_chunk)\n conn.commit() # type: ignore[attr-defined]\n self.log.info('[%s] inserted %s rows', table, row_count)\n # Empty chunk\n row_chunk = []\n # Commit the leftover chunk\n cursor.prepare(prepared_stm)\n cursor.executemany(None, row_chunk)\n conn.commit() # type: ignore[attr-defined]\n self.log.info('[%s] inserted %s rows', table, row_count)\n cursor.close()\n conn.close() # type: ignore[attr-defined]\n"
]
| [
[
"numpy.isnan"
]
]
|
NgvTue/test2 | [
"d475cb2fed16b46185adbe50b4179d5299c791cb"
]
| [
"speech_dia_@/src/pyannote_xxx/core/feature.py"
]
| [
"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2014-2019 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\n\n\"\"\"\n########\nFeatures\n########\n\nSee :class:`pyannote.core.SlidingWindowFeature` for the complete reference.\n\"\"\"\nimport numbers\nimport warnings\nfrom typing import Tuple, Optional, Union, Iterator\n\nimport numpy as np\n\nfrom pyannote_xxx.core.utils.types import CropMode\nfrom .segment import Segment\nfrom .segment import SlidingWindow\nfrom .timeline import Timeline\n\n\nclass SlidingWindowFeature(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"Periodic feature vectors\n\n Parameters\n ----------\n data : (nSamples, nFeatures) numpy array\n\n sliding_window : SlidingWindow\n\n\n \"\"\"\n\n def __init__(self, data: np.ndarray, sliding_window: SlidingWindow):\n self.sliding_window: SlidingWindow = sliding_window\n self.data = data\n self.__i: int = -1\n\n def __len__(self):\n \"\"\"Number of feature vectors\"\"\"\n return self.data.shape[0]\n\n @property\n def extent(self):\n return self.sliding_window.range_to_segment(0, len(self))\n\n @property\n def dimension(self):\n \"\"\"Dimension of feature vectors\"\"\"\n return self.data.shape[1]\n\n def getNumber(self):\n warnings.warn(\"This is deprecated in favor of `__len__`\",\n DeprecationWarning)\n return self.data.shape[0]\n\n def getDimension(self):\n warnings.warn(\"This is deprecated in favor of `dimension` property\",\n DeprecationWarning)\n return self.dimension\n\n def getExtent(self):\n warnings.warn(\"This is deprecated in favor of `extent` property\",\n DeprecationWarning)\n return self.extent\n\n def __getitem__(self, i: int) -> np.ndarray:\n \"\"\"Get ith feature vector\"\"\"\n return self.data[i]\n\n def __iter__(self):\n self.__i = -1\n return self\n\n def __next__(self) -> Tuple[Segment, np.ndarray]:\n self.__i += 1\n try:\n return self.sliding_window[self.__i], self.data[self.__i]\n except IndexError as e:\n raise StopIteration()\n\n def next(self):\n return self.__next__()\n\n def iterfeatures(self, window: Optional[bool] = False) \\\n -> Iterator[Union[Tuple[np.ndarray, Segment], np.ndarray]]:\n \"\"\"Feature vector iterator\n\n Parameters\n ----------\n window : bool, optional\n When True, yield both feature vector and corresponding window.\n Default is to only yield feature vector\n\n \"\"\"\n n_samples = self.data.shape[0]\n for i in range(n_samples):\n if window:\n yield self.data[i], 
self.sliding_window[i]\n else:\n yield self.data[i]\n\n def crop(self,\n focus: Union[Segment, Timeline],\n mode: CropMode = 'loose',\n fixed: Optional[float] = None,\n return_data: bool = True) \\\n -> Union[np.ndarray, 'SlidingWindowFeature']:\n \"\"\"Extract frames\n\n Parameters\n ----------\n focus : Segment or Timeline\n mode : {'loose', 'strict', 'center'}, optional\n In 'strict' mode, only frames fully included in 'focus' support are\n returned. In 'loose' mode, any intersecting frames are returned. In\n 'center' mode, first and last frames are chosen to be the ones\n whose centers are the closest to 'focus' start and end times.\n Defaults to 'loose'.\n fixed : float, optional\n Overrides `Segment` 'focus' duration and ensures that the number of\n returned frames is fixed (which might otherwise not be the case\n because of rounding errors).\n return_data : bool, optional\n Return a numpy array (default). For `Segment` 'focus', setting it\n to False will return a `SlidingWindowFeature` instance.\n\n Returns\n -------\n data : `numpy.ndarray` or `SlidingWindowFeature`\n Frame features.\n\n See also\n --------\n SlidingWindow.crop\n\n \"\"\"\n\n if (not return_data) and (not isinstance(focus, Segment)):\n msg = ('\"focus\" must be a \"Segment\" instance when \"return_data\"'\n 'is set to False.')\n raise ValueError(msg)\n\n ranges = self.sliding_window.crop(focus, mode=mode, fixed=fixed,\n return_ranges=True)\n\n # total number of samples in features\n n_samples = self.data.shape[0]\n\n # 1 for vector features (e.g. MFCC in pyannote.audio)\n # 2 for matrix features (e.g. grey-level frames in pyannote.video)\n # 3 for 3rd order tensor (e.g. RBG frames in pyannote.video)\n n_dimensions = len(self.data.shape) - 1\n\n # clip ranges\n clipped_ranges, repeat_first, repeat_last = [], 0, 0\n for start, end in ranges:\n # count number of requested samples before first sample\n repeat_first += min(end, 0) - min(start, 0)\n # count number of requested samples after last sample\n repeat_last += max(end, n_samples) - max(start, n_samples)\n # if all requested samples are out of bounds, skip\n if end < 0 or start >= n_samples:\n continue\n # keep track of non-empty clipped ranges\n clipped_ranges += [[max(start, 0), min(end, n_samples)]]\n\n if clipped_ranges:\n data = np.vstack(\n [self.data[start: end, :] for start, end in clipped_ranges])\n else:\n # if all ranges are out of bounds, just return empty data\n shape = (0,) + self.data.shape[1:]\n data = np.empty(shape)\n\n # corner case when 'fixed' duration cropping is requested:\n # correct number of samples even with out-of-bounds indices\n if fixed is not None:\n data = np.vstack([\n # repeat first sample as many times as needed\n np.tile(self.data[0], (repeat_first,) + (1,) * n_dimensions),\n data,\n # repeat last sample as many times as needed\n np.tile(self.data[n_samples - 1],\n (repeat_last,) + (1,) * n_dimensions)])\n\n # return data\n if return_data:\n return data\n\n # wrap data in a SlidingWindowFeature and return\n sliding_window = SlidingWindow(\n start=self.sliding_window[ranges[0][0]].start,\n duration=self.sliding_window.duration,\n step=self.sliding_window.step)\n return SlidingWindowFeature(data, sliding_window)\n\n def _repr_png_(self):\n from .notebook import repr_feature\n return repr_feature(self)\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array__(self) -> np.ndarray:\n return self.data\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = kwargs.get('out', ())\n for x in inputs + out:\n # 
Only support operations with instances of _HANDLED_TYPES.\n # Use SlidingWindowFeature instead of type(self) for isinstance to\n # allow subclasses that don't override __array_ufunc__ to\n # handle SlidingWindowFeature objects.\n if not isinstance(x, self._HANDLED_TYPES + (SlidingWindowFeature,)):\n return NotImplemented\n\n # Defer to the implementation of the ufunc on unwrapped values.\n inputs = tuple(x.data if isinstance(x, SlidingWindowFeature) else x\n for x in inputs)\n if out:\n kwargs['out'] = tuple(\n x.data if isinstance(x, SlidingWindowFeature) else x\n for x in out)\n data = getattr(ufunc, method)(*inputs, **kwargs)\n\n if type(data) is tuple:\n # multiple return values\n return tuple(type(self)(x, self.sliding_window) for x in data)\n elif method == 'at':\n # no return value\n return None\n else:\n # one return value\n return type(self)(data, self.sliding_window)\n\n def align(self, to: 'SlidingWindowFeature') -> 'SlidingWindowFeature':\n \"\"\"Align features by linear temporal interpolation\n\n Parameters\n ----------\n to : SlidingWindowFeature\n Features to align with.\n\n Returns\n -------\n aligned : SlidingWindowFeature\n Aligned features\n \"\"\"\n\n old_start = self.sliding_window.start\n old_step = self.sliding_window.step\n old_duration = self.sliding_window.duration\n old_samples = len(self)\n old_t = old_start + 0.5 * old_duration + np.arange(old_samples) * old_step\n\n new_start = to.sliding_window.start\n new_step = to.sliding_window.step\n new_duration = to.sliding_window.duration\n new_samples = len(to)\n new_t = new_start + 0.5 * new_duration + np.arange(new_samples) * new_step\n\n new_data = np.hstack([np.interp(new_t, old_t, old_data)[:, np.newaxis]\n for old_data in self.data.T])\n return SlidingWindowFeature(new_data, to.sliding_window)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n"
]
| [
[
"numpy.arange",
"numpy.vstack",
"numpy.tile",
"numpy.interp",
"numpy.empty"
]
]
|
HERA-Team/pyuvsim | [
"d5ae72b06e4bad07433c3fc8073bebb43a3be399"
]
| [
"pyuvsim/tests/test_antenna.py"
]
| [
"# -*- mode: python; coding: utf-8 -*\n# Copyright (c) 2021 Radio Astronomy Software Group\n# Licensed under the 3-clause BSD License\nimport os\n\nimport numpy as np\nimport yaml\nfrom astropy import units\nimport pytest\nimport pyuvdata.tests as uvtest\n\nimport pyuvsim\nfrom pyuvsim.data import DATA_PATH as SIM_DATA_PATH\n\n\[email protected](\"ignore:Cannot check consistency of a string-mode BeamList\")\ndef test_jones_set_spline(cst_beam, hera_loc):\n # Run get_beam_jones with spline options.\n array_location = hera_loc\n beam0 = cst_beam.copy()\n beam0.freq_interp_kind = 'cubic'\n telescope_config_name = os.path.join(SIM_DATA_PATH, 'mwa128_config.yaml')\n with open(telescope_config_name, 'r') as yf:\n telconfig = yaml.safe_load(yf)\n telconfig['spline_interp_opts'] = {'kx' : 1, 'ky' : 1}\n\n beam_list = pyuvsim.simsetup._construct_beam_list(np.arange(1), telconfig)\n beam_list.set_obj_mode()\n beam_list.append(beam0)\n\n assert beam0 is beam_list[-1]\n\n # Make antenna that uses beam #1\n antenna = pyuvsim.Antenna('ant1', 1, np.array([0, 10, 0]), 1)\n array = pyuvsim.Telescope('telescope_name', array_location, beam_list)\n\n altaz = [[0.0134], [1.0]]\n\n alts = np.linspace(0, np.pi / 4, 50)\n azs = np.linspace(0, 2 * np.pi, 50)\n\n alts, azs = np.meshgrid(alts, azs)\n\n altaz = np.zeros((50**2, 3))\n altaz[:, 0] = alts.flatten()\n altaz[:, 1] = azs.flatten()\n\n antenna.get_beam_jones(array, altaz, 150e6, interpolation_function='az_za_simple')\n\n\ndef test_jones_set_interp(cst_beam, hera_loc):\n # check setting the interpolation method\n\n array_location = hera_loc\n\n beam = cst_beam.copy()\n beam.freq_interp_kind = None\n\n beam_list = pyuvsim.BeamList([beam])\n antenna1 = pyuvsim.Antenna('ant1', 1, np.array([0, 10, 0]), 0)\n array = pyuvsim.Telescope('telescope_name', array_location, beam_list)\n source_altaz = np.array([[0.0], [np.pi / 4.]])\n freq = 123e6 * units.Hz\n\n with pytest.raises(ValueError, match='freq_interp_kind must be set'):\n antenna1.get_beam_jones(array, source_altaz, freq)\n\n jones = antenna1.get_beam_jones(array, source_altaz, freq, freq_interp_kind='cubic')\n assert beam.freq_interp_kind == 'cubic'\n jones0 = antenna1.get_beam_jones(array, source_altaz, freq)\n jones1 = antenna1.get_beam_jones(array, source_altaz, freq, freq_interp_kind='linear')\n assert beam.freq_interp_kind == 'linear'\n jones2 = antenna1.get_beam_jones(array, source_altaz, freq)\n\n assert (np.all(jones2 == jones0)\n and np.all(jones1 == jones)\n and np.all(jones1 == jones0))\n\n\ndef test_set_interps(cst_beam, hera_loc):\n array_location = hera_loc\n\n beam = cst_beam.copy()\n beam.interpolation_function = None\n\n beam_list = pyuvsim.BeamList([beam])\n antenna1 = pyuvsim.Antenna('ant1', 1, np.array([0, 10, 0]), 0)\n array = pyuvsim.Telescope('telescope_name', array_location, beam_list)\n source_altaz = np.array([[0.0], [np.pi / 4.]])\n freq = 123e6 * units.Hz\n\n with uvtest.check_warnings(\n UserWarning, match=\"UVBeam interpolation_function is not set\"\n ):\n antenna1.get_beam_jones(array, source_altaz, freq)\n\n assert beam.interpolation_function == 'az_za_simple'\n"
]
| [
[
"numpy.meshgrid",
"numpy.linspace",
"numpy.arange",
"numpy.all",
"numpy.array",
"numpy.zeros"
]
]
|
frgfm/sdcnd-capstone | [
"75b0c83ea4c01be4d249caa1a0e4faff7419b2b7"
]
| [
"ros/src/styx/bridge.py"
]
| [
"\nimport rospy\n\nimport tf\nfrom geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped\nfrom dbw_mkz_msgs.msg import SteeringReport, ThrottleCmd, BrakeCmd, SteeringCmd\nfrom std_msgs.msg import Float32 as Float\nfrom std_msgs.msg import Bool\nfrom sensor_msgs.msg import PointCloud2\nfrom sensor_msgs.msg import Image\nimport sensor_msgs.point_cloud2 as pcl2\nfrom std_msgs.msg import Header\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom styx_msgs.msg import TrafficLight, TrafficLightArray, Lane\nimport numpy as np\nfrom PIL import Image as PIL_Image\nfrom io import BytesIO\nimport base64\n\nimport math\n\nTYPE = {\n 'bool': Bool,\n 'float': Float,\n 'pose': PoseStamped,\n 'pcl': PointCloud2,\n 'twist': TwistStamped,\n 'steer': SteeringReport,\n 'trafficlights': TrafficLightArray,\n 'steer_cmd': SteeringCmd,\n 'brake_cmd': BrakeCmd,\n 'throttle_cmd': ThrottleCmd,\n 'path_draw': Lane,\n 'image': Image\n}\n\nNUM_IMAGES_TO_SKIP = 4\n\n\nclass Bridge(object):\n def __init__(self, conf, server):\n rospy.init_node('styx_server')\n self.server = server\n self.vel = 0.\n self.yaw = None\n self.angular_vel = 0.\n self.bridge = CvBridge()\n self.img_count = 0\n\n self.callbacks = {\n '/vehicle/steering_cmd': self.callback_steering,\n '/vehicle/throttle_cmd': self.callback_throttle,\n '/vehicle/brake_cmd': self.callback_brake,\n '/final_waypoints': self.callback_path\n }\n\n self.subscribers = [rospy.Subscriber(e.topic, TYPE[e.type], self.callbacks[e.topic])\n for e in conf.subscribers]\n\n self.publishers = {e.name: rospy.Publisher(e.topic, TYPE[e.type], queue_size=1)\n for e in conf.publishers}\n\n def create_light(self, x, y, z, yaw, state):\n light = TrafficLight()\n\n light.header = Header()\n light.header.stamp = rospy.Time.now()\n light.header.frame_id = '/world'\n\n light.pose = self.create_pose(x, y, z, yaw)\n light.state = state\n\n return light\n\n def create_pose(self, x, y, z, yaw=0.):\n pose = PoseStamped()\n\n pose.header = Header()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = '/world'\n\n pose.pose.position.x = x\n pose.pose.position.y = y\n pose.pose.position.z = z\n\n q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw / 180.)\n pose.pose.orientation = Quaternion(*q)\n\n return pose\n\n def create_float(self, val):\n fl = Float()\n fl.data = val\n return fl\n\n def create_twist(self, velocity, angular):\n tw = TwistStamped()\n tw.twist.linear.x = velocity\n tw.twist.angular.z = angular\n return tw\n\n def create_steer(self, val):\n st = SteeringReport()\n st.steering_wheel_angle_cmd = val * math.pi / 180.\n st.enabled = True\n st.speed = self.vel\n return st\n\n def calc_angular(self, yaw):\n angular_vel = 0.\n if self.yaw is not None:\n angular_vel = (yaw - self.yaw) / (rospy.get_time() - self.prev_time)\n self.yaw = yaw\n self.prev_time = rospy.get_time()\n return angular_vel\n\n def create_point_cloud_message(self, pts):\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud_message = pcl2.create_cloud_xyz32(header, pts)\n return cloud_message\n\n def broadcast_transform(self, name, position, orientation):\n br = tf.TransformBroadcaster()\n br.sendTransform(position, orientation, rospy.Time.now(), name, \"world\")\n\n def publish_odometry(self, data):\n pose = self.create_pose(data['x'], data['y'], data['z'], data['yaw'])\n\n position = (data['x'], data['y'], data['z'])\n orientation = tf.transformations.quaternion_from_euler(0, 0, math.pi * data['yaw'] / 180.)\n 
self.broadcast_transform(\"base_link\", position, orientation)\n\n self.publishers['current_pose'].publish(pose)\n self.vel = data['velocity'] * 0.44704\n self.angular = self.calc_angular(data['yaw'] * math.pi / 180.)\n self.publishers['current_velocity'].publish(self.create_twist(self.vel, self.angular))\n\n def publish_controls(self, data):\n steering, throttle, brake = data['steering_angle'], data['throttle'], data['brake']\n self.publishers['steering_report'].publish(self.create_steer(steering))\n self.publishers['throttle_report'].publish(self.create_float(throttle))\n self.publishers['brake_report'].publish(self.create_float(brake))\n\n def publish_obstacles(self, data):\n for obs in data['obstacles']:\n pose = self.create_pose(obs[0], obs[1], obs[2])\n self.publishers['obstacle'].publish(pose)\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n cloud = pcl2.create_cloud_xyz32(header, data['obstacles'])\n self.publishers['obstacle_points'].publish(cloud)\n\n def publish_lidar(self, data):\n self.publishers['lidar'].publish(self.create_point_cloud_message(zip(data['lidar_x'],\n data['lidar_y'], data['lidar_z'])))\n\n def publish_traffic(self, data):\n x, y, z = data['light_pos_x'], data['light_pos_y'], data['light_pos_z'],\n yaw = [math.atan2(dy, dx) for dx, dy in zip(data['light_pos_dx'], data['light_pos_dy'])]\n status = data['light_state']\n\n lights = TrafficLightArray()\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = '/world'\n lights.lights = [self.create_light(*e) for e in zip(x, y, z, yaw, status)]\n self.publishers['trafficlights'].publish(lights)\n\n def publish_dbw_status(self, data):\n self.publishers['dbw_status'].publish(Bool(data))\n\n def publish_camera(self, data):\n self.img_count += 1\n if self.img_count >= NUM_IMAGES_TO_SKIP:\n imgString = data[\"image\"]\n image = PIL_Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n image_message = self.bridge.cv2_to_imgmsg(image_array, encoding=\"rgb8\")\n self.publishers['image'].publish(image_message)\n self.img_count = 0\n\n def callback_steering(self, data):\n self.server('steer', data={'steering_angle': str(data.steering_wheel_angle_cmd)})\n\n def callback_throttle(self, data):\n self.server('throttle', data={'throttle': str(data.pedal_cmd)})\n\n def callback_brake(self, data):\n self.server('brake', data={'brake': str(data.pedal_cmd)})\n\n def callback_path(self, data):\n x_values = []\n y_values = []\n z_values = []\n for waypoint in data.waypoints:\n x = waypoint.pose.pose.position.x\n y = waypoint.pose.pose.position.y\n z = waypoint.pose.pose.position.z + 0.5\n x_values.append(x)\n y_values.append(y)\n z_values.append(z)\n\n self.server('drawline', data={'next_x': x_values, 'next_y': y_values, 'next_z': z_values})\n"
]
| [
[
"numpy.asarray"
]
]
|
xuzhenghao0502/CarND-Capstone | [
"57aa0e961e727f120af5aaa28660e2b865b014cc"
]
| [
"traffic_light_classifier_training/tl_detector_model.py"
]
| [
"import glob\nimport cv2\nimport numpy as np\n\n\nimport tensorflow as tf\nfrom keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D, Conv2D, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras.utils.np_utils import to_categorical\nfrom keras import losses, optimizers, regularizers\n\nfrom sklearn.utils import shuffle\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\nNO_LIGHT_DATA_PATH = '/share/simulator_0220_3/no_light/'\nRED_LIGHT_DATA_PATH = '/share/simulator_0220_3/has_light/red/'\nYELLOW_LIGHT_DATA_PATH = '/share/simulator_0220_3/has_light/yellow/'\nGREEN_LIGHT_DATA_PATH = '/share/simulator_0220_3/has_light/green/'\n\n\ndef import_data_set():\n training_data = []\n for name in glob.glob(RED_LIGHT_DATA_PATH + '*.jpg'):\n img = cv2.imread(name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n resized_img = cv2.resize(img, (60, 80))\n training_data.append((resized_img / 255., '0'))\n for name in glob.glob(YELLOW_LIGHT_DATA_PATH + '*.jpg'):\n img = cv2.imread(name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n resized_img = cv2.resize(img, (60, 80))\n training_data.append((resized_img / 255., '1'))\n# training_data.append((resized_img / 255., '1'))\n for name in glob.glob(GREEN_LIGHT_DATA_PATH + '*.jpg'):\n img = cv2.imread(name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n resized_img = cv2.resize(img, (60, 80))\n training_data.append((resized_img / 255., '2'))\n# training_data.append((resized_img / 255., '2'))\n# too many no_light images, use one-third of them\n idx = 0\n for name in glob.glob(NO_LIGHT_DATA_PATH + '*.jpg'):\n if idx % 3 == 0:\n img = cv2.imread(name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n resized_img = cv2.resize(img, (60, 80))\n training_data.append((resized_img / 255., '3'))\n idx += 1\n return training_data\n\n\ndef generate_training_validation_data(dataset):\n X_data = []\n y_data = []\n\n for data in dataset:\n X_data.append(data[0])\n y_data.append(data[1])\n # flip the last image and add it with the label to the list\n X_data.append(cv2.flip(data[0],1))\n y_data.append(data[1]) \n # Shuffle the list\n sklearn.utils.shuffle(X_data, y_data)\n print(\"Data configured\")\n print(\"--------------------------------------------\")\n print(\"Images =\", len(dataset))\n print(\"Augmented Images =\", len(X_data))\n print(\"--------------------------------------------\")\n return np.array(X_data), np.array(y_data)\n\n\ndata_set = import_data_set()\n\nprint(\"training data size: {}\".format(len(data_set)))\nprint(\"first image size: \")\nprint(data_set[0][0].shape)\ndata = generate_training_validation_data(data_set)\n\ncategorical_labels = to_categorical(data[1])\n\nnum_classes = 4\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(80, 60, 3), padding='same', activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(MaxPooling2D(2,2))\nDropout(0.8)\n\n# model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\n# model.add(MaxPooling2D(2,2))\nmodel.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_initializer='random_uniform', 
kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(MaxPooling2D(2,2))\nDropout(0.8)\nmodel.add(Flatten())\n\n#model.add(Dense(128, activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\n# model.add(Dense(16, activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(Dense(8, activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n\nloss = losses.categorical_crossentropy\noptimizer = optimizers.Adam()\n\nmodel.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n\nmodel.fit(data[0], categorical_labels, batch_size=32, epochs=25, verbose=True, validation_split=0.15, shuffle=True)\n\nscore = model.evaluate(data[0], categorical_labels, verbose=0)\nprint(score)\n\nmodel.save('tl_classifier_simulator.h5')\n"
]
| [
[
"sklearn.utils.shuffle",
"numpy.array"
]
]
|