Columns:
repo_name          stringlengths 6–130
hexsha             sequence
file_path          sequence
code               sequence
apis               sequence
possible_versions  list
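To make the flattened schema above concrete, the short sketch below shows one way records with these columns could be iterated. It is an illustration only, not part of the dataset: the JSON Lines storage format, the file name records.jsonl, and the helper iter_records are all assumptions, and the field names simply mirror the columns listed above.

import json

def iter_records(path="records.jsonl"):
    # Hypothetical helper: assumes the dump is stored as JSON Lines,
    # one record per line, with the columns listed in the schema above.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

for rec in iter_records():
    # repo_name is a plain string; hexsha, file_path, code and apis are
    # parallel sequences with one entry per source file in the record,
    # and each apis entry is itself a list of fully qualified API names.
    for file_path, code, apis in zip(rec["file_path"], rec["code"], rec["apis"]):
        print(rec["repo_name"], file_path, len(code.splitlines()), sorted(apis))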
AshleySetter/optosim
[ "248d18216bbe8c494d43b2ed43646546ca30dc6b" ]
[ "optosim/optosim/deprecated_code/sde_solver_deprecated/setup.py" ]
[ "from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy \n\nextensions = [Extension(\n name=\"solve\",\n sources=[\"solve.pyx\"],\n include_dirs=[numpy.get_include()],\n )\n]\n\nsetup(\n ext_modules = cythonize(extensions),\n)\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Qcircuits/exopy_hqc_legacy
[ "1566f08e88c828b5410c02347b3539252e197d40" ]
[ "exopy_hqc_legacy/tasks/tasks/util/load_tasks.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.\n#\n# Distributed under the terms of the BSD license.\n#\n# The full license is in the file LICENCE, distributed with this software.\n# -----------------------------------------------------------------------------\n\"\"\"Tasks to used to load a file in memory.\n\n\"\"\"\nimport os\n\nimport numpy as np\nimport h5py\nfrom atom.api import (Bool, Unicode, List, set_default)\nfrom past.builtins import basestring\n\nfrom exopy.tasks.api import SimpleTask, InterfaceableTaskMixin, TaskInterface\n\n\ndef _make_array(names, dtypes='f8'):\n if isinstance(dtypes, basestring):\n dtypes = [dtypes for i in range(len(names))]\n\n dtype = {'names': names, 'formats': dtypes}\n return np.ones((5,), dtype=dtype)\n\n\nclass LoadArrayTask(InterfaceableTaskMixin, SimpleTask):\n \"\"\" Load an array from the disc into the database.\n\n \"\"\"\n #: Folder from which to load the data.\n folder = Unicode().tag(pref=True, fmt=True)\n\n #: Name of the file from which to load the data.\n filename = Unicode().tag(pref=True, fmt=True)\n\n #: Kind of file to load.\n selected_format = Unicode().tag(pref=True)\n\n database_entries = set_default({'array': _make_array(['var1', 'var2'])})\n\n def check(self, *args, **kwargs):\n \"\"\"Check that the provided path and filename make sense.\n\n \"\"\"\n test, traceback = super(LoadArrayTask, self).check(*args, **kwargs)\n err_path = self.get_error_path()\n\n if not test:\n return test, traceback\n\n full_folder_path = self.format_string(self.folder)\n filename = self.format_string(self.filename)\n full_path = os.path.join(full_folder_path, filename)\n\n if not os.path.isfile(full_path):\n msg = ('File does not exist, be sure that your measure will '\n 'create it before this task is executed.')\n traceback[err_path + '-file'] = msg\n\n return test, traceback\n\n\nclass CSVLoadInterface(TaskInterface):\n \"\"\"Interface used to load CSV files.\n\n \"\"\"\n #: Delimiter used in the file to load.\n delimiter = Unicode('\\t').tag(pref=True)\n\n #: Character used to signal a comment.\n comments = Unicode('#').tag(pref=True)\n\n #: Flag indicating whether or not to use the first row as column names.\n names = Bool(True).tag(pref=True)\n\n #: The users can provide the names which will be available in its file\n #: if the file cannot be found when checks are run.\n c_names = List(Unicode()).tag(pref=True)\n\n #: Class attr used in the UI.\n file_formats = ['CSV']\n\n def perform(self):\n \"\"\"Load a file stored in csv format.\n\n \"\"\"\n task = self.task\n folder = task.format_string(task.folder)\n filename = task.format_string(task.filename)\n full_path = os.path.join(folder, filename)\n\n comment_lines = 0\n with open(full_path) as f:\n while True:\n if f.readline().startswith(self.comments):\n comment_lines += 1\n else:\n break\n\n data = np.genfromtxt(full_path, comments=self.comments,\n delimiter=self.delimiter, names=self.names,\n skip_header=comment_lines)\n\n task.write_in_database('array', data)\n\n def check(self, *args, **kwargs):\n \"\"\"Try to find the names of the columns to add the array in the\n database.\n\n \"\"\"\n task = self.task\n if self.c_names:\n return True, {}\n\n try:\n full_folder_path = task.format_string(task.folder)\n filename = task.format_string(task.filename)\n except Exception:\n return True, {}\n\n full_path = os.path.join(full_folder_path, filename)\n\n if 
os.path.isfile(full_path):\n with open(full_path) as f:\n while True:\n line = f.readline()\n if not line.startswith(self.comments):\n names = line.split(self.delimiter)\n names = [n.strip() for n in names if n]\n self.task.write_in_database('array',\n _make_array(names))\n break\n\n return True, {}\n\n def _post_setattr_c_names(self, old, new):\n \"\"\"Keep the c_names in sync with the array in the database.\n\n \"\"\"\n if new:\n self.task.write_in_database('array', _make_array(new))\n\n\nclass H5PYLoadInterface(TaskInterface):\n \"\"\"Interface used to load .h5 files.\n\n The task supports the Single Writter Multiple\n Readers mode which allows read-only access while the file is still\n being written by another task without any race conditions.\n\n \"\"\"\n #: Class attr used in the UI.\n file_formats = ['H5PY']\n\n #: Whether or not the HDF5 file supports SWMR\n swmr = Bool(True).tag(pref=True)\n\n def perform(self):\n \"\"\"Load a file stored in h5py format.\n\n Can also handle a file currently opened by a SaveFileHDF5Task.\n For more reliability, both tasks should use the SWMR mode.\n \"\"\"\n task = self.task\n folder = task.format_string(task.folder)\n filename = task.format_string(task.filename)\n full_path = os.path.join(folder, filename)\n\n with h5py.File(full_path,'r', swmr=self.swmr) as f:\n data_dict = {}\n # If the file is still opened by a saveFileHDF5Task,\n # we need to truncate the data\n if 'count_calls' in f.attrs:\n count_calls = f.attrs['count_calls']\n for key in f:\n data_dict[key] = np.array(f[key][:count_calls])\n\n task.write_in_database('array', data_dict)\n\n def check(self, *args, **kwargs):\n \"\"\"Try to find the names of the keys\n\n \"\"\"\n try:\n full_folder_path = task.format_string(task.folder)\n filename = task.format_string(task.filename)\n except Exception:\n return True, {}\n\n full_path = os.path.join(full_folder_path, filename)\n\n if os.path.isfile(full_path):\n with h5py.File(full_path,'r', swmr=self.swmr) as f:\n self.task.write_in_database('array',\n {k: np.ones(5) for k in f})\n\n return True, {}\n" ]
[ [ "numpy.array", "numpy.genfromtxt", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaoyu2018/QuantumCnn--NetworkAttackDetection
[ "fe96b3ab9641e0c2a4df421f3fa470be1c4061ed" ]
[ "prehandle.py" ]
[ "#coding:utf-8\r\n\r\n#KDD99数据集预处理\r\n#共使用39个特征,去除了原数据集中20、21号特征\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nfrom datetime import datetime\r\nfrom sklearn import preprocessing # 数据标准化处理\r\n\r\n\r\n\r\n#定义KDD99字符型特征转数值型特征函数\r\ndef char2num(sourceFile, handledFile):\r\n print('START: 字符型特征转数值型特征函数中')\r\n data_file=open(handledFile,'w',newline='') #python3.x中添加newline=''这一参数使写入的文件没有多余的空行\r\n global dataCnt\r\n with open(sourceFile, 'r') as data_source:\r\n csv_reader=csv.reader(data_source)\r\n csv_writer=csv.writer(data_file)\r\n dataCnt=0 #记录数据的行数,初始化为0\r\n for row in csv_reader:\r\n temp_line=np.array(row) #将每行数据存入temp_line数组里\r\n temp_line[1]=handleProtocol(row) #将源文件行中3种协议类型转换成数字标识\r\n temp_line[2]=handleService(row) #将源文件行中70种网络服务类型转换成数字标识\r\n temp_line[3]=handleFlag(row) #将源文件行中11种网络连接状态转换成数字标识\r\n temp_line[41]=handleLabel(row) #将源文件行中23种攻击类型转换成数字标识\r\n csv_writer.writerow(temp_line)\r\n dataCnt+=1\r\n #输出每行数据中所修改后的状态\r\n data_file.close()\r\n print('FINISH: 字符型特征转数值型特征函数完成\\n')\r\n\r\n#将相应的非数字类型转换为数字标识即符号型数据转化为数值型数据\r\ndef find_index(x,y):\r\n return [i for i in range(len(y)) if y[i]==x]\r\n\r\n#定义将源文件行中3种协议类型转换成数字标识的函数\r\ndef handleProtocol(input):\r\n protocol_list=['tcp','udp','icmp']\r\n if input[1] in protocol_list:\r\n return find_index(input[1],protocol_list)[0]\r\n\r\n#定义将源文件行中70种网络服务类型转换成数字标识的函数\r\ndef handleService(input):\r\n service_list=['aol','auth','bgp','courier','csnet_ns','ctf','daytime','discard','domain','domain_u',\r\n 'echo','eco_i','ecr_i','efs','exec','finger','ftp','ftp_data','gopher','harvest','hostnames',\r\n 'http','http_2784','http_443','http_8001','imap4','IRC','iso_tsap','klogin','kshell','ldap',\r\n 'link','login','mtp','name','netbios_dgm','netbios_ns','netbios_ssn','netstat','nnsp','nntp',\r\n 'ntp_u','other','pm_dump','pop_2','pop_3','printer','private','red_i','remote_job','rje','shell',\r\n 'smtp','sql_net','ssh','sunrpc','supdup','systat','telnet','tftp_u','tim_i','time','urh_i','urp_i',\r\n 'uucp','uucp_path','vmnet','whois','X11','Z39_50']\r\n if input[2] in service_list:\r\n return find_index(input[2],service_list)[0]\r\n\r\n#定义将源文件行中11种网络连接状态转换成数字标识的函数\r\ndef handleFlag(input):\r\n flag_list=['OTH','REJ','RSTO','RSTOS0','RSTR','S0','S1','S2','S3','SF','SH']\r\n if input[3] in flag_list:\r\n return find_index(input[3],flag_list)[0]\r\n\r\n#定义将源文件行中攻击类型转换成数字标识的函数(共出现了22个攻击类型+1个未受到攻击)\r\ndef handleLabel(input):\r\n global label_list\r\n label_list = ['normal.', # normal\r\n 'back.', 'land.', 'neptune.', 'pod.', 'smurf.', 'teardrop.', # DOS\r\n 'ipsweep.', 'nmap.', 'portsweep.', 'satan.', # PROBE\r\n 'ftp_write.', 'guess_passwd.', 'imap.', 'multihop.', 'phf.', 'spy.', 'warezclient.', 'warezmaster.', # R2L\r\n 'buffer_overflow.', 'loadmodule.', 'perl.', 'rootkit.'] # U2R\r\n\r\n if input[41] in label_list:\r\n return find_index(input[41], label_list)[0]\r\n else:\r\n label_list.append(input[41])\r\n return find_index(input[41], label_list)[0]\r\n\r\ndef standardize(inputFile):\r\n import warnings\r\n # 忽略UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. 
You may need to prescale your features.\r\n # warnings.warn(\"Numerical issues were encountered \"\r\n warnings.filterwarnings(\"ignore\", message=\"Numerical issues were encountered \")\r\n print('START: 数据标准化中')\r\n dataMatrix = np.loadtxt(open(inputFile,\"rb\"),delimiter=\",\",skiprows=0) # 读入数据\r\n labelColumn = dataMatrix[:,-1]\r\n result = preprocessing.scale(dataMatrix[:,:-1]) # 标签列不参与训练\r\n print('FINISH: 数据标准化完成\\n')\r\n return result, labelColumn\r\n\r\ndef normalize(inMatrix):\r\n print('START: 数据归一化中')\r\n np.seterr(divide='ignore',invalid='ignore') # 忽略0/0的报错\r\n minVals = inMatrix.min(0)\r\n maxVals = inMatrix.max(0)\r\n ranges = maxVals - minVals\r\n # normData = np.zeros(np.shape(inMatrix))\r\n m = inMatrix.shape[0]\r\n normData = inMatrix - np.tile(minVals, (m, 1))\r\n normData = normData/np.tile(ranges, (m, 1))\r\n # 去掉数据中的空列\r\n print('FINISH: 数据归一化完成\\n')\r\n return normData, ranges, minVals\r\n\r\n\r\n\r\n\r\ndef exportData(npData, outputFile):\r\n \r\n\r\n \r\n pd_data = pd.DataFrame(npData, columns=['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment',\r\n 'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted',\r\n 'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds',\r\n 'is_host_login', 'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate',\r\n 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate',\r\n 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',\r\n 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',\r\n 'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate'])\r\n pd_data.drop('num_outbound_cmds', axis=1, inplace=True) # 删除存在空值的列\r\n pd_data.drop('is_host_login', axis=1, inplace=True) # 删除存在空值的列\r\n pd_data.to_csv(outputFile, header=None, index=None)\r\n \r\n\r\n\r\n\r\ndef run(source,temp):\r\n char2num(source, temp) # 字符型特征转数字型特征\r\n stdData, labelColumn = standardize(temp)\r\n normData, _, _ = normalize(stdData)\r\n\r\n #数据集乱序\r\n np.random.seed(116)\r\n np.random.shuffle(normData)\r\n np.random.seed(116)\r\n np.random.shuffle(labelColumn)\r\n \r\n #按6:2:2分出训练集,验证集和测试集\r\n n_data=len(labelColumn)\r\n split_ind1 = int(n_data * 0.6)\r\n split_ind2 = int(n_data * 0.8)\r\n\r\n train_data=normData[:split_ind1,:]\r\n train_label = labelColumn[:split_ind1]\r\n val_data=normData[split_ind1:split_ind2,:]\r\n val_label = labelColumn[split_ind1:split_ind2]\r\n test_data=normData[split_ind2:,:]\r\n test_label = labelColumn[split_ind2:]\r\n \r\n \r\n\r\n label = pd.DataFrame(train_label,columns=[\"attack_type\"])\r\n label.to_csv(\".//dataset//\"+\"train_label.csv\", header=None, index=None)\r\n label = pd.DataFrame(val_label, columns=[\"attack_type\"])\r\n label.to_csv(\".//dataset//\"+\"val_label.csv\", header=None, index=None)\r\n label = pd.DataFrame(test_label, columns=[\"attack_type\"])\r\n label.to_csv(\".//dataset//\"+\"test_label.csv\", header=None, index=None)\r\n \r\n print('START: 数据导出中')\r\n exportData(train_data, \".//dataset//\"+\"train_data.csv\")\r\n exportData(val_data, \".//dataset//\"+\"val_data.csv\")\r\n exportData(test_data, \".//dataset//\"+\"test_data.csv\")\r\n \r\n print(f'FINISH: 数据导出成功\\n共导出 {dataCnt} 条数据')\r\n\r\n\r\nif __name__=='__main__': \r\n start_time=datetime.now()\r\n\r\n sourceFile= './/dataset//kddcup.data_10_percent_corrected'\r\n deCharFile = 
'.//dataset//decharedData.csv'\r\n \r\n run(sourceFile,deCharFile)\r\n \r\n end_time=datetime.now()\r\n print(\"运行时间 \",(end_time-start_time),'s') #输出程序运行时间\r\n" ]
[ [ "numpy.random.seed", "numpy.tile", "numpy.random.shuffle", "pandas.DataFrame", "numpy.seterr", "numpy.array", "sklearn.preprocessing.scale" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
TuTou1024/computer-vision
[ "bdb71c3037c02e594900ae39367b7ecd69f9fc30" ]
[ "exp3/blend.py" ]
[ "import math\nimport sys\n\nimport cv2\nimport numpy as np\n\n\nclass ImageInfo:\n def __init__(self, name, img, position):\n self.name = name\n self.img = img\n self.position = position\n\n\ndef imageBoundingBox(img, M):\n \"\"\"\n This is a useful helper function that you might choose to implement\n that takes an image, and a transform, and computes the bounding box\n of the transformed image.\n\n INPUT:\n img: image to get the bounding box of\n M: the transformation to apply to the img\n OUTPUT:\n minX: int for the minimum X value of a corner\n minY: int for the minimum Y value of a corner\n maxX: int for the maximum X value of a corner\n maxY: int for the maximum Y value of a corner\n \"\"\"\n #TODO 8\n #TODO-BLOCK-BEGIN\n row, col = img.shape[:2]\n\n corner = np.array([[0, 0, col - 1, col - 1],\n [0, row - 1, 0, row - 1],\n [1, 1, 1, 1]])\n res = np.dot(M, corner)\n res = res / res[-1]\n minX, minY, _ = np.min(res, axis=1)\n maxX, maxY, _ = np.max(res, axis=1)\n #raise Exception(\"TODO in blend.py not implemented\")\n #TODO-BLOCK-END\n return int(minX), int(minY), int(maxX), int(maxY)\n\n\ndef accumulateBlend(img, acc, M, blendWidth):\n \"\"\"\n INPUT:\n img: image to add to the accumulator\n acc: portion of the accumulated image where img should be added\n M: the transformation mapping the input image to the accumulator\n blendWidth: width of blending function. horizontal hat function\n OUTPUT:\n modify acc with weighted copy of img added where the first\n three channels of acc record the weighted sum of the pixel colors\n and the fourth channel of acc records a sum of the weights\n \"\"\"\n # BEGIN TODO 10\n # Fill in this routine\n #TODO-BLOCK-BEGIN\n acc_rows, acc_cols, _ = acc.shape\n rows, cols = img.shape[:2]\n img = cv2.copyMakeBorder(img, 0, acc_rows - rows, 0, acc_cols - cols, cv2.BORDER_CONSTANT, value=0)\n\n row, col, _ = img.shape\n x_range = np.arange(col)\n y_range = np.arange(row)\n (x_grid, y_grid) = np.meshgrid(x_range, y_range)\n ones = np.ones((row, col))\n coordinates = np.dstack((x_grid, y_grid, ones))\n coordinates = coordinates.reshape((col * row, 3))\n coordinates = coordinates.T\n\n location = np.linalg.inv(M).dot(coordinates)\n location = location / location[2]\n\n map_x = location[0].reshape((row, col)).astype(np.float32)\n map_y = location[1].reshape((row, col)).astype(np.float32)\n img_warped = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR)\n\n (minX, minY, maxX, maxY) = imageBoundingBox(img, M)\n\n dst_one = np.ones((img_warped.shape[0], img_warped.shape[1], 1))\n dst_img = np.dstack((img_warped, dst_one))\n k = 1 / blendWidth\n feather_right = np.clip(np.linspace(-k * minX, k * (acc_cols - 1 - minX), acc_cols), 0, 1).reshape((1, acc_cols, 1))\n feather_left = np.ones((1, acc_cols, 1)) - feather_right\n\n img_feathered = feather_right * dst_img\n acc *= feather_left\n\n grayimg = cv2.cvtColor(img_warped, cv2.COLOR_BGR2GRAY)\n grayacc = cv2.cvtColor(acc[:, :, :3].astype(np.uint8), cv2.COLOR_BGR2GRAY)\n maskimg = (grayimg != 0).reshape((acc_rows, acc_cols, 1))\n maskacc = (grayacc != 0).reshape((acc_rows, acc_cols, 1))\n\n img_masked = maskimg * img_feathered\n acc *= maskacc\n acc += img_masked\n #raise Exception(\"TODO in blend.py not implemented\")\n #TODO-BLOCK-END\n # END TODO\n\n\ndef normalizeBlend(acc):\n \"\"\"\n INPUT:\n acc: input image whose alpha channel (4th channel) contains\n normalizing weight values\n OUTPUT:\n img: image with r,g,b values of acc normalized\n \"\"\"\n # BEGIN TODO 11\n # fill in this routine..\n #TODO-BLOCK-BEGIN\n 
acc[:, :, 3][acc[:, :, 3] == 0] = 1\n img = acc / acc[:, :, 3].reshape((acc.shape[0], acc.shape[1], 1))\n img = img[:, :, :3].astype(np.uint8)\n #raise Exception(\"TODO in blend.py not implemented\")\n #TODO-BLOCK-END\n # END TODO\n return img\n\n\ndef getAccSize(ipv):\n \"\"\"\n This function takes a list of ImageInfo objects consisting of images and\n corresponding transforms and Returns useful information about the accumulated\n image.\n\n INPUT:\n ipv: list of ImageInfo objects consisting of image (ImageInfo.img) and transform(image (ImageInfo.position))\n OUTPUT:\n accWidth: Width of accumulator image(minimum width such that all tranformed images lie within acc)\n accHeight: Height of accumulator image(minimum height such that all tranformed images lie within acc)\n\n channels: Number of channels in the accumulator image\n width: Width of each image(assumption: all input images have same width)\n translation: transformation matrix so that top-left corner of accumulator image is origin\n \"\"\"\n\n # Compute bounding box for the mosaic\n minX = sys.maxsize\n minY = sys.maxsize\n maxX = 0\n maxY = 0\n channels = -1\n width = -1 # Assumes all images are the same width\n M = np.identity(3)\n for i in ipv:\n M = i.position\n img = i.img\n _, w, c = img.shape\n if channels == -1:\n channels = c\n width = w\n\n # BEGIN TODO 9\n # add some code here to update minX, ..., maxY\n #TODO-BLOCK-BEGIN\n m = imageBoundingBox(img, M)\n minX = min(minX, m[0])\n minY = min(minY, m[1])\n maxX = max(maxX, m[2])\n maxY = max(maxY, m[3])\n #raise Exception(\"TODO in blend.py not implemented\")\n #TODO-BLOCK-END\n # END TODO\n\n # Create an accumulator image\n accWidth = int(math.ceil(maxX) - math.floor(minX))\n accHeight = int(math.ceil(maxY) - math.floor(minY))\n print('accWidth, accHeight:', (accWidth, accHeight))\n translation = np.array([[1, 0, -minX], [0, 1, -minY], [0, 0, 1]])\n\n return accWidth, accHeight, channels, width, translation\n\n\ndef pasteImages(ipv, translation, blendWidth, accWidth, accHeight, channels):\n acc = np.zeros((accHeight, accWidth, channels + 1))\n # Add in all the images\n M = np.identity(3)\n for count, i in enumerate(ipv):\n M = i.position\n img = i.img\n\n M_trans = translation.dot(M)\n accumulateBlend(img, acc, M_trans, blendWidth)\n\n return acc\n\n\ndef getDriftParams(ipv, translation, width):\n # Add in all the images\n M = np.identity(3)\n for count, i in enumerate(ipv):\n if count != 0 and count != (len(ipv) - 1):\n continue\n\n M = i.position\n\n M_trans = translation.dot(M)\n\n p = np.array([0.5 * width, 0, 1])\n p = M_trans.dot(p)\n\n # First image\n if count == 0:\n x_init, y_init = p[:2] / p[2]\n # Last image\n if count == (len(ipv) - 1):\n x_final, y_final = p[:2] / p[2]\n\n return x_init, y_init, x_final, y_final\n\n\ndef computeDrift(x_init, y_init, x_final, y_final, width):\n A = np.identity(3)\n drift = (float)(y_final - y_init)\n # We implicitly multiply by -1 if the order of the images is swapped...\n length = (float)(x_final - x_init)\n A[0, 2] = -0.5 * width\n # Negative because positive y points downwards\n A[1, 0] = -drift / length\n\n return A\n\n\ndef blendImages(ipv, blendWidth, is360=False, A_out=None):\n \"\"\"\n INPUT:\n ipv: list of input images and their relative positions in the mosaic\n blendWidth: width of the blending function\n OUTPUT:\n croppedImage: final mosaic created by blending all images and\n correcting for any vertical drift\n \"\"\"\n accWidth, accHeight, channels, width, translation = getAccSize(ipv)\n acc = pasteImages(\n ipv, 
translation, blendWidth, accWidth, accHeight, channels\n )\n compImage = normalizeBlend(acc)\n\n # Determine the final image width\n outputWidth = (accWidth - width) if is360 else accWidth\n x_init, y_init, x_final, y_final = getDriftParams(ipv, translation, width)\n # Compute the affine transform\n A = np.identity(3)\n # BEGIN TODO 12\n # fill in appropriate entries in A to trim the left edge and\n # to take out the vertical drift if this is a 360 panorama\n # (i.e. is360 is true)\n # Shift it left by the correct amount\n # Then handle the vertical drift\n # Note: warpPerspective does forward mapping which means A is an affine\n # transform that maps accumulator coordinates to final panorama coordinates\n #TODO-BLOCK-BEGIN\n if is360:\n A = computeDrift(x_init, y_init, x_final, y_final, width)\n #raise Exception(\"TODO in blend.py not implemented\")\n #TODO-BLOCK-END\n # END TODO\n\n if A_out is not None:\n A_out[:] = A\n\n # Warp and crop the composite\n croppedImage = cv2.warpPerspective(\n compImage, A, (outputWidth, accHeight), flags=cv2.INTER_LINEAR\n )\n\n return croppedImage\n\n" ]
[ [ "numpy.dot", "numpy.meshgrid", "numpy.min", "numpy.linalg.inv", "numpy.arange", "numpy.linspace", "numpy.dstack", "numpy.ones", "numpy.max", "numpy.identity", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
webclinic017/Backtrader-14
[ "b10d816ddfecef9cdafa5f936081a8035c5c49ba" ]
[ "samples/TURTLE_RULES/utils.py" ]
[ "import pandas as pd\n\ndef read_bars(csv_file: str)->pd.DataFrame:\n dtypes = {'exchange': 'category', 'market_type': 'category', 'pair': 'category', 'bar_type': 'category',\n 'bar_size':'category', 'open': 'float32', 'timestamp': 'int64', 'timestamp_end': 'int64',\n 'high': 'float32', 'low': 'float32', 'close': 'float32', 'mean': 'float32', 'median': 'float32',\n 'volume': 'float64', 'volume_sell': 'float64', 'volume_buy': 'float64', \n 'volume_quote': 'float64', 'volume_quote_sell': 'float64', 'volume_quote_buy': 'float64',\n 'count': 'int32', 'count_sell': 'int32', 'count_buy': 'int32'}\n bars_df = pd.read_csv(csv_file, engine='c', dtype=dtypes)\n bars_df['vwap'] = bars_df['volume_quote'] / bars_df['volume']\n bars_df['timestamp'] = pd.to_datetime(bars_df['timestamp'], unit='ms', utc=True)\n bars_df['timestamp_end'] = pd.to_datetime(bars_df['timestamp_end'], unit='ms', utc=True)\n if 'TimeBar' in csv_file:\n bars_df.set_index('timestamp', drop=False, inplace=True, verify_integrity=True)\n return bars_df" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
carlocontaldi/hellofresh_case_study
[ "08c060f5a620f51beec7fadebe5853a4b020d5a0" ]
[ "code/evaluate.py" ]
[ "from sklearn.metrics import accuracy_score, classification_report\r\nfrom code import train, test, visualize\r\n\r\n# Iteration 8\r\ndef eval_4(X_train, y_train, X_test, y_test):\r\n\tprint('Evaluate')\r\n\tbest_params, y_pred = train.train_4(X_train, y_train)\r\n\taccuracy = accuracy_score(y_train, y_pred)\r\n\tprint('Validation Accuracy:', round(accuracy*100, 2), '%')\r\n\tprint(classification_report(y_train, y_pred))\r\n\tvisualize.confusion_matrix(y_train, y_pred, 'Validation Accuracy')\r\n\ty_pred = test.test_4(best_params, X_train, y_train, X_test)\r\n\taccuracy = accuracy_score(y_test, y_pred)\r\n\tprint('Test Accuracy:', round(accuracy*100, 2), '%')\r\n\tprint(classification_report(y_test, y_pred))\r\n\tvisualize.confusion_matrix(y_test, y_pred, 'Test Accuracy')\r\n\treturn y_pred\r\n\r\n# Iteration 7\r\ndef eval_3(X_train, y_train, X_test, y_test):\r\n\tprint('Evaluate')\r\n\tbest_params, y_pred = train.train_3(X_train, y_train)\r\n\taccuracy = accuracy_score(y_train, y_pred)\r\n\tprint('Validation Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_train, y_pred, 'Validation Accuracy')\r\n\ty_pred = test.test_3(best_params, X_train, y_train, X_test)\r\n\taccuracy = accuracy_score(y_test, y_pred)\r\n\tprint('Test Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_test, y_pred, 'Test Accuracy')\r\n\treturn y_pred\r\n\r\n# Iteration 6\r\ndef eval_2(X_train, y_train, X_test, y_test):\r\n\tprint('Evaluate')\r\n\ty_pred = train.train_2(X_train, y_train)\r\n\taccuracy = accuracy_score(y_train, y_pred)\r\n\tprint('Validation Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_train, y_pred, 'Validation Accuracy')\r\n\ty_pred = test.test_2(X_train, y_train, X_test)\r\n\taccuracy = accuracy_score(y_test, y_pred)\r\n\tprint('Test Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_test, y_pred, 'Test Accuracy')\r\n\treturn y_pred\r\n\r\n# Iteration 5\r\ndef eval_1(X_train, y_train, X_test, y_test):\r\n\tprint('Evaluate')\r\n\tbest_params, y_pred = train.train_1(X_train, y_train)\r\n\taccuracy = accuracy_score(y_train, y_pred)\r\n\tprint('Validation Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_train, y_pred, 'Validation Accuracy')\r\n\ty_pred = test.test_1(best_params, X_train, y_train, X_test)\r\n\taccuracy = accuracy_score(y_test, y_pred)\r\n\tprint('Test Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_test, y_pred, 'Test Accuracy')\r\n\treturn y_pred\r\n\r\n# Iteration 1-4\r\ndef eval_0(X_train, y_train, X_test, y_test):\r\n\tprint('Evaluate')\r\n\ty_pred = train.train_0(X_train, y_train)\r\n\taccuracy = accuracy_score(y_train, y_pred)\r\n\tprint('Validation Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_train, y_pred, 'Validation Accuracy')\r\n\ty_pred = test.test_0(X_train, y_train, X_test)\r\n\taccuracy = accuracy_score(y_test, y_pred)\r\n\tprint('Test Accuracy:', round(accuracy*100, 2), '%')\r\n\tvisualize.confusion_matrix(y_test, y_pred, 'Test Accuracy')\r\n\treturn y_pred\r\n" ]
[ [ "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kkahloots/Variational-Autoencoders
[ "b71a7448f47ec5b5ab752adcf0e2d41cca160726" ]
[ "utils/utils.py" ]
[ "import math\r\nimport random\r\n\r\nimport numpy as np\r\nimport dask.array as da\r\nfrom dask import delayed\r\nimport dask.dataframe as dd\r\nfrom time import strftime\r\n\r\n\r\n\r\nimport os\r\nimport shutil\r\nimport time\r\n\r\n\r\nimport tensorflow as tf\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom skimage.color import grey2rgb\r\n\r\nimport sys\r\n\r\nsys.path.append('..')\r\n\r\nfrom utils.dataset import Dataset\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n DATA METHODS\r\n ------------------------------------------------------------------------------ '''\r\nfrom dask_ml.model_selection import train_test_split\r\n\r\nscalar = None\r\n\r\n\r\ndef prepare_dataset(X):\r\n len_ = X.shape[0]\r\n shape_ = X.shape\r\n\r\n d = int(da.sqrt(X.flatten().reshape(X.shape[0], -1).shape[1]))\r\n\r\n if len(shape_) == 4:\r\n d = int(da.sqrt(X.flatten().reshape(X.shape[0], -1).shape[1] / 3))\r\n X = da.reshape(X, [-1, d, d, 3])\r\n\r\n elif d == shape_[1] and len(shape_) == 3:\r\n X = da.reshape(X, [-1, d, d])\r\n X = da.array(list(map(lambda x: grey2rgb(x), X)), dtype=da.float32)\r\n\r\n else:\r\n r = d ** 2 - X.shape[1]\r\n train_padding = da.zeros((shape_[0], r))\r\n X = da.vstack([X, train_padding])\r\n\r\n X = da.reshape(X, [-1, d, d])\r\n X = da.array(list(map(lambda x: grey2rgb(x), X)), dtype=da.float32)\r\n\r\n print('Scaling dataset')\r\n if scalar is not None:\r\n X = scaler.transform(X.flatten().reshape(-1, 1).astype(da.float32)).reshape(X.shape)\r\n else:\r\n scaler = MinMaxScaler()\r\n X = scaler.fit_transform(X.flatten().reshape(-1, 1).astype(da.float32)).reshape(X.shape)\r\n\r\n return X\r\n\r\ndef process_data_nosplit(X, y=None, test_size=0.20, dummies=False):\r\n if y is None:\r\n y = da.ones(X.shape[0])\r\n y_uniqs = np.unique(y)\r\n\r\n len_ = X.shape[0]\r\n X = prepare_dataset(X)\r\n\r\n if dummies:\r\n y = dd.get_dummies(y)\r\n\r\n shape_ = list(X.shape[1:])\r\n\r\n samples = list()\r\n for _ in range(10):\r\n for y_uniq in y_uniqs:\r\n sample = list()\r\n for xa, ya in zip(chunks(X, 10),chunks(y, 10)):\r\n try:\r\n sample.append([xa[ya == y_uniq][random.randint(0, len(xa[ya == y_uniq]) - 1)]])\r\n if len(sample) >= 500:\r\n break\r\n except:\r\n pass\r\n samples += sample\r\n samples = da.vstack(samples)\r\n\r\n _X = X.reshape([X.shape[0]] + shape_)\r\n\r\n dataset = Dataset(_X, y)\r\n\r\n dataset.samples = samples\r\n print('Sample dataset shape: ', dataset.samples.shape)\r\n return dataset\r\n\r\n\r\ndef process_data(X, y=None, test_size=0.20, dummies=False):\r\n if y is None:\r\n y = da.ones(X.shape[0])\r\n y_uniqs = np.unique(y)\r\n\r\n len_ = X.shape[0]\r\n X = prepare_dataset(X)\r\n\r\n if dummies:\r\n y = dd.get_dummies(y)\r\n\r\n shape_ = list(X.shape[1:])\r\n\r\n samples = list()\r\n for _ in range(10):\r\n for y_uniq in y_uniqs:\r\n sample = list()\r\n for xa, ya in zip(chunks(X, 10),chunks(y, 10)):\r\n try:\r\n sample.append([xa[ya == y_uniq][random.randint(0, len(xa[ya == y_uniq]) - 1)]])\r\n if len(sample) >= 500:\r\n break\r\n except:\r\n pass\r\n samples += sample\r\n samples = da.vstack(samples)\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X.flatten().reshape(len_, -1), y, test_size=test_size,\r\n random_state=4891)\r\n\r\n X_train = X_train.reshape([X_train.shape[0]] + shape_)\r\n X_test = X_test.reshape([X_test.shape[0]] + shape_)\r\n\r\n print('Training dataset shape: ', X_train.shape)\r\n print('Validation dataset shape: ', X_test.shape)\r\n\r\n train_dataset = 
Dataset(X_train, y_train)\r\n test_dataset = Dataset(X_test, y_test)\r\n\r\n train_dataset.samples = samples\r\n print('Sample dataset shape: ', train_dataset.samples.shape)\r\n return train_dataset, test_dataset\r\n\r\n\r\ndef chunks(l, n):\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n\r\n\r\ndef merge_datasets(data, data_dim, train_size, valid_size=0):\r\n valid_dataset = da.ndarray((valid_size, data_dim), dtype=da.float32)\r\n train_dataset = da.ndarray((train_size, data_dim), dtype=da.float32)\r\n\r\n da.random.shuffle(data)\r\n\r\n if valid_dataset is not None:\r\n valid_dataset = data[:valid_size, :]\r\n\r\n train_dataset = data[valid_size:, :]\r\n\r\n return valid_dataset, train_dataset\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n FILES & DIRS\r\n ------------------------------------------------------------------------------ '''\r\n\r\n\r\nclass Config():\r\n def __init__(self, args):\r\n for k, v in args.items():\r\n setattr(self, k, v)\r\n\r\n def __getitem__(self, item):\r\n return self.__dict__[item]\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n FILES & DIRS\r\n ------------------------------------------------------------------------------ '''\r\n\r\n\r\ndef save_img(fig, model_name, image_name, result_dir):\r\n complete_name = result_dir + '/' + model_name + '_' + image_name + '.png'\r\n idx = 1\r\n while (os.path.exists(complete_name)):\r\n complete_name = result_dir + '/' + model_name + '_' + image_name + '_' + str(idx) + '.png'\r\n idx += 1\r\n fig.savefig(complete_name)\r\n\r\n\r\ndef save_args(args, summary_dir):\r\n my_file = summary_dir + '/' + 'my_args.txt'\r\n args_string = str(args).replace(', ', ' --')\r\n with open(my_file, 'a+') as file_:\r\n file_.write(args_string)\r\n\r\n\r\ndef create_dirs(dirs):\r\n \"\"\"\r\n dirs - a list of directories to create if these directories are not found\r\n :param dirs:\r\n :return exit_code: 0:success -1:failed\r\n \"\"\"\r\n try:\r\n for dir_ in dirs:\r\n if not os.path.exists(dir_):\r\n os.makedirs(dir_)\r\n return 0\r\n except Exception as err:\r\n print(\"Creating directories error: {0}\".format(err))\r\n exit(-1)\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n FOLDER/FILE METHODS\r\n ------------------------------------------------------------------------------ '''\r\n\r\n\r\ndef check_folder(log_dir):\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n return log_dir\r\n\r\n\r\ndef clean_folder(folder):\r\n if os.path.exists(folder):\r\n shutil.rmtree(folder, ignore_errors=True)\r\n return\r\n\r\n\r\ndef open_log_file(filename, args):\r\n '''\r\n Open a file and writes the first line if it does not exists\r\n '''\r\n if (os.path.isfile(filename)):\r\n return\r\n\r\n with open(filename, 'w+') as logfile:\r\n my_string = ''\r\n for arg in args[:-1]:\r\n my_string += arg + ';'\r\n\r\n my_string += args[-1] + '\\n'\r\n logfile.write(my_string)\r\n return\r\n\r\n\r\ndef write_log_file(filename, args):\r\n '''\r\n Write a line to a file with elements separated by commas.\r\n '''\r\n if (not os.path.isfile(filename)):\r\n return\r\n\r\n with open(filename, 'a+') as logfile:\r\n my_string = ''\r\n for arg in args[:-1]:\r\n my_string += arg + ';'\r\n\r\n my_string += args[-1] + '\\n'\r\n logfile.write(my_string)\r\n return\r\n\r\n\r\nimport re\r\n\r\n\r\ndef atoi(text):\r\n return int(text) if 
text.isdigit() else text\r\n\r\n\r\ndef natural_keys(text):\r\n '''\r\n alist.sort(key=natural_keys) sorts in human order\r\n http://nedbatchelder.com/blog/200712/human_sorting.html\r\n (See Toothy's implementation in the comments)\r\n '''\r\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n PRINT METHODS\r\n ------------------------------------------------------------------------------ '''\r\n\r\n\r\ndef printt(string, log):\r\n if (log):\r\n print(string)\r\n\r\n\r\ndef get_time():\r\n return strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime()) + '\\n'\r\n\r\n\r\ndef get_params(args):\r\n retval = ''\r\n for key in args:\r\n retval += '\\t' + str(key) + ':' + str(args[key]) + '\\n'\r\n return retval\r\n\r\n\r\n''' ------------------------------------------------------------------------------\r\n TF METHODS\r\n ------------------------------------------------------------------------------ '''\r\n\r\n\r\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\r\n with tf.variable_scope(name):\r\n f1 = 0.5 * (1 + leak)\r\n f2 = 0.5 * (1 - leak)\r\n return f1 * x + f2 * abs(x)\r\n\r\n\r\ndef get_variable(dim, name, init_value=0.54):\r\n out = tf.get_variable(name,\r\n initializer=tf.constant_initializer(init_value),\r\n shape=[1, dim],\r\n trainable=True,\r\n dtype=tf.float32)\r\n out = tf.nn.softplus(out)\r\n return out\r\n\r\n\r\ndef variable_summary(var, name='summaries'):\r\n with tf.name_scope(name):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n tf.summary.scalar('stddev', stddev)\r\n\r\n # tf.summary.scalar('max', tf.reduce_max(var))\r\n # tf.summary.scalar('min', tf.reduce_min(var))\r\n\r\n tf.summary.histogram('histogram', var)\r\n return\r\n\r\n\r\ndef softplus_bias(tensor):\r\n out = tf.add(tf.nn.softplus(tensor), 0.1)\r\n return out\r\n\r\n\r\ndef sigmoid(x):\r\n return 1 / (1 + math.exp(-x))\r\n\r\n\r\n# TensorFlow Graph visualizer code\r\nimport numpy as da\r\nfrom IPython.display import clear_output, Image, display, HTML\r\n\r\n\r\ndef strip_consts(graph_def, max_const_size=32):\r\n \"\"\"Strip large constant values from graph_def.\"\"\"\r\n strip_def = tf.GraphDef()\r\n for n0 in graph_def.node:\r\n n = strip_def.node.add()\r\n n.MergeFrom(n0)\r\n if n.op == 'Const':\r\n tensor = n.attr['value'].tensor\r\n size = len(tensor.tensor_content)\r\n if size > max_const_size:\r\n tensor.tensor_content = \"<stripped %d bytes>\" % size\r\n return strip_def\r\n\r\n\r\ndef show_graph(graph_def, max_const_size=32):\r\n \"\"\"Visualize TensorFlow graph.\"\"\"\r\n if hasattr(graph_def, 'as_graph_def'):\r\n graph_def = graph_def.as_graph_def()\r\n strip_def = strip_consts(graph_def, max_const_size=max_const_size)\r\n code = \"\"\"\r\n <script src=\"//cdnjs.cloudflare.com/ajax/libs/polymer/0.3.3/platform.js\"></script>\r\n <script>\r\n function load() {{\r\n document.getElementById(\"{id}\").pbtxt = {data};\r\n }}\r\n </script>\r\n <link rel=\"import\" href=\"https://tensorboard.appspot.com/tf-graph-basic.build.html\" onload=load()>\r\n <div style=\"height:600px\">\r\n <tf-graph-basic id=\"{id}\"></tf-graph-basic>\r\n </div>\r\n \"\"\".format(data=repr(str(strip_def)), id='graph' + str(da.random.rand()))\r\n\r\n iframe = \"\"\"\r\n <iframe seamless style=\"width:1200px;height:620px;border:0\" srcdoc=\"{}\"></iframe>\r\n \"\"\".format(code.replace('\"', '&quot;'))\r\n display(HTML(iframe))" ]
[ [ "numpy.unique", "numpy.reshape", "tensorflow.reduce_mean", "numpy.vstack", "numpy.ndarray", "numpy.random.shuffle", "numpy.ones", "tensorflow.constant_initializer", "tensorflow.name_scope", "numpy.zeros", "sklearn.preprocessing.MinMaxScaler", "tensorflow.variable_scope", "tensorflow.square", "tensorflow.GraphDef", "numpy.random.rand", "tensorflow.summary.scalar", "tensorflow.nn.softplus", "tensorflow.summary.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
janihuuh/cd8_tlgll_manu
[ "fad8935bf6f7b99bcdc7d3c0b899d20a2975a636" ]
[ "python/run_tcrgp.py" ]
[ "import os\nme=\"janihuuh\"\n# os.chdir(\"/Users/hru/Dropbox/MelanoMAP/src/jani/python/tcrgp/\")\nos.chdir(\"/Users/janihuuh/Dropbox/MelanoMAP/src/jani/python/tcrgp/\")\n\nimport tcrgp\nimport pickle\nimport ast\nimport csv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.style.use('fivethirtyeight')\n\nimport pandas as pd\nfrom glob import glob\n\nsubsmat = tcrgp.subsmatFromAA2('HENS920102')\npc_blo = tcrgp.get_pcs(subsmat,d=21)\ncdrs = tcrgp.create_cdr_dict(alignment='imgt',species=['human'])\n\n## Models\nwith open('models/common_viral/model_GILGFVFTL_cdr3b','rb') as f: GILGFVFTL_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_GLCTLVAML_cdr3ab','rb') as f: GLCTLVAML_cdr3ab = pickle.load(f)\nwith open('models/common_viral/model_GLCTLVAML_cdr3b','rb') as f: GLCTLVAML_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_IPSINVHHY_cdr3b','rb') as f: IPSINVHHY_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_NLVPMVATV_cdr3ab','rb') as f: NLVPMVATV_cdr3ab = pickle.load(f)\nwith open('models/common_viral/model_NLVPMVATV_cdr3b','rb') as f: NLVPMVATV_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_PKYVKQNTLKLAT_cdr3b','rb') as f: PKYVKQNTLKLAT_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_RAKFKQLL_cdr3b','rb') as f: RAKFKQLL_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_RPRGEVRFL_cdr3b','rb') as f: RPRGEVRFL_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_TPRVTGGGAM_cdr3b','rb') as f: TPRVTGGGAM_cdr3b = pickle.load(f)\nwith open('models/common_viral/model_YVLDHLIVV_cdr3b','rb') as f: YVLDHLIVV_cdr3b = pickle.load(f)\n\n\nviral_models = [GILGFVFTL_cdr3b, GLCTLVAML_cdr3b, IPSINVHHY_cdr3b,\n NLVPMVATV_cdr3b, PKYVKQNTLKLAT_cdr3b, RAKFKQLL_cdr3b,\n RPRGEVRFL_cdr3b, TPRVTGGGAM_cdr3b, YVLDHLIVV_cdr3b]\nviral_model_names = [\"GILGFVFTL_cdr3b\", \"GLCTLVAML_cdr3b\", \"IPSINVHHY_cdr3b\",\n \"NLVPMVATV_cdr3b\", \"PKYVKQNTLKLAT_cdr3b\", \"RAKFKQLL_cdr3b\",\n \"RPRGEVRFL_cdr3b\", \"TPRVTGGGAM_cdr3b\", \"YVLDHLIVV_cdr3b\"]\nmodels = viral_models\nmodel_names = viral_model_names\n\n\n### lgll predictions\nmodels = viral_models + melanoma_models\nmodel_names = viral_model_names + melanoma_model_names\n\n# file=\"/Users/\"+me+\"/Dropbox/lgll_sc/results/manuscript/tcr/tcrgp_total_input_non_singleton.txt\"\nfile=\"/Users/\"+me+\"/Dropbox/lgll_sc/results/manuscript/tcrgp_full.txt\"\nfilename=\"lgll_full\"\noutput_dir=\"/Users/\"+me+\"/Dropbox/lgll_sc/results/manuscript/tcr/tcrgp2/\"\n\n## Make predictions with each of the models\nfor model, model_name in zip(models, model_names):\n preds = tcrgp.predict(file, model, cdr3b=\"cdr3aa\", vb=\"v\", delimiter='\\t')\n ## Write results\n with open(output_dir + filename + \"_\" + model_name + \".csv\", 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow(['prediction'])\n for p in preds:\n writer.writerow(['{:.4f}'.format(p[0])])\n" ]
[ [ "matplotlib.pyplot.style.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rezaabdullah/data_science_portfolio
[ "29ccb3409441d44696eb24b9191e4d7b1810b6bf" ]
[ "disaster_response_pipeline/data/process_data.py" ]
[ "# Import libraries\nimport sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n# Load datasets\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"\n Method for reading two CSV files and merge into one pandas dataframe\n\n Args:\n messages_filepath (str): The .csv file path of messages dataset\n categories_filepath (str): The .csv file path of categories dataset\n \n output:\n df (pandas dataframe): Return merged dataset of messages and categories\n \"\"\"\n \n # Read input dataset\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n \n # Merge two dataframes into one dataframe\n df = pd.merge(messages, categories, on = \"id\", how = \"outer\")\n \n return df\n\n# Clean the dataset\ndef clean_data(df):\n \"\"\"\n Method for cleaning the dataframe obtained from 'load_data'\n \n Args:\n df (pandas dataframe): The merged dataframe from load_data\n \n output:\n df (pandas dataframe): Sanitized dataframe\n \"\"\"\n \n # Split categories into separate category columns\n # Ceate a dataframe of the 36 individual category columns\n categories = df.categories.str.split(';', expand = True)\n \n # Select the first row of the categories dataframe\n row = categories.loc[0]\n \n # Use this row to extract a list of new column names for categories\n category_colnames = row.str.split(\"-\").str[0].tolist()\n \n # Rename the columns of `categories`\n categories.columns = category_colnames\n \n # Convert category values to just numbers 0 or 1\n for column in categories:\n categories[column] = categories[column].astype(str).str[-1]\n categories[column] = pd.to_numeric(categories[column])\n \n # Some rows on related column has value of 2\n # Replace 2 with 1\n categories[\"related\"] = categories[\"related\"].replace(2, 1)\n \n # Replace categories column in df with new category columns\n df.drop(\"categories\", axis = 1, inplace = True)\n \n # Concatenate the original dataframe with the new `categories` dataframe\n df = pd.concat([df, categories], axis = 1)\n \n # Deduplication\n df.drop_duplicates(inplace = True)\n \n return df\n\n# Save the clean dataset into an sqlite database\ndef save_data(df, database_filename):\n \"\"\"\n Method for saving the clean dataframe into an sqlite database\n\n Args:\n df (pandas dataframe): Sanitized dataframe obtained from 'clean_data'\n database_filename (str): DB filename\n\n Output:\n None\n \"\"\"\n\n engine = create_engine(\"sqlite:///{}\".format(database_filename))\n df.to_sql(\"messages\", engine, index = False)\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_numeric", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
yunqiic/DeepSolarV1
[ "2126aa1d686edc8728502ab53c7392186a65720b" ]
[ "inception/slim/losses.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains convenience wrappers for various Neural Network TensorFlow losses.\n\n All the losses defined here add themselves to the LOSSES_COLLECTION\n collection.\n\n l1_loss: Define a L1 Loss, useful for regularization, i.e. lasso.\n l2_loss: Define a L2 Loss, useful for regularization, i.e. weight decay.\n cross_entropy_loss: Define a cross entropy loss using\n softmax_cross_entropy_with_logits. Useful for classification.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n# In order to gather all losses in a network, the user should use this\n# key for get_collection, i.e:\n# losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)\nLOSSES_COLLECTION = '_losses'\n\n\ndef l1_regularizer(weight=1.0, scope=None):\n \"\"\"Define a L1 regularizer.\n\n Args:\n weight: scale the loss by this factor.\n scope: Optional scope for name_scope.\n\n Returns:\n a regularizer function.\n \"\"\"\n def regularizer(tensor):\n with tf.name_scope(scope, 'L1Regularizer', [tensor]):\n l1_weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='weight')\n return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n return regularizer\n\n\ndef l2_regularizer(weight=1.0, scope=None):\n \"\"\"Define a L2 regularizer.\n\n Args:\n weight: scale the loss by this factor.\n scope: Optional scope for name_scope.\n\n Returns:\n a regularizer function.\n \"\"\"\n def regularizer(tensor):\n with tf.name_scope(scope, 'L2Regularizer', [tensor]):\n l2_weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='weight')\n return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')\n return regularizer\n\n\ndef l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):\n \"\"\"Define a L1L2 regularizer.\n\n Args:\n weight_l1: scale the L1 loss by this factor.\n weight_l2: scale the L2 loss by this factor.\n scope: Optional scope for name_scope.\n\n Returns:\n a regularizer function.\n \"\"\"\n def regularizer(tensor):\n with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):\n weight_l1_t = tf.convert_to_tensor(weight_l1,\n dtype=tensor.dtype.base_dtype,\n name='weight_l1')\n weight_l2_t = tf.convert_to_tensor(weight_l2,\n dtype=tensor.dtype.base_dtype,\n name='weight_l2')\n reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),\n name='value_l1')\n reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),\n name='value_l2')\n return tf.add(reg_l1, reg_l2, name='value')\n return regularizer\n\n\ndef l1_loss(tensor, weight=1.0, scope=None):\n \"\"\"Define a L1Loss, useful for regularize, i.e. 
lasso.\n\n Args:\n tensor: tensor to regularize.\n weight: scale the loss by this factor.\n scope: Optional scope for name_scope.\n\n Returns:\n the L1 loss op.\n \"\"\"\n with tf.name_scope(scope, 'L1Loss', [tensor]):\n weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='loss_weight')\n loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n tf.add_to_collection(LOSSES_COLLECTION, loss)\n return loss\n\n\ndef l2_loss(tensor, weight=1.0, scope=None):\n \"\"\"Define a L2Loss, useful for regularize, i.e. weight decay.\n\n Args:\n tensor: tensor to regularize.\n weight: an optional weight to modulate the loss.\n scope: Optional scope for name_scope.\n\n Returns:\n the L2 loss op.\n \"\"\"\n with tf.name_scope(scope, 'L2Loss', [tensor]):\n weight = tf.convert_to_tensor(weight,\n dtype=tensor.dtype.base_dtype,\n name='loss_weight')\n loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')\n tf.add_to_collection(LOSSES_COLLECTION, loss)\n return loss\n\n\ndef cross_entropy_loss(logits, one_hot_labels, penalty_vector, label_smoothing=0,\n weight=1.0, scope=None):\n \"\"\"Define a Cross Entropy loss using softmax_cross_entropy_with_logits.\n\n It can scale the loss by weight factor, and smooth the labels.\n\n Args:\n logits: [batch_size, num_classes] logits outputs of the network .\n one_hot_labels: [batch_size, num_classes] target one_hot_encoded labels.\n label_smoothing: if greater than 0 then smooth the labels.\n weight: scale the loss by this factor.\n scope: Optional scope for name_scope.\n\n Returns:\n A tensor with the softmax_cross_entropy loss.\n \"\"\"\n logits.get_shape().assert_is_compatible_with(one_hot_labels.get_shape())\n with tf.name_scope(scope, 'CrossEntropyLoss', [logits, one_hot_labels]):\n num_classes = one_hot_labels.get_shape()[-1].value\n one_hot_labels = tf.cast(one_hot_labels, logits.dtype)\n if label_smoothing > 0:\n smooth_positives = 1.0 - label_smoothing\n smooth_negatives = label_smoothing / num_classes\n one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives\n cross_entropy = tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits(\n logits, one_hot_labels, name='xentropy')\n\n weight = tf.convert_to_tensor(weight,\n dtype=logits.dtype.base_dtype,\n name='loss_weight')\n # loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')\n # Robban added support for penalty_vector\n cost_sensitive_cross_entropy = tf.multiply(penalty_vector, cross_entropy)\n loss = tf.multiply(weight, tf.reduce_mean(cost_sensitive_cross_entropy), name='value')\n\n tf.add_to_collection(LOSSES_COLLECTION, loss)\n return loss\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.multiply", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits", "tensorflow.nn.l2_loss", "tensorflow.add", "tensorflow.name_scope", "tensorflow.add_to_collection", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Sambor123/DRL_Navigation
[ "3694c1e99d26e448777f12eb5afd7bf22bd2e15b" ]
[ "keyboard_agent.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport sys\nimport signal\nimport argparse\nimport numpy as np\n\nfrom scene_loader import THORDiscreteEnvironment\nfrom utils.tools import SimpleImageViewer\n\n#\n# Navigate the scene using your keyboard\n#\n\ndef key_press(key, mod):\n\n global human_agent_action, human_wants_restart, stop_requested\n if key == ord('R') or key == ord('r'): # r/R\n human_wants_restart = True\n if key == ord('Q') or key == ord('q'): # q/Q\n stop_requested = True\n if key == 0xFF52: # up\n human_agent_action = 0\n if key == 0xFF53: # right\n human_agent_action = 1\n if key == 0xFF51: # left\n human_agent_action = 2\n if key == 0xFF54: # down\n human_agent_action = 3\n\ndef rollout(env):\n\n global human_agent_action, human_wants_restart, stop_requested\n human_agent_action = None\n human_wants_restart = False\n while True:\n # waiting for keyboard input\n if human_agent_action is not None:\n # move actions\n env.step(human_agent_action)\n human_agent_action = None\n\n # waiting for reset command\n if human_wants_restart:\n # reset agent to random location\n env.reset()\n human_wants_restart = False\n\n # check collision\n if env.collided:\n print('Collision occurs.')\n env.collided = False\n\n # check quit command\n if stop_requested: break\n\n viewer.imshow(env.observation)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--scene_dump\", type=str, default=\"./data/bedroom_04.h5\",\n help=\"path to a hdf5 scene dump file\")\n args = parser.parse_args()\n\n print(\"Loading scene dump {}\".format(args.scene_dump))\n env = THORDiscreteEnvironment({\n 'h5_file_path': args.scene_dump\n })\n\n # manually disable terminal states\n env.terminals = np.zeros_like(env.terminals)\n env.terminal_states, = np.where(env.terminals)\n env.reset()\n\n human_agent_action = None\n human_wants_restart = False\n stop_requested = False\n\n viewer = SimpleImageViewer()\n viewer.imshow(env.observation)\n viewer.window.on_key_press = key_press\n\n print(\"Use arrow keys to move the agent.\")\n print(\"Press R to reset agent\\'s location.\")\n print(\"Press Q to quit.\")\n\n rollout(env)\n\n print(\"Goodbye.\")\n" ]
[ [ "numpy.zeros_like", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
caiolang/voiture-autonome
[ "92def5b8ccd24cd4deab759df9bed7a028e9339d" ]
[ "DOCS/6-References/documentation_equipe_2019/trajectoire_2019/testtrouvecible.py" ]
[ "import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimport copy\r\nfrom zonesafe import *\r\nfrom adaptevitesserelat import *\r\nfrom trouvecible import *\r\n\r\norientation=0\r\norientationm1=0\r\nN=720\r\nlidar1=[]\r\nlidar2=[]\r\nrv=20\r\nm=5\r\ni=0\r\nr1=50\r\nr2=41\r\nepsilon=0.15\r\nalpha=15 #angle cone correction\r\nv=100\r\ndeltat=0.1\r\nrmax=1000\r\n\r\nwhile i<N:\r\n lidar1.append((i,r1))\r\n i+=1\r\ni=0\r\nwhile i<N:\r\n lidar2.append((i,r2))\r\n i+=1\r\n\r\nMMMR=zonesafe(lidar2,rv,m)\r\nMMMRC=adaptevitesserelat (lidar1,lidar2,MMMR,alpha,v,deltat,rmax,orientation,orientationm1)\r\ncible=trouvecible(MMMRC)\r\ni=0\r\nwhile i<len(MMMR):\r\n plt.plot(MMMR[i][2], MMMR[i][3],\"b:o\")\r\n plt.plot(MMMR[i][4], MMMR[i][5],\"r:o\")\r\n plt.plot(MMMRC[i][4], MMMRC[i][5],\"g:o\")\r\n i=i+1\r\nplt.plot(cible[0],cible[1],\"y:o\")\r\nplt.axis('equal')\r\nplt.show()\r\n\r\n#trouvecible testé avec succés" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gokceneraslan/scib
[ "91cfe2e4872230d8806c8f9ad5a0c251f268fdc4" ]
[ "tests/metrics/test_kbet.py" ]
[ "import numpy as np\n\nfrom tests.common import *\n\n\ndef test_kbet(adata_pca):\n score = scIB.me.kBET(\n adata_pca,\n batch_key='batch',\n label_key='celltype',\n embed='X_pca'\n )\n score = 1 - np.nanmean(score['kBET'])\n LOGGER.info(f\"score: {score}\")\n assert np.isnan(score)\n" ]
[ [ "numpy.isnan", "numpy.nanmean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fishfishson/MeshCNN
[ "8b4ddccf79638c0ca6c1ba17bec287981ddb65a1" ]
[ "models/attention.py" ]
[ "###########################################################################\n# Created by: CASIA IVA\n# Email: [email protected]\n# Copyright (c) 2018\n###########################################################################\n\nimport numpy as np\nimport torch\nimport math\nfrom torch.nn import Module, Sequential, Conv3d, ReLU, AdaptiveMaxPool3d, AdaptiveAvgPool3d, \\\n NLLLoss, BCELoss, CrossEntropyLoss, AvgPool3d, MaxPool3d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\n\ntorch_ver = torch.__version__[:3]\n\n__all__ = ['PAM_Module', 'CAM_Module']\n\n\nclass PAM_Module(Module):\n \"\"\" Position attention module\"\"\"\n\n # Ref from SAGAN\n def __init__(self, in_dim):\n super(PAM_Module, self).__init__()\n self.chanel_in = in_dim\n\n self.query_conv = Conv3d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.key_conv = Conv3d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\n self.value_conv = Conv3d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.gamma = Parameter(torch.zeros(1))\n\n self.softmax = Softmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X H X W)\n returns :\n out : attention value + input feature\n attention: B X (HxW) X (HxW)\n \"\"\"\n m_batchsize, C, depth, height, width = x.size()\n proj_query = self.query_conv(x).view(m_batchsize, -1, depth * width * height).permute(0, 2, 1)\n proj_key = self.key_conv(x).view(m_batchsize, -1, depth * width * height)\n energy = torch.bmm(proj_query, proj_key)\n attention = self.softmax(energy)\n proj_value = self.value_conv(x).view(m_batchsize, -1, depth * width * height)\n\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(m_batchsize, C, depth, height, width)\n\n out = self.gamma * out + x\n return out\n\n\nclass CAM_Module(Module):\n \"\"\" Channel attention module\"\"\"\n\n def __init__(self, in_dim):\n super(CAM_Module, self).__init__()\n self.chanel_in = in_dim\n\n self.gamma = Parameter(torch.zeros(1))\n self.softmax = Softmax(dim=-1)\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X H X W)\n returns :\n out : attention value + input feature\n attention: B X C X C\n \"\"\"\n m_batchsize, C, depth, height, width = x.size()\n proj_query = x.view(m_batchsize, C, -1)\n proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)\n energy = torch.bmm(proj_query, proj_key)\n energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy\n attention = self.softmax(energy_new)\n proj_value = x.view(m_batchsize, C, -1)\n\n out = torch.bmm(attention, proj_value)\n out = out.view(m_batchsize, C, depth, height, width)\n\n out = self.gamma * out + x\n return out\n" ]
[ [ "torch.nn.Softmax", "torch.max", "torch.zeros", "torch.nn.Conv3d", "torch.bmm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nitthapr/transformers
[ "f8bf5713fa61a5009d0b66dfaa4e320b316ab1aa" ]
[ "src/transformers/modeling_bert.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model. \"\"\"\n\n\nimport math\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .activations import gelu, gelu_new, swish\nfrom .configuration_bert import BertConfig\nfrom .file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom .modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n CausalLMOutput,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom .modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = \"BertTokenizer\"\n\nBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n \"bert-base-chinese\",\n \"bert-base-german-cased\",\n \"bert-large-uncased-whole-word-masking\",\n \"bert-large-cased-whole-word-masking\",\n \"bert-large-uncased-whole-word-masking-finetuned-squad\",\n \"bert-large-cased-whole-word-masking-finetuned-squad\",\n \"bert-base-cased-finetuned-mrpc\",\n \"bert-base-german-dbmdz-cased\",\n \"bert-base-german-dbmdz-uncased\",\n \"cl-tohoku/bert-base-japanese\",\n \"cl-tohoku/bert-base-japanese-whole-word-masking\",\n \"cl-tohoku/bert-base-japanese-char\",\n \"cl-tohoku/bert-base-japanese-char-whole-word-masking\",\n \"TurkuNLP/bert-base-finnish-cased-v1\",\n \"TurkuNLP/bert-base-finnish-uncased-v1\",\n \"wietsedv/bert-base-dutch-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef mish(x):\n return x * torch.tanh(nn.functional.softplus(x))\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"gelu_new\": gelu_new, \"mish\": mish}\n\n\nBertLayerNorm = torch.nn.LayerNorm\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n 
input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n if encoder_hidden_states is not None:\n mixed_key_layer = self.key(encoder_hidden_states)\n mixed_value_layer = self.value(encoder_hidden_states)\n attention_mask = encoder_attention_mask\n else:\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 
3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = BertAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if 
self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n ):\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False):\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = 
nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(PreTrainedModel):\n \"\"\"An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = BertConfig\n load_tf_weights = load_tf_weights_in_bert\n base_model_prefix = \"bert\"\n authorized_missing_keys = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n@dataclass\nclass BertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type 
of :class:`~transformers.BertForPreTrainingModel`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n seq_relationship_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nBERT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n return_dict (:obj:`bool`, `optional`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING,\n)\nclass BertModel(BertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well\n as a decoder, in which case a layer of cross-attention is added between\n the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,\n Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the\n :obj:`is_decoder` argument of the configuration set to :obj:`True`.\n To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an\n :obj:`encoder_hidden_states` is then expected as an input to the forward pass.\n\n .. 
_`Attention is all you need`:\n https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and\n a `next sentence prediction (classification)` head. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForPreTraining(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import BertTokenizer, BertForPreTraining\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = BertForPreTraining.from_pretrained('bert-base-uncased', return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None 
else output\n\n return BertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", BERT_START_DOCSTRING\n)\nclass BertLMHeadModel(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction).\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Returns:\n\n Example::\n\n >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n >>> config = BertConfig.from_pretrained(\"bert-base-cased\")\n >>> config.is_decoder = True\n >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config, return_dict=True)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; 
shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutput(\n loss=lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. \"\"\", BERT_START_DOCSTRING)\nclass BertForMaskedLM(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert \"lm_labels\" not in kwargs, \"Use `BertWithLMHead` for autoregressive language modeling task.\"\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n 
sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a `next sentence prediction (classification)` head on top. \"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForNextSentencePrediction(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Returns:\n\n Example::\n\n >>> from transformers import BertTokenizer, BertForNextSentencePrediction\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased', return_dict=True)\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> next_sentence = \"The sky is blue due to the shorter wavelength of blue light.\"\n >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')\n\n >>> outputs = model(**encoding, next_sentence_label=torch.LongTensor([1]))\n >>> logits = outputs.logits\n >>> assert logits[0, 0] < logits[0, 1] # next sentence was random\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n seq_relationship_scores = self.cls(pooled_output)\n\n next_sentence_loss = None\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), next_sentence_label.view(-1))\n\n if not return_dict:\n output = (seq_relationship_scores,) + outputs[2:]\n return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output\n\n return NextSentencePredictorOutput(\n loss=next_sentence_loss,\n logits=seq_relationship_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForSequenceClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForMultipleChoice(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForTokenClassification(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass BertForQuestionAnswering(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"bert-base-uncased\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "torch.nn.Softmax", "torch.cat", "torch.zeros", "torch.nn.Embedding", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.from_numpy", "torch.tensor", "torch.arange", "tensorflow.train.list_variables", "torch.nn.functional.softplus", "torch.full", "tensorflow.train.load_variable", "torch.nn.Linear", "numpy.transpose", "torch.nn.Tanh", "torch.matmul", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jlikhuva/loompy
[ "b694a4d299604299b27e2a407f508ccf6baa8823" ]
[ "loompy/loom_validator.py" ]
[ "import h5py\nfrom typing import *\nimport logging\nimport numpy as np\nimport loompy\n\nfrom .utils import get_loom_spec_version\n\n\nclass LoomValidator:\n\tdef __init__(self, version: str = None) -> None:\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tversion: \t\tThe Loom file format version to validate against (\"3.0.0\", \"2.0.1\", \"old\"), or None to infer from file\n\t\t\n\t\tRemarks:\n\t\t\t\"old\" version will accept files that lack the \"row_graphs\" and \"col_graphs\" groups\n\t\t\"\"\"\n\t\tself.version = version #: Version of the spec to validate against\n\t\tself.errors: List[str] = [] #: Errors found during validation\n\t\tself.warnings: List[str] = [] #: Warnings triggered during validation\n\t\tself.summary: List[str] = [] #: Summary of the file structure\n\n\tdef _check(self, condition: bool, message: str) -> bool:\n\t\tif not condition:\n\t\t\tself.errors.append(message)\n\t\treturn condition\n\t\n\tdef _warn(self, condition: bool, message: str) -> bool:\n\t\tif not condition:\n\t\t\tself.warnings.append(message)\n\t\treturn condition\n\n\tdef validate(self, path: str, strictness: str = \"speconly\") -> bool:\n\t\t\"\"\"\n\t\tValidate a file for conformance to the Loom specification\n\n\t\tArgs:\n\t\t\tpath: \t\t\tFull path to the file to be validated\n\t\t\tstrictness:\t\t\"speconly\" or \"conventions\"\n\n\t\tRemarks:\n\t\t\tIn \"speconly\" mode, conformance is assessed relative to the file format specification\n\t\t\tat http://linnarssonlab.org/loompy/format/. In \"conventions\" mode, conformance is additionally\n\t\t\tassessed relative to attribute name and data type conventions given at http://linnarssonlab.org/loompy/conventions/.\n\t\t\"\"\"\n\t\tvalid1 = True\n\t\twith h5py.File(path, mode=\"r\") as f:\n\t\t\tif self.version == None:\n\t\t\t\tself.version = get_loom_spec_version(f)\n\t\t\tvalid1 = self.validate_spec(f)\n\t\t\tif not valid1:\n\t\t\t\tself.errors.append(\"For help, see http://linnarssonlab.org/loompy/format/\")\n\n\t\tvalid2 = True\n\t\tif strictness == \"conventions\":\n\t\t\twith loompy.connect(path, mode=\"r\") as ds:\n\t\t\t\tvalid2 = self.validate_conventions(ds)\n\t\t\t\tif not valid2:\n\t\t\t\t\tself.errors.append(\"For help, see http://linnarssonlab.org/loompy/conventions/\")\n\n\t\treturn valid1 and valid2\n\n\tdef validate_conventions(self, ds: loompy.LoomConnection) -> bool:\n\t\t\"\"\"\n\t\tValidate the LoomConnection object against the attribute name/dtype conventions.\n\n\t\tArgs:\n\t\t\tds:\t\t\tLoomConnection object\n\t\t\n\t\tReturns:\n\t\t\tTrue if the file conforms to the conventions, else False\n\t\t\n\t\tRemarks:\n\t\t\tUpon return, the instance attributes 'self.errors' and 'self.warnings' contain\n\t\t\tlists of errors and warnings.\n\t\t\"\"\"\n\t\t(n_genes, n_cells) = ds.shape\n\n\t\tself._warn(\"Description\" in ds.attrs, \"Optional global attribute 'Description' is missing\")\n\t\tself._warn(\"Journal\" in ds.attrs, \"Optional global attribute 'Journal' is missing\")\n\t\tself._warn(\"Authors\" in ds.attrs, \"Optional global attribute 'Authors' is missing\")\n\t\tself._warn(\"Title\" in ds.attrs, \"Optional global attribute 'Title' is missing\")\n\t\tself._warn(\"Year\" in ds.attrs, \"Optional global attribute 'Year' is missing\")\n\t\tself._warn(\"CreationDate\" in ds.attrs, \"Optional global attribute 'CreationDate' is missing\")\n\n\t\tif self._check(\"ClusterID\" in ds.ca, \"Column attribute 'ClusterID' is missing\"):\n\t\t\tself._check(np.issubdtype(ds.ca.ClusterID.dtype, np.int_), \"Column attribute 'ClusterID' must be integer 
dtype\")\n\t\t\tself._check(len(np.unique(ds.ca.ClusterID)) == np.max(ds.ca.ClusterID) and np.min(ds.ca.ClusterID) == 0, \"Column attribute 'ClusterID' must be integers 0, 1, 2, ... with no missing values\")\n\t\t\tself._check(ds.ca.ClusterID.shape == (n_cells,), f\"Column attribute 'ClusterID' must be 1-dimensional array of {n_cells} elements\")\n\n\t\tif \"ClusterName\" in ds.ca:\n\t\t\tself._check(ds.ca.ClusterName.dtype == object and np.issubdtype(ds.ca.ClusterName[0].dtype, np.str_), \"Column attribute 'ClusterName' must be an array of strings\")\n\t\t\tself._check(ds.ca.ClusterName.shape == (n_cells,), f\"Column attribute 'ClusterName' must be 1-dimensional array of {n_cells} elements\")\n\t\t\tone_to_one = True\n\t\t\tfor cid in np.unique(ds.ca.ClusterID):\n\t\t\t\tif len(np.unique(ds.ca.ClusterName[ds.ca.ClusterID == cid])) != 1:\n\t\t\t\t\tone_to_one = False\n\t\t\t\t\tbreak\n\t\t\tfor cn in np.unique(ds.ca.ClusterName):\n\t\t\t\tif len(np.unique(ds.ca.ClusterID[ds.ca.ClusterName == cn])) != 1:\n\t\t\t\t\tone_to_one = False\n\t\t\t\t\tbreak\n\t\t\tif not one_to_one:\n\t\t\t\tself._check(False, \"ClusterName must correspond 1:1 with ClusterID\")\n\t\telse:\n\t\t\tself.warnings.append(\"Optional column attribute 'ClusterName' is missing\")\n\n\t\tif self._check(\"CellID\" in ds.ca, \"Column attribute 'CellID' is missing\"):\n\t\t\tself._check(ds.ca.CellID.dtype == object and np.issubdtype(ds.ca.CellID[0].dtype, np.str_), f\"Column attribute 'CellID' must be an array of strings, not '{ds.ca.CellID[0].dtype}'\")\n\t\t\tself._check(ds.ca.CellID.shape == (n_cells,), f\"Column attribute 'CellID' must be 1-dimensional array of {n_cells} elements\")\n\t\t\tself._check(len(np.unique(ds.ca.CellID)) == n_cells, \"Column attribute 'CellID' cannot contain duplicate values\")\n\n\t\tif \"Valid\" in ds.ca:\n\t\t\tself._check(np.issubdtype(ds.ca.Valid.dtype, np.int_), f\"Column attribute 'Valid' must be integer dtype, not '{ds.ca.Valid.dtype}'\")\n\t\t\tvalids = np.unique(ds.ca.Valid)\n\t\t\tself._check(np.all(np.isin(ds.ca.Valid, [0, 1])), \"Column attribute 'Valid' must be integers 0 or 1 only\")\n\t\t\tself._check(ds.ca.Valid.shape == (n_cells,), f\"Column attribute 'Valid' must be 1-dimensional array of {n_cells} elements\")\n\t\telse:\n\t\t\tself.warnings.append(\"Optional column attribute 'Valid' is missing\")\n\n\t\tif \"Outliers\" in ds.ca:\n\t\t\tself._check(np.issubdtype(ds.ca.Outliers.dtype, np.int_), f\"Column attribute 'Outliers' must be integer dtype, not '{ds.ca.Outliers.dtype}'\")\n\t\t\tself._check(np.all(np.isin(ds.ca.Outliers, [0, 1])), \"Column attribute 'Outliers' must be integers 0 or 1 only\")\n\t\t\tself._check(ds.ca.Outliers.shape == (n_cells,), f\"Column attribute 'Outliers' must be 1-dimensional array of {n_cells} elements\")\n\t\telse:\n\t\t\tself.warnings.append(\"Optional column attribute 'Outliers' is missing\")\n\n\t\tif self._check(\"Accession\" in ds.ra, \"Row attribute 'Accession' is missing\"):\n\t\t\tself._check(ds.ra.Accession.dtype == object and np.issubdtype(ds.ra.Accession[0].dtype, np.str_), f\"Row attribute 'Accession' must be an array of strings, not '{ds.ra.Accession[0].dtype}'\")\n\t\t\tself._check(ds.ra.Accession.shape == (n_genes,), f\"Row attribute 'Accession' must be 1-dimensional array of {n_genes} elements\")\n\t\t\tself._check(len(np.unique(ds.ra.Accession)) == n_genes, \"Row attribute 'Accession' cannot contain duplicate values\")\n\n\t\tif self._check(\"Gene\" in ds.ra, \"Row attribute 'Gene' is missing\"):\n\t\t\tself._check(ds.ra.Gene.dtype 
== object and np.issubdtype(ds.ra.Gene[0].dtype, np.str_), f\"Row attribute 'Gene' must be an array of strings, not '{ds.ra.Gene[0].dtype}'\")\n\t\t\tself._check(ds.ra.Gene.shape == (n_genes,), f\"Row attribute 'Gene' must be 1-dimensional array of {n_genes} elements\")\n\n\t\tif \"Valid\" in ds.ra:\n\t\t\tself._check(np.issubdtype(ds.ra.Valid.dtype, np.int_), f\"Row attribute 'Valid' must be integer dtype, not '{ds.ra.Valid.dtype}'\")\n\t\t\tvalids = np.unique(ds.ra.Valid)\n\t\t\tself._check(np.all(np.isin(ds.ra.Valid, [0, 1])), \"Row attribute 'Valid' must be integers 0 or 1 only\")\n\t\t\tself._check(ds.ra.Valid.shape == (n_cells,), f\"Row attribute 'Valid' must be 1-dimensional array of {n_cells} elements\")\n\t\telse:\n\t\t\tself.warnings.append(\"Optional row attribute 'Valid' is missing\")\n\n\t\tif \"Selected\" in ds.ra:\n\t\t\tself._check(np.issubdtype(ds.ra.Selected.dtype, np.int_), f\"Row attribute 'Selected' must be integer dtype, not '{ds.ra.Selected.dtype}'\")\n\t\t\tvalids = np.unique(ds.ra.Selected)\n\t\t\tself._check(np.all(np.isin(ds.ra.Selected, [0, 1])), \"Row attribute 'Selected' must be integers 0 or 1 only\")\n\t\t\tself._check(ds.ra.Selected.shape == (n_cells,), f\"Row attribute 'Selected' must be 1-dimensional array of {n_cells} elements\")\n\t\telse:\n\t\t\tself.warnings.append(\"Optional row attribute 'Selected' is missing\")\n\n\t\treturn len(self.errors) == 0\n\t\t\n\tdef validate_spec(self, file: h5py.File) -> bool:\n\t\t\"\"\"\n\t\tValidate the LoomConnection object against the format specification.\n\n\t\tArgs:\n\t\t\tfile:\t\t\th5py File object\n\t\t\n\t\tReturns:\n\t\t\tTrue if the file conforms to the specs, else False\n\t\t\n\t\tRemarks:\n\t\t\tUpon return, the instance attributes 'self.errors' and 'self.warnings' contain\n\t\t\tlists of errors and warnings, and the 'self.summary' attribute contains a summary\n\t\t\tof the file contents.\n\t\t\"\"\"\n\t\tmatrix_types = [\"float16\", \"float32\", \"float64\", \"int8\", \"int16\", \"int32\", \"int64\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"]\n\t\tvertex_types = [\"int8\", \"int16\", \"int32\", \"int64\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"]\n\t\tweight_types = [\"float16\", \"float32\", \"float64\"]\n\n\t\tdef delay_print(text: str) -> None:\n\t\t\tself.summary.append(text)\n\n\t\tdef dt(t: str) -> str:\n\t\t\tif str(t).startswith(\"|S\"):\n\t\t\t\treturn f\"string\"\n\t\t\treturn str(t)\n\n\t\twidth_ra = 0\n\t\twidth_ca = 0\n\t\twidth_globals = 0\n\t\tif self._check(\"row_attrs\" in file, \"'row_attrs' group is missing\"):\n\t\t\twidth_ra = max([len(x) for x in (file[\"row_attrs\"].keys())], default=0)\n\t\tif self._check(\"col_attrs\" in file, \"'col_attrs' group is missing\"):\n\t\t\twidth_ca = max([len(x) for x in (file[\"col_attrs\"].keys())], default=0)\n\t\tif self.version == \"3.0.0\":\n\t\t\tif self._check(\"attrs\" in file, \"Global attributes missing\"):\n\t\t\t\twidth_globals = max([len(x) for x in (file[\"attrs\"].keys())], default=0)\n\t\telif len(file.attrs) > 0:\n\t\t\twidth_globals = max([len(x) for x in file.attrs.keys()])\n\t\twidth_layers = 0\n\t\tif \"layers\" in file and len(file[\"layers\"]) > 0:\n\t\t\twidth_layers = max([len(x) for x in file[\"layers\"].keys()])\n\t\twidth_layers = max(width_layers, len(\"Main matrix\"))\n\t\twidth = max(width_ca, width_ra, width_globals)\n\n\t\tdelay_print(\"Global attributes:\")\n\t\tif self.version == \"3.0.0\":\n\t\t\tself._check(\"attrs\" in file, \"Global attributes missing\")\n\t\t\tfor attr in file[\"attrs\"]:\n\t\t\t\tif 
type(attr) is np.ndarray:\n\t\t\t\t\tdelay_print(f\"{attr: >{width}} {attr.dtype} {attr.shape}\")\n\t\t\t\telse:\n\t\t\t\t\tdelay_print(f\"{attr: >{width}} {type(attr).__name__} (scalar)\")\n\t\telse:\n\t\t\tfor key, value in file.attrs.items():\n\t\t\t\tif type(value) is str:\n\t\t\t\t\tself.warnings.append(f\"Global attribute '{key}' has dtype string, which will be deprecated in future Loom versions\")\n\t\t\t\t\tdelay_print(f\"{key: >{width}} string\")\n\t\t\t\telif type(value) is bytes:\n\t\t\t\t\tself.warnings.append(f\"Global attribute '{key}' has dtype bytes, which will be deprecated in future Loom versions\")\n\t\t\t\t\tdelay_print(f\"{key: >{width}} bytes\")\n\t\t\t\telse:\n\t\t\t\t\tdelay_print(f\"{key: >{width}} {dt(file.attrs[key].dtype)}\")\n\t\t\t\t\n\t\tif self._check(\"matrix\" in file, \"Main matrix missing\"):\n\t\t\tself._check(file[\"matrix\"].dtype in matrix_types, f\"Main matrix dtype={file['matrix'].dtype} is not allowed\")\n\t\t\tshape = file[\"matrix\"].shape\n\t\t\tdelay_print(f\"Layers shape={shape}:\")\n\t\t\tdelay_print(f\"{'Main matrix': >{width}} {file['matrix'].dtype}\")\n\n\t\tif \"layers\" in file:\n\t\t\tfor layer in file[\"layers\"]:\n\t\t\t\tself._check(file[\"layers\"][layer].shape == shape, f\"Layer '{layer}' shape {file['layers'][layer].shape} does not match main matrix shape {shape}\")\n\t\t\t\tself._check(file[\"layers\"][layer].dtype in matrix_types, f\"Layer '{layer}' dtype={file['layers'][layer].dtype} is not allowed\")\n\t\t\t\tdelay_print(f\"{layer: >{width}} {file['layers'][layer].dtype}\")\n\n\t\tif self.version == \"3.0.0\":\n\t\t\texpected_dtype = np.object_\n\t\telse:\n\t\t\texpected_dtype = np.string_\n\t\tdelay_print(\"Row attributes:\")\n\t\tif self._check(\"row_attrs\" in file, \"'row_attrs' group is missing\"):\n\t\t\tfor ra in file[\"row_attrs\"]:\n\t\t\t\tself._check(file[\"row_attrs\"][ra].shape[0] == shape[0], f\"Row attribute '{ra}' shape {file['row_attrs'][ra].shape[0]} first dimension does not match row dimension {shape}\")\n\t\t\t\tself._check(file[\"row_attrs\"][ra].dtype in matrix_types or np.issubdtype(file['row_attrs'][ra].dtype, expected_dtype), f\"Row attribute '{ra}' dtype {file['row_attrs'][ra].dtype} is not allowed\")\n\t\t\t\tra_shape = file['row_attrs'][ra].shape\n\t\t\t\tdelay_print(f\"{ra: >{width}} {dt(file['row_attrs'][ra].dtype)} {ra_shape if len(ra_shape) > 1 else ''}\")\n\t\t\tif len(file[\"row_attrs\"]) == 0:\n\t\t\t\tdelay_print(\" (none)\")\n\n\t\tdelay_print(\"Column attributes:\")\n\t\tif self._check(\"col_attrs\" in file, \"'col_attrs' group is missing\"):\n\t\t\tfor ca in file[\"col_attrs\"]:\n\t\t\t\tself._check(file[\"col_attrs\"][ca].shape[0] == shape[1], f\"Column attribute '{ca}' shape {file['col_attrs'][ca].shape[0]} first dimension does not match column dimension {shape}\")\n\t\t\t\tself._check(file[\"col_attrs\"][ca].dtype in matrix_types or np.issubdtype(file[\"col_attrs\"][ca].dtype, expected_dtype), f\"Column attribute '{ca}' dtype {file['col_attrs'][ca].dtype} is not allowed\")\n\t\t\t\tca_shape = file['col_attrs'][ca].shape\n\t\t\t\tdelay_print(f\"{ca: >{width}} {dt(file['col_attrs'][ca].dtype)} {ca_shape if len(ca_shape) > 1 else ''}\")\n\t\t\tif len(file[\"col_attrs\"]) == 0:\n\t\t\t\tdelay_print(\" (none)\")\n\n\t\tdelay_print(\"Row graphs:\")\n\t\tif \"row_graphs\" in file:\n\t\t\tif self.version == \"2.0.1\" or self.version == \"3.0.0\":\n\t\t\t\tself._check(\"row_graphs\" in file, \"'row_graphs' group is missing (try spec_version='old')\")\n\t\t\tfor g in 
file[\"row_graphs\"]:\n\t\t\t\tself._check(\"a\" in file[\"row_graphs\"][g], f\"Row graph '{g}' is missing vector 'a', denoting start vertices\")\n\t\t\t\tself._check(file[\"row_graphs\"][g]['a'].dtype in vertex_types, f\"/row_graphs/{g}/a.dtype {file['row_graphs'][g]['a'].dtype} must be integer\")\n\t\t\t\tself._check(\"b\" in file[\"row_graphs\"][g], f\"Row graph '{g}' is missing vector 'b', denoting end vertices\")\n\t\t\t\tself._check(file[\"row_graphs\"][g]['b'].dtype in vertex_types, f\"/row_graphs/{g}/b.dtype {file['row_graphs'][g]['b'].dtype} must be integer\")\n\t\t\t\tself._check(\"w\" in file[\"row_graphs\"][g], f\"Row graph '{g}' is missing vector 'w', denoting vertex weights\")\n\t\t\t\tself._check(file[\"row_graphs\"][g]['w'].dtype in weight_types, f\"/row_graphs/{g}/w.dtype {file['row_graphs'][g]['w'].dtype} must be float\")\n\t\t\t\tself._check(file['row_graphs'][g]['a'].shape[0] == file['row_graphs'][g]['b'].shape[0] and file['row_graphs'][g]['a'].shape[0] == file['row_graphs'][g]['w'].shape[0], f\"Row graph '{g}' sparse vectors a, b and w must have equal length\")\n\t\t\t\tdelay_print(f\" '{g}' with {file['row_graphs'][g]['a'].shape[0]} edges\")\n\t\t\tif len(file[\"row_graphs\"]) == 0:\n\t\t\t\tdelay_print(\" (none)\")\n\n\t\tdelay_print(\"Column graphs:\")\n\t\tif \"col_graphs\" in file:\n\t\t\tif self.version == \"2.0.1\" or self.version == \"3.0.0\":\n\t\t\t\tself._check(\"col_graphs\" in file, \"'col_graphs' group is missing (try spec_version='old')\")\n\t\t\tfor g in file[\"col_graphs\"]:\n\t\t\t\tself._check(\"a\" in file[\"col_graphs\"][g], f\"Column graph '{g}' is missing vector 'a', denoting start vertices\")\n\t\t\t\tself._check(file[\"col_graphs\"][g]['a'].dtype in vertex_types, f\"/col_graphs/{g}/a.dtype {file['col_graphs'][g]['a'].dtype} must be integer\")\n\t\t\t\tself._check(\"b\" in file[\"col_graphs\"][g], f\"Column graph '{g}' is missing vector 'b', denoting end vertices\")\n\t\t\t\tself._check(file[\"col_graphs\"][g]['b'].dtype in vertex_types, f\"/col_graphs/{g}/b.dtype {file['col_graphs'][g]['b'].dtype} must be integer\")\n\t\t\t\tself._check(\"w\" in file[\"col_graphs\"][g], f\"Column graph '{g}' is missing vector 'w', denoting vertex weights\")\n\t\t\t\tself._check(file[\"col_graphs\"][g]['w'].dtype in weight_types, f\"/col_graphs/{g}/w.dtype {file['col_graphs'][g]['w'].dtype} must be float\")\n\t\t\t\tself._check(file['col_graphs'][g]['a'].shape[0] == file['col_graphs'][g]['b'].shape[0] and file['col_graphs'][g]['a'].shape[0] == file['col_graphs'][g]['w'].shape[0], f\"Column graph '{g}' sparse vectors a, b and w must have equal length\")\n\t\t\t\tdelay_print(f\" '{g}' with {file['col_graphs'][g]['a'].shape[0]} edges\")\n\t\t\tif len(file[\"col_graphs\"]) == 0:\n\t\t\t\tdelay_print(\" (none)\")\n\n\t\treturn len(self.errors) == 0\n" ]
[ [ "numpy.min", "numpy.unique", "numpy.issubdtype", "numpy.max", "numpy.isin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
griffij/QuakeRates
[ "70069bb271a1987e72fcbdf3aa0c0a8a79591580" ]
[ "utilities/bilinear.py" ]
[ "\"\"\"Functions for bi-linear fitting\nWith thanks to Trevor Allen, Geoscience Australia\n\"\"\"\n\nimport numpy as np\nfrom numpy import zeros_like\nfrom scipy.odr import Data, Model, ODR, models\nimport scipy.odr.odrpack as odrpack\n\n# functions to return binary arrays\ndef highside(x, hx):\n from numpy import zeros_like\n xmod = zeros_like(x)\n \n idx = x >= hx\n xmod[idx] = 1\n return xmod\n \ndef lowside(x, hx):\n from numpy import zeros_like\n xmod = zeros_like(x)\n \n idx = x <= hx\n xmod[idx] = 1\n return xmod\n\ndef bilinear_reg_zero_slope(c, x):\n hx = c[2] # x-hinge\n ans2 = zeros_like(x)\n ans1 = zeros_like(x)\n \n idx1 = x <= hx\n idx2 = x >= hx\n \n modx_lo = lowside(x, hx)\n modx_hi = highside(x, hx)\n \n ans1 = modx_lo * (c[0] * x + c[1])\n yarea = c[0] * hx + c[1]\n ans2 = modx_hi * yarea\n \n return ans1 + ans2 \n\ndef bilinear_reg_fix(c, x):\n from numpy import zeros_like\n hxfix = np.log10(2e-4) #4.0 # hinge magnitude\n ans2 = zeros_like(x)\n ans1 = zeros_like(x)\n\n #idx1 = x <= hx\n #idx2 = x >= hx\n\n modx_lo = lowside(x, hxfix)\n modx_hi = highside(x, hxfix)\n\n ans1 = modx_lo * (c[0] * x + c[1])\n yarea = c[0] * hxfix + c[1]\n ans2 = modx_hi * (c[2] * (x-hxfix) + yarea)\n\n return ans1 + ans2\n\ndef bilinear_reg_fix_zero_slope(c, x):\n from numpy import zeros_like\n hxfix = np.log10(2e-4) #4.0 # hinge magnitude\n ans2 = zeros_like(x)\n ans1 = zeros_like(x)\n\n #idx1 = x <= hx\n #idx2 = x >= hx\n\n modx_lo = lowside(x, hxfix)\n modx_hi = highside(x, hxfix)\n\n ans1 = modx_lo * (c[0] * x + c[1])\n yarea = c[0] * hxfix + c[1]\n# yarea = c[0] * hx + c[1] \n# ans2 = modx_hi * (c[2] * (x-hxfix) + yarea)\n ans2 = modx_hi * yarea \n return ans1 + ans2\n" ]
[ [ "numpy.log10", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bsulman/CORPSE_rhizosphere_Moore_etal_2019
[ "cbe182ddaaf38b21291dc757a23c2dba1412e7b5" ]
[ "rhiz-sim.py" ]
[ "import CORPSE\nimport pandas\nfrom pylab import *\n\n# 5% clay\nparams={\n 'vmaxref':[1000,50,600], # Relative maximum enzymatic decomp rates. Multiply by 0.022 to get actual turnover rates (to take moisture equation into account)\n 'Ea':[37e3,54e3,50e3], # Activation energy\n 'kC':[0.01,0.01,0.01], # Michaelis-Menton parameter\n 'gas_diffusion_exp':2.5, # Determines suppression of decomp at high soil moisture\n 'minMicrobeC':1e-3, #Minimum microbial biomass (fraction of total C)\n 'Tmic':0.25, # Microbial lifetime\n 'et':0.5, # Fraction of turnover not converted to CO2\n 'eup':[0.6,0.05,0.6], # Carbon uptake efficiency\n 'tProtected':75.0, # Protected C turnover time (years)\n 'protection_rate':[0.25,0.00005,0.75], # Protected carbon formation rate (year-1)\n 'Resp_uses_total_C':False\n}\n\ndt=1.0/365 # Daily time step\ndepth=5 #cm\n\n# Read daily temperature data for upland site\n## NOTE: Weather data downloaded from SPRUCE data repository: http://dx.doi.org/10.3334/CDIAC/spruce.001\nweatherdata=pandas.read_csv('../SPRUCE-data/EM123_Combined_public_data_2010_2015.csv',index_col='datetime',parse_dates=True,na_values=[-9999.0])\nsoilT=weatherdata['EM1_Hummock10cm']+273.15\nsoilmoisture=weatherdata['EM1_VW1_TopofHummock']\nsoilmoisture[soilmoisture<0.0]=0.0\nsoilmoisture=soilmoisture/soilmoisture.max()\n\n# Soil moisture from observations\ntheta=soilmoisture.fillna(method='backfill').resample('1D').mean()[365:]\nT=soilT.fillna(method='backfill').resample('1D').mean()[365:]\n\n# After spinup, run with no fresh inputs\ninputs = array([0.3,0.7,0.0])*0.0e-3 # gC/g soil/year\n\n# Calculate ranges of root density\nrootdata=pandas.read_excel('All Site SEM data SULMAN with volume.xlsx')\nbulkdensity=1.26 #g/cm3\n\nrootdensity=rootdata['roots per volume']*1e-3 #g/cm3 of soil\nrootdensity[rootdensity<0]=nan\nrootmass=rootdata['roots']*1e-3 # g roots/g soil\n\nsrl=rootdata['SRL (m g-1)'].fillna(rootdata['SRL (m g-1)'].mean()) # m/g root\nrootlength = rootmass*srl # m root/g soil\nexudationrate=rootlength*100*1e-6*24*365/1000.0 # Phillips et al: 1 ugC/cm root length/hour\n\ndo_spinup=False\nsoc=rootdata['SOC g kg-1']\nsoc_kg_m2=soc/(bulkdensity*1e3)*100**2*depth*1e-3\n\n# Set up initial cohorts\nif do_spinup:\n nbins=1\nelse:\n nbins=10\n\nrootlength_bins=rootlength.quantile(linspace(0.5/nbins,1.0-0.5/nbins,nbins)) # m/g soil\nrootdensity_cm_gsoil=array(rootlength_bins)*100\nexudate_ugC_cmroot_hour=0.25\nexudate_gC_gsoil_hour=exudate_ugC_cmroot_hour*1e-6*rootdensity_cm_gsoil\nexudate_gC_gsoil_year=exudate_gC_gsoil_hour*24*365\nexudationrate=exudate_gC_gsoil_year # Phillips et al: 1 ugC/cm root length/hour\n\n# c=CORPSE.soil_carbon_cohort(litterC=[6e-05,0.012,9.6e-05], protectedC=[0.0037,4.2e-05,0.0054], livingMicrobeC=8.4e-05, params=params)\nc=CORPSE.soil_carbon_cohort(litterC=[0.0001,0.011,8.7e-05], protectedC=[0.0032,4.1e-05,0.0061], livingMicrobeC=0.00021, params=params)\nc2=c.copy()\nc2.params.update({'Resp_uses_total_C':True})\n\ncohorts=[]\ncohorts2=[]\nfor ii in range(nbins):\n cohorts.append(c.copy())\n cohorts2.append(c2.copy())\n\nif do_spinup:\n nsteps=365*50\nelse:\n nsteps=365*4\n\n\noutputs={'unprotectedC':zeros((nsteps,nbins,3)),\n 'protectedC':zeros((nsteps,nbins,3)),\n 'decomp':zeros((nsteps,nbins,3)),\n 'microbeC':zeros((nsteps,nbins)),\n 'CO2':zeros((nsteps,nbins))\n }\noutputs2={'unprotectedC':zeros((nsteps,nbins,3)),\n 'protectedC':zeros((nsteps,nbins,3)),\n 'decomp':zeros((nsteps,nbins,3)),\n 'microbeC':zeros((nsteps,nbins)),\n 'CO2':zeros((nsteps,nbins))\n 
}\n\ndoy=T.index.dayofyear\nexudation_ts=cos((doy-0.67*365)*2*pi/365)+1\n\nfor step in range(nsteps):\n if step%365==0:\n print ('Year %d of %d'%(floor(step/365),floor(nsteps/365)))\n for cc in range(len(cohorts)):\n out=cohorts[cc].update(T[step%len(T)],theta[step%len(T)],dt)\n cohorts[cc].check_validity()\n outputs['unprotectedC'][step,cc,:]=cohorts[cc].litterC\n outputs['decomp'][step,cc,:]=out['decomp']\n outputs['protectedC'][step,cc,:]=cohorts[cc].protectedC\n outputs['microbeC'][step,cc]=cohorts[cc].livingMicrobeC\n outputs['CO2'][step,cc]=cohorts[cc].CO2\n\n cohorts[cc].add_carbon((inputs+array([exudationrate[cc],0.0,0.0]))*dt*exudation_ts[step%len(T)])\n\n out2=cohorts2[cc].update(T[step%len(T)],theta[step%len(T)],dt)\n cohorts2[cc].check_validity()\n outputs2['unprotectedC'][step,cc,:]=cohorts2[cc].litterC\n outputs2['decomp'][step,cc,:]=out2['decomp']\n outputs2['protectedC'][step,cc,:]=cohorts2[cc].protectedC\n outputs2['microbeC'][step,cc]=cohorts2[cc].livingMicrobeC\n outputs2['CO2'][step,cc]=cohorts2[cc].CO2\n\n cohorts2[cc].add_carbon((inputs+array([exudationrate[cc],0.0,0.0]))*dt*exudation_ts[step%len(T)])\n\n# Plot results\n\n\nif do_spinup:\n t=arange(nsteps)/365.0\n figure(1);clf()\n subplot(211)\n plot(t,outputs['unprotectedC'][:,0,:].sum(axis=1),'b-',label='Unprotected')\n plot(t,outputs['protectedC'][:,0,:].sum(axis=1),'r-',label='Protected')\n plot(t,outputs['unprotectedC'][:,0,0],':g',label='Fast')\n plot(t,outputs['unprotectedC'][:,0,1],'b:',label='Slow')\n plot(t,outputs['unprotectedC'][:,0,2],'r:',label='Microbe necro')\n plot(t,outputs['microbeC'][:,0],'m-',label='Microbe')\n plot(t,outputs['unprotectedC'][:,0,:].sum(axis=1)+outputs['protectedC'][:,0,:].sum(axis=1),'k-',label='Total')\n legend(loc='best',fontsize='medium')\n draw()\n\n subplot(212)\n plot(t,outputs2['unprotectedC'][:,0,:].sum(axis=1),'b-',label='Unprotected')\n plot(t,outputs2['protectedC'][:,0,:].sum(axis=1),'r-',label='Protected')\n plot(t,outputs2['unprotectedC'][:,0,0],':g',label='Fast')\n plot(t,outputs2['unprotectedC'][:,0,1],'b:',label='Slow')\n plot(t,outputs2['unprotectedC'][:,0,2],'r:',label='Microbe necro')\n plot(t,outputs2['microbeC'][:,0],'m-',label='Microbe')\n plot(t,outputs2['unprotectedC'][:,0,:].sum(axis=1)+outputs2['protectedC'][:,0,:].sum(axis=1),'k-',label='Total')\n\nelse:\n t=T.index[:nsteps]\n figure(1);clf()\n # subplot(131)\n lowpoint=0\n plot(t,outputs['unprotectedC'][:,lowpoint,:].sum(axis=1)*1e3,'b-',label='Unprotected')\n # plot(t,outputs['protectedC'][:,0,:].sum(axis=1),'r-',label='Protected')\n plot(t,outputs['unprotectedC'][:,lowpoint,0]*1e3,'-g',label='Fast')\n # plot(t,outputs['unprotectedC'][:,0,1],'c:',label='Slow')\n plot(t,outputs['unprotectedC'][:,lowpoint,2]*1e3,'y-',label='Dead microbe')\n plot(t,outputs['microbeC'][:,lowpoint]*1e3,'m-',label='Live microbe')\n # plot(t,outputs['unprotectedC'][:,0,:].sum(axis=1)+outputs['protectedC'][:,0,:].sum(axis=1),'k-',label='Total')\n leg=legend(loc='best',fontsize='medium');leg.get_frame().set_alpha(0.5)\n\n highpoint=4\n plot(t,outputs['unprotectedC'][:,highpoint,:].sum(axis=1)*1e3,'b--',label='Unprotected')\n # plot(t,outputs['protectedC'][:,-1,:].sum(axis=1),'r--',label='Protected')\n plot(t,outputs['unprotectedC'][:,highpoint,0]*1e3,'--g',label='Fast')\n # plot(t,outputs['unprotectedC'][:,-1,1],'c-.',label='Slow')\n plot(t,outputs['unprotectedC'][:,highpoint,2]*1e3,'y--',label='Microbe necro')\n plot(t,outputs['microbeC'][:,highpoint]*1e3,'m--',label='Microbe')\n # 
plot(t,outputs['unprotectedC'][:,0-1,:].sum(axis=1)+outputs['protectedC'][:,0-1,:].sum(axis=1),'k--',label='Total')\n xlabel('Time (years)')\n ylabel('Carbon content(mgC/g soil)')\n title('Labile carbon pools')\n\n plot(t,outputs2['unprotectedC'][:,highpoint,:].sum(axis=1)*1e3,'b:')\n # plot(t,outputs['protectedC'][:,0,:].sum(axis=1),'r-',label='Protected')\n plot(t,outputs2['unprotectedC'][:,highpoint,0]*1e3,':g')\n # plot(t,outputs['unprotectedC'][:,0,1],'c:',label='Slow')\n plot(t,outputs2['unprotectedC'][:,highpoint,2]*1e3,'y:')\n plot(t,outputs2['microbeC'][:,highpoint]*1e3,'m:')\n\n draw()\n\n figure(2,figsize=(8,5.3));clf()\n\n timepoint=nonzero((t.week==25)&(t.year==2015))[0]\n\n\n subplot(122)\n semilogx(rootlength_bins*1e3,outputs['microbeC'][timepoint,:].mean(axis=0)*1e6,'ko-',label='June')\n semilogx(rootlength_bins*1e3,outputs2['microbeC'][timepoint,:].mean(axis=0)*1e6,'k^--')\n xlabel('Root density (mm g soil$^{-1}$)')\n ylabel('Microbial biomass (mg C kg soil$^{-1}$)')\n title('Living microbial biomass')\n # gca().set_ylim(bottom=-0.1,top=200)\n # legend(loc='upper left',fontsize='medium')\n\n data_out=pandas.DataFrame({'H2 Microbial biomass (mgC/kgsoil)':outputs['microbeC'][timepoint,:].mean(axis=0)*1e6,\n 'H1 Microbial biomass (mgC/kgsoil)':outputs2['microbeC'][timepoint,:].mean(axis=0)*1e6,\n 'rootdensity (mm/gsoil)':rootlength_bins*1e3})\n\n subplot(121)\n zeropoint=outputs['decomp'][timepoint,0,0].mean(axis=0)/outputs['unprotectedC'][timepoint,0,0].mean(axis=0)\n zeropoint=1.0\n semilogx(rootlength_bins*1e3,outputs['decomp'][timepoint,:,0].mean(axis=0)/outputs['unprotectedC'][timepoint,:,0].mean(axis=0)/10,'go-',label='Labile*0.1')\n data_out['H2 Labile Decomp rate (year-1)']=outputs['decomp'][timepoint,:,0].mean(axis=0)/outputs['unprotectedC'][timepoint,:,0].mean(axis=0)\n\n zeropoint=outputs['decomp'][timepoint,0,1].mean(axis=0)/outputs['unprotectedC'][timepoint,0,1].mean(axis=0)\n zeropoint=1.0\n semilogx(rootlength_bins*1e3,outputs['decomp'][timepoint,:,1].mean(axis=0)/outputs['unprotectedC'][timepoint,:,1].mean(axis=0),'bo-',label='Resistant')\n data_out['H2 Resistant Decomp rate (year-1)']=outputs['decomp'][timepoint,:,1].mean(axis=0)/outputs['unprotectedC'][timepoint,:,1].mean(axis=0)\n\n zeropoint=outputs2['decomp'][timepoint,0,0].mean(axis=0)/outputs2['unprotectedC'][timepoint,0,0].mean(axis=0)\n zeropoint=1.0\n semilogx(rootlength_bins*1e3,outputs2['decomp'][timepoint,:,0].mean(axis=0)/outputs2['unprotectedC'][timepoint,:,0].mean(axis=0)/10,'g^--')\n data_out['H1 Labile Decomp rate (year-1)']=outputs2['decomp'][timepoint,:,0].mean(axis=0)/outputs2['unprotectedC'][timepoint,:,0].mean(axis=0)\n\n zeropoint=outputs2['decomp'][timepoint,0,1].mean(axis=0)/outputs2['unprotectedC'][timepoint,0,1].mean(axis=0)\n zeropoint=1.0\n semilogx(rootlength_bins*1e3,outputs2['decomp'][timepoint,:,1].mean(axis=0)/outputs2['unprotectedC'][timepoint,:,1].mean(axis=0),'b^--')\n data_out['H1 Resistant Decomp rate (year-1)']=outputs2['decomp'][timepoint,:,1].mean(axis=0)/outputs2['unprotectedC'][timepoint,:,1].mean(axis=0)\n\n\n xlabel('Root density (mm g soil$^{-1}$)')\n ylabel('Decomposition rate (year$^{-1}$)')\n title('SOC turnover rate')\n legend(fontsize='medium')\n # ylim(-.5,20.2)\n\n tight_layout()\n draw()\n\n data_out.to_csv('plotted_data.csv',index_label='Root density percentile')\n\n\n figure(3);clf()\n subplot(111)\n totalC=(outputs['unprotectedC'][timepoint,:,:].mean(axis=0).sum(axis=1)+outputs['protectedC'][timepoint,:,:].mean(axis=0).sum(axis=1))*1000\n 
plot(rootlength_bins,totalC-totalC[0],'go-')\n totalC2=(outputs2['unprotectedC'][timepoint,:,:].mean(axis=0).sum(axis=1)+outputs2['protectedC'][timepoint,:,:].mean(axis=0).sum(axis=1))*1000\n plot(rootlength_bins,totalC2-totalC2[0],'go--')\n xlabel('Root length (m/g soil)')\n ylabel('Difference in soil C (mgC/g soil)')\n title('4-year difference in soil C')\n\n subplots_adjust(left=0.07,right=0.95,wspace=0.25)\n\n draw()\n\nshow()\n" ]
[ [ "pandas.read_excel", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
syoyo/nnabla
[ "e94bac5bed65337010e2ac07a5937fb862ab2dd8" ]
[ "python/test/function/test_random_functions.py" ]
[ "# Copyright (c) 2017 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom six.moves import range\n\nimport pytest\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs_rand = list_context('Rand')\nctxs_randint = list_context('Randint')\nctxs_randn = list_context('Randn')\n\n\[email protected](\"ctx, func_name\", ctxs_rand)\[email protected](\"low, high\", [(0, 1), (-2.5, 100), (0.1, 0.11)])\[email protected](\"shape\", [[], [5], [100, 100]])\[email protected](\"seed\", [-1, 313])\ndef test_rand_forward(seed, ctx, func_name, low, high, shape):\n with nn.context_scope(ctx):\n o = F.rand(low, high, shape, seed=seed)\n assert o.shape == tuple(shape)\n assert o.parent.name == func_name\n o.forward()\n # NOTE: The following should be < high,\n # but use <= high because std::uniform_random contains a bug.\n assert np.all(o.d <= high)\n assert np.all(o.d >= low)\n\n\[email protected](\"ctx, func_name\", ctxs_randint)\[email protected](\"low, high\", [(100, 50000), (-5, 100), (101, 102)])\[email protected](\"shape\", [[], [5], [100, 100]])\[email protected](\"seed\", [-1, 313])\ndef test_randint_forward(seed, ctx, func_name, low, high, shape):\n with nn.context_scope(ctx):\n o = F.randint(low, high, shape, seed=seed)\n assert o.shape == tuple(shape)\n assert o.parent.name == func_name\n o.forward()\n # NOTE: The following should be < high,\n # but use <= high because std::uniform_random contains a bug.\n assert np.all(o.d <= high)\n assert np.all(o.d >= low)\n\n\[email protected](\"ctx, func_name\", ctxs_randn)\[email protected](\"mu, sigma\", [(0, 1), (-10, 10), (10000.5, 0.5)])\[email protected](\"shape\", [[], [5], [100, 100]])\[email protected](\"seed\", [-1, 313])\ndef test_randn_forward_backward(seed, ctx, func_name, mu, sigma, shape):\n with nn.context_scope(ctx):\n o = F.randn(mu, sigma, shape, seed=seed)\n assert o.shape == tuple(shape)\n assert o.parent.name == func_name\n o.forward()\n if o.size >= 10000:\n est_mu = o.d.mean()\n est_sigma = o.d.std()\n np.isclose(est_mu, mu, atol=sigma)\n np.isclose(est_sigma, sigma, atol=sigma)\n else:\n data = []\n for i in range(10000):\n o.forward()\n data += [o.d.copy()]\n est_mu = np.mean(np.array(data))\n est_sigma = np.std(np.array(data))\n np.isclose(est_mu, mu, atol=sigma)\n np.isclose(est_sigma, sigma, atol=sigma)\n" ]
[ [ "numpy.all", "numpy.array", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajitesh-30/ImageTagging
[ "e71cd83cd0c189ec2f64ed3219a89c7f4d1693d4" ]
[ "Loading Images/test1.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 28 14:14:13 2018\r\n\r\n@author: HP-USER\r\n\"\"\"\r\n\r\nimport os\r\n\r\nroot='G:/INTERNSHIP IIITA/Dataset/NUS-WIDE/Testing_Image/'\r\ndirlist = [ item for item in os.listdir(root) if os.path.isdir(os.path.join(root, item)) ]\r\nprint (dirlist)\r\n\r\n\r\n\r\nimport numpy as np\r\ndirlist = np.sort(dirlist)\r\n\r\nfrom keras.preprocessing.image import load_img, img_to_array\r\nx_train = []\r\nfor dirs in range(len(dirlist)):\r\n for image_path in os.listdir(root+dirlist[dirs]):\r\n if not image_path.endswith('.db'):\r\n img = load_img(\r\n root+str(dirlist[dirs])+'/'+str(image_path),\r\n grayscale=False,\r\n target_size=(256,256),\r\n interpolation='nearest'\r\n )\r\n img = img_to_array(img, data_format=None)\r\n print(root+str(dirlist[dirs])+'/'+str(image_path))\r\n x_train.append(img)\r\n\r\nprint('done')\r\n\r\n\r\nx_train = np.array(x_train)\r\n\r\nprint(x_train.shape)\r\n\r\nx_train=x_train.astype('float32')\r\nx_train=x_train/255.0\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.subplot(111)\r\nplt.imshow(x_train[5].reshape(256,256,3))\r\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.subplot", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nicolas-suarez/urban_emissions
[ "6a16bc52ad7c6fc05a583c1cff21ba32f8ad3090" ]
[ "build_dataset.py" ]
[ "import h5py\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport random\nimport utils\n\n\n# Define global means over each channel of the training set. These are\n# estimated means in order to compute channel standard deviations when\n# building the dataset\nGLOBAL_MEANS = np.array([0.26334649324417114, 0.25160321593284607,\n 0.2347201257944107, 0.24427558481693268,\n 0.33065736293792725, 0.23822002112865448,\n 0.1870376616716385])\n\n\ndef create_data_split(valid_rows_bool, data_split):\n \"\"\"\n Creates a train/val/test split given a list of sizes for each split.\n :param valid_rows: (np.array) Boolean array indicating images that should\n be included in the train/dev/test split\n :param data_split: (list) containing the % of each split in the order\n [size_train, size_val, size_test]\n :return: a tuple (split_IDs, split_sizes) containing an array indicating\n to which split a row belongs and a dictionary of sizes for each split\n \"\"\"\n # Set seed and verify sat_data_split is appropriate given dataset size\n np.random.seed(42)\n assert abs(sum(data_split) - 1) < 1e-10\n\n # Get valid image IDs\n valid_imgs_IDs = np.arange(0, valid_rows_bool.shape[0])[valid_rows_bool]\n\n # Get sizes for each split\n m = valid_rows_bool.sum()\n train_size = int(m * data_split[0])\n val_size = int(m * data_split[1])\n\n # Create permutation and indexes for each split\n perm = np.random.permutation(valid_imgs_IDs)\n train_id, val_id, test_id = perm[: train_size], \\\n perm[train_size: train_size + val_size], \\\n perm[train_size + val_size:]\n\n # Reorder\n split_IDs = np.zeros(valid_rows_bool.shape, dtype='object')\n split_IDs[train_id] = 'train'\n split_IDs[val_id] = 'val'\n split_IDs[test_id] = 'test'\n\n # Get split sizes\n split_sizes = {'train': train_size, 'val': val_size,\n 'test': m - train_size- val_size}\n\n return split_IDs, split_sizes\n\n\ndef preprocess_label_data(base_labels_file, output_variable):\n \"\"\"\n Prepares the labels data frame for processing\n :param base_labels_file: (str) location of labels data\n :param output_variable: (str) selected model output variable\n :return: (DataFrame)\n \"\"\"\n # Load Labels file\n try:\n with open(base_labels_file, 'r') as labels_file:\n label_data = pd.read_csv(\n labels_file, dtype={\n 'Unique_ID': 'string', 'Location_type': 'string',\n 'Zipcode': 'string', 'County': 'string', 'type': 'string',\n 'measurement': 'string', 'value': float, 'lat': float,\n 'lon': float, 'AQI_level': 'string'})\n except FileNotFoundError:\n print('[ERROR] Labels file not found.')\n\n # Filter label data for selected variable\n label_data = label_data[label_data['type'] == output_variable]\n\n # Define label and convert to int for AQI (hdf5 requires this)\n if 'AQI' in output_variable:\n label_data['label'] = label_data['AQI_level']\n aqi_dict = {'good': 0, 'moderate': 1, 'unhealthy_sensitive_groups': 2,\n 'unhealthy': 3, 'very_unhealthy': 4, 'hazardous': 5}\n label_data = label_data.replace({'label': aqi_dict})\n else:\n label_data['label'] = label_data['value']\n\n return label_data[['Unique_ID', 'label']]\n\n\ndef process_sat_data(base_image_file, base_id_file, base_labels_file,\n data_dir, output_variable, data_split):\n \"\"\"\n Pre-processes Satellite Data and creates train/val/test splits.\n :param base_labels_file: (str) Path to the satellite images\n :param base_id_file: (str) Path to the image identifiers and status\n :param base_image_file: (str) Path to the unique_ID labels\n :param data_dir: (str) Path to where train, val, 
test files will be\n exported\n :param output_variable: (str) Selected output variable\n :param data_split: (list) containing the % of each split in the order\n [size_train, size_val, size_test]\n :return: void\n \"\"\"\n # Load image file\n try:\n db = h5py.File(base_image_file, 'r')\n image_data = db['imagery_no_mask']\n except FileNotFoundError:\n print('[ERROR] Dataset not found.')\n\n # Load Identifier file\n try:\n with open(base_id_file, 'rb') as id_file:\n id_data = pickle.load(id_file)\n except FileNotFoundError:\n print('[ERROR] Image IDs file not found.')\n\n # Join id_data and label_data\n label_data = preprocess_label_data(base_labels_file, output_variable)\n id_data = id_data.merge(\n label_data, on='Unique_ID', how='left', validate='one_to_one')\n id_data = id_data[['Unique_ID', 'task_status', 'label']]\n\n # Get rows whose task was completed and have a label\n valid_imgs = np.array((id_data['task_status'] == 'COMPLETED') &\n (id_data['label'].notna()))\n\n # Create train/dev/test splits\n split_IDs, split_sizes = create_data_split(valid_imgs, data_split)\n\n # Gather image dimensions and ensure dimension ordering is (n_W, n_H, n_C)\n n_H = image_data.shape[1]\n n_C = image_data.shape[2]\n\n # Ensure dimension ordering is in line with PyTorch\n if n_C > n_H:\n print('[ERROR] Channel ordering is incorrect.')\n assert 0\n\n # Create path for data directories\n main_path = os.path.join(data_dir, output_variable)\n if not os.path.exists(main_path):\n os.mkdir(main_path)\n\n # Open datasets\n path = os.path.join(main_path, 'sat_{}.hdf5')\n train_db = h5py.File(path.format('train'), \"w\")\n train_db.create_dataset(name='X', shape=(split_sizes['train'], n_H, n_H, n_C), dtype='f')\n train_db.create_dataset(name='Y', shape=(split_sizes['train'], 1), dtype='f')\n\n val_db = h5py.File(path.format('dev'), \"w\")\n val_db.create_dataset(name='X', shape=(split_sizes['val'], n_H, n_H, n_C), dtype='f')\n val_db.create_dataset(name='Y', shape=(split_sizes['val'], 1), dtype='f')\n\n test_db = h5py.File(path.format('test'), \"w\")\n test_db.create_dataset(name='X', shape=(split_sizes['test'], n_H, n_H, n_C), dtype='f')\n test_db.create_dataset(name='Y', shape=(split_sizes['test'], 1), dtype='f')\n\n # Set up arrays to compute normalization metrics\n band_means = np.zeros((n_C,))\n band_sds = np.zeros((n_C,))\n\n # Loop over each image, identify valid images and create train/dev/test\n train_counter, val_counter, test_counter = 0, 0, 0\n for i in range(image_data.shape[3]):\n # Identify if image is valid\n if valid_imgs[i]:\n if i % 500 == 0:\n print('[INFO] Processing image {}/{}'.format(i, image_data.shape[3]))\n\n # Grab image\n img = np.array(image_data[:, :, :, i])\n label = id_data.iloc[i]['label']\n assert(img.shape == (n_H, n_H, n_C))\n\n # Identify which dataset the image belongs to and write\n if split_IDs[i] == 'train':\n train_db['X'][train_counter] = img\n train_db['Y'][train_counter] = label\n train_counter += 1\n\n # Add to normalization metrics\n img_means = np.mean(img, axis=(0, 1))\n band_means += img_means / split_sizes['train']\n\n img_sds = (img_means - GLOBAL_MEANS)**2\n band_sds += img_sds / split_sizes['train']\n\n elif split_IDs[i] == 'val':\n val_db['X'][val_counter] = img\n val_db['Y'][val_counter] = label\n val_counter += 1\n elif split_IDs[i] == 'test':\n test_db['X'][test_counter] = img\n test_db['Y'][test_counter] = label\n test_counter += 1\n else:\n assert 0\n\n # Close datasets\n train_db.close()\n val_db.close()\n test_db.close()\n db.close()\n\n # Verify 
that all images were added to databases\n print('Number of train images loaded: ', train_counter)\n print('Number of val images loaded: ', val_counter)\n print('Number of test images loaded: ', test_counter)\n\n # Obtain means and sds for each band in the train set (for normalization)\n band_means = {'band_{}'.format(i): band_mean.item() for (i, band_mean) in\n enumerate(np.nditer(band_means))}\n band_sds = np.sqrt(band_sds)\n band_sds = {'band_{}'.format(i): band_sd.item() for (i, band_sd) in\n enumerate(np.nditer(band_sds))}\n utils.save_dict(band_means, os.path.join(main_path, 'band_means.json'))\n utils.save_dict(band_sds, os.path.join(main_path, 'band_sds.json'))\n" ]
[ [ "pandas.read_csv", "numpy.sqrt", "numpy.random.seed", "numpy.nditer", "numpy.arange", "numpy.random.permutation", "numpy.mean", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
chrisconlon/DiversionReplication
[ "12d6f83a8f4621323e1542167f375d8dcf76b80a" ]
[ "code/tab34_params.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 27 15:07:45 2020\n\n@author: chitra\n\nMake Table 3: Parameter Estimates\nNevo Results\nBLP Results\n\nMake Table 4: PostEstimation Information for Table 2\n\nFirst, run_blp_cases and run_nevo_cases \nThen this file\n\n\"\"\"\n\nimport numpy as np\nimport pyblp\nimport pandas as pd\nimport pathlib\n\nfrom tabulate import tabulate\n\nmain_dir = pathlib.Path.cwd().parent\ndata_dir = main_dir / 'data'\ndict_dir = data_dir / 'dict'\nraw_dir = data_dir / 'raw'\n\ntab_dir = main_dir / 'tables'\n\npyblp.options.digits = 2\npyblp.options.verbose = False\n\nfrom aux_table_functions import load_pyblp_dict, get_params_nevo, get_params_blp, outreg\n\n\nBESTPRACTICES_TEXT = 'Best Practices'\nNOCONS_TEXT = '$\\\\Sigma_{\\\\text{cons}} = \\\\pi_{\\\\text{cons}} = 0 $'\nNOALPHA_TEXT = '$\\\\Sigma_p = \\\\pi_p = 0 $'\nTRIPLE_TEXT = 'Rescaled Shares'\nNL_TEXT = 'Nested Logit'\n\n# %%\n\n# get the BLP Results Back\n# requires only the filename, not the problem!\n\n# read in the data for use in the weights only\nproduct_data = pd.read_parquet(raw_dir / 'blp_product_data_opt.parquet')\n\nblp_w = product_data.shares.values[:,None]\n\n\n\nfilename_blp_base = dict_dir / 'blp_results_base.npy'\nfilename_blp_nocons = dict_dir / 'blp_results_noconst.npy'\nfilename_blp_noalpha = dict_dir / 'blp_results_noalpha.npy'\nfilename_blp_triple = dict_dir / 'blp_results_triple.npy'\nfilename_blp_nl = dict_dir / 'blp_results_nl.npy'\n\n\nblp_base_dict = load_pyblp_dict(filename_blp_base)\nblp_base = get_params_blp(blp_base_dict,blp_w)\n\nblp_nocons_dict = load_pyblp_dict(filename_blp_nocons)\nblp_nocons = get_params_blp(blp_nocons_dict,blp_w)\n\nblp_noalpha_dict = load_pyblp_dict(filename_blp_noalpha)\nblp_noalpha = get_params_blp(blp_noalpha_dict,blp_w)\n\nblp_triple_dict = load_pyblp_dict(filename_blp_triple)\nblp_triple = get_params_blp(blp_triple_dict,blp_w)\n\nblp_nl_dict = load_pyblp_dict(filename_blp_nl)\nblp_nl = get_params_blp(blp_nl_dict,blp_w)\n\n\n\nblp_table=pd.concat([pd.Series(blp_base),\n pd.Series(blp_nocons),\n pd.Series(blp_noalpha),\n pd.Series(blp_triple),\n pd.Series(blp_nl)\n ],axis=1)\n\nblp_table.columns = [BESTPRACTICES_TEXT, NOCONS_TEXT, NOALPHA_TEXT, TRIPLE_TEXT, NL_TEXT]\n\n\nblp_table = blp_table.fillna(0)\n# %%\n# get the Nevo Results Back\n# not using the interesting saving for these\n# to keep it simpler\nfilename_nevo_base = dict_dir / 'nevo_results_base.npy'\nfilename_nevo_nocons = dict_dir / 'nevo_results_noconst.npy'\nfilename_nevo_noalpha = dict_dir / 'nevo_results_noalpha.npy'\nfilename_nevo_triple = dict_dir / 'nevo_results_triple.npy'\nfilename_nevo_nl = dict_dir / 'nevo_results_nl.npy'\n\n\nagent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)\nproduct_data = pd.read_parquet(raw_dir / 'nevo_product_data_opt.parquet')\nnevo_w = product_data.shares.values[:,None]\n\nnevo_base_dict = load_pyblp_dict(filename_nevo_base)\nnevo_base = get_params_nevo(nevo_base_dict,nevo_w)\n\nnevo_nocons_dict = load_pyblp_dict(filename_nevo_nocons)\nnevo_nocons = get_params_nevo(nevo_nocons_dict,nevo_w)\n\nnevo_noalpha_dict = load_pyblp_dict(filename_nevo_noalpha)\nnevo_noalpha = get_params_nevo(nevo_noalpha_dict,nevo_w)\n\nnevo_triple_dict = load_pyblp_dict(filename_nevo_triple)\nnevo_triple = get_params_nevo(nevo_triple_dict,nevo_w)\n\nnevo_nl_dict = load_pyblp_dict(filename_nevo_nl)\nnevo_nl = get_params_nevo(nevo_nl_dict,nevo_w)\n\nnevo_table=pd.concat([pd.Series(nevo_base),\n pd.Series(nevo_nocons),\n 
pd.Series(nevo_noalpha),\n pd.Series(nevo_triple),\n pd.Series(nevo_nl)\n ],axis=1)\n\nnevo_table.columns = [BESTPRACTICES_TEXT, NOCONS_TEXT, NOALPHA_TEXT, TRIPLE_TEXT,\n NL_TEXT]\n\nnevo_table = nevo_table.round(decimals=5)\n\n\n\n# %%\n# Combine the Nevo and the BLP Tables\n# First, make the postestimation tale because that is much easier\nstats_order = ['median_own_elas',\n 'median_agg_elas',\n 'median_og_div',\n 'mean_top5_div',\n 'mean_markup',\n 'median_cs'\n ]\n\nnevo_stats = pd.DataFrame(nevo_table.loc[stats_order])\nblp_stats = pd.DataFrame(blp_table.loc[stats_order])\n\nnevo_stats.index = ['\\\\midrule \\\\textbf{Nevo} \\\\\\ \\\\midrule Median Own-Elasticity', 'Median Aggregate Elasticity',\n 'Median Outside-Good Diversion', 'Mean Top 5 Diversion',\n 'Mean Markup', 'Median Consumer Surplus'\n ]\n\nblp_stats.index = ['\\\\textbf{BLP} \\\\\\ \\\\midrule Median Own-Elasticity', 'Median Aggregate Elasticity',\n 'Median Outside-Good Diversion', 'Mean Top 5 Diversion',\n 'Mean Markup', 'Median Consumer Surplus'\n ]\n\n\n# make this into a nice two-panel NEVO and BLP Table\n# in the style of Table 5\n\n# stack the two\nfull_stats = blp_stats.append(nevo_stats)\n\ntab_outreg_stats = tabulate(full_stats, tablefmt='latex_raw', \n floatfmt='0.3f',\n headers=nevo_table.columns)\ntab_outreg_stats = tab_outreg_stats.replace('\\hline','\\midrule')\n\nfile_outreg_stats = tab_dir / 'tab4_stats.tex'\n\n\nprint(tab_outreg_stats)\nwith open(file_outreg_stats, 'w') as file:\n file.write(tab_outreg_stats)\n\n\n# %%\n# Create the parameters table\n# also a two-panel job\nfile_outreg_params = tab_dir / 'tab3_params.tex'\n\n\n# using outreg to convert everything like we did earlier\nblp_param_order = ['price_term', 'sigma_cons', 'sigma_hpwt', 'sigma_air', 'sigma_mpd',\n 'sigma_size' ]\n\nblp_se_order = [ 'price_se', 'sigma_cons_se', 'sigma_hpwt_se', 'sigma_air_se', \n 'sigma_mpd_se', 'sigma_size_se']\n\nblp_names = [\n '\\\\hline \\\\textbf{BLP} \\\\\\ \\hline $\\\\text{price}/\\\\text{inc}$',\n '$\\\\sigma_{\\\\text{cons}}$', \n '$\\\\sigma_{\\\\text{HP/weight}}$', \n '$\\\\sigma_{\\\\text{air}}$', \n '$\\\\sigma_{\\\\text{MP\\$}}$', \n '$\\\\sigma_{\\\\text{size}}$'\n ]\n\nparamcols = [BESTPRACTICES_TEXT, NOCONS_TEXT, NOALPHA_TEXT, TRIPLE_TEXT]\n\nblp_table = blp_table[paramcols]\nblp_params = blp_table.loc[blp_param_order]\nblp_ses = blp_table.loc[blp_se_order]\n\nblp_outreg = outreg(beta = blp_params, sigma = blp_ses, names = blp_names)\nblp_outreg = blp_outreg[blp_table.columns]\n\n\n\n## now Nevo Table\nnevo_param_order = ['price_coeff', 'sigma_price', 'sigma_cons', 'sigma_sugar',\n 'sigma_mushy', 'pi_price_inc', 'pi_price_inc2',\n 'pi_price_child',\n 'pi_cons_inc', 'pi_cons_age', 'pi_sugar_inc',\n 'pi_sugar_age', 'pi_mushy_inc', 'pi_mushy_age']\n\nnevo_se_order = ['price_se', 'sigma_price_se', 'sigma_cons_se', 'sigma_sugar_se',\n 'sigma_mushy_se', 'pi_price_inc_se', 'pi_price_inc2_se',\n 'pi_price_child_se',\n 'pi_cons_inc_se', 'pi_cons_age_se', 'pi_sugar_inc_se',\n 'pi_sugar_age_se', 'pi_mushy_inc_se', 'pi_mushy_age_se']\n\nnevo_names = ['\\\\hline \\\\textbf{Nevo} \\\\\\ \\\\hline $\\\\alpha_{\\\\text{price}}$', \n '$\\\\sigma_{\\\\text{price}}$', \n '$\\\\sigma_{\\\\text{cons}}$',\n '$\\\\sigma_{\\\\text{sugar}}$',\n '$\\\\sigma_{\\\\text{mushy}}$',\n '$\\\\pi_{\\\\text{price} \\\\times \\\\text{inc}}$', \n '$\\\\pi_{\\\\text{price} \\\\times \\\\text{inc}^2}$',\n '$\\\\pi_{\\\\text{price} \\\\times \\\\text{kids}}$',\n '$\\\\pi_{\\\\text{cons} \\\\times \\\\text{inc}}$',\n 
'$\\\\pi_{\\\\text{cons} \\\\times \\\\text{age}}$',\n '$\\\\pi_{\\\\text{sugar} \\\\times \\\\text{inc}}$',\n '$\\\\pi_{\\\\text{sugar} \\\\times \\\\text{age}}$',\n '$\\\\pi_{\\\\text{mushy} \\\\times \\\\text{inc}}$',\n '$\\\\pi_{\\\\text{mushy} \\\\times \\\\text{age}}$']\n\nnevo_table = nevo_table[paramcols]\nnevo_params = nevo_table.loc[nevo_param_order]\nnevo_ses = nevo_table.loc[nevo_se_order]\n\nnevo_outreg = outreg(beta = nevo_params, sigma = nevo_ses, names = nevo_names)\nnevo_outreg = nevo_outreg[nevo_table.columns]\n\n\n\nnevo_outreg.columns = paramcols\nblp_outreg.columns = paramcols\n\n\n# %%\n# stack the tables\n\nfull_table = blp_outreg.append(nevo_outreg)\ncolnames = ['', BESTPRACTICES_TEXT, NOCONS_TEXT, NOALPHA_TEXT, TRIPLE_TEXT]\n\ntab_outreg = tabulate(full_table, tablefmt='latex_raw',floatfmt='0.3f', headers=colnames)\n\n# booktabby, not perfect though\ntab_outreg = tab_outreg.replace('\\\\hline','\\\\midrule')\n\nprint(tab_outreg)\n\nwith open(file_outreg_params, 'w') as file:\n file.write(tab_outreg)\n \n" ]
[ [ "pandas.read_parquet", "pandas.read_csv", "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
jw176/img2ascii
[ "61814be2103aa6f929b1dfc020b0b1be3697d903" ]
[ "main.py" ]
[ "import shutil\nfrom collections import namedtuple\nimport itertools\nimport time\nimport argparse\n\nimport numpy as np\nfrom PIL import ImageFont, Image, ImageDraw, ImageEnhance\nfrom colorama import Fore, init, Back, Style\nfrom numba import njit\n\n\n\ninit()\n\nColor = namedtuple('Color', ['name', 'fore', 'back'])\ncolours = [\n Color(name=\"black\", fore=Fore.BLACK, back=Back.BLACK),\n Color(name=\"red\", fore=Fore.RED, back=Back.RED),\n Color(name=\"green\", fore=Fore.GREEN, back=Back.GREEN),\n Color(name=\"yellow\", fore=Fore.YELLOW, back=Back.YELLOW),\n Color(name=\"blue\", fore=Fore.BLUE, back=Back.BLUE),\n Color(name=\"magenta\", fore=Fore.MAGENTA, back=Back.MAGENTA),\n Color(name=\"cyan\", fore=Fore.CYAN, back=Back.CYAN),\n Color(name=\"white\", fore=Fore.WHITE, back=Back.WHITE)\n]\n\n\ndef get_font_bitmap(colours, ascii_start=32, ascii_stop=127):\n font = ImageFont.truetype(font='fonts/CascadiaMono.ttf', size=16)\n bitmaps = np.zeros((ascii_stop - ascii_start, len(colours), 16, 9, 3))\n for i in range(ascii_start, ascii_stop):\n for j, (fore, back) in enumerate(colours):\n chr_bitmap = get_char_bitmap(chr(i), font, fore, back)\n bitmaps[i - ascii_start, j] = chr_bitmap\n return bitmaps\n\n\ndef get_char_bitmap(char, font, fore_color, back_color):\n img = Image.new(mode='RGB', size=(9, 16), color=back_color.name)\n draw = ImageDraw.Draw(im=img)\n draw.text(xy=(5, 8), text=char, font=font, fill=fore_color.name, anchor='mm')\n return np.array(img)\n\n\ndef load_img(filename, x_gap=4, y_gap=10, chr_width=9, chr_height=16, target_width=None, target_height=None):\n img = Image.open(filename)\n img = img.convert(mode=\"RGB\")\n\n brightness = ImageEnhance.Brightness(img)\n img = brightness.enhance(0.7)\n\n contrast = ImageEnhance.Contrast(img)\n img = contrast.enhance(1.5)\n # img.show()\n\n width, height = img.size\n\n if target_width:\n factor = target_width / width\n width = round(target_width)\n height = round(factor * height)\n\n if target_height:\n factor = target_height / height\n height = round(target_height)\n width = round(factor * width)\n\n new_width = (width // (x_gap + chr_width)) * (x_gap + chr_width)\n new_height = (height // (y_gap + chr_height)) * (y_gap + chr_height)\n img = img.resize((new_width, new_height))\n return np.array(img)\n\n\n@njit()\ndef get_ascii_representation(img, chr_bitmaps, x_gap=4, y_gap=10, chr_width=9, chr_height=16):\n ascii_img = []\n\n for y in range(0, img.shape[0], (y_gap + chr_height)):\n ascii_row = []\n for x in range(0, img.shape[1], (x_gap + chr_width)):\n min_value = np.inf\n min_index = (0, 0)\n for index, character_bitmaps in enumerate(chr_bitmaps):\n for c_index, coloured_bitmap in enumerate(character_bitmaps):\n # val = np.abs(img[])\n img_section = img[y:(y + chr_height), x:(x + chr_width)]\n val = np.sum(np.abs(img_section - coloured_bitmap))\n\n if val < min_value:\n min_value = val\n min_index = (index, c_index)\n\n ascii_row.append(min_index)\n ascii_img.append(ascii_row)\n return ascii_img\n\n\ndef get_string_representation(index_data, ascii_start, colour_combinations):\n str_img = []\n for row in index_data:\n str_row = []\n for ascii_index, c_index in row:\n fore, back = colour_combinations[c_index]\n character = chr(ascii_index + ascii_start)\n if fore.name != \"white\":\n character = fore.fore + character + Fore.RESET\n if back.name != \"black\":\n character = back.back + character + Back.RESET\n str_row.append(character)\n str_img.append(str_row)\n return str_img\n\n\ndef main(ascii_start, ascii_stop, x_gap, y_gap, 
chr_width, chr_height, input_img, width=None, height=None,\n output_file=None, to_print=True, coloured_foreground=False, light_background=False, coloured_background=False):\n\n colour_combinations = list(itertools.combinations(colours, 2)) + list(itertools.combinations(colours[::-1], 2))\n colour_combinations = list(set(colour_combinations))\n\n if light_background:\n colour_combinations = list(filter(lambda x: x[1].name == \"white\", colour_combinations))\n elif not coloured_background:\n colour_combinations = list(filter(lambda x: x[1].name == \"black\", colour_combinations))\n\n if not coloured_foreground:\n text_colour = \"white\" if not light_background else \"black\"\n colour_combinations = list(filter(lambda x: x[0].name == text_colour, colour_combinations))\n\n chr_bitmaps = get_font_bitmap(colour_combinations, ascii_start=ascii_start, ascii_stop=ascii_stop)\n\n target_height, target_width = None, None\n if width is None and height is None:\n terminal_size = shutil.get_terminal_size((80, 20))\n target_height = (terminal_size.lines - 1) * (y_gap + chr_height)\n else:\n target_width = width\n target_height = height\n\n img = load_img(input_img, x_gap=x_gap, y_gap=y_gap, chr_width=chr_width, chr_height=chr_height,\n target_width=target_width, target_height=target_height)\n\n ascii_img = get_ascii_representation(img, chr_bitmaps, x_gap=x_gap, y_gap=y_gap, chr_width=chr_width,\n chr_height=chr_height)\n\n str_img = get_string_representation(ascii_img, ascii_start, colour_combinations)\n\n if to_print:\n print(\"\\n\".join(\"\".join(row) for row in str_img))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Convert an image into ascii art.\")\n parser.add_argument('source', type=str, help=\"the filename of the image to convert to ascii\")\n parser.add_argument('--ascii-start', '--start', type=int, default=32,\n help=\"The starting value of the ascii characters\")\n parser.add_argument('--ascii-stop', '--stop', type=int, default=126, help=\"The end value of the ascii characters\")\n parser.add_argument('--output', '-o', type=str, default=32, help=\"Optional output file to save as image\")\n\n parser.add_argument('--coloured-foreground', '-cf', dest='coloured_foreground', action='store_true',\n help=\"Use coloured printing to the terminal\")\n\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('--colour-background', '-cb', dest='coloured_background', action='store_true',\n help=\"Use coloured background when printing\")\n group.add_argument('--light-background', '-lb', dest='light_background', action='store_true',\n help=\"Use a light background when printing\")\n group.add_argument('--dark-background', '-db', dest='dark_background', action='store_true',\n help=\"Use a dark background when printing\")\n\n x_gap = 4\n y_gap = 10\n chr_width = 9\n chr_height = 16\n\n args = parser.parse_args()\n print(args)\n\n main(args.ascii_start, args.ascii_stop, x_gap, y_gap, chr_width, chr_height, args.source,\n coloured_foreground=args.coloured_foreground, light_background=args.light_background,\n coloured_background=args.coloured_background)\n" ]
[ [ "numpy.array", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Soyuen/age_model_tfapi
[ "2ed7825f8c6aa800766c3efa0495146f4eaa8b3c" ]
[ "training/aec_model.py" ]
[ "import logging\r\nimport sys\r\nimport numpy as np\r\nfrom tensorflow.keras import Input, Model\r\nfrom tensorflow.keras.layers import Activation, Dense, Flatten, Dropout, Lambda,Conv2D,AveragePooling2D,BatchNormalization\r\nimport tensorflow as tf\r\nfrom generators import ker_init\r\nimport tensorflow.compat.v1 as tfc\r\nconfig = tfc.ConfigProto()\r\nconfig.gpu_options.allow_growth=True\r\nsess = tfc.Session(config=config)\r\nsys.setrecursionlimit(2 ** 20)\r\nnp.random.seed(2 ** 10)\r\ndef conv(Input,num):\r\n i = 1\r\n while i <=num:\r\n Input = Conv2D(16, (3, 3),padding='same')(Input)\r\n Input = BatchNormalization()(Input)\r\n Input = Activation(\"relu\")(Input)\r\n i+=1\r\n Input = AveragePooling2D((2, 2))(Input)\r\n return Input\r\ndef stage_layer(Input,units1,units2,units3):\r\n Input=Dense(units1,kernel_initializer='he_uniform',activation='relu')(Input)\r\n Input=Dense(units2,kernel_initializer='he_uniform',activation='relu')(Input)\r\n Input=Dense(units3,kernel_initializer='he_uniform',activation='softmax')(Input)\r\n return Input\r\ndef pred_4(x):\r\n a4 = x[0][:,0]*0\r\n a4 = x[0][:,0]\r\n for j in range(0,5):\r\n a4 = a4+(j*4)*x[0][:,j] \r\n a4 = tf.expand_dims(a4,-1)\r\n a20 = x[1][:,0]*0\r\n a20 = x[1][:,0]\r\n for k in range(0,5):\r\n a20 = a20+(k*20)*x[1][:,k] \r\n a20 = tf.expand_dims(a20,-1)\r\n a20_4=a4+a20\r\n return a20_4\r\ndef merge_age(x):\r\n a = x[0][:,0]*0\r\n b = x[0][:,0]*0\r\n c = x[0][:,0]*0\r\n a = x[0][:,0]\r\n b = x[1][:,0]\r\n c = x[2][:,0]\r\n for i in range(0,4):\r\n a = a+(i)*x[0][:,i] \r\n for j in range(0,5):\r\n b = b+(j*4)*x[1][:,j] \r\n for k in range(0,5):\r\n c = c+(k*20)*x[2][:,k] \r\n a = tf.expand_dims(a,-1)\r\n b = tf.expand_dims(b,-1)\r\n c = tf.expand_dims(c,-1)\r\n ori_age=a+b+c\r\n return ori_age\r\ndef AEC_model(input_shape):\r\n logging.debug(\"Creating model...\")\r\n \r\n Inputs = Input(shape=input_shape)\r\n sharp = Conv2D(3, (3, 3),padding='same',kernel_initializer=ker_init)(Inputs)\r\n \r\n layer = conv(sharp,3)\r\n layer = conv(layer,1)\r\n layer = conv(layer,2)\r\n layer = conv(layer,2)\r\n layer = Conv2D(10,(1,1),kernel_initializer='he_uniform',activation='relu')(layer)\r\n layer = Flatten()(layer)\r\n layer = Dropout(0.2)(layer)\r\n stage3 = stage_layer(layer,5,10,5)\r\n stage2 = stage_layer(layer,5,10,5)\r\n stage1 = stage_layer(layer,4,8,4)\r\n pred_a = Lambda(merge_age,output_shape=(1,),name='pred_a')([stage1,stage2,stage3])\r\n pre_cod = Lambda(merge_age,output_shape=(1,),name='pre_cod')([stage1,stage2,stage3])\r\n pre_4 = Lambda(pred_4,output_shape=(1,),name='pre_4')([stage2,stage3])\r\n pre_1 = Lambda(merge_age,output_shape=(1,),name='pre_1')([stage1,stage2,stage3])\r\n model = Model(inputs=Inputs, outputs=[pred_a,pre_4,pre_1,pre_cod])\r\n return model" ]
[ [ "tensorflow.keras.layers.AveragePooling2D", "tensorflow.compat.v1.ConfigProto", "tensorflow.keras.layers.Activation", "numpy.random.seed", "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.expand_dims", "tensorflow.keras.Model", "tensorflow.compat.v1.Session", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
mvdelt/coco-analyze
[ "38354dad9cdc838a58a10606eed8cf080ec00009" ]
[ "analysisAPI/utilities.py" ]
[ "## imports\nimport os, sys, time, json\nimport numpy as np\nfrom colour import Color\nimport matplotlib.pyplot as plt\nimport matplotlib.path as mplPath\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\n# from scipy.misc import imresize # i. 여기서 imresize 쓰지도 않는데? 근데문제는 scipy 버전 1.3인가부터는 imresize 없어졌음. 그래서 자꾸 임포트시 에러남. 이거대신 Pillow를 사용하란 메시지를 공식 소스코드에서 볼수있음. \nimport skimage.io as io\n\n\"\"\"\nUtility functions\n\"\"\"\nnum_kpts = 6 # i. 내 6개키포인트에 맞춰 수정.\noks = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]\nsqrt_neg_log_oks = np.sqrt(-2*np.log(oks))\n# sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n# sigmas = np.array([0.025, 0.025, 0.025, 0.025, 0.025, 0.025]) # i. 내 6개키포인트에 맞춰 수정. # i. ->임의로 이 값 사용했었으나, 아래처럼 내가 직접 계산한값으로 바꿔줌.\n# i. 2020.06.24.) 36개 PA방사선사진 두번 어노테이션해서 OKS sigmas 계산해봄: [0.08953876 0.08166177 0.0193918 0.01967773 0.02095149 0.02738186]\nsigmas = np.array([0.08953876, 0.08166177, 0.0193918, 0.01967773, 0.02095149, 0.02738186])\n\n\nvariances = (sigmas * 2)**2\n# skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9],\n# [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]\nskeleton = [[1,3],[2,4],[1,5],[2,6]] # i. 내 6개키포인트에 맞춰 수정.\n# colors = {(0,1): '#cd87ff', (0,2): '#cd87ff', (1,2): '#cd87ff', (1,3): '#cd87ff', (2,4): '#cd87ff',\n# (3,5): '#74c8f9', (4,6): '#74c8f9', (5,6): '#feff95', (5,7): '#74c8f9', (5,11): '#feff95',\n# (6,8): '#74c8f9', (6,12): '#feff95',(7,9): '#74c8f9', (8,10): '#74c8f9',(11,12): '#feff95',\n# (13,11): '#a2805b',(14,12): '#a2805b',(15,13): '#a2805b',(16,14): '#a2805b'}\ncolors = {(0,2): '#cd87ff', (1,3): '#cd87ff', (0,4): '#cd87ff', (1,5): '#74c8f9'} # i. 
내 6개키포인트에 맞춰 수정.\n\ndef show_dets(coco_dts, coco_gts, img_info, save_path=None):\n if len(coco_dts) == 0 and len(coco_gts)==0:\n return 0\n\n I = io.imread(img_info['coco_url'])\n plt.figure(figsize=(10,10)); plt.axis('off')\n plt.imshow(I)\n ax = plt.gca(); ax.set_autoscale_on(False)\n polygons = []; color = []\n\n for ann in coco_gts:\n c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n if 'keypoints' in ann and type(ann['keypoints']) == list:\n # turn skeleton into zero-based index\n sks = np.array(skeleton)-1\n kp = np.array(ann['keypoints'])\n x = kp[0::3]; y = kp[1::3]; v = kp[2::3]\n for sk in sks:\n if np.all(v[sk]>0):\n plt.plot(x[sk],y[sk], linewidth=3, color='green')\n\n plt.plot(x[v>0], y[v>0],'o',markersize=2, markerfacecolor='green',\n markeredgecolor='k',markeredgewidth=3)\n plt.plot(x[v>1], y[v>1],'o',markersize=2, markerfacecolor='green',\n markeredgecolor='green', markeredgewidth=2)\n\n for x1, y1, sigma1 in zip(x[v>0], y[v>0], sigmas[v>0]):\n r = sigma1 * (np.sqrt(ann[\"area\"])+np.spacing(1))\n circle = plt.Circle((x1,y1), sqrt_neg_log_oks[0]*r, fc=(1,0,0,0.4),ec='k')\n ax.add_patch(circle)\n for a1 in sqrt_neg_log_oks[1:]:\n circle = plt.Circle((x1,y1), a1*r, fc=(0,0,0,0),ec='k')\n ax.add_patch(circle)\n\n if len(coco_dts)==0 and len(coco_gts)==1:\n bbox = ann['bbox']\n rect = plt.Rectangle((bbox[0],bbox[1]),bbox[2],bbox[3],fill=False,edgecolor=[1, .6, 0],linewidth=3)\n ax.add_patch(rect)\n title = \"[%d][%d][%d]\"%(coco_gts[0]['image_id'],coco_gts[0]['id'],coco_gts[0]['num_keypoints'])\n plt.title(title,fontsize=20)\n\n for ann in coco_dts:\n c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n sks = np.array(skeleton)-1\n kp = np.array(ann['keypoints'])\n x = kp[0::3]; y = kp[1::3]; v = kp[2::3]\n for sk in sks:\n plt.plot(x[sk],y[sk], linewidth=3, color=colors[sk[0],sk[1]])\n\n for kk in range(num_kpts): # i. 
17로 돼있었는데 변수이용하도록 수정.\n # if kk in [1,3,5,7,9,11,13,15]:\n # plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='r',\n # markeredgecolor='r', markeredgewidth=3)\n # elif kk in [2,4,6,8,10,12,14,16]:\n # plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='g',\n # markeredgecolor='g', markeredgewidth=3)\n # else:\n # plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='b',\n # markeredgecolor='b', markeredgewidth=3)\n if kk in [1,3,5]:\n plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='r',\n markeredgecolor='r', markeredgewidth=3)\n elif kk in [0,2,4]:\n plt.plot(x[kk], y[kk],'o',markersize=5, markerfacecolor='g',\n markeredgecolor='g', markeredgewidth=3)\n\n\n bbox = ann['bbox']; score = ann['score']\n rect = plt.Rectangle((bbox[0],bbox[1]),bbox[2],bbox[3],fill=False,edgecolor=[1, .6, 0],linewidth=3)\n\n if len(coco_dts)==1:\n if len(coco_gts)==0:\n title = \"[%d][%d][%.3f]\"%(coco_dts[0]['image_id'],coco_dts[0]['id'],coco_dts[0]['score'])\n plt.title(title,fontsize=20)\n\n if len(coco_gts)==1:\n oks = compute_kpts_oks(coco_dts[0]['keypoints'], coco_gts[0]['keypoints'],coco_gts[0]['area'])\n title = \"[%.3f][%.3f][%d][%d][%d]\"%(score,oks,coco_gts[0]['image_id'],coco_gts[0]['id'],coco_dts[0]['id'])\n plt.title(title,fontsize=20)\n\n else:\n ax.annotate(\"[%.3f][%.3f]\"%(score,0), (bbox[0]+bbox[2]/2.0, bbox[1]-5),\n color=[1, .6, 0], weight='bold', fontsize=12, ha='center', va='center')\n ax.add_patch(rect)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if save_path:\n plt.savefig(save_path,bbox_inches='tight',dpi=50)\n else:\n plt.show()\n plt.close()\n\ndef compute_kpts_oks(dt_kpts, gt_kpts, area):\n # this function only works for computing oks with keypoints\n g = np.array(gt_kpts); xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n assert( np.count_nonzero(vg > 0) > 0)\n d = np.array(dt_kpts); xd = d[0::3]; yd = d[1::3]\n\n dx = xd - xg; dy = yd - yg\n e = (dx**2 + dy**2) / variances / (area+np.spacing(1)) / 2\n e=e[vg > 0]\n\n return np.sum(np.exp(-e)) / e.shape[0]\n\ndef compute_oks(dts, gts):\n if len(dts) * len(gts) == 0:\n return np.array([])\n oks_mat = np.zeros((len(dts), len(gts)))\n\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((len(sigmas)))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / variances / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n oks_mat[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return oks_mat\n\ndef compute_iou(bbox_1, bbox_2):\n\n x1_l = bbox_1[0]\n x1_r = bbox_1[0] + bbox_1[2]\n y1_t = bbox_1[1]\n y1_b = bbox_1[1] + bbox_1[3]\n w1 = bbox_1[2]\n h1 = bbox_1[3]\n\n x2_l = bbox_2[0]\n x2_r = bbox_2[0] + bbox_2[2]\n y2_t = bbox_2[1]\n y2_b = bbox_2[1] + bbox_2[3]\n w2 = bbox_2[2]\n h2 = 
bbox_2[3]\n\n xi_l = max(x1_l, x2_l)\n xi_r = min(x1_r, x2_r)\n yi_t = max(y1_t, y2_t)\n yi_b = min(y1_b, y2_b)\n\n width = max(0, xi_r - xi_l)\n height = max(0, yi_b - yi_t)\n a1 = w1 * h1\n a2 = w2 * h2\n\n if float(a1 + a2 - (width * height)) == 0:\n return 0\n else:\n iou = (width * height) / float(a1 + a2 - (width * height))\n\n return iou\n\ndef compute_ious(anns):\n num_boxes = len(anns)\n ious = np.zeros((num_boxes, num_boxes))\n\n for i in range(num_boxes):\n for j in range(i,num_boxes):\n ious[i,j] = compute_iou(anns[i]['bbox'],anns[j]['bbox'])\n if i!=j:\n ious[j,i] = ious[i,j]\n return ious\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.sqrt", "matplotlib.pyplot.plot", "numpy.all", "numpy.max", "numpy.exp", "matplotlib.pyplot.gca", "matplotlib.pyplot.Circle", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "numpy.count_nonzero", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.log", "numpy.spacing", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.Rectangle", "matplotlib.collections.PatchCollection", "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PedroAugustoDev/Algoritmos-em-Python
[ "aae67133c249ca67a597431b3fc6d46d80a54a7e" ]
[ "src/matematica/matrizes/soma.py" ]
[ "# Lib para operações matemáticas mais complexas\nimport numpy as np\n\na = np.matrix([[1,2,3], [4,5,6]])\nb = np.matrix([ [0,0],[2,3], [1,1]])\n\n\nc = np.matrix([[1,1], [1,1]])\n''' \nMATRIX C = |1 1|\n |1 1|\n \n'''\n\n\n\n\ne = np.matrix([[1,2,3], [4,5,6], [7,8,9]])\nf = np.matrix([[1,0,0],[0,1,0], [0,0,1]])\n\n# A função dot serve para somar matrizes\nprint(e.dot(f))\n\n\n\n\n" ]
[ [ "numpy.matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
m3rlin45/pytorch
[ "026cfe85b4f7dc7b63a53b357500014114d749ee" ]
[ "torch/testing/_internal/distributed/distributed_test.py" ]
[ "import copy\nimport itertools\nimport math\nimport os\nimport random\nimport sys\nimport tempfile\nimport time\nimport unittest\nfrom collections import namedtuple\nfrom contextlib import contextmanager, suppress\nfrom datetime import timedelta\nfrom functools import reduce\nfrom typing import Union, NamedTuple, Callable, Any\n\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\nimport torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD\nimport torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\nimport torch.distributed.algorithms.model_averaging.averagers as averagers\nimport torch.distributed.algorithms.model_averaging.utils as model_averaging_utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR\nfrom torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default\nfrom torch.distributed.algorithms.ddp_comm_hooks import (\n quantization as quantization_hooks,\n)\nfrom torch.distributed.distributed_c10d import (\n get_world_size,\n _get_default_group,\n AllreduceOptions,\n GroupMember,\n)\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n TEST_SKIPS,\n initialize_temp_directories,\n cleanup_temp_dir,\n simple_sparse_reduce_tests,\n skip_if_rocm,\n skip_if_small_worldsize,\n skip_if_lt_x_gpu,\n nccl_skip_if_lt_x_gpu,\n skip_if_no_gpu,\n require_n_gpus_for_nccl_backend,\n requires_nccl_version,\n captured_output,\n with_nccl_blocking_wait,\n with_dist_debug_levels,\n verify_ddp_error_logged,\n)\nfrom torch.testing._internal.common_utils import (\n IS_MACOS,\n IS_WINDOWS,\n FILE_SCHEMA,\n IS_FBCODE,\n NO_MULTIPROCESSING_SPAWN,\n)\n\nif not IS_WINDOWS:\n from torch.distributed.optim.functional_sgd import _FunctionalSGD\n\nfrom torch.utils.data.distributed import DistributedSampler\n\ntry:\n import torchvision\n\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\n\nif sys.platform == \"win32\":\n import msvcrt\nelse:\n import fcntl\n\n\nclass Foo:\n def __init__(self, x):\n # Can be tensor or int\n self.x = x\n\n def __eq__(self, other):\n def eq(value, other):\n if isinstance(value, torch.Tensor):\n return torch.equal(value, other)\n return value == other\n\n for attr, value in self.__dict__.items():\n other_value = other.__dict__[attr]\n if not eq(value, other_value):\n return False\n return True\n\n\nf = Foo(10)\nf.bar = 1\n\nfoo_cpu_tensor = Foo(torch.randn(3, 3))\n\n\nCOLLECTIVES_OBJECT_TEST_LIST = [\n {\"key1\": 3, \"key2\": 4, \"key3\": {\"nested\": True}},\n f,\n foo_cpu_tensor,\n \"foo\",\n [1, 2, True, \"string\", [4, 5, \"nested\"]],\n]\n\n# Allowlist of distributed backends where profiling collectives is supported.\nPROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.NCCL,\n dist.Backend.GLOO,\n dist.Backend.MPI,\n]\n\n# Allowlist of distributed backends where profiling is supported with use_cuda=True\nCUDA_PROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.GLOO,\n dist.Backend.MPI,\n dist.Backend.NCCL,\n]\n\n# Allowlist of distributed backends where profiling is supported for p2p ops\nSEND_RECV_PROFILING_SUPPORTED_BACKENDS = [\n dist.Backend.MPI,\n dist.Backend.GLOO,\n dist.Backend.NCCL,\n]\n\n# Dummy NamedTuple data structures to test DDP 
support for NamedTuple types.\nEXPECTED_FIELDS = (\"a\", \"b\")\nTestNamedTupleInput_0 = namedtuple(\"NamedTuple\", EXPECTED_FIELDS)\n\n\nclass TestNamedTupleInput_1(NamedTuple):\n a: torch.tensor\n b: torch.tensor\n\n\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\nBACKEND = os.environ[\"BACKEND\"]\nINIT_METHOD = os.getenv(\"INIT_METHOD\", \"env://\")\n\nDEFAULT_TIMEOUT = 300\nCUSTOMIZED_TIMEOUT = {\"test_DistributedDataParallel\": 500}\n\n\ndef get_profiling_event(postfix, profiler):\n event_list = (\n profiler.events()\n if isinstance(profiler, torch.profiler.profile)\n else profiler.function_events\n )\n return [event for event in event_list if event.name.endswith(postfix)]\n\n\n# Base error message substring on unfinished reductions.\nddp_prev_reduction_unfinished_str = (\n \"Expected to have finished reduction in the prior iteration\"\n)\n# Error message substring when find_unused_parameters=True has not been passed\nddp_recommend_find_unused_params_str = (\n \"passing the keyword argument `find_unused_parameters=True`\"\n)\n# Error message substring when find_unused_parameters=True is enabled\nddp_find_unused_params_enabled_str = \"Since `find_unused_parameters=True` is enabled\"\n# Error message substring for possibility of not all model outputs being used\n# in loss computation\nddp_outputs_not_used_in_loss_str = (\n \"`forward` function outputs participate in calculating loss\"\n)\n# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG\nddp_suggest_debug_mode_str = (\n \"set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL\"\n)\n\n\nclass DDPUnevenTestInput(NamedTuple):\n name: str\n model: nn.Module\n inp: Union[torch.tensor, tuple]\n sync_interval: int\n throw_on_early_termination: bool = False\n hook: Callable = None\n state: Any = None\n\n\nclass _FC2(nn.Module):\n def __init__(self):\n super(_FC2, self).__init__()\n self.fc = nn.Linear(10, 50, bias=True)\n self.fc.bias.requires_grad = False\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = _FC2()\n self.fc3 = nn.Linear(50, 4, bias=False)\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(\n torch.tensor([2, 2]).long(), requires_grad=False\n )\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n return F.softmax(x, dim=1)\n\n\nclass LargeNet(nn.Module):\n def __init__(self):\n super(LargeNet, self).__init__()\n self.fc1 = nn.Linear(1000, 2000, bias=False)\n self.fc2 = nn.Linear(2000, 500, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\nclass Task(nn.Module):\n def __init__(self):\n super().__init__()\n self.p = nn.Parameter(torch.ones(2, 2))\n\n def forward(self, x):\n return self.p + x\n\n\nclass BatchNormNet(nn.Module):\n def __init__(self, affine=True):\n super(BatchNormNet, self).__init__()\n self.fc1 = nn.Linear(2, 40, bias=False)\n self.bn = nn.BatchNorm1d(4, affine=affine)\n self.fc2 = nn.Linear(40, 4, bias=False)\n\n def forward(self, x):\n x = torch.reshape(self.fc1(x), (-1, 4, 10))\n x = self.bn(x)\n x = torch.reshape(x, (-1, 40))\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n\nclass TwoLinLayerNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.a = nn.Linear(10, 10, bias=False)\n self.b = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n a = self.a(x)\n b = self.b(x)\n return (a, 
b)\n\n\nclass EmbeddingNet(nn.Module):\n def __init__(self, rank):\n super().__init__()\n embedding_dim = 500 if rank == 0 else 50\n self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim)\n self.lin = nn.Linear(embedding_dim, 1)\n\n def forward(self, x):\n x = self.embedding(x)\n return self.lin(x)\n\n\nclass ControlFlowToyModel(nn.Module):\n def __init__(self):\n super(ControlFlowToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n # Second layer is used dependent on input x.\n use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device))\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n\nDDP_NET = Net()\nBN_NET = BatchNormNet()\nBN_NET_NO_AFFINE = BatchNormNet(affine=False)\nONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)\n\n\ndef get_timeout(test_id):\n test_name = test_id.split(\".\")[-1]\n if test_name in CUSTOMIZED_TIMEOUT:\n return CUSTOMIZED_TIMEOUT[test_name]\n else:\n return DEFAULT_TIMEOUT\n\n\ndefault_pg_timeout = 60\n\nCUSTOM_PG_TIMEOUT = {\n # This test runs slowly and needs additional time to complete, otherwise can\n # be taken down by NCCL_ASYNC_ERROR_HANDLING\n \"test_ddp_uneven_inputs\": 300,\n # This test has a short timeout since it tests being taken down by\n # NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly.\n \"test_ddp_model_diff_across_ranks\": 5,\n}\n\n\ndef require_backend(backends):\n if BACKEND not in backends:\n return unittest.skip(\"Test requires backend to be one of %s\" % backends)\n return lambda func: func\n\n\ndef require_backends_available(backends):\n def check(backend):\n if backend == dist.Backend.GLOO:\n return dist.is_gloo_available()\n if backend == dist.Backend.NCCL:\n return dist.is_nccl_available()\n if backend == dist.Backend.MPI:\n return dist.is_mpi_available()\n return False\n\n if not all(check(dist.Backend(backend)) for backend in backends):\n return unittest.skip(\"Test requires backends to be available %s\" % backends)\n return lambda func: func\n\n\ndef require_world_size(world_size):\n if int(os.environ[\"WORLD_SIZE\"]) < world_size:\n return unittest.skip(\"Test requires world size of %d\" % world_size)\n return lambda func: func\n\n\ndef apply_hack_for_nccl():\n # This is a hack for a known NCCL issue using multiprocess\n # in conjunction with multiple threads to manage different GPUs which\n # may cause ncclCommInitRank to fail.\n # http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4\n # It slows down the performance of collective operations.\n # Without this setting NCCL might throw unhandled error.\n os.environ[\"NCCL_MAX_NRINGS\"] = \"1\"\n\n\n@contextmanager\ndef _lock():\n TEMP_DIR = os.environ[\"TEMP_DIR\"]\n lockfile = os.path.join(TEMP_DIR, \"lockfile\")\n with open(lockfile, \"w\") as lf:\n try:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1)\n yield\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_EX)\n yield\n finally:\n if sys.platform == \"win32\":\n msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1)\n else:\n fcntl.flock(lf.fileno(), fcntl.LOCK_UN)\n lf.close()\n\n\ndef _build_tensor(size, value=None, dtype=torch.float, device_id=None):\n if value is None:\n value = size\n if device_id is None:\n return torch.empty(size, size, size, dtype=dtype).fill_(value)\n else:\n return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id)\n\n\ndef 
_build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float):\n if value is None:\n value = size\n return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value)\n\n\ndef _create_autograd_profiler():\n return torch.autograd.profiler.profile(record_shapes=True)\n\n\ndef _create_torch_profiler():\n return torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n ],\n record_shapes=True,\n )\n\n\nclass Barrier(object):\n barrier_id = 0\n\n @classmethod\n def init(cls):\n cls.barrier_id = 0\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n for f_name in os.listdir(barrier_dir):\n os.unlink(os.path.join(barrier_dir, f_name))\n\n @classmethod\n def sync(cls, wait_for=None, timeout=10):\n if wait_for is None:\n wait_for = dist.get_world_size()\n cls.barrier_id += 1\n barrier_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"barrier\")\n pid = str(os.getpid())\n barrier_file = os.path.join(barrier_dir, pid)\n with _lock():\n with open(barrier_file, \"w\") as f:\n f.write(str(cls.barrier_id))\n\n start_time = time.time()\n while True:\n arrived = 0\n with _lock():\n for f_name in os.listdir(barrier_dir):\n with open(os.path.join(barrier_dir, f_name), \"r\") as f:\n data = f.read()\n if int(data) >= cls.barrier_id:\n arrived += 1\n if arrived == wait_for:\n break\n\n if time.time() - start_time > timeout:\n raise RuntimeError(\"barrier timeout\")\n time.sleep(0.1)\n\n\nclass TestDistBackend(MultiProcessTestCase):\n @classmethod\n def setUpClass(cls):\n os.environ[\"MASTER_ADDR\"] = str(MASTER_ADDR)\n os.environ[\"MASTER_PORT\"] = str(MASTER_PORT)\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # such as test_batch_isend_irecv_nccl will test NCCL_BLOCKING_WAIT as\n # expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n super().setUpClass()\n\n def setUp(self):\n super().setUp()\n # initialize temp directories\n initialize_temp_directories()\n # initialize Barrier\n Barrier.init()\n # Skip return code checking for following tests as they are expected to\n # crash a process due to NCCL_ASYNC_ERROR_HANDLING.\n self.skip_return_code_checks = []\n\n def tearDown(self):\n cleanup_temp_dir()\n super().tearDown()\n\n @property\n def init_method(self):\n return \"{}{file_name}\".format(FILE_SCHEMA, file_name=self.file_name)\n\n @classmethod\n def _run(cls, rank, test_name, file_name, pipe):\n if BACKEND == \"nccl\" and not torch.cuda.is_available():\n sys.exit(TEST_SKIPS[\"no_cuda\"].exit_code)\n self = cls(test_name)\n self.rank = rank\n self.file_name = file_name\n\n if torch.cuda.is_available() and torch.cuda.device_count() < int(\n self.world_size\n ):\n sys.exit(TEST_SKIPS[f\"multi-gpu-{self.world_size}\"].exit_code)\n try:\n pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)\n timeout = timedelta(seconds=pg_timeout_seconds)\n dist.init_process_group(\n init_method=self.init_method,\n backend=BACKEND,\n world_size=int(self.world_size),\n rank=self.rank,\n timeout=timeout,\n )\n except RuntimeError as e:\n if \"recompile\" in e.args[0]:\n sys.exit(TEST_SKIPS[\"backend_unavailable\"].exit_code)\n\n raise\n\n # Execute barrier prior to running test to ensure that every process\n # has finished initialization and that the following test\n # immediately exiting due to a skip doesn't cause flakiness.\n self._barrier()\n\n self.run_test(test_name, pipe)\n self._barrier()\n dist.destroy_process_group()\n sys.exit(0)\n\n # Needed since MultiProcessTestCase assumes a world_size of 4, but 
we\n # run these tests under other various world_sizes.\n @property\n def world_size(self):\n return os.environ[\"WORLD_SIZE\"]\n\n\nclass DistributedTest:\n class _DistTestBase:\n def _barrier(self, *args, **kwargs):\n Barrier.sync(*args, **kwargs)\n\n def _init_group_test(self, **kwargs):\n group = [1, 2]\n group_id = dist.new_group(group, **kwargs)\n rank = dist.get_rank()\n if rank not in group:\n return ([], None, rank)\n\n return (group, group_id, rank)\n\n def _init_full_group_test(self, **kwargs):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.new_group(**kwargs)\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n def _init_global_test(self):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.group.WORLD\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n # HELPER FOR MULTIGPU TESTS\n def _init_multigpu_helper(self):\n \"\"\"Multigpu tests are designed to simulate the multi nodes with multi\n GPUs on each node. Nccl backend requires equal #GPUs in each process.\n On a single node, all visible GPUs are evenly\n divided to subsets, each process only uses a subset.\n \"\"\"\n nGPUs = torch.cuda.device_count()\n world_size = dist.get_world_size()\n visible_devices = range(nGPUs)\n\n if BACKEND == \"nccl\":\n apply_hack_for_nccl()\n\n # If rank is lesser than or equal to number of available GPU's\n # then each rank can be mapped to corresponding GPU.\n nGPUs_per_process = 1\n if world_size > nGPUs:\n nGPUs_per_process = nGPUs // world_size\n rank_to_GPU = {\n i: list(\n visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]\n )\n for i in range(world_size)\n }\n return rank_to_GPU\n\n def test_dump_DDP_relevant_env_vars(self):\n with captured_output() as (out, _):\n _dump_DDP_relevant_env_vars()\n lines = out.getvalue().splitlines()\n\n def format_line(var):\n return \"env:%s=%s\" % (\n var,\n os.environ[var] if var in os.environ else \"N/A\",\n )\n\n # Check relevant env vars\n vars = [\n \"MASTER_ADDR\",\n \"MASTER_PORT\",\n \"WORLD_SIZE\",\n \"NCCL_TOPO_DUMP_FILE\", # N/A\n \"NCCL_ASYNC_ERROR_HANDLING\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertIn(line, lines)\n # Check irrelevant env vars\n vars = [\n \"xxx\",\n \"yyy\",\n \"zzz\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertNotIn(line, lines)\n\n # GET RANK\n def test_get_rank(self):\n test_dir = os.path.join(os.environ[\"TEMP_DIR\"], \"test_dir\")\n pid = str(os.getpid())\n num_processes = dist.get_world_size()\n with open(os.path.join(test_dir, pid), \"w\") as f:\n f.write(str(dist.get_rank()))\n\n self._barrier()\n\n all_ranks = set()\n for f_name in os.listdir(test_dir):\n with open(os.path.join(test_dir, f_name), \"r\") as f:\n all_ranks.add(int(f.read()))\n self.assertEqual(len(all_ranks), num_processes)\n\n self._barrier()\n\n if dist.get_rank() == 0:\n for f_name in os.listdir(test_dir):\n os.unlink(os.path.join(test_dir, f_name))\n\n self._barrier()\n\n def test_get_backend(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n backend_str = BACKEND.lower()\n self.assertEqual(dist.get_backend(), backend_str)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_backend(group_id), backend_str)\n else:\n with self.assertRaisesRegex(\n RuntimeError, \"Invalid process group specified\"\n ):\n dist.get_backend(group_id)\n\n def test_Backend_enum_class(self):\n # test parsing\n backend = BACKEND.lower()\n self.assertEqual(dist.Backend(BACKEND.upper()), 
backend)\n self.assertEqual(dist.Backend(BACKEND), backend)\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'undefined'\"):\n dist.Backend(\"undefined\")\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'xYz'\"):\n dist.Backend(\"xYz\")\n with self.assertRaises(ValueError):\n dist.Backend(None)\n with self.assertRaises(ValueError):\n dist.Backend(3)\n with self.assertRaises(ValueError):\n dist.Backend([\"gloo\"])\n\n # Test destroy\n def test_destroy_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of group\n def test_get_rank_size_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_world_size(group_id), 2)\n self.assertTrue(dist.get_rank(group_id) in list(range(2)))\n else:\n self.assertEqual(dist.get_world_size(group_id), -1)\n self.assertEqual(dist.get_rank(group_id), -1)\n\n # Test destroy full groups\n def test_destroy_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of full group\n def test_get_rank_size_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())\n self.assertEqual(dist.get_rank(group_id), dist.get_rank())\n\n def _test_barrier_timeout(self, group_id, timeout):\n local_rank = dist.get_rank(group_id)\n\n # Only execute barrier on rank == 0, causing it to timeout\n if local_rank == 0:\n expected_time = time.time() + timeout.total_seconds()\n # In debug mode, we execute a monitored_barrier before the\n # collective, so assert on that.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n exception_ctx = self.assertRaisesRegex(\n Exception, \"failed to pass monitoredBarrier\"\n )\n else:\n exception_ctx = self.assertRaisesRegex(\n Exception, \" (Timed out|closed|timeout) \"\n )\n with exception_ctx:\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1)\n else:\n pass\n\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n @unittest.skipIf(\n not INIT_METHOD.startswith(\"file://\"),\n \"Requires file:// initialization method. 
\"\n + \"Both tcp:// and env:// rely on the TCP store for which \"\n \"reinitialization has proven racy.\",\n )\n def test_barrier_timeout_global(self):\n dist.destroy_process_group()\n\n # Explicitly pass world size to the barrier because we've\n # just destroyed any state in torch.distributed.\n self._barrier(wait_for=int(os.environ[\"WORLD_SIZE\"]))\n\n # Reinitialize global process group\n timeout = timedelta(seconds=1)\n dist.init_process_group(\n init_method=INIT_METHOD,\n backend=BACKEND,\n world_size=int(os.environ[\"WORLD_SIZE\"]),\n rank=self.rank,\n timeout=timeout,\n )\n self._test_barrier_timeout(dist.group.WORLD, timeout)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_group(self):\n timeout = timedelta(seconds=5)\n _, group_id, _ = self._init_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_full_group(self):\n timeout = timedelta(seconds=1)\n _, group_id, _ = self._init_full_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n # This test helper can only be used when using the Gloo or NCCL backend\n # **and** both the Gloo and NCCL backends are available.\n # See the @skip annotations below.\n def _test_group_override_backend(self, initializer):\n if BACKEND == \"gloo\":\n new_backend = \"nccl\"\n if BACKEND == \"nccl\":\n new_backend = \"gloo\"\n\n group, group_id, rank = initializer(backend=new_backend)\n if group_id is None:\n return\n\n if new_backend == \"gloo\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))\n if new_backend == \"nccl\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))\n\n self.assertEqual(rank, group[dist.get_rank(group_id)])\n self.assertEqual(len(group), dist.get_world_size(group_id))\n\n # Pin device (so we avoid NCCL race conditions/deadlocks).\n group_rank = dist.get_rank(group_id)\n torch.cuda.set_device(group_rank)\n\n # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).\n tensor = _build_tensor(2, value=group_rank).cuda()\n dist.broadcast(tensor, src=group[0], group=group_id)\n self.assertEqual(_build_tensor(2, value=0), tensor.to(\"cpu\"))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @require_world_size(3)\n @skip_if_lt_x_gpu(2)\n def test_backend_group(self):\n self._test_group_override_backend(self._init_group_test)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(3)\n def test_backend_full_group(self):\n self._test_group_override_backend(self._init_full_group_test)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(2)\n def test_new_subgroups(self):\n subgroup_size = 2\n cur_subgroup, subgroups = dist.new_subgroups(subgroup_size)\n\n world_size = dist.get_world_size()\n self.assertEqual(cur_subgroup.size(), subgroup_size)\n self.assertEqual(len(subgroups), world_size / subgroup_size)\n self.assertFalse(dist._rank_not_in_group(cur_subgroup))\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA 
devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_group_size_exceeds_world_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The arg 'group_size' must not exceed the world size\"\n ):\n dist.new_subgroups(100)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_world_size_not_divisible_by_group_size(self):\n with self.assertRaisesRegex(\n ValueError, \"The world size must be divisible by 'group_size'\"\n ):\n dist.new_subgroups(3)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n cur_subgroup, subgroups = dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 2], [1, 3]]\n )\n if device_id >= 4:\n self.assertIsNone(cur_subgroup)\n else:\n self.assertEqual(cur_subgroup.size(), 2)\n self.assertEqual(len(subgroups), 2)\n if device_id == 0 or device_id == 2:\n self.assertEqual(cur_subgroup, subgroups[0])\n else:\n self.assertEqual(cur_subgroup, subgroups[1])\n\n for subgroup in subgroups:\n dist.destroy_process_group(subgroup)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = get_world_size(group_id)\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0, 1], [world_size, 2]]\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_no_gpu\n def test_new_subgroups_by_enumeration_negative_input_rank(self):\n group, group_id, rank = self._init_global_test()\n\n with self.assertRaisesRegex(\n RuntimeError,\n \"The new group's rank should be within the the world_size set by init_process_group\",\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[-1, -2], [-3, -4]]\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @require_world_size(4)\n @skip_if_lt_x_gpu(4)\n def test_new_subgroups_overlap_not_allowed(self):\n with self.assertRaisesRegex(\n ValueError, \"Rank 1 has appeared in both subgroup\"\n ):\n dist.new_subgroups_by_enumeration(\n ranks_per_subgroup_list=[[0], [1, 2], [1, 3]]\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_average_parameters(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n\n model = (\n nn.Sequential(\n nn.Conv2d(3, 3, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Linear(1, 5, bias=False)\n ).cuda(device_id)\n )\n # Test global model averaging\n for 
p in model.parameters():\n p.data = torch.ones_like(p.data)\n model_averaging_utils.average_parameters(params=model.parameters(), process_group=None)\n # Every element will be the same as the input.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data))\n\n # Test partial model averaging\n for p in model.parameters():\n p.data = torch.ones_like(p.data) * rank\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n model_averaging_utils.average_parameters(params=model.parameters(), process_group=group_nccl)\n if not dist._rank_not_in_group(group_nccl):\n # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * 0.5)\n else:\n # Every element on device not in the subgroup should remain the same.\n for p in model.parameters():\n self.assertEqual(p.data, torch.ones_like(p.data) * rank)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support creating subgroups on CUDA devices\",\n )\n @skip_if_lt_x_gpu(2)\n def test_periodic_model_averager(self):\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n world_size = dist.get_world_size()\n\n model = nn.Linear(1, 5, bias=False).cuda(device_id)\n param = next(model.parameters())\n tensor = torch.ones_like(param.data) * rank\n expected_avg_tensor = torch.ones_like(param.data) * sum(range(world_size)) / world_size\n period = 4\n for warmup_steps in [12, 13, 14, 15]:\n averager = averagers.PeriodicModelAverager(model.parameters(), warmup_steps=warmup_steps, period=period)\n for step in range(0, 20):\n # Reset the parameters at every step.\n param.data = copy.deepcopy(tensor)\n averager.average_parameters()\n if step >= warmup_steps and (step - warmup_steps) % period == 0:\n self.assertEqual(param.data, expected_avg_tensor)\n else:\n # No model averaging, so the parameters are not updated.\n self.assertEqual(param.data, tensor)\n\n # NCCL Batch SEND RECV\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n for val in [\"1\", \"0\"]:\n os.environ[\"NCCL_BLOCKING_WAIT\"] = val\n for src in range(0, dist.get_world_size()):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(src + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_self_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n p2p_op_list = []\n\n if rank == 0:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)\n p2p_op_list.append(recv_op)\n send_op = 
dist.P2POp(dist.isend, send_tensor, 0)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n @skip_if_no_gpu\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_no_rank_zero_nccl(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n p2p_op_list = []\n\n if rank == 1:\n peer = 2\n elif rank == 2:\n peer = 1\n\n if rank in [1, 2]:\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, peer)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU\n @unittest.skipIf(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # GLOO Batch SEND RECV CPU with provided tags\n @unittest.skipIf(BACKEND != \"gloo\", \"GLOO Batch Send Recv CPU\")\n def test_batch_isend_irecv_gloo_tags(self):\n self._barrier()\n rank = dist.get_rank()\n p2p_op_list = []\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n continue\n send_tensor = _build_tensor(rank + 1)\n recv_tensor = _build_tensor(src + 1, value=-1)\n recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)\n p2p_op_list.append(recv_op)\n send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)\n p2p_op_list.append(send_op)\n\n reqs = dist.batch_isend_irecv(p2p_op_list)\n for req in reqs:\n req.wait()\n\n self._barrier()\n\n # NCCL Batch SEND RECV Tensor Error\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_tensor_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(\n RuntimeError, \"Tensors must be CUDA and dense\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op = dist.P2POp(dist.isend, send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n req.wait()\n\n # NCCL Batch SEND RECV Op Error\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``op``\"):\n send_tensor = _build_tensor(rank + 1, device_id=device_id)\n send_op = dist.P2POp(dist.broadcast, send_tensor, 1)\n req = dist.batch_isend_irecv([send_op])\n 
req.wait()\n\n # NCCL Batch SEND RECV p2p_op_list Error\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_op_list_err(self):\n self._barrier()\n rank = dist.get_rank()\n if rank == 0:\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n with self.assertRaisesRegex(RuntimeError, \"^Invalid ``p2p_op_list``\"):\n send_tensor = _build_tensor(rank + 1)\n req = dist.batch_isend_irecv([1, 2])\n req.wait()\n\n # NCCL Batch SEND RECV Mixed Backend Error\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Batch Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_batch_isend_irecv_mixed_backend_err(self):\n self._barrier()\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n group_gloo = dist.new_group(ranks=[0, 1], backend=\"gloo\")\n group_nccl = dist.new_group(ranks=[0, 1], backend=\"nccl\")\n if rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, \"All groups need to use the same backend\"\n ):\n send_tensor = _build_tensor(rank + 1)\n send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)\n send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)\n req = dist.batch_isend_irecv([send_op_gloo, send_op_nccl])\n req.wait()\n\n # NCCL SEND RECV\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def _test_send_recv_nccl(self, profiler_ctx=None):\n # TODO: now that nccl send/recv is supported, there does not seem to\n # be a need to have nccl send/recv be tested separately.\n rank = dist.get_rank()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n tensor = _build_tensor(rank + 1, device_id=device_id)\n profiler_cls = profiler_ctx if profiler_ctx is not None else suppress()\n with profiler_cls as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n expected_tensor = _build_tensor(src + 1)\n output_tensor = _build_tensor(\n src + 1, value=-1, device_id=device_id\n )\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n self.assertTrue(events)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl(self):\n self._test_send_recv_nccl()\n\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n def test_send_recv_nccl_autograd_profiler(self):\n profiler_ctx = torch.autograd.profiler.profile(record_shapes=True)\n self._test_send_recv_nccl(profiler_ctx)\n\n @skip_if_no_gpu\n 
@unittest.skipIf(BACKEND != \"nccl\", \"NCCL Send Recv Only\")\n @requires_nccl_version(2700, \"Need NCCL 2.7+ for send/recv\")\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_nccl_torch_profiler(self):\n profiler_ctx = torch.profiler.profile(\n activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ],\n record_shapes=True,\n )\n self._test_send_recv_nccl(profiler_ctx)\n\n # SEND RECV\n def _test_send_recv(self, profiler_ctx):\n rank = dist.get_rank()\n send_size = rank + 1\n tensor = _build_tensor(send_size)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n recv_size = src + 1\n expected_tensor = _build_tensor(recv_size)\n output_tensor = _build_tensor(recv_size, value=-1)\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from all other ranks.\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n # Event order is not deterministic, so simply assert their shape\n # is found in the following list.\n expected_shapes = [\n [[rank + 1] * 3] for rank in range(dist.get_world_size())\n ]\n for event in events:\n self.assertTrue(event.is_async)\n self.assertTrue(event.input_shapes in expected_shapes)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"Nccl send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv(self):\n self._test_send_recv(profiler_ctx=None)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv(profiler_ctx=autograd_profiler_ctx)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV ANY SOURCE\n def _test_send_recv_any_source(self, profiler_ctx):\n rank = dist.get_rank()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n recv_ranks = list()\n irecv_ranks = list()\n\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n # Recv mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n\n for recv in [\"recv\", \"irecv\"]:\n output_tensor = _build_tensor(send_recv_size, value=-1)\n\n if recv == \"recv\":\n sender = dist.recv(output_tensor)\n recv_ranks.append(sender)\n elif recv == \"irecv\":\n work = 
dist.irecv(output_tensor)\n work.wait()\n sender = work._source_rank()\n irecv_ranks.append(sender)\n\n # Assert the scalar value \"sender\" that should be\n # equal to the rank of the sender is equal to all\n # values in the received tensor.\n self.assertTrue(output_tensor.eq(sender).all())\n else:\n # Send mode\n dist.send(tensor, dst) # recv\n dist.send(tensor, dst) # irecv\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recvAnySource\"]:\n events = get_profiling_event(event_name, prof)\n # Each rank sends/recvs from other rank twice.\n self.assertEqual(\n sum(event.count for event in events),\n 2 * (dist.get_world_size() - 1),\n )\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n # Each rank would have 2 * (world_size - 1) sends, verify that\n # globally we receive the same amount on the other end.\n recv_ranks_tensor = torch.cat(\n (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0\n )\n global_recv_ranks = [\n torch.empty_like(recv_ranks_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(global_recv_ranks, recv_ranks_tensor)\n global_recv_ranks_list = []\n for tensor in global_recv_ranks:\n global_recv_ranks_list += tensor.tolist()\n\n from itertools import groupby\n\n global_recv_ranks_list.sort()\n frequency = [\n len(list(group)) for key, group in groupby(global_recv_ranks_list)\n ]\n self.assertEqual(dist.get_world_size(), len(frequency))\n self.assertEqual(\n [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source(self):\n self._test_send_recv_any_source(profiler_ctx=None)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_any_source_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx)\n\n # SEND RECV WITH TAG\n def _test_send_recv_with_tag(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n send_recv_size = 10\n tensor = _build_tensor(send_recv_size, value=rank)\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n for dst in range(0, world_size):\n if dst == rank:\n # Recv mode\n for src in range(0, world_size):\n if src == rank:\n continue\n output_tensor = _build_tensor(send_recv_size, value=-1)\n dist.recv(output_tensor, src, tag=src)\n self.assertTrue(output_tensor.eq(src).all())\n else:\n # Send mode\n dist.send(tensor, dst, tag=rank)\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n for event_name in [f\"{backend}:send\", f\"{backend}:recv\"]:\n events = get_profiling_event(event_name, prof)\n # Each 
rank sends/recvs from all other ranks\n event_count = sum(e.count for e in events)\n expected_event_count = dist.get_world_size() - 1\n self.assertEqual(event_count, expected_event_count)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, event_name)\n self.assertEqual(event.input_shapes, [[send_recv_size] * 3])\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag(self):\n self._test_send_recv_with_tag(profiler_ctx=None)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n def test_send_recv_with_tag_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"NCCL send/recv tested by test_send_recv_nccl\"\n )\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_send_recv_with_tag_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx)\n\n # ISEND\n def _test_isend(self, profiler_ctx):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n ctx = profiler_ctx if profiler_ctx is not None else suppress()\n with ctx as prof:\n if rank == 0:\n requests = [\n dist.isend(_build_tensor(dest, 10), dest)\n for dest in range(1, world_size)\n ]\n for request in requests:\n request.wait()\n self.assertTrue(request.is_completed())\n else:\n tensor = _build_tensor(rank, -1)\n dist.recv(tensor, 0)\n self.assertEqual(tensor, _build_tensor(rank, 10))\n\n self._barrier()\n\n if profiler_ctx is not None:\n backend = dist.get_backend()\n if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS:\n expected_event_name = (\n f\"{backend}:send\" if rank == 0 else f\"{backend}:recv\"\n )\n events = get_profiling_event(expected_event_name, prof)\n event_count = sum(e.count for e in events)\n expected_count = dist.get_world_size() - 1 if rank == 0 else 1\n self.assertEqual(expected_count, event_count)\n # Event ordering is not guaranteed, so simply ensure the shapes are\n # found in the following map.\n expected_shapes = {\n r: [[r] * 3] for r in range(1, dist.get_world_size())\n }\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, expected_event_name)\n if rank == 0:\n self.assertTrue(\n event.input_shapes in expected_shapes.values()\n )\n else:\n self.assertEqual(event.input_shapes, expected_shapes[rank])\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend(self):\n self._test_isend(profiler_ctx=None)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend_autograd_profiler(self):\n autograd_profiler_ctx = _create_autograd_profiler()\n self._test_isend(profiler_ctx=autograd_profiler_ctx)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support isend\")\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_isend_torch_profiler(self):\n torch_profiler_ctx = _create_torch_profiler()\n self._test_isend(profiler_ctx=torch_profiler_ctx)\n\n # IRECV\n @unittest.skipIf(BACKEND == 
\"nccl\", \"Nccl does not support irecv\")\n def test_irecv(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n if rank == 0:\n expected_tensors = [\n _build_tensor(src, -1) for src in range(1, world_size)\n ]\n requests = [\n dist.irecv(expected_tensors[src - 1], src)\n for src in range(1, world_size)\n ]\n\n for src in range(1, world_size):\n requests[src - 1].wait()\n self.assertTrue(requests[src - 1].is_completed())\n self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))\n else:\n tensor = _build_tensor(rank, 10)\n dist.send(tensor, 0)\n\n self._barrier()\n\n # BROADCAST\n def _test_broadcast_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n with_options=False,\n ):\n for dtype, value, requires_cuda in [\n (torch.float, -1e-10, False),\n (torch.double, -1e-100, False),\n (torch.half, -0.1, True),\n (torch.int8, -2, False),\n (torch.uint8, 129, False),\n (torch.int, -1e5, False),\n (torch.long, -1e15, False),\n ]:\n if requires_cuda and not cuda:\n continue\n for src in group:\n expected_tensor = _build_tensor(src + 1, value, dtype)\n if cuda:\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n if rank == src:\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\",\n True,\n group_id.broadcast,\n [expected_tensor],\n opts,\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n expected_tensor,\n src,\n group_id,\n )\n else:\n tensor = _build_tensor(src + 1, -1, dtype)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if with_options:\n opts = dist.BroadcastOptions()\n opts.rootTensor = 0\n opts.rootRank = src\n self.call_dist_op(\n \":broadcast\", True, group_id.broadcast, [tensor], opts\n )\n else:\n self.call_dist_op(\n \":broadcast\",\n False,\n dist.broadcast,\n tensor,\n src,\n group_id,\n )\n self.assertEqual(tensor.size(), expected_tensor.size())\n self.assertEqual(\n tensor.ne(expected_tensor).max(), torch.tensor(False)\n )\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast(self):\n group, group_id, rank = self._init_global_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and Nccl backend supports CUDA allReduce\",\n )\n @skip_if_no_gpu\n def test_broadcast_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND != \"nccl\",\n \"Only NCCL backend supports high priority stream\",\n )\n @skip_if_no_gpu\n def test_nccl_high_priority_stream(self):\n group, _, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n\n new_port = str(MASTER_PORT + 1)\n 
os.environ[\"MASTER_PORT\"] = new_port\n gen_iterator = dist.rendezvous(\"env://\", rank, dist.get_world_size())\n store, rank, size = next(gen_iterator)\n store = dist.PrefixStore(new_port, store)\n\n opts = dist.ProcessGroupNCCL.Options()\n opts.is_high_priority_stream = False\n group_id = dist.ProcessGroupNCCL(store, rank, size, opts)\n\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)\n\n # REDUCE\n def _test_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensor = _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensor,\n src,\n op,\n group_id,\n tensor_shapes=[tensor.shape],\n )\n if rank == src:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not 
support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # REDUCE TWICE\n def _test_reduce_twice_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n tensors = [\n _build_tensor(src + 1).fill_(\n master_value if rank == src else worker_value\n )\n for i in range(2)\n ]\n if cuda:\n for i in range(2):\n tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0])\n self.call_dist_op(\n \":reduce\",\n False,\n dist.reduce,\n tensors[0],\n src,\n op,\n group_id,\n secondary_op_call=lambda: dist.reduce(\n tensors[1], src, op, group_id\n ),\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n for tensor in tensors:\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum_twice(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n def test_reduce_sum_cuda_twice(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_twice_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @skip_if_no_gpu\n @require_backend({\"gloo\", \"nccl\"})\n def test_all_reduce_result_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n for src in group:\n if rank == src:\n tensor = _build_tensor(src + 1, 2)\n else:\n tensor = _build_tensor(src + 1, 10)\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n\n opts = AllreduceOptions()\n opts.reduceOp = dist.ReduceOp.SUM\n\n if group_id == GroupMember.WORLD:\n work = _get_default_group().allreduce([tensor], opts)\n else:\n work = group_id.allreduce([tensor], opts)\n\n if BACKEND == \"gloo\":\n # Calling result right the work is finished should 
throw exception.\n # Here we have a race condition, we may not assume the work is not\n # finished by the time we run next lines.\n try:\n with self.assertRaisesRegex(\n RuntimeError,\n \"Work needs to be completed before calling result\",\n ):\n work.result()\n except AssertionError:\n # Exception was not raised, ensure is_completed()\n self.assertTrue(work.is_completed())\n\n work.wait()\n result = work.result()\n else:\n # In case of NCCL we should be able to retrieve pointer to the result\n # even before work is finished.\n result = work.result()\n work.wait()\n\n expected_value = 2 + (10 * (len(group) - 1))\n self.assertEqual(result, [_build_tensor(src + 1, expected_value)])\n self._barrier()\n\n def call_dist_op(\n self,\n profiling_title_postfix,\n is_async,\n op,\n *args,\n expect_event=True,\n secondary_op_call=None,\n profile_cuda=False,\n tensor_shapes=None,\n **kwargs,\n ):\n op_calls = [lambda: op(*args, **kwargs)]\n if secondary_op_call is not None:\n op_calls.append(secondary_op_call)\n\n autograd_profiler_ctx = torch.autograd.profiler.profile(\n use_cuda=profile_cuda, record_shapes=True\n )\n\n # TODO: move this test to use torch.profiler once kineto issues are\n # fixed internally.\n with autograd_profiler_ctx as prof:\n works = [op_call() for op_call in op_calls]\n if is_async:\n for work in works:\n work.wait()\n\n if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS:\n events = get_profiling_event(\n profiling_title_postfix, autograd_profiler_ctx\n )\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL:\n self.assertEqual(len(events), len(op_calls))\n for e in events:\n self.assertTrue(e.is_async)\n self.assertEqual(e.count, 1)\n self.assertGreaterEqual(e.cpu_time, 0)\n # Verify tensor shapes if given\n # DETAIL debug mode can use a pg wrapper that issues more collectives\n # under the hood\n if (\n tensor_shapes is not None\n and dist._get_debug_mode() != dist._DistributedDebugLevel.DETAIL\n ):\n self.assertEqual(\n e.input_shapes,\n tensor_shapes,\n f\"event shape: {e.input_shapes} vs tensor {tensor_shapes}\",\n )\n\n # ALL REDUCE\n def _test_all_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n async_op=False,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n\n tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n if tensor.dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensor).shape]\n else:\n tensor_shapes = [tensor.shape]\n self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n tensor_shapes=tensor_shapes,\n )\n # Currently, only Gloo backend has profiling tested with CUDA enabled.\n # Only run cuda profiling test for one rank to speed up since\n # running with different src_rank does not affect the correctness.\n if (\n src == 0\n and cuda\n and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS\n ):\n self.call_dist_op(\n \":all_reduce\",\n async_op,\n dist.all_reduce,\n tensor,\n op,\n group_id,\n async_op=async_op,\n profile_cuda=True,\n tensor_shapes=tensor_shapes,\n )\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum(self):\n group, group_id, rank = 
self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_async(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n async_op=True,\n )\n\n @unittest.skipIf(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_async(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n async_op=True,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n dtype=torch.cfloat,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_complex_unsupported_ops(self):\n unsupported_ops = [\n dist.ReduceOp.MAX,\n dist.ReduceOp.MIN,\n dist.ReduceOp.PRODUCT,\n dist.ReduceOp.BAND,\n dist.ReduceOp.BOR,\n dist.ReduceOp.BXOR,\n ]\n group, group_id, rank = self._init_global_test()\n for unsupported_op in unsupported_ops:\n with self.assertRaisesRegex(\n RuntimeError, \"all_reduce does not support\"\n ):\n dist.all_reduce(\n _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id\n )\n\n @unittest.skipIf(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and NCCL backends will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda_complex(self):\n torch.cuda.set_device(self.rank)\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n complex(2, 3) + (complex(10, 11) * (len(group) - 1)),\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == 
\"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n # SPARSE ALL REDUCE\n def _test_sparse_all_reduce_sum(self, fn):\n group, group_id, rank = self._init_global_test()\n\n tests = simple_sparse_reduce_tests(\n rank, dist.get_world_size(), num_inputs=1\n )\n for (inputs, outputs) in tests:\n tensors = [fn(input) for input in inputs]\n dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)\n self.assertEqual(tensors[0], outputs[0])\n\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n )\n def test_sparse_all_reduce_sum(self):\n self._test_sparse_all_reduce_sum(lambda t: t)\n\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo backend support sparse all reduce\"\n )\n @skip_if_no_gpu\n def test_sparse_all_reduce_sum_cuda(self):\n self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())\n\n # ALL REDUCE - COALESCED\n @staticmethod\n def 
_all_reduce_coalesced_sum_test_cases(group_size):\n return (\n [2, 3, complex(2, 3)],\n [10, 11, complex(10, 11)],\n [\n 2 + 10 * (group_size - 1),\n 3 + 11 * (group_size - 1),\n complex(2, 3) + complex(10, 11) * (group_size - 1),\n ],\n [torch.float, torch.float, torch.cfloat],\n )\n\n @staticmethod\n def _all_reduce_coalesced_product_test_cases(group_size):\n return (\n [1, 2],\n [3, 4],\n [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_min_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [1, 3],\n [torch.float, torch.float],\n )\n\n @staticmethod\n def _all_reduce_coalesced_max_test_cases(group_size):\n return (\n [1, 4],\n [2, 3],\n [2, 4],\n [torch.float, torch.float],\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_coalesced_max_complex_unsupported(self):\n group, group_id, rank = self._init_global_test()\n with self.assertRaisesRegex(RuntimeError, \"all_reduce does not support\"):\n dist.all_reduce_coalesced(\n [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id\n )\n\n def _test_all_reduce_coalesced_helper(\n self,\n group,\n group_id,\n rank,\n op,\n cuda=False,\n rank_to_GPU=None,\n ):\n test_case_func = {\n dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,\n dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,\n dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,\n dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases,\n }[op]\n\n master_values, worker_values, expected_values, dtypes = test_case_func(\n len(group)\n )\n\n for src in group:\n curr_values = master_values if rank == src else worker_values\n tensors = [\n _build_tensor(src + 1, val, dtype=dtype)\n for dtype, val in zip(dtypes, curr_values)\n ]\n if cuda:\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n tensor_shapes = []\n for tensor in tensors:\n if tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(tensor).shape)\n else:\n tensor_shapes.append(tensor.shape)\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_coalesced,\n tensors,\n op,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n expected_tensors = [\n _build_tensor(src + 1, expected_value, dtype=dtype)\n for dtype, expected_value in zip(dtypes, expected_values)\n ]\n self.assertEqual(tensors, expected_tensors)\n\n self._barrier()\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n 
def test_all_reduce_coalesced_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None\n )\n\n # SCATTER\n def _test_scatter_helper(self, group, group_id, rank, dtype=torch.float):\n for dest in group:\n tensor = _build_tensor(dest + 1, -1, dtype=dtype)\n expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = (\n [_build_tensor(dest + 1, i, dtype=dtype) for i in group] if rank == dest else []\n )\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(t).shape for t in tensors]\n else:\n tensor_shapes = [t.shape for t in tensors]\n self.call_dist_op(\n \":scatter\",\n False,\n dist.scatter,\n tensor,\n src=dest,\n scatter_list=tensors,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_scatter_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify scatter_list argument only on source rank.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, src=0, scatter_list=scatter_list)\n else:\n dist.scatter(output, src=0)\n self.assertEqual(output, one * rank)\n\n # Don't specify src argument.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, scatter_list=scatter_list)\n else:\n dist.scatter(output)\n self.assertEqual(output, 
one * rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n @skip_if_small_worldsize\n def test_scatter_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n # GATHER\n def _test_gather_helper(self, group, group_id, rank):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank)\n tensors = (\n [_build_tensor(dest + 1, -1) for i in group] if rank == dest else []\n )\n self.call_dist_op(\n \":gather\",\n False,\n dist.gather,\n tensor,\n dst=dest,\n gather_list=tensors,\n group=group_id,\n tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None,\n )\n if rank == dest:\n expected_tensors = [_build_tensor(dest + 1, i) for i in group]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify gather_list argument only on destination rank.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, dst=0, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank, dst=0)\n\n # Don't specify dst argument.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n # ALL GATHER\n def _test_all_gather_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float\n ):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank, dtype=dtype)\n tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group]\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n if tensors[0].dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(tensors[0]).shape]\n else:\n tensor_shapes = [tensors[0].shape]\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather,\n tensors,\n tensor,\n group_id,\n tensor_shapes=tensor_shapes,\n 
)\n\n expected_tensors = [\n _build_tensor(dest + 1, i, dtype=dtype) for i in group\n ]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n def _run_all_gather_coalesced_and_verify(\n self, output_tensor_lists, input_tensors, expected_tensors, group_id\n ):\n \"\"\"\n Helper that runs all_gather_coalesced and returns true if output\n matches expectations.\n \"\"\"\n tensor_shapes = []\n for input_tensor in input_tensors:\n if input_tensor.dtype == torch.complex64:\n tensor_shapes.append(torch.view_as_real(input_tensor).shape)\n else:\n tensor_shapes.append(input_tensor.shape)\n self.call_dist_op(\n \":all_gather\",\n False,\n dist.all_gather_coalesced,\n output_tensor_lists,\n input_tensors,\n group_id,\n tensor_shapes=tensor_shapes,\n )\n\n for l1, l2 in zip(output_tensor_lists, expected_tensors):\n for t1, t2 in zip(l1, l2):\n if not torch.equal(t1, t2):\n return False\n return True\n\n def _test_all_gather_coalesced_helper(\n self, group, group_id, rank, dtype=torch.float\n ):\n # TODO: Instead we should probably go through _rank_not_in_group\n # mechanism to disable sending tensors\n if group_id is not None:\n for test_case_id in range(2, 5):\n # Make sure we create tensors of incompatible sizes, e.g.\n # [1], [2x2], [3x3x3] ... 
to be sent in one batch\n input_tensors = [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n output_tensor_lists = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, -1, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n _build_multidim_tensor(\n tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype\n )\n for tensor_id in range(1, test_case_id)\n ]\n for rank_iter in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensor_lists, input_tensors, expected_tensors, group_id\n ), \"output tensors do not match expected ouputs\"\n\n self._barrier()\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_simple(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_coalesced_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\"\n )\n @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n def test_all_gather_coalesced_with_empty(self):\n group, group_id, rank = self._init_global_test()\n input_tensors = [\n rank * torch.ones([2, 2]),\n torch.ones([0]),\n (rank + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n output_tensors_lists = [\n [\n -1 * torch.ones([2, 2]),\n -1 * torch.ones([0]),\n -1 * torch.ones([3, 3]),\n -1 * torch.ones([0]),\n -1 * torch.ones([0]),\n ]\n for _ in group\n ]\n expected_tensors = [\n [\n r * torch.ones([2, 2]),\n torch.ones([0]),\n (r + 1) * torch.ones([3, 3]),\n torch.ones([0]),\n torch.ones([0]),\n ]\n for r in group\n ]\n assert self._run_all_gather_coalesced_and_verify(\n output_tensors_lists, input_tensors, expected_tensors, group_id\n )\n self._barrier()\n\n # AllToAll\n def _test_all_to_all_single_equal_split_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_tensor = torch.ones([size, size], dtype=dtype) * rank\n expected_tensor = torch.cat([torch.ones([1, size], dtype=dtype) * i for i in group])\n out_tensor = torch.ones([size, size], dtype=dtype) * -1\n if cuda:\n in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n 
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n if dtype == torch.complex64:\n tensor_shapes = [torch.view_as_real(in_tensor).shape]\n else:\n tensor_shapes = [in_tensor.shape]\n self.call_dist_op(\n \":all_to_all\",\n False,\n dist.all_to_all_single,\n out_tensor,\n in_tensor,\n group=group_id,\n tensor_shapes=tensor_shapes,\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_single_unequal_split_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n out_splits = [rank + 1 for _ in group]\n in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank\n out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype)\n expected_tensor = torch.cat(\n [torch.ones([rank + 1, size], dtype=dtype) * i for i in group]\n )\n if cuda:\n in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n dist.all_to_all_single(\n out_tensor, in_tensor, out_splits, in_splits, group=group_id\n )\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_helper(\n self,\n group,\n group_id,\n rank,\n cuda=False,\n rank_to_GPU=None,\n dtype=torch.float,\n ):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n in_tensors = [\n torch.ones([in_splits[i], size], dtype=dtype) * rank for i, _ in enumerate(group)\n ]\n out_tensors = [torch.ones([(rank + 1), size], dtype=dtype) for _ in group]\n expected_tensors = [torch.ones([rank + 1, size], dtype=dtype) * i for i in group]\n if cuda:\n in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors]\n expected_tensors = [\n t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors\n ]\n out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors]\n dist.all_to_all(out_tensors, in_tensors, group=group_id)\n for t1, t2 in zip(out_tensors, expected_tensors):\n self.assertEqual(t1, t2)\n self._barrier()\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n dtype=torch.cfloat\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU 
all_to_all_single\")\n def test_all_to_all_single_unequal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(\n group, group_id, rank, dtype=torch.cfloat\n )\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n dtype=torch.cfloat,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_complex(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_cuda_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(\n group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group_cuda(self):\n group, group_id, rank = 
self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n @skip_if_small_worldsize\n def test_all_to_all_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_small_worldsize\n @skip_if_rocm\n def test_all_to_all_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_equal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_equal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports CPU all_to_all_single\")\n def test_all_to_all_single_unequal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all_to_all_single\")\n @skip_if_no_gpu\n def test_all_to_all_single_unequal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only NCCL supports CUDA all_to_all\")\n @skip_if_rocm\n def test_all_to_all_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)\n\n # BARRIER\n def _test_barrier_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None\n ):\n WAIT_TIME = 0.3 # seconds\n\n for dest in group:\n expected_time = torch.DoubleTensor(1).fill_(0.0)\n if cuda:\n expected_time = expected_time.cuda(rank_to_GPU[rank][0])\n if dest == rank:\n expected_time.fill_(time.time() + WAIT_TIME)\n dist.broadcast(expected_time, dest, group_id)\n time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer\n dist.barrier(group_id)\n else:\n dist.broadcast(expected_time, dest, group_id)\n dist.barrier(group_id)\n self.assertGreaterAlmostEqual(\n float(time.time()),\n float(expected_time[0]),\n \"destination rank: %d, my rank: %d\" % (dest, rank)\n + \" (if you see this failure, please report in #14554)\",\n )\n\n # Use higher timeout for the instance where the test runs\n # against a subgroup and uses a CUDA tensor for expected time.\n # The CUDA initialization for the participating 
processes can\n # take long enough for the barrier timeout to trigger on the\n # process that doesn't participate in the group.\n self._barrier(timeout=20)\n\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't supports GPU barrier\")\n def test_barrier_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier(self):\n group, group_id, rank = self._init_global_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):\n for src in group:\n expected_tensor = _build_tensor(src + 1)\n tensors = [\n _build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]\n ]\n if rank == src:\n tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])\n\n dist.broadcast_multigpu(tensors, src, group_id)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL broadcast multigpu skipped\")\n @skip_if_no_gpu\n def test_broadcast_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n def _test_all_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n dtype=torch.float,\n ):\n for src in group:\n curr_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, curr_value, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \":all_reduce\",\n False,\n dist.all_reduce_multigpu,\n tensors,\n op,\n group_id,\n )\n expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all_reduce multigpu skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n 
rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all_reduce multigpu skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n complex(2, 3),\n complex(10, 11),\n (complex(2, 3) + complex(10, 11) * (len(group) - 1))\n * len(rank_to_GPU[0]),\n dtype=torch.cfloat,\n )\n\n def _test_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n ):\n for src in group:\n tensor_value = master_value if rank == src else worker_value\n tensors = [\n _build_tensor(src + 1, tensor_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n self.call_dist_op(\n \"reduce\",\n False,\n dist.reduce_multigpu,\n tensors,\n src,\n op,\n group_id,\n expect_event=len(tensors) == 1,\n tensor_shapes=[tensors[0].shape],\n )\n if rank == src:\n expected_tensor = _build_tensor(src + 1, expected_value)\n self.assertEqual(tensors[0], expected_tensor)\n\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\", \"Only Nccl backend supports reduce multigpu\"\n )\n @skip_if_no_gpu\n def test_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n def _test_all_gather_multigpu_helper(\n self, group, group_id, rank, rank_to_GPU, dtype=torch.float\n ):\n for dest in group:\n tensors = [\n _build_tensor(dest + 1, dtype=dtype).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n\n # construct expected output along with\n # a place holder to receive all gather results\n output_tensors = []\n expected_output = []\n output_per_gpu = (\n [_build_tensor(dest + 1, -1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n expected_per_gpu = (\n [_build_tensor(dest + 1, dtype=dtype)]\n * len(rank_to_GPU[0])\n * len(group)\n )\n for gpu in rank_to_GPU[rank]:\n output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])\n expected_output.append(\n [t.cuda(device=gpu) for t in expected_per_gpu]\n )\n self.call_dist_op(\n \"all_gather\",\n False,\n dist.all_gather_multigpu,\n output_tensors,\n tensors,\n group_id,\n expect_event=len(expected_output) == 1,\n )\n self.assertEqual(output_tensors, expected_output)\n\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\", \"Only Nccl backend supports allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n @unittest.skipIf(\n BACKEND != \"nccl\", \"Only Nccl backend supports allgather multigpu\"\n )\n @skip_if_no_gpu\n def test_all_gather_multigpu_complex(self):\n group, group_id, rank = self._init_global_test()\n 
rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n torch.cuda.set_device(device_id)\n self._test_all_gather_multigpu_helper(\n group, group_id, rank, rank_to_GPU, dtype=torch.cfloat\n )\n\n def _model_step(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad = None\n\n def _model_step_with_zero_grad(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad.requires_grad_(False)\n param.grad.zero_()\n\n def _prepare_dummy_data(self, local_bs):\n # global_bs for DDP should be divisible by WORLD_SIZE\n world_size = int(os.environ[\"WORLD_SIZE\"])\n global_bs = world_size * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n return global_bs, input_cpu, target, loss\n\n # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL\n def _test_DDP_helper(\n self, model, input_var, target, loss, scale_factor=1.0, memory_format=None\n ):\n model.train()\n output = model(input_var)\n l = loss(output, target) * scale_factor\n l.backward()\n if memory_format is not None:\n self.assertTrue(output.is_contiguous(memory_format=memory_format))\n\n def _assert_equal_param(self, param_gpu, param_DDP):\n self.assertEqual(len(param_gpu), len(param_DDP))\n for p_gpu, p_DDP in zip(param_gpu, param_DDP):\n self.assertEqual(p_gpu, p_DDP)\n\n def _test_DDP_niter(\n self,\n model_base,\n model_DDP,\n input,\n target,\n loss,\n local_bs,\n rank,\n batch_size,\n test_save,\n offset=None,\n world_size=0,\n zero_grad=False,\n memory_format=None,\n n_iter=5,\n ):\n for idx in range(n_iter):\n # single cpu/gpu training\n self._test_DDP_helper(\n model_base, input, target, loss, memory_format=memory_format\n )\n\n if offset is None:\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n world_size * local_bs / batch_size if world_size != 0 else 1,\n memory_format=memory_format,\n )\n\n # Update weights and run a second iteration to shake out errors\n if zero_grad:\n self._model_step_with_zero_grad(model_base)\n self._model_step_with_zero_grad(model_DDP)\n else:\n self._model_step(model_base)\n self._model_step(model_DDP)\n self._assert_equal_param(\n list(model_base.parameters()), list(model_DDP.module.parameters())\n )\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n # save the model in the middle and reload\n if test_save and idx == 2 and INIT_METHOD.startswith(\"file://\"):\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n with tempfile.TemporaryFile() as tmp_file:\n torch.save(model_DDP, tmp_file)\n tmp_file.seek(0)\n saved_model = torch.load(tmp_file)\n for k in model_DDP.state_dict():\n self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])\n\n def _test_DistributedDataParallel(\n self,\n gpu_subset,\n rank,\n output_device=None,\n gradient_as_bucket_view=False,\n static_graph=False,\n ):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = DDP_NET\n\n # single gpu training setup\n model_gpu = 
copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = copy.deepcopy(model)\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP,\n device_ids=gpu_subset,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n if static_graph:\n model_DDP._set_static_graph()\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # dummy data initialization\n local_bs = len(gpu_subset)\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):\n # Run a simple end to end DDP-CPU model, use result of single node\n # model as baseline\n group, group_id, rank = self._init_global_test()\n\n # cpu training setup\n model_base = DDP_NET\n\n # DDP-CPU training setup\n model_DDP = copy.deepcopy(model_base)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, gradient_as_bucket_view=gradient_as_bucket_view\n )\n\n # dummy data initialization\n local_bs = 2\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_base,\n model_DDP,\n input_cpu,\n target,\n loss,\n local_bs,\n rank,\n global_bs,\n False,\n zero_grad=True,\n )\n self._barrier()\n\n return model_DDP\n\n @unittest.skipIf(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU(self):\n self._test_DistributedDataParallelCPU()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_DistributedDataParallelCPU_grad_is_view(self):\n self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_DistributedDataParallel_requires_grad(self):\n # a module without gradients shouldn't be accepted\n self.assertRaises(\n RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_DistributedDataParallel_non_default_stream(self):\n stream = torch.cuda.Stream(self.rank)\n rank = self.rank\n with torch.cuda.stream(stream):\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]\n )\n for i in range(1000):\n # Clear gradients manually\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net.module.weight.grad\n avg = grad.clone()\n # All-reducing the gradient averages should give us the gradient\n # average. 
If not, then one of the workers has not correctly\n # written back the averaged gradient before this all-reduce call.\n dist.all_reduce(avg)\n world_size = int(os.environ[\"WORLD_SIZE\"])\n avg.div_(world_size)\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(\n avg[0, 0],\n expected_grad,\n msg=f\"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}\",\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_comm_hook_logging(self):\n hooks = [\n default.allreduce_hook,\n default.fp16_compress_hook,\n powerSGD.powerSGD_hook,\n powerSGD.batched_powerSGD_hook,\n quantization_hooks.quantization_pertensor_hook,\n quantization_hooks.quantization_perchannel_hook,\n ]\n\n cpp_builtin_hooks = [\n dist.BuiltinCommHookType.ALLREDUCE,\n dist.BuiltinCommHookType.FP16_COMPRESS,\n ]\n\n for hook in hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model.register_comm_hook(None, hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), hook.__qualname__)\n\n for hook in cpp_builtin_hooks:\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n ddp_model._register_builtin_comm_hook(hook)\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), str(hook))\n\n # No hook registered\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(self.rank),\n device_ids=[self.rank],\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Hook not registered yet, so should be empty\n self.assertEqual(ddp_logging_data.get(\"comm_hook\"), None)\n # After second forward pass, hook should still be empty string\n for i in range(2):\n inp = torch.ones(1, 1, device=self.rank)\n loss = ddp_model(inp).sum()\n loss.backward()\n\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"comm_hook\", \"\"), \"\")\n\n def _test_ddp_hook_with_optimizer_parity(self, grad_as_bucket_view, static_graph):\n rank = self.rank\n torch.cuda.set_device(rank)\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n models_to_test = [\n (LargeNet(), torch.randn(1, 1000).cuda()),\n ]\n if HAS_TORCHVISION:\n models_to_test.append(\n (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())\n )\n # Enable determinism in cudnn operators\n for (model, inp) in models_to_test:\n with torch.backends.cudnn.flags(\n enabled=True, deterministic=True, benchmark=False\n ):\n sgd_lr = 1e-2\n ddp_model_with_optimizer_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n 
gradient_as_bucket_view=grad_as_bucket_view\n )\n if static_graph:\n ddp_model_with_optimizer_hook._set_static_graph()\n\n # Register hook that runs allreduce + functional SGD step.\n allreduce_hook = default.allreduce_hook\n opt_hook_state = default.OptimizerHookState(\n _FunctionalSGD,\n sgd_lr,\n )\n ddp_model_with_optimizer_hook.register_comm_hook(\n None,\n default.hook_then_optimizer(allreduce_hook, opt_hook_state),\n )\n # Create DDP model with no hook that does optimizer after\n # backward.\n ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(),\n device_ids=[self.rank],\n gradient_as_bucket_view=grad_as_bucket_view\n )\n if static_graph:\n ddp_model_with_no_hook._set_static_graph()\n\n sgd_no_hook = torch.optim.SGD(\n ddp_model_with_no_hook.parameters(),\n lr=sgd_lr\n )\n\n # Verify parameters are equal initially.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Save old parameters to later verify optimizer modified them.\n opt_hook_init_params = copy.deepcopy(\n list(ddp_model_with_optimizer_hook.parameters())\n )\n\n # Run optimizer with hook model.\n for i in range(6):\n ddp_model_with_optimizer_hook.zero_grad()\n out = ddp_model_with_optimizer_hook(inp)\n loss = out.sum()\n loss.backward()\n\n dist.barrier()\n\n # Run regular model.\n for i in range(6):\n ddp_model_with_no_hook.zero_grad()\n out = ddp_model_with_no_hook(inp)\n loss = out.sum()\n loss.backward()\n sgd_no_hook.step()\n\n dist.barrier()\n\n # Now verify parameters are equal.\n for hook_param, allreduce_param in zip(\n ddp_model_with_optimizer_hook.parameters(),\n ddp_model_with_no_hook.parameters(),\n ):\n self.assertEqual(hook_param, allreduce_param)\n\n # Verify optimizer modified parameters, otherwise they would be\n # trivially equal above.\n self.assertNotEqual(\n opt_hook_init_params,\n list(ddp_model_with_optimizer_hook.parameters())\n )\n dist.barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @unittest.skipIf(\n IS_WINDOWS,\n \"FunctionalSGD not yet supported with Windows.\"\n )\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_hook_with_optimizer_parity(self):\n for grad_as_bucket_view, static_graph in itertools.product(\n [True, False],\n [True, False]\n ):\n self._test_ddp_hook_with_optimizer_parity(\n grad_as_bucket_view=grad_as_bucket_view,\n static_graph=static_graph\n )\n\n def _test_ddp_hook_parity(self, state, hook):\n rank = self.rank\n m = torch.nn.Linear(1, 5)\n try:\n process_group = state.process_group\n except AttributeError:\n process_group = state\n\n net_with_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n net_with_hook.register_comm_hook(state=state, hook=hook)\n net_without_hook = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(m).to(rank),\n device_ids=[rank],\n process_group=process_group,\n )\n for i in range(100):\n # Clear gradients manually.\n for g in [\n net_without_hook.module.weight.grad,\n net_with_hook.module.weight.grad,\n ]:\n if g is not None:\n g.requires_grad_(False)\n g.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net_without_hook(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = 
net_without_hook.module.weight.grad\n avg = grad.clone()\n expected_grad = (\n sum(i for i in range(dist.get_world_size())) / dist.get_world_size()\n )\n loss_hook = net_with_hook(batch).sum()\n loss_hook.backward()\n grad_hook = net_with_hook.module.weight.grad\n avg_hook = grad_hook.clone()\n # Verify hook grad with expected.\n # Cannot use exact match here due to a very small accuracy loss,\n # e.g. 1e-05, for powerSGD hook case.\n assert_func = (\n self.assertEqual\n if hook == default.allreduce_hook\n else torch.testing.assert_allclose\n )\n assert_func(\n avg_hook[0, 0],\n expected_grad,\n msg=f\"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}\",\n )\n # Verify hook grad with vanilla allreduce\n assert_func(\n avg_hook[0, 0],\n avg[0, 0],\n msg=f\"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}\",\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce(self):\n self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_allreduce_process_group(self):\n # process_group is passed in to both DDP and comm. hook\n rank_to_GPU = self._init_multigpu_helper()\n gpus = [rank_to_GPU[int(r)][0] for r in range(dist.get_world_size())]\n process_group = torch.distributed.new_group(gpus)\n self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_powerSGD(self):\n for warm_start in [True, False]:\n powersgd_state = powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n start_powerSGD_iter=2,\n warm_start=warm_start,\n )\n self._test_ddp_hook_parity(\n state=powersgd_state, hook=powerSGD.powerSGD_hook\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"MPI backend does not support DDP communication hook on CUDA devices\",\n )\n @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, \"Disabled for environments that \\\n don't support multiprocessing with spawn start method\")\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_ddp_hook_parity_post_localSGD(self):\n # Although we start run local SGD at iteration 10, since we still use the global process group to run it,\n # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations.\n state = post_localSGD.PostLocalSGDState(process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10)\n self._test_ddp_hook_parity(state=state, hook=post_localSGD.post_localSGD_hook)\n\n # Since we start local SGD later than the total number of 100 iterations,\n # no local SGD actually is executed, and we don't even need to provide a subgroup for this case.\n state = post_localSGD.PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=1000)\n self._test_ddp_hook_parity(state=state, hook=post_localSGD.post_localSGD_hook)\n\n def _prepare_single_device_module(\n self,\n rank,\n process_group,\n 
devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n device = devices[0] if devices else torch.device(\"cuda:%d\" % rank)\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model).to(device),\n device_ids=device_ids,\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n model.to(device)\n\n input = torch.randn(global_batch_size, 2).to(device)\n target = torch.randn(global_batch_size, 4).to(device)\n\n return model, ddp_model, input, target\n\n def _prepare_cpu_module(\n self,\n process_group,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model),\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n input = torch.randn(global_batch_size, 2)\n target = torch.randn(global_batch_size, 4)\n return model, ddp_model, input, target\n\n def _test_accumulate_gradients_no_sync(\n self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False\n ):\n \"\"\"\n This is the recommended way to implement accumulate grads.\n If ``ddp_comm_hook`` input was specified, it will also register that hook\n to the ``ddp_model``. The hook fed into this function should not change\n the resulting gradients.\n \"\"\"\n group, group_id, rank = self._init_global_test()\n world_size = get_world_size()\n\n # FIXME: Add testing for gloo/CUDA\n if BACKEND == \"mpi\" or BACKEND == \"gloo\":\n global_batch_size = world_size\n local_batch_size = 1\n model, ddp_model, input, target = self._prepare_cpu_module(\n group_id, global_batch_size, gradient_as_bucket_view\n )\n\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n int_devices = rank_to_GPU[rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n global_batch_size = world_size\n local_batch_size = len(devices)\n model, ddp_model, input, target = self._prepare_single_device_module(\n rank,\n group_id,\n devices,\n devices,\n global_batch_size,\n gradient_as_bucket_view,\n )\n\n if ddp_comm_hook is not None:\n ddp_model.register_comm_hook(group_id, ddp_comm_hook)\n\n def step_model(model, input, target):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n loss.backward()\n\n # ensure accumulate grads works with no_grad => no grads are accumulated.\n with torch.no_grad():\n with ddp_model.no_sync():\n ddp_model.train()\n ddp_model(input)\n\n # check two model parameters over num_iters iterations\n for iteration in range(num_iters):\n step_model(model, input, target)\n\n ddp_input = input[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n ddp_target = target[\n rank * local_batch_size : (rank + 1) * local_batch_size\n ]\n\n if iteration % num_iters == 0:\n # accumulate grads locally\n with ddp_model.no_sync():\n step_model(ddp_model, ddp_input, ddp_target)\n else:\n # sync grads\n step_model(ddp_model, ddp_input, ddp_target)\n\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n if not i.requires_grad:\n continue\n if iteration % num_iters == 0:\n self.assertNotEqual(i.grad, j.grad)\n else:\n self.assertEqual(i.grad, j.grad)\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + iteration)\n input = input[torch.randperm(global_batch_size)]\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n 
)\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync()\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_grad_is_view(self):\n \"\"\"\n Runs _test_accumulate_gradients_no_sync using default inputs\n \"\"\"\n self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True)\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync\n using allreduce hook and validates whether future result was properly\n passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch._C.Future:\n tensors = [bucket.get_tensor() / world_size]\n return group_id.allreduce(tensors).get_future()\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_hook\n )\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self):\n \"\"\"\n Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce\n hook that also uses then callbacks. In first then callback result is multiplied\n by 2, and the second callback divides the result by 2 * world_size. 
It validates\n whether final result was properly passed as gradients in reducer.\n \"\"\"\n\n world_size = get_world_size()\n\n def allreduce_with_then_hook(\n group_id: object, bucket: dist.GradBucket\n ) -> torch.futures.Future:\n fut = group_id.allreduce([bucket.get_tensor()]).get_future()\n\n def mult(fut):\n # Multiply the result by 2.\n return 2 * fut.wait()[0]\n\n def div(fut):\n # Divide the result by 2 * world_size.\n return fut.wait() / (2 * world_size)\n\n return fut.then(mult).then(div)\n\n self._test_accumulate_gradients_no_sync(\n num_iters=4, ddp_comm_hook=allreduce_with_then_hook\n )\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"get_future is only supported on mpi, nccl and gloo\",\n )\n @nccl_skip_if_lt_x_gpu(BACKEND, 2)\n def test_get_future(self):\n def mult(fut):\n return [t * 3 for t in fut.wait()]\n\n def add(fut):\n return [t + 1 for t in fut.wait()]\n\n group, group_id, rank = self._init_global_test()\n input = _build_tensor(3, 2)\n if BACKEND == \"nccl\":\n rank_to_GPU = self._init_multigpu_helper()\n device_id = rank_to_GPU[rank][0]\n input = input.to(device_id)\n fut = group_id.allreduce([input]).get_future()\n res = fut.then(mult).then(add).wait()\n expected = _build_tensor(3, 2 * len(group) * 3 + 1)\n\n self.assertEqual(res[0], expected)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n gpus = list(rank_to_GPU[rank])\n\n for use_bucket_view, static_graph in itertools.product(\n (False, True), (False, True)\n ):\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test output_device\n self._test_DistributedDataParallel(\n gpu_subset=gpus,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # test device_ids\n gpus_list = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel(\n gpu_subset=gpus_list,\n rank=rank,\n output_device=torch.device(\"cuda\"),\n gradient_as_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n def _test_DistributedDataParallel_with_amp(self, grad_is_view=False):\n torch.manual_seed(31415)\n # Creates model and optimizer in default precision\n model = copy.deepcopy(DDP_NET).cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.03)\n\n # Creates a GradScaler once at the beginning of training.\n scaler = GradScaler()\n\n ddp_model = nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view\n )\n\n input = torch.randn(dist.get_world_size() * 2, 2).cuda()\n target = torch.randn(dist.get_world_size() * 2, 4).cuda()\n loss_fn = nn.MSELoss()\n\n # verify grads are none before training\n for p in ddp_model.parameters():\n self.assertTrue(p is not None)\n self.assertTrue(p.grad is None)\n\n for idx in range(20):\n optimizer.zero_grad()\n # Runs the forward pass with autocasting.\n with autocast():\n output = ddp_model(input)\n loss = loss_fn(output, target)\n\n # Scales loss. 
Calls backward() on scaled loss to create scaled gradients.\n # Backward passes under autocast are not recommended.\n # Backward ops run in the same dtype autocast chose for corresponding forward ops.\n scaler.scale(loss).backward()\n\n # verify grads are not none and are valid during training\n for p in ddp_model.parameters():\n if p.requires_grad:\n self.assertTrue(p.grad is not None)\n self.assertFalse(p.grad.isnan().any())\n self.assertFalse(p.grad.isinf().any())\n\n # scaler.step() first unscales the gradients of the optimizer's assigned params.\n # If these gradients do not contain infs or NaNs, optimizer.step() is then called,\n # otherwise, optimizer.step() is skipped.\n scaler.step(optimizer)\n\n # Updates the scale for next iteration.\n scaler.update()\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + idx)\n input = input[torch.randperm(dist.get_world_size() * 2)]\n\n return ddp_model\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_with_amp_and_grad_is_view(self):\n torch.cuda.set_device(self.rank)\n ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=False\n )\n ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp(\n grad_is_view=True\n )\n for i, j in zip(\n ddp_model_grad_not_view.parameters(),\n ddp_model_grad_is_view.parameters(),\n ):\n self.assertEqual(i, j)\n\n def _test_DistributedDataParallel_SyncBatchNorm(\n self,\n gpu_subset,\n rank,\n local_bs,\n global_bs,\n offset,\n output_device=None,\n affine=True,\n ):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = BN_NET if affine else BN_NET_NO_AFFINE\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpu_subset\n )\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n if sys.platform == \"win32\":\n torch.save(model_DDP, tmp)\n tmp.seek(0)\n model_DDP = torch.load(tmp)\n else:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # data initialization\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n offset,\n dist.get_world_size(),\n 5 if affine else 2,\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self):\n group, group_id, rank = self._init_global_test()\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n model = ONLY_SBN_NET\n model_gpu = copy.deepcopy(model).cuda(rank)\n model_DDP = nn.parallel.DistributedDataParallel(\n model_gpu, device_ids=[rank]\n )\n\n memory_format = torch.channels_last\n input_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n 
.to(memory_format=memory_format)\n )\n target_gpu = (\n torch.randn(global_bs, 2, 4, 4, dtype=torch.float)\n .cuda(rank)\n .to(memory_format=memory_format)\n )\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_gpu,\n target_gpu,\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n bs_offset,\n dist.get_world_size(),\n memory_format=memory_format,\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n # test output_device\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n # test device_ids\n gpus = [torch.device(\"cuda:\" + str(i)) for i in gpus]\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device(\"cuda\"),\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = dist.get_world_size()\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n affine=False,\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = len(gpus) * 2\n global_bs = dist.get_world_size() * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # 
check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n @require_world_size(2)\n def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus)\n\n local_bs = 1\n global_bs = dist.get_world_size()\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_niter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(\n self,\n ):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n model = nn.parallel.DistributedDataParallel(\n ONLY_SBN_NET.cuda(rank), device_ids=[rank]\n )\n\n input_var = []\n for i in range(dist.get_world_size()):\n input_var_rank = torch.cat(\n [\n torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),\n torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)),\n ],\n dim=1,\n )\n input_var.append(input_var_rank)\n\n all_input_var = torch.cat(\n [\n x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1)\n for x in input_var\n ],\n dim=1,\n ).cuda(rank)\n\n for i in range(100):\n y = model(input_var[rank].cuda(rank))\n y.mean().backward()\n\n running_mean, running_var = (\n model.module.running_mean,\n model.module.running_var,\n )\n torch.testing.assert_allclose(running_mean, all_input_var.mean(1))\n torch.testing.assert_allclose(running_var, all_input_var.var(1))\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):\n group, group_id, rank = self._init_global_test()\n # only do single GPU per process\n gpus = [rank]\n\n # cpu training setup\n model = BN_NET\n\n num_processes = dist.get_world_size()\n local_bs = rank + 2\n bs_offset = int((rank + 3) * rank / 2)\n global_bs = int((num_processes + 3) * num_processes / 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n )\n\n def _test_ddp_logging_data(self, is_gpu):\n rank = dist.get_rank()\n model_DDP 
= copy.deepcopy(DDP_NET)\n if is_gpu:\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP.cuda(rank), device_ids=[rank]\n )\n else:\n model_DDP = nn.parallel.DistributedDataParallel(model_DDP)\n\n # dummy data initialization\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n if is_gpu:\n input = input.cuda(rank)\n target = target.cuda(rank)\n\n model_DDP._set_ddp_runtime_logging_sample_rate(2)\n\n for idx in range(20):\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n\n self._model_step_with_zero_grad(model_DDP)\n\n # Verify DDP logging data is sampled as expected\n # If it has ran more than 10 iteratons and this is\n # the sampled iteration for measuring run time stats,\n # the run time stats for this idx-th iteration will not\n # be zeros.\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n if idx > 0 and (idx < 10 or idx % 2 == 0):\n self.assertGreaterEqual(\n ddp_logging_data.get(\"forward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_compute_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"backward_comm_time\"),\n ddp_logging_data.get(\"backward_compute_comm_overlap_time\"),\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), idx)\n elif idx > 0:\n # if the idx-th iteration is not sampled to set runtime stats,\n # ddp_logging_data.iteration will not be updated to current\n # iteration.\n self.assertNotEqual(ddp_logging_data.get(\"iteration\"), idx)\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n return model_DDP\n\n @unittest.skipIf(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_ddp_logging_data_cpu(self):\n def parse_env(var):\n return os.environ[var] if var in os.environ else \"N/A\"\n\n os.environ[\"TORCH_DISTRIBUTED_DEBUG\"] = \"INFO\"\n group, group_id, rank = self._init_global_test()\n model_DDP = self._test_ddp_logging_data(is_gpu=False)\n\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"world_size\"), dist.get_world_size())\n self.assertEqual(ddp_logging_data.get(\"rank\"), dist.get_rank())\n self.assertEqual(ddp_logging_data.get(\"module_name\"), \"Net\")\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), \"\")\n # output_device is -1 in default if it is not set, e.g.\n # output_device of CPU training is -1.\n self.assertEqual(ddp_logging_data.get(\"output_device\"), -1)\n self.assertEqual(ddp_logging_data.get(\"broadcast_buffers\"), 1)\n self.assertEqual(ddp_logging_data.get(\"bucket_cap_bytes\"), 25 * 1024 * 1024)\n self.assertEqual(ddp_logging_data.get(\"find_unused_parameters\"), 0)\n self.assertEqual(ddp_logging_data.get(\"gradient_as_bucket_view\"), 0)\n self.assertEqual(\n ddp_logging_data.get(\"backend_name\"), dist.get_backend(group_id)\n )\n self.assertEqual(ddp_logging_data.get(\"iteration\"), 18)\n params = list(model_DDP.parameters())\n num_params = 0\n param_size = 0\n params = list(\n parameter\n for parameter in filter(\n lambda parameter: parameter.requires_grad, params\n )\n )\n for p in params:\n num_params += 1\n 
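            # numel() * element_size() is the per-tensor storage footprint in
            # bytes, so param_size accumulates the value that is compared with
            # total_parameter_size_bytes (and bucket_sizes) reported by DDP
            # logging below.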
param_size += p.numel() * p.element_size()\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"float\")\n self.assertEqual(\n ddp_logging_data.get(\"total_parameter_size_bytes\"), param_size\n )\n self.assertEqual(ddp_logging_data.get(\"num_parameter_tensors\"), num_params)\n self.assertEqual(ddp_logging_data.get(\"bucket_sizes\"), str(param_size))\n self.assertEqual(\n ddp_logging_data.get(\"master_port\"), parse_env(\"MASTER_PORT\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"master_addr\"), parse_env(\"MASTER_ADDR\")\n )\n self.assertEqual(\n ddp_logging_data.get(\"torch_distributed_debug\"),\n parse_env(\"TORCH_DISTRIBUTED_DEBUG\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"cuda_visible_devices\"),\n parse_env(\"CUDA_VISIBLE_DEVICES\"),\n )\n if ddp_logging_data.get(\"backend_name\") == \"gloo\":\n self.assertEqual(\n ddp_logging_data.get(\"gloo_socket_ifname\"),\n parse_env(\"GLOO_SOCKET_IFNAME\"),\n )\n self.assertEqual(\n ddp_logging_data.get(\"gloo_device_transport\"),\n parse_env(\"GLOO_DEVICE_TRANSPORT\"),\n )\n self.assertEqual(ddp_logging_data.get(\"nccl_socket_ifname\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_blocking_wait\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_async_error_handling\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_debug\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_nthreads\"), None)\n self.assertEqual(ddp_logging_data.get(\"nccl_ib_timeout\"), None)\n # test runtime logging fields\n # Note: DETAIL debug mode logs DDP logging data to stdout and\n # thus accesses std::map, which fills in a default value for the\n # type if it didn't exist.\n self.assertEqual(ddp_logging_data.get(\"unused_parameter_size\", 0), 0)\n self.assertEqual(ddp_logging_data.get(\"has_rebuilt_buckets\"), 1)\n self.assertEqual(\n ddp_logging_data.get(\"rebuilt_bucket_sizes\"), str(param_size)\n )\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"), 1\n )\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_backward_comm_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n # test larger net with mixed data types, verify multiple bucket sizes\n model = LargeNet()\n model.float()\n model.fc1.double()\n model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n params = list(model_DDP.parameters())\n self.assertEqual(\n ddp_logging_data.get(\"bucket_cap_bytes\"), int(1.5 * 1024 * 1024)\n )\n bucket_sizes = [\n params[1].numel() * params[1].element_size(),\n params[0].numel() * params[0].element_size(),\n ]\n self.assertEqual(\n ddp_logging_data.get(\"bucket_sizes\"),\n \", \".join(str(x) for x in bucket_sizes),\n )\n self.assertEqual(ddp_logging_data.get(\"dtypes\"), \"double, float\")\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_no_gpu\n def test_ddp_logging_data_gpu(self):\n group, group_id, rank = self._init_global_test()\n model_DDP = 
self._test_ddp_logging_data(is_gpu=True)\n ddp_logging_data = model_DDP._get_ddp_logging_data()\n self.assertEqual(ddp_logging_data.get(\"device_ids\"), str(rank))\n self.assertEqual(ddp_logging_data.get(\"output_device\"), rank)\n # test runtime logging fields\n # It is hard to test accurate latency, but it can test whether the latency is\n # a valid value and in the expected range.\n self.assertGreaterEqual(ddp_logging_data.get(\"avg_forward_compute_time\"), 1)\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"), 1\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_compute_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n self.assertGreaterEqual(\n ddp_logging_data.get(\"avg_backward_comm_time\"),\n ddp_logging_data.get(\"avg_backward_compute_comm_overlap_time\"),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\")\n def test_static_graph_api_cpu(self):\n model_DDP = nn.parallel.DistributedDataParallel(DDP_NET)\n model_DDP._set_static_graph()\n self.assertEqual(\n model_DDP._get_ddp_logging_data().get(\"static_graph\"), True\n )\n expected_err = \"should be called before training loop starts\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n local_bs = 2\n batch_size, input, target, loss = self._prepare_dummy_data(local_bs)\n offset = dist.get_rank() * local_bs\n\n # DDP training, DDP scatters subsets of input to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset : offset + local_bs],\n target[offset : offset + local_bs],\n loss,\n 1,\n )\n model_DDP._set_static_graph()\n\n # Verify error was logged in ddp_logging_data.\n verify_ddp_error_logged(model_DDP, expected_err)\n\n @skipIfNoTorchVision\n def test_SyncBatchNorm_process_group(self):\n # When adopting `convert_sync_batchnorm` to convert a `nn.modules`,\n # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm`\n # is nested in a sub-module or sub-sub-module (e.g. 
resnet50 in torchvision.models).\n\n process_ids = 0\n process_group = torch.distributed.new_group([process_ids])\n res50_model = torchvision.models.resnet50()\n res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(res50_model), process_group\n )\n process_group_sync = res50_model_sync.layer1[0].bn1.process_group\n self.assertEqual(process_group_sync, process_group)\n\n def _run_reduction_test(\n self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None\n ):\n if reduction_fn != dist.all_reduce and dst is None:\n raise ValueError(f\"Reduction fn {reduction_fn} must specify dst!\")\n if dst is not None:\n reduction_fn(tensor, dst, op)\n # Only destination rank tensor is expected to have final result.\n if dist.get_rank() == dst:\n self.assertEqual(tensor, expected_tensor)\n else:\n reduction_fn(tensor, op)\n self.assertEqual(tensor, expected_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allreduce(self):\n torch.cuda.set_device(self.rank)\n # Run all_reduce with PRODUCT\n element = self.rank % 2 == 0\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([False, False]).to(self.rank), op\n )\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(input_tensor, expected_tensor, op)\n\n # Run all_reduce with SUM\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([True, True]).to(self.rank), op\n )\n # TODO: NCCL backend does not work correctly for bitwise reduction ops\n # (see https://github.com/pytorch/pytorch/issues/41362). 
Add tests for\n # these once it is supported.\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_allgather(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, True]}\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n # Preserve a copy of the tensor to compare against after allgather.\n input_tensor_copy = input_tensor.clone()\n tensor_list = [\n torch.tensor([False, False]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, input_tensor)\n\n self.assertEqual(len(tensor_list), dist.get_world_size())\n for i, t in enumerate(tensor_list):\n expected = torch.tensor(inp[i % 2]).to(self.rank)\n self.assertEqual(t, expected)\n # Ensure that the input tensor is not modified, since this collective\n # does not modify its input.\n self.assertEqual(input_tensor_copy, input_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_nccl_backend_bool_reduce(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, False]}\n # Run reduce() with product op\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = torch.tensor([False, False]).to(self.rank)\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(\n input_tensor, expected_tensor, op, dist.reduce, dst=0\n )\n\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = (\n torch.tensor([True, True]).to(self.rank)\n if self.rank == 0\n else input_tensor.clone()\n )\n self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_bool_broadcast(self):\n tensor_size = 10\n bcast_tensor = torch.tensor(\n [\n (random.random() < 0.5 if self.rank == 0 else False)\n for _ in range(tensor_size)\n ]\n ).to(self.rank)\n dist.broadcast(bcast_tensor, src=0)\n # Now allgather and ensure the tensors are equal.\n tensor_list = [\n torch.tensor([False for _ in range(tensor_size)]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, bcast_tensor)\n expected = tensor_list[0]\n for tensor in tensor_list[1:]:\n self.assertEqual(tensor, expected)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_DistributedSampler_padding(self):\n # Tests padding of distributed sampler.\n world_size = dist.get_world_size()\n\n # Simulates the 'casual' dataset size\n dataset_size = 100 + world_size + 1\n dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]\n\n # Simulates the 'tiny' dataset size\n dataset_tiny_size = max(world_size // 2 - 1, 1)\n dataset_tiny = [\n torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)\n ]\n\n # Specifying drop_last=True will cause the tail of the data to be dropped.\n dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)\n local_num_samples, 
local_dataset_size = (\n dist_sampler.num_samples,\n dist_sampler.total_size,\n )\n # The effective dataset size should be the greatest integer that is <=\n # dataset_size that is divisible by the world_size. This is to ensure each\n # rank processes the same number of samples.\n effective_dataset_size = (\n math.ceil((dataset_size - world_size) / world_size)\n if dataset_size % world_size != 0\n else dataset_size / world_size\n )\n self.assertEqual(local_num_samples, effective_dataset_size)\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler))\n self.assertEqual(len(indices_list), local_num_samples)\n\n def validate_global_samples(local_num_samples):\n # Ensure that each rank processes the same number of samples.\n world_samples = [\n torch.LongTensor([0]).to(self.rank) for _ in range(world_size)\n ]\n dist.all_gather(\n world_samples, torch.tensor([local_num_samples]).to(self.rank)\n )\n world_samples = [sample.item() for sample in world_samples]\n self.assertEqual(len(set(world_samples)), 1)\n\n validate_global_samples(local_num_samples)\n\n # drop_last=False is the default and will add additional indices to be sampled,\n # increasing the effective dataset size.\n dist_sampler_added_samples = DistributedSampler(dataset=dataset)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples.num_samples,\n dist_sampler_added_samples.total_size,\n )\n # The effective dataset size is the smallest integer that is >= dataset_size\n # and divisible by the world size.\n self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size))\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples))\n self.assertEqual(len(indices_list), local_num_samples)\n\n # Ensure that each rank processes the same number of samples.\n validate_global_samples(local_num_samples)\n\n # Ensure additional samples are padded even when\n # the extremely small dataset is given.\n dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples_tiny.num_samples,\n dist_sampler_added_samples_tiny.total_size,\n )\n self.assertEqual(\n local_num_samples, math.ceil(dataset_tiny_size / world_size)\n )\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples_tiny))\n self.assertEqual(len(indices_list), local_num_samples)\n validate_global_samples(local_num_samples)\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_allgather_object(self):\n # Only set device for NCCL backend since it must use GPUs.\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n torch.cuda.set_device(next_rank)\n\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n 
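        # Run the collective once more into a freshly allocated placeholder
        # list. all_gather_object serializes each input with pickle under the
        # hood, so the gathered objects only need to be picklable; the device
        # handling required for NCCL was already done by set_device above.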
dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n @require_backend({\"gloo\"})\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support gather\")\n def test_gather_object(self):\n # Ensure stateful objects can be gathered\n gather_objects = COLLECTIVES_OBJECT_TEST_LIST\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n my_rank = dist.get_rank()\n dist.gather_object(\n gather_objects[self.rank % len(gather_objects)],\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n if my_rank != gather_on_rank:\n self.assertEqual(\n output_gathered, [None for _ in range(dist.get_world_size())]\n )\n else:\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n # Validate errors when objects can't be pickled.\n class Bar:\n pass\n\n b = Bar()\n gather_objects = [b for _ in range(dist.get_world_size())]\n with self.assertRaisesRegex(AttributeError, \"Can't pickle local object\"):\n dist.all_gather_object(\n [None for _ in range(dist.get_world_size())],\n gather_objects[self.rank],\n )\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_gather_object_err(self):\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n # Case where rank != GPU device.\n my_rank = dist.get_rank()\n next_rank = (my_rank + 1) % dist.get_world_size()\n torch.cuda.set_device(next_rank)\n with self.assertRaisesRegex(\n RuntimeError, \"ProcessGroupNCCL does not support gather\"\n ):\n dist.gather_object(\n \"foo\",\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n\n def validate_net_equivalence(self, net):\n # Helper to validate synchronization of nets across ranks.\n net_module_states = list(net.module.state_dict().values())\n # Check that all tensors in module's state_dict() are equal.\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for tensor in tensor_list:\n self.assertEqual(tensor, t)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_sync_params_and_buffers(self):\n # Test that after calling _sync_params_and_buffers, models across ranks\n # are the same and are equal to the model on the input rank.\n dim = 2\n rank = self.rank\n rank_to_broadcast = 1\n # Seed to ensure that ranks are initialized with different initial models.\n torch.manual_seed(rank)\n model = nn.Linear(dim, dim, bias=False)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n new_model = nn.Linear(dim, dim, bias=False).cuda(rank)\n net.module = copy.deepcopy(new_model)\n # Assert params are different\n net_module_states = list(net.module.state_dict().values())\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for i, tensor in enumerate(tensor_list):\n if i == rank:\n self.assertEqual(t, tensor)\n else:\n # tensor from another rank should be different.\n self.assertNotEqual(t, tensor)\n\n net._sync_params_and_buffers(authoritative_rank=rank_to_broadcast)\n # Now all model params should be the same.\n 
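        # validate_net_equivalence() all_gathers every tensor in the module's
        # state_dict() and asserts that each rank holds an identical copy.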
self.validate_net_equivalence(net)\n # Since the network params were broadcast from rank_to_broadcast, validate that\n # they are the same as new_model on rank_to_broadcast.\n if rank == rank_to_broadcast:\n expected_states = new_model.state_dict().values()\n for t, expected in zip(net_module_states, expected_states):\n self.assertEqual(t, expected)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_grad_div_uneven_inputs(self):\n # Test gradient division during training with join() API. If\n # divide_by_initial_world_size=False, we scale by the effective world\n # size when allreducing grads.\n dim = 5\n batch = 1\n grad_scale = 50\n rank = self.rank\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.ones(batch, dim, device=self.rank) * grad_scale\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n n_iters = 3\n if self.rank > 0:\n n_iters += 2\n\n with net.join(divide_by_initial_world_size=False):\n for _ in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n # The grad is always expected_grad, since we divide by the number\n # of currently active processes and inactive processes contribute\n # zero gradient. If we kept dividing by static initial world\n # size as processes leave, the grad would be smaller.\n expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grads so that it's the same every iteration\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n # If divide_by_initial_world_size=True (default), we always scale grads\n # by the initial world_size.\n with net.join(divide_by_initial_world_size=True):\n for i in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n effective_ws = dist.get_world_size()\n if i >= 3:\n effective_ws -= 1\n expected_grad = (\n torch.ones(dim, dim, device=self.rank)\n * grad_scale\n * effective_ws\n ) / dist.get_world_size()\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grad so that it's the same every iteration.\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n def _test_ddp_profiling(self, profiler_ctx):\n batch = 3\n dim = 10\n num_iters = 6\n torch.cuda.set_device(self.rank)\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n profiler_ctx_copy = copy.deepcopy(profiler_ctx)\n\n with profiler_ctx as prof:\n for i in range(num_iters):\n loss = net(inp).sum()\n loss.backward()\n\n all_reduce_event_name = f\"{dist.get_backend()}:all_reduce\"\n events = get_profiling_event(all_reduce_event_name, prof)\n event_count = sum(e.count for e in events)\n self.assertEqual(event_count, num_iters)\n for event in events:\n self.assertTrue(event.is_async)\n self.assertEqual(event.name, all_reduce_event_name)\n\n broadcast_event_name = f\"{dist.get_backend()}:broadcast\"\n broadcast_events = get_profiling_event(broadcast_event_name, prof)\n event_count = sum(e.count for e in broadcast_events)\n # Broadcast is called during rebuild_buckets\n self.assertGreaterEqual(event_count, 1)\n for event in broadcast_events:\n self.assertEqual(event.name, broadcast_event_name)\n\n # Run DDP with profiling for a few iterations, 
then enable profiling\n # for a single pass, and ensure it is recorded. This tests that the\n # thread local state is correctly updated.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n for i in range(3):\n loss = net(inp).sum()\n loss.backward()\n # Now enable the profiler.\n with profiler_ctx_copy as prof:\n loss = net(inp).sum()\n loss.backward()\n\n events = get_profiling_event(all_reduce_event_name, prof)\n self.assertGreaterEqual(len(events), 1)\n self.assertGreaterEqual(events[0].count, 1)\n self.assertEqual(events[0].name, all_reduce_event_name)\n for event in events:\n self.assertTrue(event.is_async)\n # Ensure searching unused parameters was profiled\n events = get_profiling_event(\"search_unused_parameters\", prof)\n self.assertEqual(len(events), 1)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_profiling_autograd_profiler(self):\n autograd_profiler_ctx = torch.autograd.profiler.profile()\n return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(IS_FBCODE, \"Kineto in fbcode code causes hang\")\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124\",\n )\n def test_ddp_profiling_torch_profiler(self):\n cpu_act = torch.profiler.ProfilerActivity.CPU\n cuda_act = torch.profiler.ProfilerActivity.CUDA\n torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act])\n self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_join_model_equivalence(self):\n # Verifies equivalence with model training locally and with DDP under\n # the join context manager.\n batch = 3\n dim = 10\n learning_rate = 0.03\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n local_model = copy.deepcopy(model)\n local_model = local_model.cuda(self.rank)\n rank_to_iter_mapping = {\n rank: 2 * (rank + 1) for rank in range(dist.get_world_size())\n }\n # run local model\n local_iters = sum(rank_to_iter_mapping.values())\n local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)\n for _ in range(local_iters):\n local_optim.zero_grad()\n out = local_model(inp)\n loss = out.sum()\n loss.backward()\n local_optim.step()\n\n # run DDP model with join API\n num_iters = rank_to_iter_mapping[self.rank]\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n ddp_optim = torch.optim.SGD(\n model.parameters(), lr=learning_rate * dist.get_world_size()\n )\n with net.join():\n for i in range(num_iters):\n ddp_optim.zero_grad()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n torch.cuda.synchronize(device=self.rank)\n ddp_optim.step()\n\n # Validate model state dicts are equal\n for (_, local_tensor), (_, dist_tensor) in zip(\n local_model.state_dict().items(), net.module.state_dict().items()\n ):\n self.assertEqual(local_tensor, dist_tensor)\n\n def _run_uneven_inputs_test(\n self,\n test_case,\n iteration_mapping,\n find_unused_params,\n ):\n model = test_case.model\n inp = test_case.inp\n rank = self.rank\n sync_interval = 
test_case.sync_interval
        torch.cuda.set_device(rank)
        # Ensure all outstanding GPU work is complete so this test runs independently.
        dist.barrier()
        # Bucket_cap_mb is intentionally low to test allreduce scheduling when
        # there are many buckets.
        net = torch.nn.parallel.DistributedDataParallel(
            model.cuda(rank),
            device_ids=[rank],
            bucket_cap_mb=1,
            find_unused_parameters=find_unused_params,
        )
        # Register hook if specified
        if test_case.hook is not None:
            net.register_comm_hook(test_case.state, test_case.hook)
            print(f"registered hook {test_case.hook}")

        # Determine num iters for this rank via the passed in mapping.
        num_iters = iteration_mapping[rank]
        # If we throw when the earliest rank terminates, we should ensure
        # that we iterate for that minimum number of times.
        num_iters_tensor = torch.tensor(
            [num_iters], device=torch.cuda.current_device()
        )
        dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN)
        min_num_iters = num_iters_tensor.item()
        total_iters = 0
        if test_case.throw_on_early_termination:
            if min_num_iters == num_iters:
                # Early termination rank(s)
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError, f"Rank {self.rank} exhausted all inputs"
                )
            else:
                # Non early termination rank
                exception_ctx = self.assertRaisesRegex(
                    RuntimeError,
                    "Detected at least one rank that exhausted inputs.",
                )
        else:
            exception_ctx = suppress()
        with exception_ctx:
            with net.join(
                throw_on_early_termination=test_case.throw_on_early_termination
            ):
                for i in range(num_iters):
                    # Use model.no_sync() to disable grad synchronization every
                    # sync_interval.
                    if i % sync_interval != 0:
                        context = net.no_sync()
                    else:
                        context = suppress()
                    with context:
                        if isinstance(inp, tuple):
                            loss = net(*inp).sum()
                        else:
                            loss = net(inp).sum()
                        loss.backward()
                        self._model_step(net)
                        # Ensure completion of GPU kernels (including allreduce). If the
                        # join API is not properly implemented, then this should hang
                        # since the allreduce will hang.
                        torch.cuda.synchronize(device=rank)
                    total_iters += 1
        if test_case.throw_on_early_termination:
            # Ensure we iterated min_num_iters times.
            self.assertEqual(total_iters, min_num_iters)
        else:
            # Ensure we iterated at least min_num_iters times.
            self.assertGreaterEqual(total_iters, min_num_iters)

        # Ensure completion of all GPU kernels.
        torch.cuda.synchronize(device=rank)
        # When throwing on early rank termination, we do not
        # broadcast model state from an authoritative rank.
All models\n # should already be in sync.\n if not test_case.throw_on_early_termination:\n self.assertTrue(net._authoritative_rank)\n # All ranks should have agreed on the same authoritative_rank!\n final_rank_tensor = torch.tensor(\n [net._authoritative_rank], device=self.rank\n )\n tensor_list = [\n torch.zeros_like(final_rank_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, final_rank_tensor)\n max_rank = dist.get_world_size() - 1\n self.assertSetEqual(\n {max_rank}, set(tensor.item() for tensor in tensor_list)\n )\n # Ensure that all models are the same across ranks after all have joined.\n self.validate_net_equivalence(net)\n # Ensure that running with DDP uneven inputs was logged.\n ddp_logging_data = net._get_ddp_logging_data()\n self.assertTrue(ddp_logging_data.get(\"join_uneven_inputs\"))\n dist.barrier()\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs_stop_iteration_sync_bn(self):\n # Tests that uneven inputs join handler correctly throws StopIteration\n # for models with SyncBN or general collective comm when\n # throw_on_early_termination=True.\n class ModelWithComm(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(2, 40, bias=False)\n\n def forward(self, x):\n x = self.lin(x)\n dist.all_reduce(x)\n return x\n\n torch.cuda.set_device(self.rank)\n model_bn = BN_NET\n model_bn = nn.SyncBatchNorm.convert_sync_batchnorm(\n copy.deepcopy(model_bn)\n ).cuda(self.rank)\n comm_model = ModelWithComm().cuda(self.rank)\n model_input = torch.randn(10, 2).cuda(torch.cuda.current_device())\n\n for model in [model_bn, comm_model]:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n min_num_iters = 5\n if self.rank != 0:\n # Early termination rank(s)\n num_iters = min_num_iters\n exception_ctx = self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} exhausted all inputs\"\n )\n else:\n # Non early termination rank\n num_iters = min_num_iters * 2\n exception_ctx = self.assertRaisesRegex(\n RuntimeError,\n \"Detected at least one rank that exhausted inputs.\",\n )\n n = 0\n with exception_ctx:\n with model.join(throw_on_early_termination=True):\n for i in range(num_iters):\n loss = model(model_input).sum()\n loss.backward()\n self._model_step(model)\n n += 1\n\n self.assertEqual(n, min_num_iters)\n # Verify model equivalence\n self.validate_net_equivalence(model)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_inputs(self):\n dim = 1000\n batch = 1\n # Create a variety of models to run uneven input tests on.\n large_model = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 32, 5),\n nn.ReLU(),\n nn.Conv2d(32, 256, 5),\n nn.ReLU(),\n )\n small_model = nn.Linear(dim, dim, bias=False)\n bn_net = BatchNormNet()\n\n class UnusedParamModule(nn.Module):\n def __init__(self, unused_params_rank):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n self.unused_params_rank = unused_params_rank\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return (\n self.t1(self.t0(x))\n if rank != self.unused_params_rank\n else self.t1(x)\n )\n\n unjoined_rank_with_unused_params_model = UnusedParamModule(1)\n joined_rank_with_unused_params_model = 
UnusedParamModule(0)\n\n rank = self.rank\n models_to_test = [\n # Network with batchnorm\n DDPUnevenTestInput(\n name=\"batch_norm_net\",\n model=bn_net,\n inp=torch.ones(batch, 2, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"large_conv_model\",\n model=large_model,\n inp=torch.ones(batch, batch, dim, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model\",\n model=small_model,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does not join early has unused params\n DDPUnevenTestInput(\n name=\"unjoined_rank_with_unused_params_model\",\n model=unjoined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n # Unused parameter test where rank that does join early has unused params\n DDPUnevenTestInput(\n name=\"joined_rank_with_unused_params_model\",\n model=joined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n sync_interval=1,\n ),\n ]\n\n # Test models that have hook installed.\n models_with_hook = [\n DDPUnevenTestInput(\n name=\"small_model_allreduce_hook\",\n model=small_model,\n hook=default.allreduce_hook,\n state=None,\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n DDPUnevenTestInput(\n name=\"small_model_power_sgd_hook\",\n model=small_model,\n hook=powerSGD.powerSGD_hook,\n state=powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n # Config so that powerSGD runs immediately instead of\n # allreduce.\n start_powerSGD_iter=1,\n warm_start=False,\n use_error_feedback=False,\n ),\n inp=torch.ones(batch, dim, device=rank),\n sync_interval=1,\n ),\n ]\n models_to_test.extend(models_with_hook)\n\n # Add resnet model if we have torchvision installed.\n if HAS_TORCHVISION:\n resnet_model = torchvision.models.resnet50()\n models_to_test.append(\n DDPUnevenTestInput(\n name=\"resnet_model\",\n model=resnet_model,\n inp=torch.ones(1, 3, 1000, 1000),\n sync_interval=1,\n )\n )\n\n # Test with no_sync every 2, 3, 4, ... 
iterations.\n models_with_sync = []\n for i, test_input in enumerate(models_to_test):\n models_with_sync.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=i + 2,\n )\n )\n\n throw_on_early_term_tests = []\n for test_input in models_to_test:\n throw_on_early_term_tests.append(\n DDPUnevenTestInput(\n name=test_input.name,\n model=test_input.model,\n inp=test_input.inp,\n sync_interval=test_input.sync_interval,\n throw_on_early_termination=True,\n )\n )\n\n models_to_test.extend(models_with_sync)\n models_to_test.extend(throw_on_early_term_tests)\n\n # 0 iteration tests for when one process does not train model at all, so\n # we must shadow the broadcast calls made when rebuilding buckets.\n baseline_num_iters = [0, 5]\n iteration_offsets = [2, 3, 10]\n num_uneven_ranks = [1]\n if dist.get_world_size() > 2:\n num_uneven_ranks.append(2)\n iteration_mappings = []\n # Generate rank : num_iters mappings for various uneven input scenarios.\n # This includes cases where rank 0 joins early and all other ranks join\n # later, and scenarios where multiple ranks join early, but at different\n # iterations, and later ranks join later.\n for num_early_join_ranks in num_uneven_ranks:\n for baseline_iter in baseline_num_iters:\n for offset in iteration_offsets:\n mapping = {\n rank: baseline_iter\n for rank in range(0, num_early_join_ranks)\n }\n # if num_early_join_ranks > 1, ranks > 0 that will join early\n # iterate offset//2 more times than rank 0, to test nodes\n # depleting inputs at different times.\n if num_early_join_ranks > 1:\n for rank in mapping.keys():\n if rank > 0:\n mapping[rank] += offset // 2\n mapping.update(\n {\n rank: baseline_iter + offset\n for rank in range(\n num_early_join_ranks, dist.get_world_size()\n )\n }\n )\n iteration_mappings.append(mapping)\n\n for (test_case, iteration_mapping) in itertools.product(\n models_to_test, iteration_mappings\n ):\n if self.rank == 0:\n print(\n f\"\"\"Running test: {test_case.name} sync interval\n {test_case.sync_interval} with iteration mapping\n {iteration_mapping}\"\"\"\n )\n self._run_uneven_inputs_test(\n test_case,\n iteration_mapping,\n find_unused_params=(\"unused_params_model\" in test_case.name),\n )\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n def test_ddp_uneven_input_join_disable(self):\n # tests that if net.join() with enable=False is specified, DDP works as\n # expected with even inputs.\n torch.manual_seed(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1) * self.rank\n n_iters = 5\n world_size = dist.get_world_size()\n with net.join(enable=False):\n for _ in range(n_iters):\n # Clear grads\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n # Validate gradients to ensure that we divide by the correct\n # world_size when join mode is disabled.\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(net.module.weight.grad.item(), expected_grad)\n\n join_config = net._join_config\n self.assertFalse(join_config.enable)\n self.validate_net_equivalence(net)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n 
def test_ddp_uneven_input_exception(self):\n # Tests that exceptions during training are correctly propagated by the\n # context manager.\n error_str = \"Intentional error\"\n\n class ExceptionModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.param = nn.Parameter(torch.ones(1, requires_grad=True))\n\n def forward(self, _):\n raise ValueError(error_str)\n\n exception_module = ExceptionModule()\n net = torch.nn.parallel.DistributedDataParallel(\n exception_module.cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1)\n with self.assertRaisesRegex(ValueError, error_str):\n with net.join():\n out = net(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(\n int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"]\n )\n def test_broadcast_object_list(self):\n # Only set device for NCCL backend since it must use GPUs.\n # Case where rank != GPU device.\n next_rank = (self.rank + 1) % int(self.world_size)\n backend = os.environ[\"BACKEND\"]\n if backend == \"nccl\":\n torch.cuda.set_device(next_rank)\n\n src_rank = 0\n # If GPU test, add object with GPU tensor\n if backend == \"nccl\":\n COLLECTIVES_OBJECT_TEST_LIST.append(Foo(torch.randn(3, 3, device=0)))\n\n objects = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n\n # Single object test with device specified. Backend=\"gloo\", device=cpu\n if backend != \"nccl\":\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0, group=None, device=torch.device('cpu'))\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. Backend=\"gloo\", device=current_device+1\n # The test is gated by the fact GPU count is the same as world size to avoid the case\n # when backend is gloo but there is no multiple GPU devices.\n if backend != \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0, group=None, device=torch.device(next_rank))\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test with device specified. 
Backend=\"nccl\", device=current_device+1\n if backend == \"nccl\" and torch.cuda.device_count() == int(self.world_size):\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0, group=None, device=torch.device(next_rank))\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Single object test: backward compatibility with device unspecified\n single_obj_list = [objects[0]]\n if self.rank != src_rank:\n self.assertNotEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n dist.broadcast_object_list(single_obj_list, src=0)\n self.assertEqual(single_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[0])\n\n # Multiple input objects test\n if self.rank != src_rank:\n self.assertNotEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n dist.broadcast_object_list(objects, src=0)\n self.assertEqual(objects, COLLECTIVES_OBJECT_TEST_LIST)\n\n def _test_ddp_ignore_params_arg(self, static_graph=False):\n class TestModel(nn.Module):\n def __init__(self, rank):\n self.rank = rank\n super(TestModel, self).__init__()\n self.fc1 = nn.Linear(1, 1, bias=False)\n # Proxy that will be materialized to another architecture later.\n # (after wrapping model with DDP)\n if self.rank == 0:\n self.fc2 = nn.Linear(1, 10, bias=False)\n else:\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n device_id = self.rank\n # Ensure the test works for both find_unused_parameter and broadcast_buffer settings.\n for (find_unused, broadcast_buffers) in itertools.product(\n [False, True], [False, True]\n ):\n model = TestModel(self.rank).float().to(device_id)\n # Note that the model can have different shape buffers if we pass\n # them in to be ignored as well.\n model.fc2.register_buffer(\n \"ignore_buffer\", torch.zeros(5 + self.rank, device=self.rank)\n )\n proxy_params = list(model.fc2.parameters())\n proxy_buffers = list(model.fc2.buffers())\n model_fc2_name = [\n module_name\n for module_name, module in model.named_modules()\n if module is model.fc2\n ][0]\n proxy_param_names = [\n f\"{model_fc2_name}.{param_name}\"\n for param_name, _ in model.fc2.named_parameters()\n ]\n proxy_buffer_names = [\n f\"{model_fc2_name}.{buf_name}\"\n for buf_name, _ in model.fc2.named_buffers()\n ]\n # Specify that we should ignore proxy_params since it will be\n # materialized later.\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, proxy_param_names + proxy_buffer_names\n )\n ddp = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[device_id],\n find_unused_parameters=find_unused,\n broadcast_buffers=broadcast_buffers,\n )\n if static_graph:\n ddp._set_static_graph()\n # Materialize new params. 
These are not registered in DDP and thus\n # don't have autograd hooks installed on them.\n ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)\n # local model with the new materialized parameters.\n local_model = copy.deepcopy(ddp.module).cuda(self.rank)\n\n inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)\n for i in range(6):\n ddp(inp).sum().backward()\n local_model(inp).sum().backward()\n # materialized param grad is not touched by DDP, so its grad should\n # be the same as if running locally.\n for materialized_param, local_param in zip(\n ddp.module.fc2.parameters(), local_model.fc2.parameters()\n ):\n self.assertEqual(materialized_param.grad, local_param.grad)\n\n # fc1 parameter grad should still be different, due to allreduce.\n for synced_param, local_param in zip(\n ddp.module.fc1.parameters(), local_model.fc1.parameters()\n ):\n self.assertFalse(synced_param.grad == local_param.grad)\n\n # Proxy module grad should not be touched\n for proxy_param in proxy_params:\n self.assertTrue(proxy_param.grad is None)\n\n # Synchronize since we run multiple iterations of this test, to\n # isolate failure hangs.\n torch.cuda.synchronize(device=self.rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_ignore_params_arg(self):\n self._test_ddp_ignore_params_arg(static_graph=False)\n self._test_ddp_ignore_params_arg(static_graph=True)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_unused_params_rebuild_buckets_exception(self):\n class ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10, bias=False)\n self.net2 = nn.Linear(10, 10, bias=False)\n\n def forward(self, x):\n return self.net1(x)\n\n ddp = torch.nn.parallel.DistributedDataParallel(\n ToyModel().cuda(self.rank), device_ids=[self.rank]\n )\n for i in range(2):\n inp = torch.rand(1, 10)\n if i > 0:\n # On 2nd iteration, this will fail during rebuild_buckets,\n # but we should report an error regarding unused parameters\n # since that is the underlying root cause.\n try:\n ddp(inp).sum().backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(ddp, msg)\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"net2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(\n True, \"DDP unused parameters error not raised.\"\n )\n else:\n ddp(inp).sum().backward()\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_shared_grad_acc_unused_params(self):\n # When find_unused_parameters=True, ensure we mark unused parameters\n # even if they share gradient accumulators.\n class ToyModel(nn.Module):\n def __init__(self):\n 
super(ToyModel, self).__init__()\n # net1, bias, and net1.bias are all unused params.\n self.net1 = nn.Linear(10, 5, bias=False)\n self.bias = nn.Parameter(torch.zeros(5))\n # net1.bias and self.bias are names for the same underlying\n # parameter, so they share the same grad acc. This caused\n # the bug reported in https://github.com/pytorch/pytorch/issues/41324.\n self.net1.bias = self.bias\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(x)\n\n torch.cuda.set_device(self.rank)\n model = ToyModel().to(torch.cuda.current_device())\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.rank], find_unused_parameters=True\n )\n inp = torch.randn(20, 10, device=self.rank)\n for i in range(6):\n out = ddp_model(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_device(self):\n m = nn.Linear(10, 10).to(self.rank)\n expected_len = 2\n\n class TensorWrapper:\n __slots__ = [\"t\", \"moved_to_gpu\"]\n\n def __init__(self, t):\n self.t = t\n self.moved_to_gpu = False\n\n # Handlers for specific types of validation we want to do based on\n # the input type.\n\n def tuple_and_list_validator(x):\n self.assertTrue(len(x), expected_len)\n self.assertEqual(1, len(set(t.device for t in x)))\n self.assertEqual(x[0].device.index, self.rank)\n return x[0] + x[1]\n\n def namedtuple_validator(x):\n self.assertEqual(x._fields, EXPECTED_FIELDS)\n self.assertEqual(x.a.device.index, x.b.device.index)\n self.assertEqual(x.a.device.index, self.rank)\n return x.a + x.b\n\n def custom_type_validator(x):\n self.assertTrue(x.moved_to_gpu or (str(x.t.device) == \"cpu\"))\n x.t = x.t.to(self.rank)\n x.moved_to_gpu = True\n return x.t\n\n def dict_validator(x):\n self.assertTrue(EXPECTED_FIELDS[0] in x.keys())\n self.assertTrue(EXPECTED_FIELDS[1] in x.keys())\n self.assertEqual(1, len(set(t.device for t in x.values())))\n self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)\n return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]\n\n validators = {\n TensorWrapper: custom_type_validator,\n tuple: tuple_and_list_validator,\n list: tuple_and_list_validator,\n TestNamedTupleInput_0: namedtuple_validator,\n TestNamedTupleInput_1: namedtuple_validator,\n dict: dict_validator,\n }\n\n class ToyModel(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 10, bias=False)\n\n def forward(_self, x, expected_type): # noqa: B902\n # Similar to scatter, the recursive to in the single-device\n # case does not move tensors if they are in a custom type.\n self.assertTrue(isinstance(x, expected_type))\n fwd_tensor = validators[expected_type](x)\n return _self.lin(fwd_tensor)\n\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel().to(self.rank), device_ids=[self.rank]\n )\n\n def train_iter(inp, input_type):\n for _ in range(4):\n out = model(inp, input_type)\n out.sum().backward()\n\n # CPU tuple input, should be moved to the proper device before call\n # to forward.\n inp = tuple(torch.randn(10, 10) for _ in range(expected_len))\n train_iter(inp, tuple)\n\n # List CPU input, should be moved to proper device before call to\n # forward.\n inp = [torch.randn(10, 10) for _ in range(expected_len)]\n train_iter(inp, list)\n # Custom type containing tensor. 
The type is maintained, but the\n # device is not propagated (which is what happens with scatter too)\n inp = TensorWrapper(torch.randn(10, 10))\n train_iter(inp, TensorWrapper)\n # NamedTuple input. The type should be maintained and tensor inputs\n # should be moved to the correct device as in scatter.\n batch = 5\n dim = 10\n a = torch.rand(batch, dim)\n b = torch.rand(batch, dim)\n\n inp = TestNamedTupleInput_0(a, b)\n train_iter(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n train_iter(inp, type(inp))\n\n # dictionary input.\n inp = {\n EXPECTED_FIELDS[0]: a,\n EXPECTED_FIELDS[1]: b,\n }\n train_iter(inp, type(inp))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_namedtuple(self):\n batch = 5\n dim = 10\n\n a = torch.rand(batch, dim, device=self.rank)\n b = torch.rand(batch, dim, device=self.rank)\n\n class NamedTupleModule(torch.nn.Module):\n def __init__(_self): # noqa: B902\n super().__init__()\n _self.lin = nn.Linear(10, 1)\n\n def forward(_self, input, expected_type): # noqa: B902\n # Without NamedTuple support, this would be of type tuple.\n self.assertTrue(\n isinstance(input, expected_type),\n f\"Expected type {expected_type} but got {type(input)}\",\n )\n self.assertEqual(input._fields, EXPECTED_FIELDS)\n self.assertEqual(a, input.a)\n self.assertEqual(b, input.b)\n return _self.lin(torch.mul(input.a, input.b))\n\n model = torch.nn.parallel.DistributedDataParallel(\n NamedTupleModule().cuda(self.rank), device_ids=[self.rank]\n )\n inp = TestNamedTupleInput_0(a, b)\n # The following would fail if DDP does not propagate NamedTuples correctly.\n model(inp, type(inp))\n\n inp = TestNamedTupleInput_1(a, b)\n model(inp, type(inp))\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_same_across_ranks(self):\n # Control flow that is the same across ranks.\n batch = 20\n dim = 10\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used.\n local_used_maps = model.reducer._get_local_used_maps()\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, world_size], device=self.rank, dtype=torch.int32\n )\n\n # Validate parameter usage.\n variable_usage_tensor = local_used_maps[0]\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n # 2nd linear layer is unused\n 
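                    # (Parameter indices follow the order in which the reducer
                    # registered the model's parameters, so index 1 here refers to
                    # lin2.weight, the same name the DETAIL debug branch below
                    # checks for.)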
unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_invalid_static_graph(self):\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ControlFlowToyModel().cuda(self.rank),\n device_ids=[self.rank],\n )\n model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n ones_input = torch.ones(20, 10, device=self.rank)\n # unused parameter in the first iteration got used\n # in second iteration.\n expected_err = \"Your training graph has changed in this iteration\"\n with self.assertRaisesRegex(RuntimeError, expected_err):\n for i in range(2):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, expected_err)\n\n # used parameter in the first iteration got unused\n # in second iteration.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected to have finished reduction in the prior iteration \"\n \"before starting a new one. 
This error indicates that your \"\n \"training graph has changed in this iteration\",\n ):\n for i in range(2):\n if i % 2 != 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n\n verify_ddp_error_logged(model, \"Expected to have finished reduction\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_control_flow_different_across_ranks(self):\n # Control flow that is different across ranks.\n batch = 20\n dim = 10\n\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n # Control-flow that is rank and input dependent for the\n # model.\n use_second_layer = (\n torch.equal(x, torch.ones(batch, dim, device=x.device))\n and self.rank == 1\n )\n\n if use_second_layer:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n random_input = torch.randn(batch, dim, device=self.rank)\n ones_input = torch.ones(batch, dim, device=self.rank)\n for i in range(6):\n if i % 2 == 0:\n out = model(random_input)\n else:\n out = model(ones_input)\n loss = out.sum()\n loss.backward()\n # On even iterations, 2nd param goes unused, on odd iterations,\n # it is used only on rank 1.\n local_used_maps = model.reducer._get_local_used_maps()\n\n if i % 2 == 0:\n expected = torch.tensor(\n [world_size, 0], device=self.rank, dtype=torch.int32\n )\n else:\n expected = torch.tensor(\n [world_size, 1], device=self.rank, dtype=torch.int32\n )\n\n variable_usage_tensor = local_used_maps[0]\n # Validate parameter usage. 
On odd iterations, 2nd param is only\n # used on rank 1.\n self.assertEqual(variable_usage_tensor, expected)\n\n # Validate appropriate error message when DDP is used with\n # find_unused_parameters=False.\n model = torch.nn.parallel.DistributedDataParallel(\n ToyModel(self.rank).cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=False,\n )\n for i in range(2):\n if i == 0:\n loss = model(random_input).sum()\n loss.backward()\n else:\n try:\n loss = model(random_input).sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n verify_ddp_error_logged(model, msg)\n unused_param_index = 1\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}\",\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join([\"lin2.weight\"])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(s in msg, f\"Expected {s} to be in {msg}\")\n self.assertFalse(ddp_find_unused_params_enabled_str in msg)\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n @require_backend({\"gloo\"})\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support scatter\")\n def test_scatter_object_list(self):\n src_rank = 0\n scatter_list = (\n COLLECTIVES_OBJECT_TEST_LIST\n if self.rank == src_rank\n else [None for _ in COLLECTIVES_OBJECT_TEST_LIST]\n )\n world_size = dist.get_world_size()\n scatter_list = scatter_list[:world_size]\n i = 0\n while len(scatter_list) < world_size:\n scatter_list.append(scatter_list[i])\n i += 1\n\n output_obj_list = [None]\n dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank)\n self.assertEqual(\n output_obj_list[0],\n COLLECTIVES_OBJECT_TEST_LIST[\n self.rank % len(COLLECTIVES_OBJECT_TEST_LIST)\n ],\n )\n # Ensure errors are raised upon incorrect arguments.\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected argument scatter_object_output_list to be a list of size at least 1.\",\n ):\n dist.scatter_object_list([], scatter_list, src=src_rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_model_diff_across_ranks(self):\n group_gloo = dist.new_group(\n timeout=timedelta(seconds=60),\n backend=dist.Backend.GLOO\n )\n # Set NCCL_BLOCKING_WAIT and use a new NCCL group to improve test\n # determinism.\n os.environ[\"NCCL_BLOCKING_WAIT\"] = \"1\"\n group_to_use = dist.new_group(\n backend=dist.get_backend(),\n timeout=timedelta(seconds=5)\n )\n torch.cuda.set_device(self.rank)\n # Creates network with different sized embedding table on different\n # ranks. This should throw an error during DDP init.\n net = EmbeddingNet(self.rank)\n # When running with NCCL backend, we don't expect an error on rank 0,\n # rather, it will be taken down by NCCL_ASYNC_ERROR_HANDLING. 
When\n # running with Gloo or with debug mode wrapper, we expect the error\n # to be caught inline.\n is_detail_dbg_mode = (\n dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL\n )\n rank_0_ctx = (\n self.assertRaisesRegex(\n RuntimeError,\n \"Caught collective operation timeout\"\n )\n if dist.get_backend(group_to_use) == dist.Backend.NCCL and not is_detail_dbg_mode\n # Gloo can raise various exception messages, so just assert\n # Runtime error here.\n else self.assertRaises(RuntimeError)\n )\n ctx = (\n rank_0_ctx\n if self.rank == 0\n else self.assertRaisesRegex(RuntimeError, \"appears not to match\")\n )\n with ctx:\n net = torch.nn.parallel.DistributedDataParallel(\n net.to(self.rank), device_ids=[self.rank], process_group=group_to_use,\n )\n # Should only be run by rank 0, and blocking_wait catches and\n # reports exception.\n dist.barrier(group_to_use)\n\n # Perform gloo-based barrier to ensure one rank doesn't exit test\n # early which causes failure with Barrier.sync.\n dist.barrier(group_gloo)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_output_unused_in_loss(self):\n model = TwoLinLayerNet()\n # Need copy of model to pass into 2nd DDP ctor otherwise autograd hooks\n # on first DDP reducer will execute!\n model_copy = copy.deepcopy(model)\n net = torch.nn.parallel.DistributedDataParallel(\n copy.deepcopy(model).cuda(self.rank),\n device_ids=[self.rank],\n )\n net_with_find_unused = torch.nn.parallel.DistributedDataParallel(\n model_copy.cuda(self.rank),\n device_ids=[self.rank],\n find_unused_parameters=True,\n )\n\n inp = torch.randn(10, 10)\n\n for ddp in [net, net_with_find_unused]:\n for i in range(2):\n if i == 0:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n else:\n try:\n a, b = ddp(inp)\n loss = b.sum()\n loss.backward()\n except RuntimeError as e:\n msg = str(e)\n unused_index = 0\n unused_index_substr = (\n f\"Parameter indices which did not receive grad for rank {self.rank}: {unused_index}\"\n )\n if ddp == net:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_recommend_find_unused_params_str,\n ddp_outputs_not_used_in_loss_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_find_unused_params_enabled_str,\n ]\n elif ddp == net_with_find_unused:\n expected_strs = [\n ddp_prev_reduction_unfinished_str,\n ddp_outputs_not_used_in_loss_str,\n ddp_find_unused_params_enabled_str,\n unused_index_substr,\n ]\n unexpected_strs = [\n ddp_recommend_find_unused_params_str,\n ]\n # In debug mode, should show parameters that weren't reduced.\n # Without debug mode, should show suggestion to use debug mode.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.OFF:\n expected_strs.append(ddp_suggest_debug_mode_str)\n else:\n unreduced_params = \", \".join(['a.weight'])\n expected_strs.append(\n f\"did not receive grad for rank {self.rank}: {unreduced_params}\"\n )\n for s in expected_strs:\n self.assertTrue(\n s in msg,\n f\"Expected {s} to be in {msg}\"\n )\n for s in unexpected_strs:\n self.assertFalse(\n s in msg,\n f\"Expected {s} not to be in {msg}\"\n )\n else:\n self.assertFalse(True, \"DDP error not raised\")\n\n dist.barrier()\n\n def _test_different_graph_across_ranks(\n self, find_unused_parameters=False, static_graph=False\n ):\n class ToyModel(nn.Module):\n def __init__(self, rank):\n super(ToyModel, self).__init__()\n self.lin1 = nn.Linear(10, 10, bias=False)\n self.lin2 = 
nn.Linear(10, 10, bias=False)\n self.rank = rank\n\n def forward(self, x):\n if self.rank == 0:\n return self.lin2(F.relu(self.lin1(x)))\n else:\n return F.relu(self.lin1(x))\n\n torch.manual_seed(31415)\n world_size = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n model = ToyModel(self.rank).cuda(self.rank)\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n find_unused_parameters=find_unused_parameters,\n gradient_as_bucket_view=True,\n )\n if static_graph:\n ddp_model._set_static_graph()\n random_input = torch.randn(20, 10, device=self.rank)\n for i in range(10):\n out = ddp_model(random_input)\n loss = out.sum()\n loss.backward()\n return ddp_model\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_different_graph_across_ranks(self):\n base_model = self._test_different_graph_across_ranks(\n find_unused_parameters=True\n )\n self.assertFalse(\n base_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n static_model = self._test_different_graph_across_ranks(static_graph=True)\n self.assertTrue(\n static_model._get_ddp_logging_data().get(\"has_rebuilt_buckets\", 0)\n )\n for i, j in zip(base_model.parameters(), static_model.parameters()):\n self.assertEqual(i, j)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_gloo(self):\n tensors = [torch.ones(10) * self.rank]\n # Kick off some allreduce work on all ranks\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n # Run monitored barrier and ensure it passees\n timeout = timedelta(seconds=2)\n dist.monitored_barrier(timeout=timeout)\n # Check monitored_barrier success with wait_all_ranks=True\n for _ in range(10):\n dist.all_reduce(torch.cat(tensors))\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n # All ranks besides 1 call into barrier, rank 0 should report failure\n # while others report gloo error.\n failed_rank = 1\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank != failed_rank:\n # Other ranks should not pass barrier since rank 0 failed.\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n # We need a barrier since otherwise failed_rank exits too early\n # and cause a timeout.\n self._barrier(timeout=30)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_subgroup(self):\n # Tests that monitored_barrier works as expected on non-default\n # process groups.\n failed_rank = 1\n timeout = 0.1\n subgroup = dist.new_group(ranks=[0, 1])\n\n if self.rank == failed_rank:\n return\n\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {failed_rank} failed to pass monitoredBarrier\"\n ):\n dist.monitored_barrier(subgroup, timeout)\n else:\n # Other ranks call into monitored_barrier, but this should be a\n # noop because they are not part of the subgroup. 
Verify that\n # there are no errors here.\n dist.monitored_barrier(subgroup, timeout)\n\n def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks):\n # tests expected behavior when nonzero rank hangs.\n nccl_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n timeout=timedelta(seconds=2),\n backend=dist.Backend.NCCL,\n )\n gloo_pg = dist.new_group(\n ranks=list(i for i in range(int(self.world_size))),\n backend=dist.Backend.GLOO,\n )\n tensors = [torch.ones(10, device=self.rank) * self.rank]\n # Let all ranks call allreduce first to set up communicators etc.\n # Directly simulating error here will run into store issue described\n # in https://github.com/pytorch/pytorch/issues/54524.\n nccl_pg.allreduce(tensors).wait()\n # All ranks besides 0 call into allreduce. This is to simulate a\n # desync across the world, where some ranks call into\n # monitored_barrier() and others are stuck in collective comm. In\n # practice, we don't need NCCL_BLOCKING_WAIT, but we use it in this\n # test to ensure it exits cleanly.\n if self.rank != 0:\n # Can get different errors here depending on whether gloo-based\n # wrapper PG is enabled or not, since with wrapper pg, it will\n # fail in a collective synchronization check and not actually\n # call into the nccl pg.\n if dist._get_debug_mode() == dist._DistributedDebugLevel.DETAIL:\n err_regex = \"Timed out waiting\"\n else:\n err_regex = \"Caught collective operation timeout\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1))\n else:\n # Rank 0 should report first (in order) timed out rank or all ranks\n # depending on wait_all_ranks flag passed into monitored_barrier.\n if wait_all_ranks:\n rank_str = \", \".join(\n [str(i) for i in range(1, int(self.world_size))]\n )\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n else:\n expected_first_fail_rank = 1\n err_regex = f\"Rank {expected_first_fail_rank} failed to pass monitoredBarrier\"\n monitored_barrier_timeout_seconds = timedelta(seconds=0.1)\n with self.assertRaisesRegex(RuntimeError, err_regex):\n gloo_pg.monitored_barrier(\n monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks\n )\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report first timed out rank.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False)\n\n @with_nccl_blocking_wait\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_rocm\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n def test_monitored_barrier_allreduce_hang_wait_all_ranks(self):\n # tests expected behavior when nonzero rank hangs and we want to\n # report all timed out ranks.\n self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n def test_monitored_barrier_gloo_rank_0_timeout(self):\n # tests error when rank 0 exhausts its given timeout.\n process_group = dist.new_group(\n ranks=list(i for i in range(int(self.world_size)))\n )\n timeout = timedelta(seconds=0)\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {self.rank} timed out in monitoredBarrier\"\n ):\n process_group.monitored_barrier(timeout)\n\n 
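    # A minimal monitored_barrier usage sketch (illustrative only; it assumes the
    # default gloo process group has already been initialized elsewhere):
    #
    #     from datetime import timedelta
    #     import torch.distributed as dist
    #     dist.monitored_barrier(timeout=timedelta(seconds=10), wait_all_ranks=True)
    #
    # Rank 0 acknowledges every other rank; with wait_all_ranks=True it reports all
    # ranks that failed to respond within the timeout rather than only the first one.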
@require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n @unittest.skipIf(\n IS_MACOS or IS_WINDOWS,\n \"MacOS uses uv transport which does not have as robust error handling as tcp transport\",\n )\n def test_monitored_barrier_failure_order(self):\n # Ensure that the first (in sorted order) rank is reported when\n # multiple ranks fail to pass the monitored_barrier.\n # TODO(#54879): Provide ability to wait and report all failed ranks\n expected_first_failed_rank = 2\n timeout = timedelta(seconds=2)\n src_rank = 0\n if self.rank == src_rank:\n with self.assertRaisesRegex(\n RuntimeError, f\"Rank {expected_first_failed_rank}\"\n ):\n dist.monitored_barrier(timeout=timeout)\n elif self.rank == 1:\n err_regex = (\n f\"Rank {self.rank} successfully reached monitoredBarrier,\"\n f\" but received errors while waiting to be unblocked by rank\"\n f\" {src_rank}\"\n )\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout)\n\n @require_backend({\"gloo\"})\n @require_backends_available({\"gloo\"})\n @skip_if_small_worldsize\n def test_monitored_barrier_wait_all_ranks(self):\n # Tests simple case where > 1 rank does not call into monitored\n # barrier and verifies all ranks are reported by rank 0.\n if self.rank == 0:\n timeout = timedelta(seconds=0.1)\n rank_str = \", \".join([str(i) for i in range(1, int(self.world_size))])\n err_regex = f\"Ranks {rank_str} failed to pass monitoredBarrier\"\n with self.assertRaisesRegex(RuntimeError, err_regex):\n dist.monitored_barrier(timeout=timeout, wait_all_ranks=True)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping(self):\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"a.weight\", 1: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test when DDP is used with ignored parameters.\n model = TwoLinLayerNet()\n # Parameters to ignore are in the format {module_name}.{param_name}\n params_to_ignore = [\"a.weight\"]\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, params_to_ignore\n )\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n expected_mapping = {0: \"b.weight\"}\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertDictEqual(expected_mapping, param_to_name_mapping)\n\n # Test errors are raised when DDP and module parameters mismatch.\n # This generally indicates a bug with DDP and is not expected to\n # happen in user applications.\n model = TwoLinLayerNet()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n net_params, _ = net._build_params_for_reducer()\n if self.rank == 0:\n print(type(net_params[0][0]))\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n with self.assertRaisesRegex(ValueError, \"Expected param to name mapping\"):\n net._build_param_to_name_mapping(net_params)\n\n net_params[0] = net_params[0][:-3]\n with self.assertRaisesRegex(ValueError, \"Param with name\"):\n 
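            # net_params[0] was truncated past the two dummy parameters, so a real
            # module parameter (b.weight) is now missing from the reducer's list;
            # the mapping helper reports that as a "Param with name" error.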
net._build_param_to_name_mapping(net_params)\n\n net_params[0].extend(\n [\n torch.nn.Parameter(torch.ones(1)),\n torch.nn.Parameter(torch.ones(1)),\n ]\n )\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_ddp_build_param_to_name_mapping_requires_grad(self):\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(10, 10)\n # Is not tracked by DDP and should not show up in param to\n # name mapping.\n self.lin.bias.requires_grad_(False)\n\n def forward(self, x):\n return self.lin(x)\n\n model = Net()\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n expected_mapping = {\n 0: \"lin.weight\",\n }\n net_params, _ = net._build_params_for_reducer()\n param_to_name_mapping = net._build_param_to_name_mapping(net_params)\n self.assertEqual(param_to_name_mapping, expected_mapping)\n\n def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse):\n debug_mode_off = dist._get_debug_mode() == dist._DistributedDebugLevel.OFF\n\n class SubModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.embedding_net = EmbeddingNet(0)\n self.lin = TwoLinLayerNet()\n self.bn = BatchNormNet()\n self.lin_layer = nn.Linear(4, 10, bias=False)\n\n def forward(self, x):\n x = self.bn(x)\n x = self.lin_layer(x)\n x = self.lin.a(x) # self.lin.b param unused\n # EmbeddingNet entirely unused: self.embedding_net.embedding and\n # self.embedding_net.lin unused.\n return x\n\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.sub_module = SubModule()\n\n def forward(self, x):\n return self.sub_module(x)\n\n model = MyModel()\n sparse_embedding_fqns = []\n if ignore_sparse:\n for module_name, module in model.named_modules():\n if module == model.sub_module.embedding_net.embedding:\n for parameter_name, param in module.named_parameters(\n recurse=False\n ):\n fqn = f\"{module_name}.{parameter_name}\"\n sparse_embedding_fqns.append(fqn)\n\n torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(\n model, sparse_embedding_fqns\n )\n unused_modules = [\n model.sub_module.embedding_net.lin,\n model.sub_module.lin.b,\n ]\n else:\n unused_modules = list(model.sub_module.embedding_net.modules()) + [\n model.sub_module.lin.b,\n ]\n\n expected_unused_param_fqns = []\n used_param_fqns = [] # Validate that these don't mistakenly show up.\n fqn_to_param_index = {}\n index = 0\n for module_name, module in model.named_modules():\n for parameter_name, param in module.named_parameters(recurse=False):\n fqn = f\"{module_name}.{parameter_name}\"\n fqn_to_param_index[fqn] = index\n if fqn not in sparse_embedding_fqns:\n index += 1\n if module in unused_modules:\n expected_unused_param_fqns.append(fqn)\n else:\n if (\n not ignore_sparse\n or module != model.sub_module.embedding_net.embedding\n ):\n used_param_fqns.append(fqn)\n\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank),\n device_ids=[self.rank],\n )\n batch, dim = 10, 2\n inp = torch.ones(batch, dim)\n for i in range(2):\n if i == 0:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n else:\n try:\n out = net(inp)\n loss = out.sum()\n loss.backward()\n except RuntimeError as e:\n e = str(e)\n\n unused_param_substr = e[e.find(\"did not receive grad\") :]\n # Validate that each unused param fully qualified name\n # shows up in error logs. 
We do this instead of\n # constructing a joined string since order of parameters\n # can be different in Reducer. In addition, validate\n # param indices show up as well.\n for unused_param_fqn in expected_unused_param_fqns:\n self.assertTrue(\n unused_param_fqn in unused_param_substr\n or debug_mode_off\n )\n self.assertTrue(\n str(fqn_to_param_index[unused_param_fqn])\n in unused_param_substr,\n f\"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}\",\n )\n\n # Validate that used param fqns don't show up in error\n # logs.\n for used_param_fqn in used_param_fqns:\n self.assertFalse(used_param_fqn in unused_param_substr)\n # Validate that ignored param fqns don't show up as unused\n # (since DDP does not track them)\n for sparse_param_fqn in sparse_embedding_fqns:\n self.assertFalse(sparse_param_fqn in unused_param_substr)\n else:\n self.assertTrue(False, \"Expected error was not raised!\")\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_error(self):\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False)\n\n @with_dist_debug_levels(levels=[\"OFF\", \"INFO\", \"DETAIL\"])\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_ddp_multiple_nested_unused_params_err_ignore_params(self):\n # Tests unused parameter reporting when DDP is configured to ignore\n # certain parameters.\n self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True)\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_lt_x_gpu(2)\n def test_ddp_inference(self):\n # tests that DDP module can be run on a single node with no_grad\n # or eval setting and there is no hang.\n rank = self.rank\n torch.cuda.set_device(rank)\n model = Net().cuda()\n local_model = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n syncbn_model = nn.SyncBatchNorm(\n 2, momentum=0.99, track_running_stats=False\n ).cuda()\n local_syncbn_model = copy.deepcopy(syncbn_model)\n syncbn_model = torch.nn.parallel.DistributedDataParallel(\n syncbn_model,\n device_ids=[rank]\n )\n inp = torch.randn(10, 2, device=rank)\n inp_syncbn = torch.randn(10, 2, 4, 4, device=rank)\n tests = [\n (model, local_model, inp),\n (syncbn_model, local_syncbn_model, inp_syncbn),\n ]\n for test in tests:\n test_model, test_local_model, test_inp = test\n if self.rank == 0:\n test_model.eval()\n test_local_model.eval()\n for _ in range(6):\n self.assertEqual(\n test_model(test_inp),\n test_local_model(test_inp)\n )\n\n # Barrier since only rank 0 runs inference. 
Test should be\n # much faster than 30s, but this is to avoid flakiness.\n self._barrier(timeout=30)\n\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_lt_x_gpu(2)\n def test_ddp_sync_bn_training_vs_eval(self):\n rank = self.rank\n torch.cuda.set_device(rank)\n # Need to set track_running_stats=False, when track_running_stats=True,\n # bn_training is False and sync could not occur in eval model.\n model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda(\n rank\n )\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])\n # Test sync occurs in training mode.\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model(inp)\n loss = out.sum()\n loss.backward()\n\n # SyncBN allgathers stats across all ranks, so verify call to\n # all_gather in profiler.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertNotEqual([], all_gather_calls)\n\n # Only do inference on one rank. If SyncBN did collective stats sync,\n # this would hang/error.\n model_inference = model.module\n if self.rank == 0:\n model_inference.eval()\n with torch.autograd.profiler.profile() as prof:\n for i in range(6):\n inp = torch.randn(10, 2, 4, 4).cuda(rank)\n out = model_inference(inp)\n loss = out.sum()\n loss.backward()\n\n # Ensure sync does not occur in eval() mode.\n if BACKEND == \"nccl\":\n all_gather_calls = get_profiling_event(\"_all_gather_base\", prof)\n else:\n all_gather_calls = get_profiling_event(\"all_gather\", prof)\n self.assertEqual([], all_gather_calls)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_python_error_logged(self):\n # Most python exceptions in DDP are raised during init before\n # reducer is constructed, so we don't have a logger in those cases.\n # However, the below is one example where a python error is thrown\n # after reducer is constructed.\n model = TwoLinLayerNet().cuda(self.rank)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.rank],\n )\n expected_err = \"must be callable\"\n with self.assertRaisesRegex(TypeError, expected_err):\n model.register_comm_hook({}, {})\n\n verify_ddp_error_logged(model, expected_err)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_static_graph_nested_types(self):\n # Tests for static graph training when outputs are not just tensors\n # but can be (nested) tuple, list, dict, etc.\n rank = self.rank\n torch.cuda.set_device(rank)\n\n class NestedOutputModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = nn.Linear(100, 1, bias=False)\n\n def forward(self, inp, output_type):\n if output_type == \"tuple\":\n return (\n self.lin(inp),\n (\n self.lin(inp),\n self.lin(inp),\n ),\n )\n elif output_type == \"list\":\n return [\n self.lin(inp),\n [\n self.lin(inp),\n self.lin(inp),\n ],\n ]\n elif output_type == \"dict\":\n return {\n \"a\": self.lin(inp),\n \"b\": {\n \"c\": self.lin(inp),\n },\n }\n\n def get_loss(model_output):\n loss = 0.0\n if isinstance(model_output, torch.Tensor):\n return model_output.sum()\n elif 
isinstance(model_output, dict):\n for value in model_output.values():\n loss += get_loss(value)\n elif isinstance(model_output, tuple) or isinstance(model_output, list):\n for x in model_output:\n loss += get_loss(x)\n else:\n raise ValueError(f\"Unknown model output type {type(model_output)}\")\n return loss\n\n model = NestedOutputModule().cuda(rank)\n model_static_graph = copy.deepcopy(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[rank],\n )\n model_static_graph._set_static_graph()\n inp = torch.randn(10, 100)\n type_mapping = {\n \"list\": list,\n \"tuple\": tuple,\n \"dict\": dict,\n }\n for output_type in type_mapping.keys():\n for i in range(6):\n out = model(inp, output_type=output_type)\n loss = get_loss(out)\n loss.backward()\n self._model_step(model)\n out_static = model_static_graph(inp, output_type=output_type)\n self.assertTrue(isinstance(out_static, type_mapping[output_type]))\n loss_static = get_loss(out_static)\n loss_static.backward()\n self._model_step(model_static_graph)\n for (p, p_static) in zip(\n model.parameters(), model_static_graph.parameters()\n ):\n self.assertEqual(p, p_static)\n\n @skip_if_lt_x_gpu(2)\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only Nccl & Gloo backend support DistributedDataParallel\",\n )\n def test_ddp_new_tensor_in_fwd(self):\n # Test from https://github.com/pytorch/pytorch/issues/60733\n class MyModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(10, 10, bias=False)\n self.fc2 = nn.Linear(10, 10, bias=False)\n\n def __init_opt(self):\n param = next(self.parameters())\n opt = torch.randn(1, 10, device=param.device)\n return opt\n\n def forward(self, x, opt_1, opt_2, opt_nested):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n if opt_1 is None:\n opt_1 = self.__init_opt()\n if opt_2 is None:\n opt_2 = self.__init_opt()\n if opt_nested is None or not torch.is_tensor(opt_nested):\n opt_nested = self.__init_opt()\n # Test multiple tensors as well as newly created tensors\n # within a struct.\n return x, opt_1, opt_2, {\"tensor\": opt_nested}\n\n model = MyModel().to(self.rank)\n for find_unused in [True, False]:\n ddp = DistributedDataParallel(\n model,\n device_ids=[self.rank],\n output_device=self.rank,\n broadcast_buffers=False,\n find_unused_parameters=find_unused,\n )\n\n opt = [None for _ in range(3)]\n for i in range(2):\n ddp.zero_grad()\n x = torch.randn(1, 10, device=self.rank)\n out, opt[0], opt[1], opt[2] = ddp(\n x,\n opt_1=opt[0],\n opt_2=opt[1],\n opt_nested=opt[2]\n )\n for i in range(len(opt)):\n if torch.is_tensor(opt[i]):\n self.assertEqual(opt[i].grad_fn, None)\n else:\n self.assertEqual(opt[i][\"tensor\"].grad_fn, None)\n out.mean().backward()\n" ]
[ [ "torch.distributed.scatter", "torch.zeros", "torch.testing._internal.common_distributed.nccl_skip_if_lt_x_gpu", "torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook.PostLocalSGDState", "torch.cuda.amp.autocast", "torch.distributed.distributed_c10d._get_default_group", "torch.nn.SyncBatchNorm", "torch.cuda.stream", "torch.device", "torch.distributed.is_gloo_available", "torch.randn", "torch.equal", "torch.ones_like", "torch.distributed.distributed_c10d.AllreduceOptions", "torch.empty_like", "torch.cuda.current_device", "torch.nn.Conv2d", "torch.profiler.profile", "torch.nn.Module", "torch.nn.Linear", "torch.distributed.destroy_process_group", "torch.distributed.broadcast_object_list", "torch.distributed.all_to_all_single", "torch.distributed.all_gather", "torch.distributed.is_nccl_available", "torch.distributed.broadcast", "torch.testing._internal.common_distributed.verify_ddp_error_logged", "torch.randperm", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks.hook_then_optimizer", "torch.distributed.gather_object", "torch.cuda.is_available", "torch.distributed.is_mpi_available", "torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model", "torch.distributed._get_debug_mode", "torch.utils.data.distributed.DistributedSampler", "torch.reshape", "torch.tensor", "torch.mul", "torch.rand", "torch.distributed.all_to_all", "torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState", "torch.LongTensor", "torch.distributed.send", "torch.distributed.scatter_object_list", "torch.zeros_like", "torch.is_tensor", "torch.cuda.device_count", "torch.cuda.manual_seed", "torch.manual_seed", "torch.distributed.recv", "torch.distributed.reduce", "torch.distributed.gather", "torch.nn.ReLU", "torch.load", "torch.cat", "torch.nn.Embedding", "torch.testing._internal.common_distributed.initialize_temp_directories", "torch.distributed.irecv", "torch.save", "torch.cuda.synchronize", "torch.ones", "torch.distributed.barrier", "torch.distributed.BroadcastOptions", "torch.nn.parallel.distributed._dump_DDP_relevant_env_vars", "torch.testing._internal.common_distributed.skip_if_lt_x_gpu", "torch.DoubleTensor", "torch.cuda.amp.GradScaler", "torch.distributed.get_world_size", "torch.distributed.new_subgroups_by_enumeration", "torch.distributed.get_backend", "torch.distributed.batch_isend_irecv", "torch.cuda.set_device", "torch.distributed.ProcessGroupNCCL.Options", "torch.distributed.broadcast_multigpu", "torch.distributed._rank_not_in_group", "torch.backends.cudnn.flags", "torch.distributed.ProcessGroupNCCL", "torch.cuda.Stream", "torch.testing._internal.common_distributed.cleanup_temp_dir", "torch.nn.functional.softmax", "torch.testing._internal.common_distributed.with_dist_debug_levels", "torch.distributed.PrefixStore", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks.OptimizerHookState", "torch.no_grad", "torch.distributed.monitored_barrier", "torch.distributed.get_rank", "torch.distributed.P2POp", "torch.nn.BatchNorm1d", "torch.testing._internal.common_distributed.requires_nccl_version", "torch.empty", "torch.distributed.distributed_c10d.get_world_size", "torch.distributed.new_subgroups", "torch.autograd.profiler.profile", "torch.distributed.Backend", "torch.nn.parallel.DistributedDataParallel", "torch.view_as_real", "torch.testing._internal.common_distributed.captured_output", "torch.distributed.new_group", "torch.distributed.all_reduce", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
k4rma99/Year_End_Project
[ "a21541afb01c0701e59c315946278988fd8e76aa" ]
[ "src/train_abstractive.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport glob\nimport os\nimport random\nimport signal\nimport time\n\nimport torch\nfrom pytorch_transformers import BertTokenizer\n\nimport distributed\nfrom models import data_loader, model_builder\nfrom models.data_loader import load_dataset\nfrom models.loss import abs_loss\nfrom models.model_builder import AbsSummarizer\nfrom models.predictor import build_predictor\nfrom models.trainer import build_trainer\nfrom others.logging import logger, init_logger\n\nmodel_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',\n 'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef train_abs_multi(args):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n procs = []\n for i in range(nb_gpu):\n device_id = i\n procs.append(mp.Process(target=run, args=(args,\n device_id, error_queue,), daemon=True))\n procs[i].start()\n logger.info(\" Starting process pid: %d \" % procs[i].pid)\n error_handler.add_child(procs[i].pid)\n for p in procs:\n p.join()\n\n\ndef run(args, device_id, error_queue):\n \"\"\" run process \"\"\"\n\n setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != args.gpu_ranks[device_id]:\n raise AssertionError(\"An error occurred in \\\n Distributed initialization\")\n\n train_abs_single(args, device_id)\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\ndef validate_abs(args, device_id):\n timestep = 0\n if (args.test_all):\n cp_files = 
sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n if (args.test_start_from != -1 and step < args.test_start_from):\n xent_lst.append((1e6, cp))\n continue\n xent = validate(args, device_id, cp, step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if (i - max_step > 10):\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]\n logger.info('PPL %s' % str(xent_lst))\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n test_abs(args, device_id, cp, step)\n else:\n while (True):\n cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (not os.path.getsize(cp) > 0):\n time.sleep(60)\n continue\n if (time_of_cp > timestep):\n timestep = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n validate(args, device_id, cp, step)\n test_abs(args, device_id, cp, step)\n if (time_of_cp == timestep):\n break\n\n cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if (cp_files):\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if (time_of_cp > timestep):\n continue\n else:\n time.sleep(300)\n\n\ndef validate(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = AbsSummarizer(args, device, checkpoint)\n model.eval()\n\n valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),\n args.batch_size, device,\n shuffle=False, is_test=False)\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)\n symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],\n 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}\n\n valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)\n\n trainer = build_trainer(args, device_id, model, None, valid_loss)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n\ndef test_abs(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = AbsSummarizer(args, device, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, device,\n shuffle=False, is_test=True)\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)\n symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],\n 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}\n predictor = build_predictor(args, 
tokenizer, symbols, model, logger)\n predictor.translate(test_iter, step)\n\n\ndef test_text_abs(args, device_id, pt, step):\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n if (pt != ''):\n test_from = pt\n else:\n test_from = args.test_from\n logger.info('Loading checkpoint from %s' % test_from)\n\n checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n print(args)\n\n model = AbsSummarizer(args, device, checkpoint)\n model.eval()\n\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.test_batch_size, device,\n shuffle=False, is_test=True)\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)\n symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],\n 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}\n predictor = build_predictor(args, tokenizer, symbols, model, logger)\n predictor.translate(test_iter, step)\n\n\ndef baseline(args, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),\n args.batch_size, 'cpu',\n shuffle=False, is_test=True)\n\n trainer = build_trainer(args, '-1', None, None, None)\n #\n if (cal_lead):\n trainer.test(test_iter, 0, cal_lead=True)\n elif (cal_oracle):\n trainer.test(test_iter, 0, cal_oracle=True)\n\n\ndef train_abs(args, device_id):\n if (args.world_size > 1):\n train_abs_multi(args)\n else:\n train_abs_single(args, device_id)\n\n\ndef train_abs_single(args, device_id):\n init_logger(args.log_file)\n logger.info(str(args))\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % device_id)\n logger.info('Device %s' % device)\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n torch.cuda.manual_seed(args.seed)\n\n if args.train_from != '':\n logger.info('Loading checkpoint from %s' % args.train_from)\n checkpoint = torch.load(args.train_from,\n map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt.keys():\n if (k in model_flags):\n setattr(args, k, opt[k])\n else:\n checkpoint = None\n\n if (args.load_from_extractive != ''):\n logger.info('Loading bert from extractive model %s' % args.load_from_extractive)\n bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)\n bert_from_extractive = bert_from_extractive['model']\n else:\n bert_from_extractive = None\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n\n def train_iter_fct():\n return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,\n shuffle=True, is_test=False)\n\n model = AbsSummarizer(args, device, checkpoint, bert_from_extractive)\n if (args.sep_optim):\n optim_bert = model_builder.build_optim_bert(args, model, checkpoint)\n optim_dec = model_builder.build_optim_dec(args, model, checkpoint)\n optim = [optim_bert, optim_dec]\n else:\n optim = [model_builder.build_optim(args, model, checkpoint)]\n\n for param in model.parameters():\n param.requires_grad = False\n\n for param in model.decoder.parameters():\n param.requires_grad = True\n\n for param in model.generator.parameters():\n param.requires_grad = True\n\n # for name,param 
in model.named_parameters():\n # if param.requires_grad == True:\n # logger.info(name)\n logger.info(model)\n \n \n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)\n symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],\n 'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}\n\n train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,\n label_smoothing=args.label_smoothing)\n\n trainer = build_trainer(args, device_id, model, optim, train_loss)\n\n trainer.train(train_iter_fct, args.train_steps)\n" ]
[ [ "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.load", "torch.manual_seed", "torch.multiprocessing.get_context" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mas-veritas2/veritastool
[ "37f36b620c3637e230efd8ed69cbb5e4ef87fe2b" ]
[ "veritastool/examples/customer_marketing_example/measures.py" ]
[ "\"\"\"\nBasic fairness measures specific to uplift models.\n\nWritten by Daniel Steinberg and Lachlan McCalman,\nGradient Institute Ltd. ([email protected]).\n\nCopyright © 2020 Monetary Authority of Singapore\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use\nthis file except in compliance with the License. You may obtain a copy of the\nLicense at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom functools import partial\nfrom typing import Union, Tuple, Callable, Dict\nfrom scipy.stats import multinomial\nfrom scipy.stats.mstats import mquantiles\nfrom scipy.integrate import simps\nfrom sklearn.metrics import log_loss, r2_score\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom uplift import Uplifter\nfrom response import Responder\n\n\n#\n# Generic Uplift Scorers\n#\n\ndef make_model_scorer(\n scorefn: Callable,\n *args,\n pred_method: str=\"select\",\n **kwargs\n) -> float:\n \"\"\"Make a scorer for an uplift model (Uplifter or Responder class).\"\"\"\n # Make the scorer function\n def scorer(estimator, X, y):\n predict = getattr(estimator, pred_method)(X)\n score = scorefn(y, predict, *args, **kwargs)\n return score\n\n return scorer\n\n\ndef make_fair_scorer(\n func: Callable,\n prot_attr: str,\n priv_group: int,\n diff: bool,\n *args,\n pred_method: str=\"select\",\n **kwargs\n) -> float:\n \"\"\"Make a scorer that is the disparity between protected group scores.\"\"\"\n # make the scorer function\n def scorer(estimator, X, y):\n # get selection and protected attribute\n predict = getattr(estimator, pred_method)(X)\n attributes = _get_attributes(y)\n protected = np.array(attributes[prot_attr] != priv_group)\n\n # call the input scoring function\n s_r = func(y[protected], predict[protected], *args, **kwargs)\n s_i = func(y[~protected], predict[~protected], *args, **kwargs)\n\n measure = s_r - s_i if diff else s_r / s_i\n return measure\n\n return scorer\n\n\ndef gini_fairness(\n estimator: Union[Responder, Uplifter],\n X: Union[np.array, pd.DataFrame],\n y: pd.Series,\n prot_attr: str,\n n_percentiles: int=20,\n) -> float:\n \"\"\"Calculate the Gini coefficient for continuous protected attributes.\n\n This calculation is based on trapezoidal integration of the Lorenz curve.\n \"\"\"\n selection = estimator.select(X)\n attributes = _get_attributes(y)\n\n # Get the continuous protected attribute\n if prot_attr in attributes.columns:\n A = np.array(attributes[prot_attr])\n elif prot_attr in X.columns:\n A = np.array(X[prot_attr])\n else:\n raise ValueError(\"`prot_attr` is not in y or X!\")\n\n G = _gini_coefficient(selection, A, n_percentiles)\n return G\n\n\n#\n# Test data evaluator\n#\n\ndef test_model(\n estimator: Uplifter,\n X_test: Union[pd.DataFrame, np.array],\n y_test: pd.Series,\n scorers: Dict[str, Callable],\n lower_quantile: float=0.05,\n upper_quantile: float=0.95,\n replications: int=50\n) -> Dict[str, Tuple[float, int]]:\n \"\"\"Evaluate the uplift model scores on a test dataset, with uncertainty.\"\"\"\n scores = {}\n for k, fun in scorers.items():\n pfun = partial(fun, estimator)\n scores[k] = _empirical_bootstrap(\n pfun, X_test, y_test,\n q_lower=lower_quantile,\n 
q_upper=upper_quantile,\n replications=replications\n )\n\n return scores\n\n\n#\n# Additional model measures\n#\n\ndef std_nlog_loss(\n y_true: Union[pd.Series, np.ndarray],\n p_pred: np.array\n) -> float:\n \"\"\"Standardised negative log-loss.\n\n Standardised against a naive predictor trained on the test set.\n \"\"\"\n nll = - log_loss(y_true, p_pred)\n # This assumes the labels were sorted using python's sort function,\n # which is true for scikit learn classes.\n y_enc = OneHotEncoder(sparse=False).fit_transform(\n y_true.to_numpy()[:, np.newaxis])\n\n p_rate = y_enc.mean(axis=0)\n naive = multinomial(n=1, p=p_rate)\n naivell = naive.logpmf(y_enc).mean()\n\n std_nll = nll - naivell\n return std_nll\n\n\ndef empirical_lift(\n y: pd.Series,\n selected: np.array\n) -> float:\n \"\"\"Estimate the empirical lift from a selection.\"\"\"\n Ntr = sum(y[selected] == \"TR\")\n Ntn = sum(y[selected] == \"TN\")\n pRcT = Ntr / (Ntr + Ntn)\n Ncr = sum(y[selected] == \"CR\")\n Ncn = sum(y[selected] == \"CN\")\n pRcC = Ncr / (Ncr + Ncn)\n emp_lift = pRcT - pRcC\n return emp_lift\n\n\ndef lift_r2(\n y: pd.Series,\n lift: np.array\n) -> float:\n \"\"\"Calculate R2 score between predicted lift and empirical lift deciles.\"\"\"\n deciles = np.arange(10)\n dec_idx = pd.qcut(lift, 10, labels=deciles)\n\n # Compute the empirical lift per deciles\n emp_lift = np.array([empirical_lift(y, dec_idx == d) for d in deciles])\n\n # Compute the average predicted lift per decile\n med_lift = np.array([np.median(lift[dec_idx == d]) for d in deciles])\n\n # R2 between lifts\n r2 = r2_score(emp_lift, med_lift)\n return r2\n\n\ndef proportion_selected(\n y: pd.Series,\n selected: np.array\n) -> float:\n \"\"\"Calculate the proportion of the cohort selected.\"\"\"\n # Impact rate\n p_sel = sum(selected) / len(selected)\n return p_sel\n\n\n#\n# Mock deployment impact scoring\n#\n\n\ndef deployment_outcomes(y: pd.Series, selected: np.array) -> pd.DataFrame:\n \"\"\"Get the 'real world' outcomes from a deployment.\"\"\"\n attributes = _get_attributes(y)\n\n # Copy not selected outcomes to outcomes\n applied = np.array(attributes.ns_applied)\n acquired = np.array(attributes.ns_acquired)\n success = np.array(attributes.ns_success)\n\n # Change selected to selected outcomes\n applied[selected] = attributes.loc[selected, \"s_applied\"]\n acquired[selected] = attributes.loc[selected, \"s_acquired\"]\n success[selected] = attributes.loc[selected, \"s_success\"]\n\n outcomes = pd.DataFrame({\n \"applied\": applied,\n \"acquired\": acquired,\n \"success\": success,\n }, index=y.index)\n\n return outcomes\n\n\ndef mock_deploy(\n estimator: Uplifter,\n X_deploy: Union[pd.DataFrame, np.array],\n y_deploy: pd.Series,\n y_train: pd.Series,\n scorers: Dict[str, Callable],\n lower_quantile: float=0.05,\n upper_quantile: float=0.95,\n replications: int=50\n) -> Dict[str, Tuple[float, int]]:\n \"\"\"Evaluate the uplift model selection harms and benefits.\"\"\"\n scores = {}\n for k, fun in scorers.items():\n pfun = partial(fun, estimator)\n scores[k] = _empirical_bootstrap(\n pfun, X_deploy, y_deploy, y_train,\n q_lower=lower_quantile,\n q_upper=upper_quantile,\n replications=replications\n )\n\n return scores\n\n\ndef make_impacts(\n scorefn: Callable,\n *args,\n **kwargs\n) -> Callable:\n \"\"\"Make a deployment impact scorer.\"\"\"\n # make the scorer function\n def scorer(estimator, X_deploy, y_deploy, y_train):\n selected = estimator.select(X_deploy)\n outcomes = estimator.predict_outcomes(X_deploy)\n out_dep = 
deployment_outcomes(y_deploy, selected)\n out_ctl = _control_outcomes(y_train)\n Ir = scorefn(out_dep, out_ctl, selected, outcomes, *args, **kwargs)\n return Ir\n\n return scorer\n\n\ndef make_fair_impacts(\n scorefn: Callable,\n prot_attr: str,\n reported_group: int,\n *args,\n **kwargs\n) -> Callable:\n \"\"\"Make an impact scorer that is the disparity between groups.\"\"\"\n # make the scorer function\n def scorer(estimator, X_deploy, y_deploy, y_train):\n # get selection and control\n selected = estimator.select(X_deploy)\n outcomes = estimator.predict_outcomes(X_deploy)\n out_dep = deployment_outcomes(y_deploy, selected)\n out_ctl = _control_outcomes(y_train)\n\n # selection reported mask\n att_dep = _get_attributes(y_deploy)\n rpt_dep = np.array(att_dep[prot_attr] == reported_group)\n\n # control reported mask\n att_ctl = _get_attributes(y_train)[_get_control(y_train)]\n rpt_ctl = np.array(att_ctl[prot_attr] == reported_group)\n\n pred_out_dep = {k: v[rpt_dep] for k, v in outcomes.items()}\n\n # call the input scoring function\n Ir = scorefn(out_dep[rpt_dep], out_ctl[rpt_ctl], selected[rpt_dep],\n pred_out_dep, *args, **kwargs)\n\n return Ir\n\n return scorer\n\n\n#\n# Direct harm and Benefit scoring functions\n#\n\ndef benefit_from_receive(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n) -> float:\n \"\"\"Calculate benefit from receiving an intervention.\"\"\"\n return proportion_selected(out_dep, selected)\n\n\ndef benefit_from_receive_gini(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n prot_attr: str,\n n_percentiles: int=20,\n) -> float:\n \"\"\"Calculate benefit from receiving an intervention, continuous.\"\"\"\n attributes = _get_attributes(out_dep)\n A = np.array(attributes[prot_attr])\n G = _gini_coefficient(selected, A, n_percentiles)\n return G\n\n\ndef harm_from_unwanted(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n) -> float:\n \"\"\"Calculate the harm from receiving an unwanted intervention.\"\"\"\n selected = selected.astype(bool)\n s_napplied = sum(1 - out_dep.applied[selected])\n ns_napplied = sum(1 - out_dep.applied[~selected])\n Ir = s_napplied / (s_napplied + ns_napplied)\n return Ir\n\n\ndef harm_from_unwanted_gini(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n prot_attr: str,\n n_percentiles: int=20,\n) -> float:\n \"\"\"Calculate the harm from receiving unwanted intervention, continuous.\"\"\"\n selected = selected.astype(bool)\n napplied = (1 - out_dep.applied).astype(bool)\n s_napplied = np.logical_and(selected, napplied)\n attributes = _get_attributes(out_dep)\n A = np.array(attributes[prot_attr])\n G = _gini_coefficient(s_napplied, A, n_percentiles)\n return G\n\n\n#\n# Indirect/Causal Harm and Benefit Measures\n#\n\ndef benefit_from_acquire(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n) -> Tuple[float, int]:\n \"\"\"Calculate the benefit from acquiring the product.\"\"\"\n I_acq = sum(out_dep.acquired) / len(out_dep)\n I_c_acq = sum(out_ctl.acquired) / len(out_ctl)\n DI = I_acq - I_c_acq\n return DI\n\n\ndef harm_failed_application(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n denominator_applied: bool=False\n) -> Tuple[float, int]:\n \"\"\"Calculate the harm from a failed application.\"\"\"\n # Impact rates\n sel_app_nacq = np.logical_and(out_dep.acquired == 0, out_dep.applied == 
1)\n ctl_app_nacq = np.logical_and(out_ctl.acquired == 0, out_ctl.applied == 1)\n if denominator_applied:\n I_acq = sum(sel_app_nacq) / sum(out_dep.applied)\n I_c_acq = sum(ctl_app_nacq) / sum(out_ctl.applied)\n else:\n I_acq = sum(sel_app_nacq) / len(out_dep)\n I_c_acq = sum(ctl_app_nacq) / len(out_ctl)\n DI = I_acq - I_c_acq\n return DI\n\n\ndef harm_longterm(\n out_dep: pd.DataFrame,\n out_ctl: pd.DataFrame,\n selected: np.array,\n outcomes: Dict,\n) -> Tuple[float, int]:\n \"\"\"Calculate the harm from a long-term credit outcome.\"\"\"\n # Impact rates\n sel_acq_nsuc = np.logical_and(out_dep.success == 0, out_dep.acquired == 1)\n ctl_acq_nsuc = np.logical_and(out_ctl.success == 0, out_ctl.acquired == 1)\n Ncohort = sum(out_dep.acquired)\n I_suc = sum(sel_acq_nsuc) / Ncohort\n I_c_suc = sum(ctl_acq_nsuc) / sum(out_ctl.acquired)\n DI = I_suc - I_c_suc\n return DI\n\n\n#\n# Private module functions\n#\n\ndef _get_attributes(y: pd.Series) -> pd.DataFrame:\n \"\"\"Get the attributes from a target Series.\"\"\"\n attributes = y.index.to_frame(index=False)\n attributes.set_index(\"ID\", inplace=True)\n return attributes\n\n\ndef _get_selection_protection(\n estimator: Uplifter,\n X: Union[np.array, pd.DataFrame],\n y: pd.Series,\n prot_attr: str,\n priv_group: int\n) -> Tuple[np.array, np.array, pd.DataFrame]:\n \"\"\"Get selection, protected attribute masks, and other attributes.\"\"\"\n selection = estimator.select(X)\n attributes = _get_attributes(y)\n protected = np.array(attributes[prot_attr] != priv_group)\n return selection, protected\n\n\ndef _control_outcomes(y_train: pd.Series) -> pd.DataFrame:\n \"\"\"Get the control outcomes from the experiment.\"\"\"\n attributes = _get_attributes(y_train)\n\n # Filter only control data\n incontrol = _get_control(y_train)\n control = attributes[incontrol]\n\n # Copy not selected outcomes to outcomes\n outcomes = control[[\"ns_applied\", \"ns_acquired\", \"ns_success\"]]\n outcomes.columns = [\"applied\", \"acquired\", \"success\"]\n\n return outcomes\n\n\ndef _get_control(y):\n \"\"\"Get a mask of those in the control group.\"\"\"\n CR = np.array(y == \"CR\")\n CN = np.array(y == \"CN\")\n incontrol = np.logical_or(CR, CN)\n return incontrol\n\n\n# def _empirical_bootstrap(\n# func: Callable,\n# *data,\n# replications: int,\n# q_lower: float,\n# q_upper: float,\n# **fkwargs\n# ) -> Tuple[float, float, float]:\n# \"\"\"Get the confidence intervals using the empirical bootstrap.\"\"\"\n# # get the score from data\n# score = func(*data, **fkwargs)\n\n# return score, score, score\n\n\ndef _empirical_bootstrap(\n func: Callable,\n *data,\n replications: int,\n q_lower: float,\n q_upper: float,\n **fkwargs\n) -> Tuple[float, float, float]:\n \"\"\"Get the confidence intervals using the empirical bootstrap.\"\"\"\n # get the score from data\n score = func(*data, **fkwargs)\n N = len(data[0])\n\n # resample the data, get the score differences\n samples = np.zeros(replications)\n for r in range(replications):\n rind = np.random.choice(N, N, replace=True)\n sdata = [d.iloc[rind] for d in data]\n score_sample = func(*sdata, **fkwargs)\n samples[r] = score_sample - score\n\n # Compute the quantiles of these differences, then compute corresponding\n # quantiles for the score note that the quantiles of the differences are\n # reversed when applying to the score.\n d_l, d_u = mquantiles(samples, prob=[1. - q_lower, 1. 
- q_upper])\n score_l, score_u = score - d_l, score - d_u\n return score_l, score, score_u\n\n\n\ndef _gini_coefficient(\n selection: np.array,\n attribute: np.array,\n n_percentiles: int\n) -> float:\n \"\"\"Gini coefficient of the selection shared over the attribute.\"\"\"\n # Cut the selected cohort lift into percentiles based on their attribute\n percentiles = np.arange(n_percentiles)\n perc_idx = pd.qcut(attribute, n_percentiles, labels=percentiles)\n\n # Calculate the area under the Lorenz curve\n hist = np.array([sum(selection[perc_idx == p]) for p in percentiles])\n cum_select = np.cumsum(hist) / sum(selection)\n area = simps(np.insert(cum_select, 0, 0), # start at 0\n np.linspace(0, 1, n_percentiles + 1))\n\n G = 1. - 2. * area\n return G\n" ]
[ [ "sklearn.metrics.r2_score", "numpy.logical_and", "numpy.random.choice", "numpy.linspace", "numpy.arange", "numpy.median", "sklearn.preprocessing.OneHotEncoder", "numpy.cumsum", "pandas.DataFrame", "numpy.logical_or", "scipy.stats.multinomial", "sklearn.metrics.log_loss", "numpy.insert", "numpy.array", "numpy.zeros", "pandas.qcut", "scipy.stats.mstats.mquantiles" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
sigeisler/robustness_of_gnns_at_scale
[ "0f4844711ace599f54c2abc760b53680a80d6a32" ]
[ "rgnn_at_scale/attacks/prbcd_old.py" ]
[ "import logging\n\nfrom collections import defaultdict\nimport math\nfrom typing import Tuple\nfrom typeguard import typechecked\n\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch_sparse\nfrom torch_sparse import SparseTensor\nfrom rgnn_at_scale.helper import utils\nfrom rgnn_at_scale.attacks.base_attack import Attack, SparseAttack\n\n\nclass PRBCD(SparseAttack):\n \"\"\"Sampled and hence scalable PGD attack for graph data.\n \"\"\"\n\n @typechecked\n def __init__(self,\n keep_heuristic: str = 'WeightOnly', # 'InvWeightGradient' 'Gradient', 'WeightOnly'\n keep_weight: float = .1,\n lr_factor: float = 100,\n display_step: int = 20,\n epochs: int = 400,\n fine_tune_epochs: int = 100,\n search_space_size: int = 1_000_000,\n with_early_stropping: bool = True,\n do_synchronize: bool = False,\n eps: float = 1e-7,\n max_resamples: int = 20,\n **kwargs):\n super().__init__(**kwargs)\n\n if self.make_undirected:\n self.n_possible_edges = self.n * (self.n - 1) // 2\n else:\n self.n_possible_edges = self.n ** 2 # We filter self-loops later\n\n self.keep_heuristic = keep_heuristic\n self.keep_weight = keep_weight\n self.display_step = display_step\n self.epochs = epochs\n self.fine_tune_epochs = fine_tune_epochs\n self.search_space_size = search_space_size\n self.with_early_stropping = with_early_stropping\n self.eps = eps\n self.do_synchronize = do_synchronize\n self.max_resamples = max_resamples\n\n self.current_search_space: torch.Tensor = None\n self.modified_edge_index: torch.Tensor = None\n self.modified_edge_weight_diff: torch.Tensor = None\n\n self.lr_factor = lr_factor * max(math.log2(self.n_possible_edges / self.search_space_size), 1.)\n\n def _attack(self, n_perturbations, **kwargs):\n \"\"\"Perform attack (`n_perturbations` is increasing as it was a greedy attack).\n\n Parameters\n ----------\n n_perturbations : int\n Number of edges to be perturbed (assuming an undirected graph)\n \"\"\"\n assert self.search_space_size > n_perturbations, \\\n f'The search space size ({self.search_space_size}) must be ' \\\n + f'greater than the number of permutations ({n_perturbations})'\n self.sample_search_space(n_perturbations)\n best_accuracy = float('Inf')\n best_epoch = float('-Inf')\n self.attack_statistics = defaultdict(list)\n\n with torch.no_grad():\n logits = self.attacked_model(\n data=self.attr.to(self.device),\n adj=(self.edge_index.to(self.device), self.edge_weight.to(self.device))\n )\n loss = self.calculate_loss(logits[self.idx_attack], self.labels[self.idx_attack])\n accuracy = (\n logits.argmax(-1)[self.idx_attack] == self.labels[self.idx_attack]\n ).float().mean().item()\n logging.info(f'\\nBefore the attack - Loss: {loss.item()} Accuracy: {100 * accuracy:.3f} %\\n')\n self._append_attack_statistics(loss.item(), accuracy, 0., 0.)\n del logits\n del loss\n\n for epoch in tqdm(range(self.epochs + self.fine_tune_epochs)):\n self.modified_edge_weight_diff.requires_grad = True\n edge_index, edge_weight = self.get_modified_adj()\n\n if torch.cuda.is_available() and self.do_synchronize:\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\n logits = self.attacked_model(data=self.attr.to(self.device), adj=(edge_index, edge_weight))\n loss = self.calculate_loss(logits[self.idx_attack], self.labels[self.idx_attack])\n\n gradient = utils.grad_with_checkpoint(loss, self.modified_edge_weight_diff)[0]\n\n if torch.cuda.is_available() and self.do_synchronize:\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\n with torch.no_grad():\n 
self.modified_edge_weight_diff.requires_grad = False\n edge_weight = self.update_edge_weights(n_perturbations, epoch, gradient)[1]\n probability_mass, probability_mass_projected = self.projection(n_perturbations, edge_index, edge_weight)\n\n edge_index, edge_weight = self.get_modified_adj()\n logits = self.attacked_model(data=self.attr.to(self.device), adj=(edge_index, edge_weight))\n accuracy = (\n logits.argmax(-1)[self.idx_attack] == self.labels[self.idx_attack]\n ).float().mean().item()\n if epoch % self.display_step == 0:\n logging.info(f'\\nEpoch: {epoch} Loss: {loss.item()} Accuracy: {100 * accuracy:.3f} %\\n')\n\n if self.with_early_stropping and best_accuracy > accuracy:\n best_accuracy = accuracy\n best_epoch = epoch\n best_search_space = self.current_search_space.clone().cpu()\n best_edge_index = self.modified_edge_index.clone().cpu()\n best_edge_weight_diff = self.modified_edge_weight_diff.detach().clone().cpu()\n\n self._append_attack_statistics(loss.item(), accuracy, probability_mass, probability_mass_projected)\n\n if epoch < self.epochs - 1:\n self.resample_search_space(n_perturbations, edge_index, edge_weight, gradient)\n elif self.with_early_stropping and epoch == self.epochs - 1:\n logging.info(\n f'Loading search space of epoch {best_epoch} (accuarcy={best_accuracy}) for fine tuning\\n')\n self.current_search_space = best_search_space.to(self.device)\n self.modified_edge_index = best_edge_index.to(self.device)\n self.modified_edge_weight_diff = best_edge_weight_diff.to(self.device)\n self.modified_edge_weight_diff.requires_grad = True\n\n del logits\n del loss\n del gradient\n\n if self.with_early_stropping:\n self.current_search_space = best_search_space.to(self.device)\n self.modified_edge_index = best_edge_index.to(self.device)\n self.modified_edge_weight_diff = best_edge_weight_diff.to(self.device)\n\n edge_index = self.sample_final_edges(n_perturbations)[0]\n\n self.adj_adversary = SparseTensor.from_edge_index(\n edge_index,\n torch.ones_like(edge_index[0], dtype=torch.float32),\n (self.n, self.n)\n ).coalesce().detach()\n self.attr_adversary = self.attr\n\n @torch.no_grad()\n def sample_final_edges(self, n_perturbations: int) -> Tuple[torch.Tensor, torch.Tensor]:\n best_accuracy = float('Inf')\n s = self.modified_edge_weight_diff.abs().detach()\n s[s == self.eps] = 0\n while best_accuracy == float('Inf'):\n for i in range(self.max_resamples):\n if best_accuracy == float('Inf'):\n sampled = torch.zeros_like(s)\n sampled[torch.topk(s, n_perturbations).indices] = 1\n else:\n sampled = torch.bernoulli(s).float()\n\n if sampled.sum() > n_perturbations:\n n_samples = sampled.sum()\n logging.info(f'{i}-th sampling: too many samples {n_samples}')\n continue\n pos_modified_edge_weight_diff = sampled\n self.modified_edge_weight_diff = torch.where(\n self.modified_edge_weight_diff > 0,\n pos_modified_edge_weight_diff,\n -pos_modified_edge_weight_diff\n ).float()\n edge_index, edge_weight = self.get_modified_adj()\n logits = self.attacked_model(data=self.attr.to(self.device), adj=(edge_index, edge_weight))\n accuracy = (\n logits.argmax(-1)[self.idx_attack] == self.labels[self.idx_attack]\n ).float().mean().item()\n if best_accuracy > accuracy:\n best_accuracy = accuracy\n best_s = self.modified_edge_weight_diff.clone().cpu()\n self.modified_edge_weight_diff.data.copy_(best_s.to(self.device))\n edge_index, edge_weight = self.get_modified_adj(is_final=True)\n\n edge_weight = edge_weight.round()\n edge_mask = edge_weight == 1\n return edge_index[:, edge_mask], 
edge_weight[edge_mask]\n\n def match_search_space_on_edges(\n self,\n edge_index: torch.Tensor,\n edge_weight: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.make_undirected:\n is_in_search_space = (edge_weight != 1) & (edge_index[0] < edge_index[1])\n else:\n is_in_search_space = (edge_weight != 1) & (edge_index[0] != edge_index[1])\n assert is_in_search_space.sum() == self.current_search_space.size(0), \\\n f'search space size mismatch: {is_in_search_space.sum()} vs. {self.current_search_space.size(0)}'\n modified_edge_weight = edge_weight[is_in_search_space]\n original_edge_weight = modified_edge_weight - self.modified_edge_weight_diff\n does_original_edge_exist = torch.isclose(original_edge_weight.float(), torch.tensor(1.))\n\n return does_original_edge_exist, is_in_search_space\n\n def projection(self, n_perturbations: int, edge_index: torch.Tensor, edge_weight: torch.Tensor) -> float:\n does_original_edge_exist, is_in_search_space = self.match_search_space_on_edges(edge_index, edge_weight)\n\n pos_modified_edge_weight_diff = torch.where(\n does_original_edge_exist, -self.modified_edge_weight_diff, self.modified_edge_weight_diff\n )\n probability_mass = pos_modified_edge_weight_diff.sum().item()\n\n pos_modified_edge_weight_diff = Attack.project(n_perturbations, pos_modified_edge_weight_diff, self.eps)\n\n self.modified_edge_weight_diff = torch.where(\n does_original_edge_exist, -pos_modified_edge_weight_diff, pos_modified_edge_weight_diff\n )\n\n return probability_mass, pos_modified_edge_weight_diff.sum().item()\n\n def handle_zeros_and_ones(self):\n # Handling edge case to detect an unchanged edge via its value 1\n self.modified_edge_weight_diff.data[\n (self.modified_edge_weight_diff <= self.eps)\n & (self.modified_edge_weight_diff >= -self.eps)\n ] = self.eps\n self.modified_edge_weight_diff.data[self.modified_edge_weight_diff >= 1 - self.eps] = 1 - self.eps\n self.modified_edge_weight_diff.data[self.modified_edge_weight_diff <= -1 + self.eps] = -1 + self.eps\n\n def get_modified_adj(self, is_final: bool = False):\n if not is_final:\n self.handle_zeros_and_ones()\n\n if (\n not self.modified_edge_weight_diff.requires_grad\n or not hasattr(self.attacked_model, 'do_checkpoint')\n or not self.attacked_model.do_checkpoint\n ):\n if self.make_undirected:\n modified_edge_index, modified_edge_weight = utils.to_symmetric(\n self.modified_edge_index, self.modified_edge_weight_diff, self.n\n )\n else:\n modified_edge_index, modified_edge_weight = self.modified_edge_index, self.modified_edge_weight_diff\n edge_index = torch.cat((self.edge_index.to(self.device), modified_edge_index), dim=-1)\n edge_weight = torch.cat((self.edge_weight.to(self.device), modified_edge_weight))\n\n edge_index, edge_weight = torch_sparse.coalesce(edge_index, edge_weight, m=self.n, n=self.n, op='sum')\n else:\n # Currently (1.6.0) PyTorch does not support return arguments of `checkpoint` that do not require gradient.\n # For this reason we need this extra code and to execute it twice (due to checkpointing in fact 3 times...)\n from torch.utils import checkpoint\n\n def fuse_edges_run(modified_edge_weight_diff: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.make_undirected:\n modified_edge_index, modified_edge_weight = utils.to_symmetric(\n self.modified_edge_index, modified_edge_weight_diff, self.n\n )\n else:\n modified_edge_index, modified_edge_weight = self.modified_edge_index, self.modified_edge_weight_diff\n edge_index = torch.cat((self.edge_index.to(self.device), 
modified_edge_index), dim=-1)\n edge_weight = torch.cat((self.edge_weight.to(self.device), modified_edge_weight))\n\n edge_index, edge_weight = torch_sparse.coalesce(edge_index, edge_weight, m=self.n, n=self.n, op='sum')\n return edge_index, edge_weight\n\n # Due to bottleneck...\n if len(self.edge_weight) > 100_000_000:\n device = self.device\n self.device = 'cpu'\n self.modified_edge_index = self.modified_edge_index.to(self.device)\n edge_index, edge_weight = fuse_edges_run(self.modified_edge_weight_diff.cpu())\n self.device = device\n self.modified_edge_index = self.modified_edge_index.to(self.device)\n return edge_index.to(self.device), edge_weight.to(self.device)\n\n with torch.no_grad():\n edge_index = fuse_edges_run(self.modified_edge_weight_diff)[0]\n\n edge_weight = checkpoint.checkpoint(\n lambda *input: fuse_edges_run(*input)[1],\n self.modified_edge_weight_diff\n )\n\n return edge_index, edge_weight\n\n def update_edge_weights(self, n_perturbations: int, epoch: int,\n gradient: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Updates the edge weights and adaptively, heuristically refined the learning rate such that (1) it is\n independent of the number of perturbations (assuming an undirected adjacency matrix) and (2) to decay learning\n rate during fine-tuning (i.e. fixed search space).\n\n Parameters\n ----------\n n_perturbations : int\n Number of perturbations.\n epoch : int\n Number of epochs until fine tuning.\n gradient : torch.Tensor\n The current gradient.\n\n Returns\n -------\n Tuple[torch.Tensor, torch.Tensor]\n Updated edge indices and weights.\n \"\"\"\n lr_factor = n_perturbations / self.n / 2 * self.lr_factor\n lr = lr_factor / np.sqrt(max(0, epoch - self.epochs) + 1)\n self.modified_edge_weight_diff.data.add_(lr * gradient)\n\n return self.get_modified_adj()\n\n def sample_search_space(self, n_perturbations: int = 0):\n for i in range(self.max_resamples):\n self.current_search_space = torch.randint(\n self.n_possible_edges, (self.search_space_size,), device=self.device)\n self.current_search_space = torch.unique(self.current_search_space, sorted=True)\n if self.make_undirected:\n self.modified_edge_index = PRBCD.linear_to_triu_idx(self.n, self.current_search_space)\n else:\n self.modified_edge_index = PRBCD.linear_to_full_idx(self.n, self.current_search_space)\n is_not_self_loop = self.modified_edge_index[0] != self.modified_edge_index[1]\n self.current_search_space = self.current_search_space[is_not_self_loop]\n self.modified_edge_index = self.modified_edge_index[:, is_not_self_loop]\n\n self.modified_edge_weight_diff = torch.full_like(\n self.current_search_space, self.eps, dtype=torch.float32, requires_grad=True\n )\n if self.current_search_space.size(0) >= n_perturbations:\n break\n\n def resample_search_space(self, n_perturbations: int, edge_index: torch.Tensor,\n edge_weight: torch.Tensor, gradient: torch.Tensor):\n if self.keep_heuristic == 'WeightOnly':\n sorted_idx = torch.argsort(self.modified_edge_weight_diff.abs())\n idx_keep = (self.modified_edge_weight_diff <= self.eps).sum().long()\n if idx_keep < sorted_idx.size(0) // 2:\n idx_keep = sorted_idx.size(0) // 2\n else:\n raise NotImplementedError('Only keep_heuristic=`WeightOnly` supported')\n\n sorted_idx = sorted_idx[idx_keep:]\n self.current_search_space = self.current_search_space[sorted_idx]\n self.modified_edge_index = self.modified_edge_index[:, sorted_idx]\n self.modified_edge_weight_diff = self.modified_edge_weight_diff[sorted_idx]\n\n # Sample until enough edges were drawn\n for 
i in range(self.max_resamples):\n n_edges_resample = self.search_space_size - self.current_search_space.size(0)\n lin_index = torch.randint(self.n_possible_edges, (n_edges_resample,), device=self.device)\n self.current_search_space, unique_idx = torch.unique(\n torch.cat((self.current_search_space, lin_index)),\n sorted=True,\n return_inverse=True\n )\n if self.make_undirected:\n self.modified_edge_index = PRBCD.linear_to_triu_idx(self.n, self.current_search_space)\n else:\n self.modified_edge_index = PRBCD.linear_to_full_idx(self.n, self.current_search_space)\n # Merge existing weights with new edge weights\n modified_edge_weight_diff_old = self.modified_edge_weight_diff.clone()\n self.modified_edge_weight_diff = self.eps * torch.ones_like(self.current_search_space, dtype=torch.float32)\n self.modified_edge_weight_diff[\n unique_idx[:modified_edge_weight_diff_old.size(0)]\n ] = modified_edge_weight_diff_old\n\n if not self.make_undirected:\n is_not_self_loop = self.modified_edge_index[0] != self.modified_edge_index[1]\n self.current_search_space = self.current_search_space[is_not_self_loop]\n self.modified_edge_index = self.modified_edge_index[:, is_not_self_loop]\n self.modified_edge_weight_diff = self.modified_edge_weight_diff[is_not_self_loop]\n\n if self.current_search_space.size(0) > n_perturbations:\n break\n\n @staticmethod\n def linear_to_triu_idx(n: int, lin_idx: torch.Tensor) -> torch.Tensor:\n row_idx = (\n n\n - 2\n - torch.floor(torch.sqrt(-8 * lin_idx.double() + 4 * n * (n - 1) - 7) / 2.0 - 0.5)\n ).long()\n col_idx = (\n lin_idx\n + row_idx\n + 1 - n * (n - 1) // 2\n + (n - row_idx) * ((n - row_idx) - 1) // 2\n )\n return torch.stack((row_idx, col_idx))\n\n @staticmethod\n def linear_to_full_idx(n: int, lin_idx: torch.Tensor) -> torch.Tensor:\n row_idx = lin_idx // n\n col_idx = lin_idx % n\n return torch.stack((row_idx, col_idx))\n\n def _append_attack_statistics(self, loss: float, accuracy: float,\n probability_mass: float, probability_mass_projected: float):\n self.attack_statistics['loss'].append(loss)\n self.attack_statistics['accuracy'].append(accuracy)\n self.attack_statistics['nonzero_weights'].append((self.modified_edge_weight_diff.abs() > self.eps).sum().item())\n self.attack_statistics['probability_mass'].append(probability_mass)\n self.attack_statistics['probability_mass_projected'].append(probability_mass_projected)\n" ]
[ [ "torch.cuda.synchronize", "torch.randint", "torch.cat", "torch.topk", "torch.zeros_like", "torch.cuda.empty_cache", "torch.tensor", "torch.full_like", "torch.unique", "torch.no_grad", "torch.bernoulli", "torch.where", "torch.cuda.is_available", "torch.stack", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
betaBison/umnitsa_rpi
[ "c9a225781d91c1fdf16eb2234af8ff1784876301" ]
[ "src/umnitsa_hardware/src/motors.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nAuthor: D. Knowles\nDesc : ROS node that outputs to the umnitsaControl board\n\"\"\"\nimport os\nimport time\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom umnitsa_msgs.msg import Joystick\nif (os.environ['ARCHITECTURE'] == 'raspi'):\n\timport RPi.GPIO as GPIO\nelif (os.environ['ARCHITECTURE'] == 'nano'):\n\timport nanpy\nfrom math import atan2, cos, pi, sqrt\nimport numpy as np\n\nclass Motors():\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tinitialize pins as motor outputs\n\t\t\"\"\"\n\n\t\tself.DB1 = rospy.get_param('DB1') # Driver Board #1 INH\n\t\tself.M1 = rospy.get_param('M1') # DB #1 IN1 & IN2\n\t\tself.M2 = rospy.get_param('M2') # DB #1 IN3 & IN4\n\t\tself.DB2 = rospy.get_param('DB2') # DB #2 INH\n\t\tself.M3 = rospy.get_param('M3') # DB #2 IN1 & IN2\n\t\tself.M4 = rospy.get_param('M4') # DB #2 IN3 & IN4\n\n\t\tself.turbo = False\t\t# turbo mode\n\t\tself.arch = os.environ['ARCHITECTURE']\n\n\t\tif self.arch == 'raspi':\n\t\t\tGPIO.setmode(GPIO.BOARD) # use RasPi pin numbers\n\t\t\tGPIO.setwarnings(False) # don't show setup warnings\n\t\t\t# set pins as outputs and initialize to False/Low\n\t\t\tGPIO.setup(self.DB1,GPIO.OUT,initial=False)\n\t\t\tGPIO.setup(self.M1,GPIO.OUT,initial=False)\n\t\t\tGPIO.setup(self.M2,GPIO.OUT,initial=False)\n\t\t\tGPIO.setup(self.DB2,GPIO.OUT,initial=False)\n\t\t\tGPIO.setup(self.M3,GPIO.OUT,initial=False)\n\t\t\tGPIO.setup(self.M4,GPIO.OUT,initial=False)\n\n\t\t\t# setup all pwm outputs\n\t\t\tself.frequency = 500.0 # pwm frequency\n\t\t\tself.PWM_M1 = GPIO.PWM(self.M1,self.frequency)\n\t\t\tself.PWM_M1.start(0.0)\n\t\t\tself.PWM_M2 = GPIO.PWM(self.M2,self.frequency)\n\t\t\tself.PWM_M2.start(0.0)\n\t\t\tself.PWM_M3 = GPIO.PWM(self.M3,self.frequency)\n\t\t\tself.PWM_M3.start(0.0)\n\t\t\tself.PWM_M4 = GPIO.PWM(self.M4,self.frequency)\n\t\t\tself.PWM_M4.start(0.0)\n\n\t\telse:\n\t\t\tno_connection = True\n\t\t\twhile no_connection:\n\t\t\t\ttry:\n\t\t\t\t\tconnection = nanpy.SerialManager()\n\t\t\t\t\tself.a = nanpy.ArduinoApi(connection = connection)\n\t\t\t\t\tno_connection = False\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Failed to connect to Arduino\")\n\t\t\tself.a.pinMode(self.DB1, self.a.OUTPUT)\n\t\t\tself.a.pinMode(self.M1, self.a.OUTPUT)\n\t\t\tself.a.pinMode(self.M2, self.a.OUTPUT)\n\t\t\tself.a.pinMode(self.DB2, self.a.OUTPUT)\n\t\t\tself.a.pinMode(self.M3, self.a.OUTPUT)\n\t\t\tself.a.pinMode(self.M4, self.a.OUTPUT)\n\n\tdef subscribe(self):\n\t\trospy.init_node('motors', anonymous=False)\n\t\trospy.Subscriber('cmd_vel',Twist, self.updateOutput)\n\t\trospy.Subscriber('commands',Joystick, self.updateSettings)\n\t\t# spin() simply keeps python from exiting until this node is stopped\n\t\trospy.spin()\n\n\tdef updateSettings(self,commands):\n\t\tif commands.TYPE == \"BUTTON\":\n\t\t\tif commands.X:\n\t\t\t\tself.turbo = not(self.turbo)\n\n\tdef updateOutput(self,cmd_vel):\n\t\t# check if any toggle is not 0.0 (i.e. 
False)\n\t\tif cmd_vel.linear.x or cmd_vel.linear.y or cmd_vel.angular.z:\n\t\t\tlateral = self.lateral(cmd_vel.linear.x,cmd_vel.linear.y)\n\t\t\trotation = self.rotation(cmd_vel.angular.z)\n\t\t\tmotor_output = lateral + rotation\n\t\t\tif np.amax(abs(motor_output)) > 1.0:\n\t\t\t\t# scale result by highest output\n\t\t\t\tmotor_output /= np.amax(abs(motor_output))\n\t\t\telif self.turbo:\n\t\t\t\t# turbo mode amplifies all signals relative to highest\n\t\t\t\tmotor_output /= np.amax(abs(motor_output))\n\t\t\tx_M1 = motor_output.item(0)\n\t\t\tx_M2 = motor_output.item(1)\n\t\t\tx_M3 = motor_output.item(2)\n\t\t\tx_M4 = motor_output.item(3)\n\n\t\t\tprint(\"motor outputs: \",x_M1,x_M2,x_M3,x_M4)\n\n\t\t\tif self.arch == 'raspi':\n\t\t\t\tself.PWM_M1.ChangeDutyCycle(50.0 + x_M1*50.0)\n\t\t\t\tself.PWM_M2.ChangeDutyCycle(50.0 + x_M2*50.0)\n\t\t\t\tself.PWM_M3.ChangeDutyCycle(50.0 + x_M3*50.0)\n\t\t\t\tself.PWM_M4.ChangeDutyCycle(50.0 + x_M4*50.0)\n\n\t\t\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\t\t\tGPIO.output(self.DB2,True) # enable DB #2\n\t\t\telse:\n\t\t\t\tself.a.analogWrite(self.M1,255*(0.5 + x_M1*0.5))\n\t\t\t\tself.a.analogWrite(self.M2,255*(0.5 + x_M2*0.5))\n\t\t\t\tself.a.analogWrite(self.M3,255*(0.5 + x_M3*0.5))\n\t\t\t\tself.a.analogWrite(self.M4,255*(0.5 + x_M4*0.5))\n\n\t\t\t\tself.a.digitalWrite(self.DB1,self.a.HIGH) # enable DB #1\n\t\t\t\tself.a.digitalWrite(self.DB2,self.a.HIGH) # enable DB #2\n\n\t\telse:\n\t\t\t# disable output if right and left toggle are 0.0\n\t\t\tif self.arch == 'raspi':\n\t\t\t\tGPIO.output(self.DB1,False)\n\t\t\t\tGPIO.output(self.DB2,False)\n\t\t\telse:\n\t\t\t\tself.a.digitalWrite(self.DB1,self.a.LOW)\n\t\t\t\tself.a.digitalWrite(self.DB2,self.a.LOW)\n\n\n\tdef rotation(self,LTOGRIGHT):\n\t\t\"\"\"\n\t\trotate robot with the left toggle\n\t\t\"\"\"\n\t\tx = -LTOGRIGHT\n\n\t\treturn np.array([x,x,x,x])\n\n\n\tdef lateral(self,vel_x,vel_y):\n\t\t\"\"\"\n\t\tmove robot laterally with the right toggle\n\t\t\"\"\"\n\t\tif abs(vel_x) > 0.0 or abs(vel_y) > 0.0:\n\n\t\t\tdirection = atan2(vel_x,-vel_y) # direction of toggle movement\n\t\t\tmag = sqrt(vel_x**2+vel_y**2) # magnitude of toggle movement\n\t\t\tprint(\"magnitude=\",mag)\n\n\t\t\t# compute each motor throttle to move in toggle direction\n\t\t\tx_M1 = mag*cos(direction+pi/4.)\n\t\t\tx_M2 = -mag*cos(direction+pi/4.)\n\t\t\tx_M3 = mag*cos(direction-pi/4.)\n\t\t\tx_M4 = -mag*cos(direction-pi/4.)\n\n\t\t\treturn np.array([x_M1,x_M2,x_M3,x_M4])\n\t\telse:\n\t\t\treturn np.array([0.0, 0.0, 0.0, 0.0])\n\n\tdef cw(self,x):\n\t\t\"\"\"\n\t\tmoves robot clockwise\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 + x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\tdef ccw(self,x):\n\t\t\"\"\"\n\t\tmoves robot counter clockwise\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 - x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\tdef forward(self,x):\n\t\t\"\"\"\n\t\tmoves robot backwards\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 + 
x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 - x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\tdef backward(self,x):\n\t\t\"\"\"\n\t\tmoves robot forward\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 + x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\n\tdef right(self,x):\n\t\t\"\"\"\n\t\tmoves robot right\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 - x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\n\tdef left(self,x):\n\t\t\"\"\"\n\t\tmoves robot left\n\t\tinput: x = throttle (0.0,1.0)\n\t\t\"\"\"\n\t\tself.PWM_M1.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M2.ChangeDutyCycle(50.0 + x*50.0)\n\t\tself.PWM_M3.ChangeDutyCycle(50.0 - x*50.0)\n\t\tself.PWM_M4.ChangeDutyCycle(50.0 + x*50.0)\n\n\t\tGPIO.output(self.DB1,True) # enable DB #1\n\t\tGPIO.output(self.DB2,True) # enable DB #2\n\n\tdef disable(self):\n\t\t\"\"\"\n\t\tdisable both motor driver boards\n\t\t\"\"\"\n\t\tif self.arch == 'raspi':\n\t\t\tGPIO.output(self.DB1,False)\n\t\t\tGPIO.output(self.DB2,False)\n\t\telse:\n\t\t\tself.a.digitalWrite(self.DB1,self.a.LOW)\n\t\t\tself.a.digitalWrite(self.DB2,self.a.LOW)\n\nif __name__ == '__main__':\n\ttry:\n\t\tsubscriber = Motors()\n\t\tsubscriber.subscribe()\n\texcept rospy.ROSInterruptException:\n\t\tsubscriber.disable()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wennyyuan/pytorch3d
[ "4bb3fff52b7e26ec0f013021cb26fab7db3d8e0b" ]
[ "docs/examples/pulsar_optimization_unified.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\"\"\"\nThis example demonstrates scene optimization with the PyTorch3D\npulsar interface. For this, a reference image has been pre-generated\n(you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png`).\nThe scene is initialized with random spheres. Gradient-based\noptimization is used to converge towards a faithful\nscene representation.\n\"\"\"\nimport logging\nimport math\n\nimport cv2\nimport imageio\nimport numpy as np\nimport torch\n\n# Import `look_at_view_transform` as needed in the suggestion later in the\n# example.\nfrom pytorch3d.renderer.cameras import PerspectiveCameras # , look_at_view_transform\nfrom pytorch3d.renderer.points import (\n PointsRasterizationSettings,\n PointsRasterizer,\n PulsarPointsRenderer,\n)\nfrom pytorch3d.structures.pointclouds import Pointclouds\nfrom torch import nn, optim\n\n\nLOGGER = logging.getLogger(__name__)\nN_POINTS = 10_000\nWIDTH = 1_000\nHEIGHT = 1_000\nDEVICE = torch.device(\"cuda\")\n\n\nclass SceneModel(nn.Module):\n \"\"\"\n A simple scene model to demonstrate use of pulsar in PyTorch modules.\n\n The scene model is parameterized with sphere locations (vert_pos),\n channel content (vert_col), radiuses (vert_rad), camera position (cam_pos),\n camera rotation (cam_rot) and sensor focal length and width (cam_sensor).\n\n The forward method of the model renders this scene description. Any\n of these parameters could instead be passed as inputs to the forward\n method and come from a different model.\n \"\"\"\n\n def __init__(self):\n super(SceneModel, self).__init__()\n self.gamma = 1.0\n # Points.\n torch.manual_seed(1)\n vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 10.0\n vert_pos[:, 2] += 25.0\n vert_pos[:, :2] -= 5.0\n self.register_parameter(\"vert_pos\", nn.Parameter(vert_pos, requires_grad=True))\n self.register_parameter(\n \"vert_col\",\n nn.Parameter(\n torch.ones(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 0.5,\n requires_grad=True,\n ),\n )\n self.register_parameter(\n \"vert_rad\",\n nn.Parameter(\n torch.ones(N_POINTS, dtype=torch.float32) * 0.3, requires_grad=True\n ),\n )\n self.register_buffer(\n \"cam_params\",\n torch.tensor(\n [0.0, 0.0, 0.0, 0.0, math.pi, 0.0, 5.0, 2.0], dtype=torch.float32\n ),\n )\n self.cameras = PerspectiveCameras(\n # The focal length must be double the size for PyTorch3D because of the NDC\n # coordinates spanning a range of two - and they must be normalized by the\n # sensor width (see the pulsar example). 
This means we need here\n # 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.\n focal_length=5.0,\n R=torch.eye(3, dtype=torch.float32, device=DEVICE)[None, ...],\n T=torch.zeros((1, 3), dtype=torch.float32, device=DEVICE),\n image_size=((HEIGHT, WIDTH),),\n device=DEVICE,\n )\n raster_settings = PointsRasterizationSettings(\n image_size=(HEIGHT, WIDTH),\n radius=self.vert_rad,\n )\n rasterizer = PointsRasterizer(\n cameras=self.cameras, raster_settings=raster_settings\n )\n self.renderer = PulsarPointsRenderer(rasterizer=rasterizer, n_track=32)\n\n def forward(self):\n # The Pointclouds object creates copies of it's arguments - that's why\n # we have to create a new object in every forward step.\n pcl = Pointclouds(\n points=self.vert_pos[None, ...], features=self.vert_col[None, ...]\n )\n return self.renderer(\n pcl,\n gamma=(self.gamma,),\n zfar=(45.0,),\n znear=(1.0,),\n radius_world=True,\n bg_col=torch.ones((3,), dtype=torch.float32, device=DEVICE),\n )[0]\n\n\ndef cli():\n \"\"\"\n Scene optimization example using pulsar and the unified PyTorch3D interface.\n \"\"\"\n LOGGER.info(\"Loading reference...\")\n # Load reference.\n ref = (\n torch.from_numpy(\n imageio.imread(\n \"../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png\"\n )[:, ::-1, :].copy()\n ).to(torch.float32)\n / 255.0\n ).to(DEVICE)\n # Set up model.\n model = SceneModel().to(DEVICE)\n # Optimizer.\n optimizer = optim.SGD(\n [\n {\"params\": [model.vert_col], \"lr\": 1e0},\n {\"params\": [model.vert_rad], \"lr\": 5e-3},\n {\"params\": [model.vert_pos], \"lr\": 1e-2},\n ]\n )\n LOGGER.info(\"Optimizing...\")\n # Optimize.\n for i in range(500):\n optimizer.zero_grad()\n result = model()\n # Visualize.\n result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)\n cv2.imshow(\"opt\", result_im[:, :, ::-1])\n overlay_img = np.ascontiguousarray(\n ((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[\n :, :, ::-1\n ]\n )\n overlay_img = cv2.putText(\n overlay_img,\n \"Step %d\" % (i),\n (10, 40),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 0, 0),\n 2,\n cv2.LINE_AA,\n False,\n )\n cv2.imshow(\"overlay\", overlay_img)\n cv2.waitKey(1)\n # Update.\n loss = ((result - ref) ** 2).sum()\n LOGGER.info(\"loss %d: %f\", i, loss.item())\n loss.backward()\n optimizer.step()\n # Cleanup.\n with torch.no_grad():\n model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)\n # Remove points.\n model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0\n model.vert_rad.data[model.vert_rad < 0.001] = 0.0001\n vd = (\n (model.vert_col - torch.ones(3, dtype=torch.float32).to(DEVICE))\n .abs()\n .sum(dim=1)\n )\n model.vert_pos.data[vd <= 0.2] = -1000.0\n LOGGER.info(\"Done.\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n cli()\n" ]
[ [ "torch.nn.Parameter", "torch.ones", "torch.zeros", "torch.manual_seed", "torch.eye", "torch.tensor", "torch.no_grad", "torch.rand", "torch.optim.SGD", "torch.device", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JoOkuma/pytorch-metric-learning
[ "4b17eb269d247c8725afcba100f2214680060ea3" ]
[ "examples/example_TrainWithClassifier.py" ]
[ "# The testing module requires faiss\r\n# So if you don't have that, then this import will break\r\nfrom pytorch_metric_learning import losses, miners, samplers, trainers, testers\r\nimport pytorch_metric_learning.utils.logging_presets as logging_presets\r\nimport numpy as np\r\nfrom torchvision import datasets, models, transforms\r\nimport torch\r\nimport logging\r\nfrom utils_for_examples import MLP, Identity\r\nlogging.getLogger().setLevel(logging.INFO)\r\n\r\nimport pytorch_metric_learning\r\nlogging.info(\"VERSION %s\"%pytorch_metric_learning.__version__)\r\n\r\n\r\n##############################\r\n########## Training ##########\r\n##############################\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n# Set trunk model and replace the softmax layer with an identity function\r\ntrunk = models.resnet18(pretrained=True)\r\ntrunk_output_size = trunk.fc.in_features\r\ntrunk.fc = Identity()\r\ntrunk = torch.nn.DataParallel(trunk.to(device))\r\n\r\n# Set embedder model. This takes in the output of the trunk and outputs 64 dimensional embeddings\r\nembedder = torch.nn.DataParallel(MLP([trunk_output_size, 64]).to(device))\r\n\r\n# Set the classifier. The classifier will take the embeddings and output a 100 dimensional vector.\r\n# (There are 100 classes in CIFAR100, which is the dataset we'll use in this example.)\r\n# We'll specify the classification loss further down in the code.\r\nclassifier = torch.nn.DataParallel(MLP([64, 100])).to(device)\r\n\r\n# Set optimizers\r\ntrunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.00001, weight_decay=0.00005)\r\nembedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.00001, weight_decay=0.00005)\r\nclassifier_optimizer = torch.optim.Adam(classifier.parameters(), lr=0.00001, weight_decay=0.00005)\r\n\r\n# Set the image transforms\r\ntrain_transform = transforms.Compose([transforms.Resize(256),\r\n transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=227),\r\n transforms.RandomHorizontalFlip(0.5),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n\r\nval_transform = transforms.Compose([transforms.Resize(256),\r\n transforms.CenterCrop(227),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\r\n\r\n# Set the datasets\r\ntrain_dataset = datasets.CIFAR100(root=\"CIFAR100_Dataset\", train=True, transform=train_transform, download=True)\r\nval_dataset = datasets.CIFAR100(root=\"CIFAR100_Dataset\", train=False, transform=val_transform, download=True)\r\n\r\n# Set the loss function\r\nloss = losses.TripletMarginLoss(margin=0.01)\r\n\r\n# Set the classification loss:\r\nclassification_loss = torch.nn.CrossEntropyLoss()\r\n\r\n# Set the mining function\r\nminer = miners.MultiSimilarityMiner(epsilon=0.1)\r\n\r\n# Set the dataloader sampler\r\nsampler = samplers.MPerClassSampler(train_dataset.targets, m=4)\r\n\r\n# Set other training parameters\r\nbatch_size = 32\r\nnum_epochs = 2\r\niterations_per_epoch = 100\r\n\r\n# Package the above stuff into dictionaries.\r\nmodels = {\"trunk\": trunk, \"embedder\": embedder, \"classifier\": classifier}\r\noptimizers = {\"trunk_optimizer\": trunk_optimizer, \"embedder_optimizer\": embedder_optimizer, \"classifier_optimizer\": classifier_optimizer}\r\nloss_funcs = {\"metric_loss\": loss, \"classifier_loss\": classification_loss}\r\nmining_funcs = {\"post_gradient_miner\": miner}\r\n\r\n# We can specify loss weights if we 
want to. This is optional\r\nloss_weights = {\"metric_loss\": 1, \"classifier_loss\": 0.5}\r\n\r\nrecord_keeper, _, _ = logging_presets.get_record_keeper(\"example_logs\", \"example_tensorboard\")\r\nhooks = logging_presets.get_hook_container(record_keeper)\r\ndataset_dict = {\"val\": val_dataset}\r\nmodel_folder = \"example_saved_models\"\r\n\r\n# Create the tester\r\ntester = testers.GlobalEmbeddingSpaceTester(end_of_testing_hook=hooks.end_of_testing_hook)\r\nend_of_epoch_hook = hooks.end_of_epoch_hook(tester, dataset_dict, model_folder)\r\ntrainer = trainers.TrainWithClassifier(models,\r\n optimizers,\r\n batch_size,\r\n loss_funcs,\r\n mining_funcs,\r\n iterations_per_epoch,\r\n train_dataset,\r\n loss_weights=loss_weights,\r\n sampler=sampler,\r\n end_of_iteration_hook=hooks.end_of_iteration_hook,\r\n end_of_epoch_hook=end_of_epoch_hook)\r\n\r\ntrainer.train(num_epochs=num_epochs)" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xk97/test_stats
[ "7f985988e72bd375c2011b29d42315a62da7c86c" ]
[ "monty_hall.py" ]
[ "#%% Monty Hall problem\n# n_doors with 1 door with award, pick inital y, k rounds of opportunity to switch\nimport numpy as np\n\nn_trails, n_doors = 1000, 3\nk = n_doors - 2\n\nidx_X = np.random.choice(range(n_doors), n_trails)\nX = np.zeros((n_trails, n_doors))\nfor i in range(n_trails):\n X[i, idx_X[i]] = 1\nprint(X)\n#%% no change in choice\ny = np.random.choice(range(n_doors), n_trails)\nprint(f'corrected ratio: {np.sum(y==idx_X) / n_trails}')\n#%% change choice after k round\nfor i in range(k): # K<= X.shape[1] - 2\n # random drop idx not in (y, indx_X)\n pass\nprint(f'win by switching: {np.sum(y != idx_X) / n_trails}')\n\n\n" ]
[ [ "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lewisyangliu/LDP
[ "37d5b7a0ed08bd281fc880f8829535047c5a9e1c" ]
[ "code/model/__init__.py" ]
[ "import os\nfrom importlib import import_module\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass Model(nn.Module):\n def __init__(self, args, ckp):\n super(Model, self).__init__()\n print('Making model...')\n\n self.precision = args.precision\n self.cpu = args.cpu\n self.device = torch.device('cpu' if args.cpu else 'cuda')\n self.n_GPUs = args.n_GPUs\n self.save_models = args.save_models\n\n module = import_module('model.' + args.model.lower())\n self.model = module.make_model(args).to(self.device)\n if args.precision == 'half': self.model.half()\n\n if not args.cpu and args.n_GPUs > 1:\n self.model = nn.DataParallel(self.model, range(args.n_GPUs))\n\n self.load(\n ckp.dir,\n pre_train=args.pre_train,\n resume=args.resume,\n cpu=args.cpu\n )\n if args.print_model: print(self.model)\n\n def forward(self, x):\n \n return self.model(x)\n\n def get_model(self):\n if self.n_GPUs == 1:\n return self.model\n else:\n return self.model.module\n\n def state_dict(self, **kwargs):\n target = self.get_model()\n return target.state_dict(**kwargs)\n\n def save(self, apath, epoch, is_best=False):\n target = self.get_model()\n torch.save(\n target.state_dict(), \n os.path.join(apath, 'model', 'model_latest.pt')\n )\n if is_best:\n torch.save(\n target.state_dict(),\n os.path.join(apath, 'model', 'model_best.pt')\n )\n \n if self.save_models:\n torch.save(\n target.state_dict(),\n os.path.join(apath, 'model', 'model_{}.pt'.format(epoch))\n )\n\n def load(self, apath, pre_train='.', resume=-1, cpu=False):\n if cpu:\n kwargs = {'map_location': lambda storage, loc: storage}\n else:\n kwargs = {}\n\n if resume == -1:\n print('Resuming model from {}'.format('the latest'))\n self.get_model().load_state_dict(\n torch.load(\n os.path.join(apath, 'model', 'model_latest.pt'),\n **kwargs\n ),\n strict=False\n )\n elif resume == 0:\n if pre_train != '.':\n print('Loading model from {}'.format(pre_train))\n self.get_model().load_state_dict(\n torch.load(pre_train, **kwargs),\n strict=False\n )\n else:\n self.get_model().load_state_dict(\n torch.load(\n os.path.join(apath, 'model', 'model_{}.pt'.format(resume)),\n **kwargs\n ),\n strict=False\n )\n\n" ]
[ [ "torch.device", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LaiYongqiang/mindspore
[ "1b7a38ccd86b55af50a0ea55c7f2f43813ed3e0e" ]
[ "mindspore/nn/optim/adam.py" ]
[ "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"adam\"\"\"\nimport numpy as np\n\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.initializer import initializer\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import composite as C\nfrom mindspore.ops import functional as F\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.common.tensor import Tensor\nfrom mindspore._checkparam import Validator as validator\nfrom mindspore._checkparam import Rel\nfrom .optimizer import Optimizer\nfrom .optimizer import opt_init_args_register\n\n_adam_opt = C.MultitypeFuncGraph(\"adam_opt\")\n_scaler_one = Tensor(1, mstype.int32)\n_scaler_ten = Tensor(10, mstype.float32)\n\n\n@_adam_opt.register(\"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Number\", \"Tensor\", \"Tensor\", \"Tensor\",\n \"Tensor\", \"Bool\", \"Bool\")\ndef _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter):\n \"\"\"\n Update parameters.\n\n Args:\n beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).\n beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).\n eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0.\n lr (Tensor): Learning rate.\n weight_decay (numbers.Number): Weight decay. 
Should be equal to or greater than 0.\n param (Tensor): Parameters.\n m (Tensor): m value of parameters.\n v (Tensor): v value of parameters.\n gradient (Tensor): Gradient of parameters.\n decay_flag (bool): Applies weight decay or not.\n optim_filter (bool): Applies parameter update or not.\n\n Returns:\n Tensor, the new value of v after updating.\n \"\"\"\n op_cast = P.Cast()\n if optim_filter:\n op_mul = P.Mul()\n op_square = P.Square()\n op_sqrt = P.Sqrt()\n op_cast = P.Cast()\n op_reshape = P.Reshape()\n op_shape = P.Shape()\n param_fp32 = op_cast(param, mstype.float32)\n m_fp32 = op_cast(m, mstype.float32)\n v_fp32 = op_cast(v, mstype.float32)\n gradient_fp32 = op_cast(gradient, mstype.float32)\n\n next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32)\n - beta1, gradient_fp32)\n\n next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32)\n - beta2, op_square(gradient_fp32))\n\n update = next_m / (eps + op_sqrt(next_v))\n if decay_flag:\n update = op_mul(weight_decay, param_fp32) + update\n\n update_with_lr = op_mul(lr, update)\n next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))\n\n next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param))))\n next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m))))\n next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v))))\n\n return op_cast(next_param, F.dtype(param))\n return op_cast(gradient, F.dtype(param))\n\n\n@_adam_opt.register(\"Function\", \"Function\", \"Function\", \"Function\", \"Bool\", \"Bool\", \"Bool\", \"Tensor\", \"Tensor\",\n \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"RowTensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Bool\", \"Bool\")\ndef _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power,\n beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable):\n \"\"\"Apply sparse adam optimizer to the weight parameter when the gradient is sparse.\"\"\"\n success = True\n indices = gradient.indices\n values = gradient.values\n if ps_parameter and not cache_enable:\n op_shape = P.Shape()\n shapes = (op_shape(param), op_shape(m), op_shape(v),\n op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),\n op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))\n success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,\n eps, values, indices), shapes), param))\n return success\n\n if not target:\n success = F.depend(success, sparse_opt(param, m, v, beta1_power, beta2_power, lr, beta1, beta2,\n eps, values, indices))\n else:\n op_mul = P.Mul()\n op_square = P.Square()\n op_sqrt = P.Sqrt()\n scatter_add = P.ScatterAdd(use_locking)\n\n success = F.depend(success, F.assign(m, op_mul(beta1, m)))\n success = F.depend(success, F.assign(v, op_mul(beta2, v)))\n\n grad_indices = gradient.indices\n grad_value = gradient.values\n\n next_m = scatter_add(m,\n grad_indices,\n op_mul(F.tuple_to_array((1.0,)) - beta1, grad_value))\n\n next_v = scatter_add(v,\n grad_indices,\n op_mul(F.tuple_to_array((1.0,)) - beta2, op_square(grad_value)))\n\n if use_nesterov:\n m_temp = next_m * _scaler_ten\n F.assign(m, op_mul(beta1, next_m))\n div_value = scatter_add(m,\n op_mul(grad_indices, _scaler_one),\n op_mul(F.tuple_to_array((1.0,)) - beta1, grad_value))\n param_update = div_value / (op_sqrt(next_v) + eps)\n F.assign(m, m_temp / _scaler_ten)\n else:\n param_update = next_m / 
(op_sqrt(next_v) + eps)\n\n lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power)\n next_param = param - lr_t * param_update\n\n success = F.depend(success, F.assign(param, next_param))\n success = F.depend(success, F.assign(m, next_m))\n success = F.depend(success, F.assign(v, next_v))\n\n return success\n\n\n@_adam_opt.register(\"Function\", \"Function\", \"Function\", \"Function\", \"Bool\", \"Bool\", \"Bool\", \"Tensor\", \"Tensor\",\n \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Bool\", \"Bool\")\ndef _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target,\n beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param,\n moment1, moment2, ps_parameter, cache_enable):\n \"\"\"Apply adam optimizer to the weight parameter using Tensor.\"\"\"\n success = True\n if ps_parameter and not cache_enable:\n op_shape = P.Shape()\n success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),\n (op_shape(param), op_shape(moment1), op_shape(moment2))), param))\n else:\n success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,\n eps, gradient))\n return success\n\n\n@_adam_opt.register(\"Function\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\",\n \"Tensor\", \"Tensor\")\ndef _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment1, moment2):\n \"\"\"Apply AdamOffload optimizer to the weight parameter using Tensor.\"\"\"\n success = True\n delat_param = opt(moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2, eps, gradient)\n success = F.depend(success, F.assign_add(param, delat_param))\n return success\n\n\ndef _check_param_value(beta1, beta2, eps, prim_name):\n \"\"\"Check the type of inputs.\"\"\"\n validator.check_value_type(\"beta1\", beta1, [float], prim_name)\n validator.check_value_type(\"beta2\", beta2, [float], prim_name)\n validator.check_value_type(\"eps\", eps, [float], prim_name)\n validator.check_float_range(beta1, 0.0, 1.0, Rel.INC_NEITHER, \"beta1\", prim_name)\n validator.check_float_range(beta2, 0.0, 1.0, Rel.INC_NEITHER, \"beta2\", prim_name)\n validator.check_positive_float(eps, \"eps\", prim_name)\n\n\nclass Adam(Optimizer):\n r\"\"\"\n Updates gradients by the Adaptive Moment Estimation (Adam) algorithm.\n\n The Adam optimizer can dynamically adjust the learning rate of each parameter using the first-order\n moment estimation and the second-order moment estimation of the gradient.\n The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.\n\n The updating formulas are as follows,\n\n .. 
math::\n \\begin{gather*}\n m_{t+1} = \\beta_1 * m_{t} + (1 - \\beta_1) * g \\\\\n v_{t+1} = \\beta_2 * v_{t} + (1 - \\beta_2) * g * g \\\\\n l_{t+1} = l_{t} * \\frac{\\sqrt{1-\\beta_2^t}}{1-\\beta_1^t} \\\\\n w_{t+1} = w_{t} - l * \\frac{m_{t+1}}{\\sqrt{v_{t+1}} + \\epsilon}\n \\end{gather*}\n\n :math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,\n :math:`g` represents `gradients`, :math:`l` represents scaling factor, :math:`\\beta_1, \\beta_2` represent\n `beta1` and `beta2`, :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent\n `beta1_power` and `beta2_power`, :math:`\\alpha` represents `learning_rate`, :math:`w` represents `params`,\n :math:`\\epsilon` represents `eps`.\n\n Note:\n The sparse strategy is applied while the SparseGatherV2 operator is used for forward network.\n The sparse feature is under continuous development. If the sparse strategy wants to be executed on the host,\n set the target to the CPU.\n\n If parameters are not grouped, the `weight_decay` in optimizer will be applied on the parameters without 'beta'\n or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When parameters\n are grouped, each group can set `weight_decay`, if not, the `weight_decay` in optimizer will be applied.\n\n Args:\n params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the\n `params` is a list of `dict`, the string \"params\", \"lr\", \"weight_decay\", \"grad_centralization\" and\n \"order_params\" are the keys can be parsed.\n\n - params: Required. Parameters in current group. The value must be a list of `Parameter`.\n\n - lr: Optional. If \"lr\" in the keys, the value of corresponding learning rate will be used.\n If not, the `learning_rate` in optimizer will be used. Fixed and dynamic learning rate are supported.\n\n - weight_decay: Optional. If \"weight_decay\" in the keys, the value of corresponding weight decay\n will be used. If not, the `weight_decay` in the optimizer will be used.\n\n - grad_centralization: Optional. Must be Boolean. If \"grad_centralization\" is in the keys, the set value\n will be used. If not, the `grad_centralization` is False by default. This configuration only works on the\n convolution layer.\n\n - order_params: Optional. When parameters is grouped, this usually is used to maintain the order of\n parameters that appeared in the network to improve performance. The value should be parameters whose\n order will be followed in optimizer.\n If `order_params` in the keys, other keys will be ignored and the element of 'order_params' must be in\n one group of `params`.\n\n learning_rate (Union[float, int, Tensor, Iterable, LearningRateSchedule]): Default: 1e-3.\n\n - float: The fixed learning rate value. Must be equal to or greater than 0.\n\n - int: The fixed learning rate value. Must be equal to or greater than 0. It will be converted to float.\n\n - Tensor: Its value should be a scalar or a 1-D vector. For scalar, fixed learning rate will be applied.\n For vector, learning rate is dynamic, then the i-th step will take the i-th value as the learning rate.\n\n - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.\n\n - LearningRateSchedule: Learning rate is dynamic. 
During training, the optimizer calls the instance of\n LearningRateSchedule with step as the input to get the learning rate of current step.\n\n beta1 (float): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).\n Default: 0.9.\n beta2 (float): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).\n Default: 0.999.\n eps (float): Term added to the denominator to improve numerical stability. Should be greater than 0. Default:\n 1e-8.\n use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.\n If true, updates of the var, m, and v tensors will be protected by a lock.\n If false, the result is unpredictable. Default: False.\n use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.\n If true, update the gradients using NAG.\n If false, update the gradients without using NAG. Default: False.\n weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0.\n loss_scale (float): A floating point value for the loss scale. Should be greater than 0. In general, use the\n default value. Only when `FixedLossScaleManager` is used for training and the `drop_overflow_update` in\n `FixedLossScaleManager` is set to False, then this value needs to be the same as the `loss_scale` in\n `FixedLossScaleManager`. Refer to class :class:`mindspore.FixedLossScaleManager` for more details.\n Default: 1.0.\n\n Inputs:\n - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.\n\n Outputs:\n Tensor[bool], the value is True.\n\n Raises:\n TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.\n TypeError: If element of `parameters` is neither Parameter nor dict.\n TypeError: If `beta1`, `beta2`, `eps` or `loss_scale` is not a float.\n TypeError: If `weight_decay` is neither float nor int.\n TypeError: If `use_locking` or `use_nesterov` is not a bool.\n ValueError: If `loss_scale` or `eps` is less than or equal to 0.\n ValueError: If `beta1`, `beta2` is not in range (0.0, 1.0).\n ValueError: If `weight_decay` is less than 0.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> net = Net()\n >>> #1) All parameters use the same learning rate and weight decay\n >>> optim = nn.Adam(params=net.trainable_params())\n >>>\n >>> #2) Use parameter groups and set different values\n >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))\n >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))\n >>> group_params = [{'params': conv_params, 'weight_decay': 0.01, 'grad_centralization':True},\n ... {'params': no_conv_params, 'lr': 0.01},\n ... 
{'order_params': net.trainable_params()}]\n >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)\n >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01 and grad\n >>> # centralization of True.\n >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0 and grad\n >>> # centralization of False.\n >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.\n >>>\n >>> loss = nn.SoftmaxCrossEntropyWithLogits()\n >>> model = Model(net, loss_fn=loss, optimizer=optim)\n \"\"\"\n\n @opt_init_args_register\n def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,\n use_nesterov=False, weight_decay=0.0, loss_scale=1.0):\n super(Adam, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param_value(beta1, beta2, eps, self.cls_name)\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.cls_name)\n validator.check_value_type(\"use_nesterov\", use_nesterov, [bool], self.cls_name)\n\n self.beta1 = Tensor(beta1, mstype.float32)\n self.beta2 = Tensor(beta2, mstype.float32)\n self.beta1_power = Parameter(initializer(1, [1], mstype.float32), name=\"beta1_power\")\n self.beta2_power = Parameter(initializer(1, [1], mstype.float32), name=\"beta2_power\")\n self.eps = Tensor(eps, mstype.float32)\n self.use_nesterov = use_nesterov\n self.use_locking = use_locking\n self.moment1 = self.parameters.clone(prefix=\"moment1\", init='zeros')\n self.moment2 = self.parameters.clone(prefix=\"moment2\", init='zeros')\n\n self._is_device = True\n self.opt = P.Adam(use_locking, use_nesterov)\n self.sparse_opt = P.FusedSparseAdam(use_locking, use_nesterov)\n self.sparse_opt.add_prim_attr(\"primitive_target\", \"CPU\")\n self._ps_pull = P.Pull()\n self._ps_push = P.Push(\"Adam\", [0, 1, 2])\n self._ps_push.add_prim_attr(\"use_nesterov\", use_nesterov)\n\n def construct(self, gradients):\n params = self.parameters\n moment1 = self.moment1\n moment2 = self.moment2\n gradients = self.decay_weight(gradients)\n gradients = self.gradients_centralization(gradients)\n gradients = self.scale_grad(gradients)\n gradients = self._grad_sparse_indices_deduplicate(gradients)\n lr = self.get_lr()\n\n beta1_power = self.beta1_power * self.beta1\n self.beta1_power = beta1_power\n beta2_power = self.beta2_power * self.beta2\n self.beta2_power = beta2_power\n if self.is_group_lr:\n success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,\n self.use_locking, self.use_nesterov, self._is_device,\n beta1_power, beta2_power, self.beta1, self.beta2, self.eps),\n lr, gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)\n else:\n success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,\n self.use_locking, self.use_nesterov, self._is_device,\n beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),\n gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)\n return success\n\n @Optimizer.target.setter\n def target(self, value):\n \"\"\"\n If the input value is set to \"CPU\", the parameters will be updated on the host using the Fused\n optimizer operation.\n \"\"\"\n self._set_base_target(value)\n\n\nclass AdamWeightDecay(Optimizer):\n r\"\"\"\n Implements the Adam algorithm to fix the weight decay.\n\n .. 
math::\n \\begin{array}{ll} \\\\\n m_{t+1} = \\beta_1 * m_{t} + (1 - \\beta_1) * g \\\\\n v_{t+1} = \\beta_2 * v_{t} + (1 - \\beta_2) * g * g \\\\\n update = \\frac{m_{t+1}}{\\sqrt{v_{t+1}} + eps} \\\\\n update =\n \\begin{cases}\n update + weight\\_decay * w_{t}\n & \\text{ if } weight\\_decay > 0 \\\\\n update\n & \\text{ otherwise }\n \\end{cases} \\\\\n w_{t+1} = w_{t} - lr * update\n \\end{array}\n\n :math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,\n :math:`g` represents `gradients`, :math:`lr` represents `learning_rate`,\n :math:`\\beta_1, \\beta_2` represent `beta1` and `beta2`, :math:`t` represents updating step while\n :math:`w` represents `params`.\n\n Note:\n There is usually no connection between a optimizer and mixed precision. But when `FixedLossScaleManager` is used\n and `drop_overflow_update` in `FixedLossScaleManager` is set to False, optimizer needs to set the 'loss_scale'.\n As this optimizer has no argument of `loss_scale`, so `loss_scale` needs to be processed by other means, refer\n document `LossScale <https://www.mindspore.cn/docs/programming_guide/zh-CN/master/lossscale.html>`_ to process\n `loss_scale` correctly.\n\n If parameters are not grouped, the `weight_decay` in optimizer will be applied on the parameters without 'beta'\n or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When parameters\n are grouped, each group can set `weight_decay`, if not, the `weight_decay` in optimizer will be applied.\n\n Args:\n params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the\n `params` is a list of `dict`, the string \"params\", \"lr\", \"weight_decay\", and \"order_params\"\n are the keys can be parsed.\n\n - params: Required. Parameters in current group. The value must be a list of `Parameter`.\n\n - lr: Optional. If \"lr\" in the keys, the value of corresponding learning rate will be used.\n If not, the `learning_rate` in optimizer will be used. Fixed and dynamic learning rate are supported.\n\n - weight_decay: Optional. If \"weight_decay\" in the keys, the value of corresponding weight decay\n will be used. If not, the `weight_decay` in the optimizer will be used.\n\n - order_params: Optional. When parameters is grouped, this usually is used to maintain the order of\n parameters that appeared in the network to improve performance. The value should be parameters whose\n order will be followed in optimizer.\n If `order_params` in the keys, other keys will be ignored and the element of 'order_params' must be in\n one group of `params`.\n\n learning_rate (Union[float, int, Tensor, Iterable, LearningRateSchedule]): Default: 1e-3.\n\n - float: The fixed learning rate value. Must be equal to or greater than 0.\n\n - int: The fixed learning rate value. Must be equal to or greater than 0. It will be converted to float.\n\n - Tensor: Its value should be a scalar or a 1-D vector. For scalar, fixed learning rate will be applied.\n For vector, learning rate is dynamic, then the i-th step will take the i-th value as the learning rate.\n\n - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.\n\n - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of\n LearningRateSchedule with step as the input to get the learning rate of current step.\n\n beta1 (float): The exponential decay rate for the 1st moment estimations. 
Default: 0.9.\n Should be in range (0.0, 1.0).\n beta2 (float): The exponential decay rate for the 2nd moment estimations. Default: 0.999.\n Should be in range (0.0, 1.0).\n eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6.\n Should be greater than 0.\n weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0.\n\n Inputs:\n - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.\n\n Outputs:\n tuple[bool], all elements are True.\n\n Raises:\n TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.\n TypeError: If element of `parameters` is neither Parameter nor dict.\n TypeError: If `beta1`, `beta2` or `eps` is not a float.\n TypeError: If `weight_decay` is neither float nor int.\n ValueError: If `eps` is less than or equal to 0.\n ValueError: If `beta1`, `beta2` is not in range (0.0, 1.0).\n ValueError: If `weight_decay` is less than 0.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> net = Net()\n >>> #1) All parameters use the same learning rate and weight decay\n >>> optim = nn.AdamWeightDecay(params=net.trainable_params())\n >>>\n >>> #2) Use parameter groups and set different values\n >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))\n >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))\n >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},\n ... {'params': no_conv_params, 'lr': 0.01},\n ... {'order_params': net.trainable_params()}]\n >>> optim = nn.AdamWeightDecay(group_params, learning_rate=0.1, weight_decay=0.0)\n >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.\n >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.\n >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.\n >>>\n >>> loss = nn.SoftmaxCrossEntropyWithLogits()\n >>> model = Model(net, loss_fn=loss, optimizer=optim)\n \"\"\"\n def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0):\n super(AdamWeightDecay, self).__init__(learning_rate, params, weight_decay)\n _check_param_value(beta1, beta2, eps, self.cls_name)\n self.beta1 = Tensor(np.array([beta1]).astype(np.float32))\n self.beta2 = Tensor(np.array([beta2]).astype(np.float32))\n self.eps = Tensor(np.array([eps]).astype(np.float32))\n self.moments1 = self.parameters.clone(prefix=\"adam_m\", init='zeros')\n self.moments2 = self.parameters.clone(prefix=\"adam_v\", init='zeros')\n\n def construct(self, gradients):\n lr = self.get_lr()\n if self.is_group:\n if self.is_group_lr:\n optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps),\n lr, self.weight_decay, self.parameters, self.moments1,\n self.moments2, gradients, self.decay_flags, self.optim_filter)\n else:\n optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr),\n self.weight_decay, self.parameters, self.moments1, self.moments2,\n gradients, self.decay_flags, self.optim_filter)\n else:\n optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr,\n self.weight_decay),\n self.parameters, self.moments1, self.moments2,\n gradients, self.decay_flags, self.optim_filter)\n if self.use_parallel:\n self.broadcast_params(optim_result)\n return optim_result\n\n\nclass 
AdamOffload(Optimizer):\n r\"\"\"\n This optimizer will offload Adam optimizer to host CPU and keep parameters being updated on the device,\n to minimize the memory cost. Although that would bring about an increase of performance overhead,\n the optimizer could be used to run a larger model.\n\n The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n m_{t+1} = \\beta_1 * m_{t} + (1 - \\beta_1) * g \\\\\n v_{t+1} = \\beta_2 * v_{t} + (1 - \\beta_2) * g * g \\\\\n l = \\alpha * \\frac{\\sqrt{1-\\beta_2^t}}{1-\\beta_1^t} \\\\\n w_{t+1} = w_{t} - l * \\frac{m_{t+1}}{\\sqrt{v_{t+1}} + \\epsilon}\n \\end{array}\n\n :math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,\n :math:`g` represents `gradients`, :math:`l` represents scaling factor, :math:`\\beta_1, \\beta_2` represent\n `beta1` and `beta2`, :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent\n `beta1_power` and `beta2_power`, :math:`\\alpha` represents `learning_rate`, :math:`w` represents `params`,\n :math:`\\epsilon` represents `eps`.\n\n Note:\n This optimizer only supports `GRAPH_MODE` currently.\n\n If parameters are not grouped, the `weight_decay` in optimizer will be applied on the parameters without 'beta'\n or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When parameters\n are grouped, each group can set `weight_decay`, if not, the `weight_decay` in optimizer will be applied.\n\n Args:\n params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the\n `params` is a list of `dict`, the string \"params\", \"lr\", \"weight_decay\", and \"order_params\"\n are the keys can be parsed.\n\n - params: Required. Parameters in current group. The value must be a list of `Parameter`.\n\n - lr: Optional. If \"lr\" in the keys, the value of corresponding learning rate will be used.\n If not, the `learning_rate` in optimizer will be used. Fixed and dynamic learning rate are supported.\n\n - weight_decay: Optional. If \"weight_decay\" in the keys, the value of corresponding weight decay\n will be used. If not, the `weight_decay` in the optimizer will be used.\n\n - order_params: Optional. When parameters is grouped, this usually is used to maintain the order of\n parameters that appeared in the network to improve performance. The value should be parameters whose\n order will be followed in optimizer.\n If `order_params` in the keys, other keys will be ignored and the element of 'order_params' must be in\n one group of `params`.\n\n learning_rate (Union[float, int, Tensor, Iterable, LearningRateSchedule]): Default: 1e-3.\n\n - float: The fixed learning rate value. Must be equal to or greater than 0.\n\n - int: The fixed learning rate value. Must be equal to or greater than 0. It will be converted to float.\n\n - Tensor: Its value should be a scalar or a 1-D vector. For scalar, fixed learning rate will be applied.\n For vector, learning rate is dynamic, then the i-th step will take the i-th value as the learning rate.\n\n - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.\n\n - LearningRateSchedule: Learning rate is dynamic. 
During training, the optimizer calls the instance of\n LearningRateSchedule with step as the input to get the learning rate of current step.\n\n beta1 (float): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).\n Default: 0.9.\n beta2 (float): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0).\n Default: 0.999.\n eps (float): Term added to the denominator to improve numerical stability. Should be greater than 0. Default:\n 1e-8.\n use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.\n If true, updates of the var, m, and v tensors will be protected by a lock.\n If false, the result is unpredictable. Default: False.\n use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.\n If true, update the gradients using NAG.\n If false, update the gradients without using NAG. Default: False.\n weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0.\n loss_scale (float): A floating point value for the loss scale. Should be greater than 0. In general, use the\n default value. Only when `FixedLossScaleManager` is used for training and the `drop_overflow_update` in\n `FixedLossScaleManager` is set to False, then this value needs to be the same as the `loss_scale` in\n `FixedLossScaleManager`. Refer to class :class:`mindspore.FixedLossScaleManager` for more details.\n Default: 1.0.\n\n Inputs:\n - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.\n\n Outputs:\n Tensor[bool], the value is True.\n\n Raises:\n TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.\n TypeError: If element of `parameters` is neither Parameter nor dict.\n TypeError: If `beta1`, `beta2`, `eps` or `loss_scale` is not a float.\n TypeError: If `weight_decay` is neither float nor int.\n TypeError: If `use_locking` or `use_nesterov` is not a bool.\n ValueError: If `loss_scale` or `eps` is less than or equal to 0.\n ValueError: If `beta1`, `beta2` is not in range (0.0, 1.0).\n ValueError: If `weight_decay` is less than 0.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> net = Net()\n >>> #1) All parameters use the same learning rate and weight decay\n >>> optim = nn.AdamOffload(params=net.trainable_params())\n >>>\n >>> #2) Use parameter groups and set different values\n >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))\n >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))\n >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},\n ... {'params': no_conv_params, 'lr': 0.01},\n ... 
{'order_params': net.trainable_params()}]\n >>> optim = nn.AdamOffload(group_params, learning_rate=0.1, weight_decay=0.0)\n >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01.\n >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0.\n >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.\n >>>\n >>> loss = nn.SoftmaxCrossEntropyWithLogits()\n >>> model = Model(net, loss_fn=loss, optimizer=optim)\n \"\"\"\n\n def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,\n use_nesterov=False, weight_decay=0.0, loss_scale=1.0):\n super(AdamOffload, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param_value(beta1, beta2, eps, self.cls_name)\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.cls_name)\n validator.check_value_type(\"use_nesterov\", use_nesterov, [bool], self.cls_name)\n\n self.beta1 = Tensor(beta1, mstype.float32)\n self.beta2 = Tensor(beta2, mstype.float32)\n self.beta1_power = Parameter(initializer(1, [1], mstype.float32), name=\"beta1_power\")\n self.beta2_power = Parameter(initializer(1, [1], mstype.float32), name=\"beta2_power\")\n self.eps = Tensor(eps, mstype.float32)\n self.moment1 = self.parameters.clone(prefix=\"moment1\", init='zeros')\n self.moment2 = self.parameters.clone(prefix=\"moment2\", init='zeros')\n self.opt = P.AdamNoUpdateParam(use_locking, use_nesterov)\n self.opt.add_prim_attr(\"primitive_target\", \"CPU\")\n\n def construct(self, gradients):\n params = self.parameters\n moment1 = self.moment1\n moment2 = self.moment2\n gradients = self.decay_weight(gradients)\n gradients = self.scale_grad(gradients)\n lr = self.get_lr()\n\n beta1_power = self.beta1_power * self.beta1\n self.beta1_power = beta1_power\n beta2_power = self.beta2_power * self.beta2\n self.beta2_power = beta2_power\n if self.is_group_lr:\n success = self.map_reverse(F.partial(_adam_opt, self.opt,\n beta1_power, beta2_power, self.beta1, self.beta2, self.eps),\n lr, gradients, params, moment1, moment2)\n else:\n success = self.map_reverse(F.partial(_adam_opt, self.opt,\n beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),\n gradients, params, moment1, moment2)\n return success\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aws-patlin/ml-io
[ "047e7d40609ced6f839d0b08d1917e9742a785af" ]
[ "src/mlio-py/mlio/integ/scipy.py" ]
[ "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport numpy as np\n\nfrom mlio.core import CooTensor\nfrom scipy.sparse import coo_matrix\n\n\ndef to_coo_matrix(tensor):\n \"\"\"\n Converts the specified tensor to a ``coo_matrix``.\n \"\"\"\n\n if not isinstance(tensor, CooTensor):\n raise ValueError(\"The tensor must be an instance of CooTensor.\")\n\n s = tensor.shape\n\n if len(s) > 2:\n raise ValueError(\"Only one- and two-dimensional COO tensors are \"\n \"supported.\")\n\n if len(s) == 1:\n s = (1,) + s\n\n data = np.array(tensor.data, copy=False)\n rows = np.array(tensor.indices(0), copy=False)\n cols = np.array(tensor.indices(1), copy=False)\n\n return coo_matrix((data, (rows, cols)), s, copy=True)\n\n\ndef to_tensor(mtx):\n \"\"\"\n Converts the specified ``coo_matrix`` to a tensor.\n \"\"\"\n\n if not isinstance(mtx, coo_matrix):\n raise ValueError(\"Only coo_matrix is supported.\")\n\n rows = mtx.row\n cols = mtx.col\n\n rows = rows.astype(np.int64, copy=True)\n cols = cols.astype(np.int64, copy=True)\n\n return CooTensor(mtx.shape, mtx.data, [rows, cols], copy=False)\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
lih627/MSMLNet
[ "868500144edd60599c804203534e08dd672a647a" ]
[ "model/mobilenetv3.py" ]
[ "\"\"\"\nCreates a MobileNetV3 Model as defined in:\nAndrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam. (2019).\nSearching for MobileNetV3\narXiv preprint arXiv:1905.02244.\n\"\"\"\n\nimport torch.nn as nn\nimport math\nimport torch\n\n__all__ = ['build_mobilenetv3_small', 'build_mobilenetv3_large']\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\nclass h_sigmoid(nn.Module):\n def __init__(self, inplace=True):\n super(h_sigmoid, self).__init__()\n self.relu = nn.ReLU6(inplace=inplace)\n\n def forward(self, x):\n return self.relu(x + 3) / 6\n\n\nclass h_swish(nn.Module):\n def __init__(self, inplace=True):\n super(h_swish, self).__init__()\n self.sigmoid = h_sigmoid(inplace=inplace)\n\n def forward(self, x):\n return x * self.sigmoid(x)\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=4):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, _make_divisible(channel // reduction, 8)),\n nn.ReLU(inplace=True),\n nn.Linear(_make_divisible(channel // reduction, 8), channel),\n h_sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\ndef conv_3x3_bn(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n h_swish()\n )\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n h_swish()\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):\n super(InvertedResidual, self).__init__()\n assert stride in [1, 2]\n\n self.identity = stride == 1 and inp == oup\n\n if inp == hidden_dim:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim,\n bias=False),\n nn.BatchNorm2d(hidden_dim),\n h_swish() if use_hs else nn.ReLU(inplace=True),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n h_swish() if use_hs else nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim,\n bias=False),\n nn.BatchNorm2d(hidden_dim),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n h_swish() if use_hs else nn.ReLU(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.identity:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV3(nn.Module):\n def 
__init__(self, cfgs, mode, num_classes=1000, width_mult=1.):\n super(MobileNetV3, self).__init__()\n # setting of inverted residual blocks\n self.cfgs = cfgs\n assert mode in ['large', 'small']\n\n # building first layer\n input_channel = _make_divisible(16 * width_mult, 8)\n layers = [conv_3x3_bn(3, input_channel, 2)]\n\n self.layer0 = nn.Sequential(*layers)\n # building inverted residual blocks\n block = InvertedResidual\n\n exp_size = None\n output_channel = None\n\n def _build_block(blockcfgs):\n nonlocal input_channel, output_channel, exp_size\n block_layers = []\n for k, t, c, use_se, use_hs, s in blockcfgs:\n output_channel = _make_divisible(c * width_mult, 8)\n exp_size = _make_divisible(input_channel * t, 8)\n block_layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))\n input_channel = output_channel\n return nn.Sequential(*block_layers)\n\n self.layer1 = _build_block(self.cfgs[0])\n self.layer2 = _build_block(self.cfgs[1])\n self.layer3 = _build_block(self.cfgs[2])\n self.layer4 = _build_block(self.cfgs[3])\n\n # building last several layers\n self.conv = conv_1x1_bn(input_channel, exp_size)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n output_channel = {'large': 1280, 'small': 1024}\n output_channel = _make_divisible(output_channel[mode] * width_mult, 8) if width_mult > 1.0 else output_channel[\n mode]\n self.classifier = nn.Sequential(\n nn.Linear(exp_size, output_channel),\n h_swish(),\n nn.Dropout(0.2),\n nn.Linear(output_channel, num_classes),\n )\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.layer0(x) # downsample 2\n x = self.layer1(x) # downsample 4\n x = self.layer2(x) # downsample 8\n x = self.layer3(x) # downsample 16\n x = self.layer4(x) # downsample 32\n x = self.conv(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef mobilenetv3_large(**kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n\n 8x download factor\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [[3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1]],\n [[5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1]],\n [[3, 6, 80, 0, 1, 1],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1]],\n [[5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]]\n ]\n return MobileNetV3(cfgs, mode='large', **kwargs)\n\n\ndef mobilenetv3_small(**kwargs):\n \"\"\"\n Constructs a MobileNetV3-Small model\n 8x download factor\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [[3, 1, 16, 1, 0, 2]],\n [[3, 4.5, 24, 0, 0, 2],\n [3, 3.67, 24, 0, 0, 1]],\n [[5, 4, 40, 1, 1, 1],\n [5, 6, 40, 1, 1, 1],\n [5, 6, 40, 1, 1, 1],\n [5, 3, 48, 1, 1, 1],\n [5, 3, 48, 1, 1, 1]],\n [[5, 6, 96, 1, 1, 1],\n [5, 6, 96, 1, 1, 1],\n [5, 6, 96, 1, 1, 1]],\n ]\n\n return MobileNetV3(cfgs, mode='small', **kwargs)\n\n\ndef load_and_convert(net, state_dict):\n net_dict = net.state_dict().copy()\n net_list = list(net_dict.keys())\n trained_list = list(state_dict.keys())\n assert len(net_list) == len(trained_list), 'Learning parameters do not match, check net and trained state_dict'\n for i in range(len(net_list)):\n net_dict[net_list[i]] = state_dict[trained_list[i]]\n net.load_state_dict(net_dict)\n\n\ndef build_mobilenetv3_large(pretrained=True, width_mult=1.):\n net = mobilenetv3_large(width_mult=width_mult)\n if pretrained:\n eps = 1e-5\n if abs(1.0 - width_mult) < eps:\n weights = './initmodel/mobilenetv3-large-1cd25616.pth'\n state_dict = torch.load(weights)\n elif abs(0.75 - width_mult) < eps:\n weights = './initmodel/mobilenetv3-large-0.75-9632d2a8.pth'\n state_dict = torch.load(weights)\n else:\n raise RuntimeError(\"Not support width_mult: {}\".format(width_mult))\n load_and_convert(net, state_dict)\n return net\n\n\ndef build_mobilenetv3_small(pretrained=True, width_mult=1.):\n net = mobilenetv3_small(width_mult=width_mult)\n if pretrained:\n eps = 1e-5\n if abs(1.0 - width_mult) < eps:\n weights = './initmodel/mobilenetv3-small-55df8e1f.pth'\n state_dict = torch.load(weights)\n elif abs(0.75 - width_mult) < eps:\n weights = './initmodel/mobilenetv3-small-0.75-86c972c3.pth'\n state_dict = torch.load(weights)\n else:\n raise RuntimeError(\"Not support width_mult: {}\".format(width_mult))\n load_and_convert(net, state_dict)\n return net\n\n\nif __name__ == '__main__':\n import torch\n\n\n def params(net):\n return sum(param.numel() for param in net.parameters())\n\n\n net = build_mobilenetv3_large(pretrained=False, width_mult=0.75)\n img = torch.randn((1, 3, 224, 224))\n out = net(img)\n print('Out shape ', out.size())\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReLU6", "torch.load", "torch.randn", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.Identity", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akensert/autoencoder-chromatogram-enhancement
[ "9aeead5ba33bd97711c4de27841e9827f326b575" ]
[ "src/generator.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\n\nclass Generator(tf.keras.utils.Sequence):\n\n def __init__(self, path, batch_size, num_examples=190_000, random_seed=42):\n self.path = path\n self.batch_size = batch_size\n self.num_examples = num_examples\n self.random_seed = random_seed\n self.on_epoch_end()\n\n def __len__(self):\n return self.num_examples // self.batch_size\n\n def on_epoch_end(self):\n np.random.seed(self.random_seed)\n self.random_seed += 1\n self.indices = np.arange(self.num_examples)\n np.random.shuffle(self.indices)\n\n def __getitem__(self, index):\n batch_indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]\n batch_x = np.empty([self.batch_size, 8192, 1])\n batch_y = np.empty([self.batch_size, 8192, 1])\n batch_y_der = np.empty([self.batch_size, 8192, 1])\n for i, idx in enumerate(batch_indices):\n _, x, y, y_der = np.load(self.path+f'chromatogram_{idx}.npy')\n batch_x[i,] = x[:, np.newaxis]\n batch_y[i,] = y[:, np.newaxis]\n batch_y_der[i,] = y_der[:, np.newaxis]\n return np.array(batch_x), (np.array(batch_y), np.array(batch_y_der))\n" ]
[ [ "numpy.random.seed", "numpy.arange", "numpy.random.shuffle", "numpy.load", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pandrey-fr/csvtools
[ "9c11bb5a1564f5774117fd33682727f51c51bd58" ]
[ "csvtools/_csv_writers.py" ]
[ "# coding: utf-8\n\n\"\"\"Classes to handle record-type specific dynamic csv storage.\"\"\"\n\nfrom functools import reduce\n\nimport pandas as pd\nfrom yaptools import check_type_validity\n\nfrom csvtools._csv_writer import (\n AbstractCsvWriter, CSV_WRITER_DOCSTRING, CSV_WRITER_EXAMPLE\n)\n\n\nclass DictCsvWriter(AbstractCsvWriter):\n \"\"\"Class to handle dynamic csv storage of dict records.\n {0}\n\n Usage:\n\n # Writer instanciation.\n >>> writer = DictCsvWriter('file.csv', buffer_size=100, sep=',')\n {1}\n \"\"\"\n __doc__ = __doc__.format(CSV_WRITER_DOCSTRING, CSV_WRITER_EXAMPLE)\n\n def _reset_buffer(self):\n \"\"\"Reset the buffer to its empty state.\"\"\"\n self.buffer = []\n\n def _add_to_buffer(self, record):\n \"\"\"Bufferize a given dict.\"\"\"\n if not isinstance(record, dict):\n self.log(\n 'Rejected a record: invalid type %s.' % type(record),\n level='error'\n )\n return None\n self.buffer.append(record)\n\n def _get_buffer_columns(self):\n \"\"\"Return a list of unique column names appearing in the buffer.\"\"\"\n return reduce(\n lambda x, y: list(set(x + y)),\n map(lambda dict_x: list(dict_x.keys()), self.buffer)\n )\n\n def _to_csv(self, first_time):\n \"\"\"Write buffered elements to csv.\"\"\"\n def clean_row(string, replace):\n \"\"\"Clean a record row.\"\"\"\n string = string.replace(self.sep, replace.get(self.sep, '§'))\n return string.replace('\\n', '')\n replace = {';': '.,', '§': ';'}\n rows = (\n self.sep.join(\n clean_row(str(row.get(column, '')), replace)\n for column in self.header\n )\n for row in self.buffer\n )\n with open(self.path, 'a', encoding='utf-8') as csv_file:\n if first_time:\n csv_file.write(self.sep.join(self.header) + '\\n')\n for row in rows:\n csv_file.write(row + '\\n')\n\n\nclass DataframeCsvWriter(AbstractCsvWriter):\n \"\"\"Class to handle dynamic csv storage of pandas.DataFrame records.\n {0}\n\n Usage:\n\n # Writer instanciation.\n >>> writer = DataframeCsvWriter('file.csv', buffer_size=100, sep=',')\n {1}\n \"\"\"\n __doc__ = __doc__.format(CSV_WRITER_DOCSTRING, CSV_WRITER_EXAMPLE)\n\n def __init__(\n self, path, buffer_size, sep=';', logger=None, write_index=False\n ):\n \"\"\"Set up the handler's initial state.\n\n path : path to the destination csv file, which may pre-exist\n buffer_size : maximum number of rows to keep in memory before writing\n them to the csv file (positive integer)\n sep : values separator of the csv file (str, default ';')\n logger : optional Logger object to use instead of the default\n one (which logs everything to the console)\n write_index : whether to write down the records' index as first column\n (bool, default False) ; note that index will be written\n if using a pre-existing file whose first column is not\n named\n \"\"\"\n check_type_validity(write_index, bool, 'write_index')\n self._write_index = write_index\n super().__init__(path, buffer_size, sep, logger)\n\n def _get_current_csv_header(self):\n \"\"\"Read the csv file's initial header, if any.\"\"\"\n header = super()._get_current_csv_header()\n if header and header[0] == '':\n self.write_index = True\n del header[0]\n return header\n\n def _reset_buffer(self):\n \"\"\"Reset the buffer to its empty state.\"\"\"\n self.buffer = pd.DataFrame()\n\n def _add_to_buffer(self, record):\n \"\"\"Bufferize a given pandas.DataFrame.\"\"\"\n if isinstance(record, pd.Series):\n record = pd.DataFrame(record)\n elif not isinstance(record, pd.DataFrame):\n self.log(\n 'Rejected a record: invalid type %s.' 
% type(record),\n level='error'\n )\n return None\n self.buffer = pd.concat([self.buffer, record])\n\n def _get_buffer_columns(self):\n \"\"\"Return a list of unique column names appearing in the buffer.\"\"\"\n return list(self.buffer.columns)\n\n def _to_csv(self, first_time):\n \"\"\"Write buffered elements to csv.\"\"\"\n if self.buffer.columns.tolist() != self.header:\n for column in self.header:\n if column not in self.buffer.columns:\n self.buffer[column] = None\n self.buffer = self.buffer[self.header]\n self.buffer.to_csv(\n self.path, sep=self.sep, mode='a', index=self._write_index,\n header=self.header if first_time else None, encoding='utf-8'\n )\n\n def _update_csv_header(self):\n \"\"\"Update the csv file's header.\"\"\"\n if self._write_index:\n self.header = [''] + self.header\n super()._update_csv_header()\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
psimaj/numpy
[ "b03e4dc36b1b4441fbc955920e0fe92fd39f095e" ]
[ "numpy/core/tests/test_multiarray.py" ]
[ "try:\n # Accessing collections abstract classes from collections\n # has been deprecated since Python 3.3\n import collections.abc as collections_abc\nexcept ImportError:\n import collections as collections_abc\nimport tempfile\nimport sys\nimport shutil\nimport warnings\nimport operator\nimport io\nimport itertools\nimport functools\nimport ctypes\nimport os\nimport gc\nimport weakref\nimport pytest\nfrom contextlib import contextmanager\n\nfrom numpy.compat import pickle\n\ntry:\n import pathlib\nexcept ImportError:\n try:\n import pathlib2 as pathlib\n except ImportError:\n pathlib = None\n\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nfrom decimal import Decimal\n\nimport numpy as np\nfrom numpy.compat import strchar, unicode\nimport numpy.core._multiarray_tests as _multiarray_tests\nfrom numpy.testing import (\n assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,\n assert_array_equal, assert_raises_regex, assert_array_almost_equal,\n assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,\n temppath, suppress_warnings, break_cycles,\n )\nfrom numpy.core.tests._locales import CommaDecimalPointLocale\n\n# Need to test an object that does not fully implement math interface\nfrom datetime import timedelta, datetime\n\n\nif sys.version_info[:2] > (3, 2):\n # In Python 3.3 the representation of empty shape, strides and sub-offsets\n # is an empty tuple instead of None.\n # https://docs.python.org/dev/whatsnew/3.3.html#api-changes\n EMPTY = ()\nelse:\n EMPTY = None\n\n\ndef _aligned_zeros(shape, dtype=float, order=\"C\", align=None):\n \"\"\"\n Allocate a new ndarray with aligned memory.\n\n The ndarray is guaranteed *not* aligned to twice the requested alignment.\n Eg, if align=4, guarantees it is not aligned to 8. 
If align=None uses\n dtype.alignment.\"\"\"\n dtype = np.dtype(dtype)\n if dtype == np.dtype(object):\n # Can't do this, fall back to standard allocation (which\n # should always be sufficiently aligned)\n if align is not None:\n raise ValueError(\"object array alignment not supported\")\n return np.zeros(shape, dtype=dtype, order=order)\n if align is None:\n align = dtype.alignment\n if not hasattr(shape, '__len__'):\n shape = (shape,)\n size = functools.reduce(operator.mul, shape) * dtype.itemsize\n buf = np.empty(size + 2*align + 1, np.uint8)\n\n ptr = buf.__array_interface__['data'][0]\n offset = ptr % align\n if offset != 0:\n offset = align - offset\n if (ptr % (2*align)) == 0:\n offset += align\n\n # Note: slices producing 0-size arrays do not necessarily change\n # data pointer --- so we use and allocate size+1\n buf = buf[offset:offset+size+1][:-1]\n data = np.ndarray(shape, dtype, buf, order=order)\n data.fill(0)\n return data\n\ndef _no_tracing(func):\n \"\"\"\n Decorator to temporarily turn off tracing for the duration of a test.\n Needed in tests that check refcounting, otherwise the tracing itself\n influences the refcounts\n \"\"\"\n if not hasattr(sys, 'gettrace'):\n return func\n else:\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n original_trace = sys.gettrace()\n try:\n sys.settrace(None)\n return func(*args, **kwargs)\n finally:\n sys.settrace(original_trace)\n return wrapper\n\n\n\nclass TestFlags:\n def setup(self):\n self.a = np.arange(10)\n\n def test_writeable(self):\n mydict = locals()\n self.a.flags.writeable = False\n assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)\n assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)\n self.a.flags.writeable = True\n self.a[0] = 5\n self.a[0] = 0\n\n def test_writeable_any_base(self):\n # Ensure that any base being writeable is sufficient to change flag;\n # this is especially interesting for arrays from an array interface.\n arr = np.arange(10)\n\n class subclass(np.ndarray):\n pass\n\n # Create subclass so base will not be collapsed, this is OK to change\n view1 = arr.view(subclass)\n view2 = view1[...]\n arr.flags.writeable = False\n view2.flags.writeable = False\n view2.flags.writeable = True # Can be set to True again.\n\n arr = np.arange(10)\n\n class frominterface:\n def __init__(self, arr):\n self.arr = arr\n self.__array_interface__ = arr.__array_interface__\n\n view1 = np.asarray(frominterface)\n view2 = view1[...]\n view2.flags.writeable = False\n view2.flags.writeable = True\n\n view1.flags.writeable = False\n view2.flags.writeable = False\n with assert_raises(ValueError):\n # Must assume not writeable, since only base is not:\n view2.flags.writeable = True\n\n def test_writeable_from_readonly(self):\n # gh-9440 - make sure fromstring, from buffer on readonly buffers\n # set writeable False\n data = b'\\x00' * 100\n vals = np.frombuffer(data, 'B')\n assert_raises(ValueError, vals.setflags, write=True)\n types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )\n values = np.core.records.fromstring(data, types)\n vals = values['vals']\n assert_raises(ValueError, vals.setflags, write=True)\n\n def test_writeable_from_buffer(self):\n data = bytearray(b'\\x00' * 100)\n vals = np.frombuffer(data, 'B')\n assert_(vals.flags.writeable)\n vals.setflags(write=False)\n assert_(vals.flags.writeable is False)\n vals.setflags(write=True)\n assert_(vals.flags.writeable)\n types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )\n values = np.core.records.fromstring(data, types)\n vals = 
values['vals']\n assert_(vals.flags.writeable)\n vals.setflags(write=False)\n assert_(vals.flags.writeable is False)\n vals.setflags(write=True)\n assert_(vals.flags.writeable)\n\n @pytest.mark.skipif(sys.version_info[0] < 3, reason=\"Python 2 always copies\")\n @pytest.mark.skipif(IS_PYPY, reason=\"PyPy always copies\")\n def test_writeable_pickle(self):\n import pickle\n # Small arrays will be copied without setting base.\n # See condition for using PyArray_SetBaseObject in\n # array_setstate.\n a = np.arange(1000)\n for v in range(pickle.HIGHEST_PROTOCOL):\n vals = pickle.loads(pickle.dumps(a, v))\n assert_(vals.flags.writeable)\n assert_(isinstance(vals.base, bytes))\n\n def test_writeable_from_c_data(self):\n # Test that the writeable flag can be changed for an array wrapping\n # low level C-data, but not owning its data.\n # Also see that this is deprecated to change from python.\n from numpy.core._multiarray_tests import get_c_wrapping_array\n\n arr_writeable = get_c_wrapping_array(True)\n assert not arr_writeable.flags.owndata\n assert arr_writeable.flags.writeable\n view = arr_writeable[...]\n\n # Toggling the writeable flag works on the view:\n view.flags.writeable = False\n assert not view.flags.writeable\n view.flags.writeable = True\n assert view.flags.writeable\n # Flag can be unset on the arr_writeable:\n arr_writeable.flags.writeable = False\n\n arr_readonly = get_c_wrapping_array(False)\n assert not arr_readonly.flags.owndata\n assert not arr_readonly.flags.writeable\n\n for arr in [arr_writeable, arr_readonly]:\n view = arr[...]\n view.flags.writeable = False # make sure it is readonly\n arr.flags.writeable = False\n assert not arr.flags.writeable\n\n with assert_raises(ValueError):\n view.flags.writeable = True\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", DeprecationWarning)\n with assert_raises(DeprecationWarning):\n arr.flags.writeable = True\n\n with assert_warns(DeprecationWarning):\n arr.flags.writeable = True\n\n def test_warnonwrite(self):\n a = np.arange(10)\n a.flags._warn_on_write = True\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always')\n a[1] = 10\n a[2] = 10\n # only warn once\n assert_(len(w) == 1)\n\n def test_otherflags(self):\n assert_equal(self.a.flags.carray, True)\n assert_equal(self.a.flags['C'], True)\n assert_equal(self.a.flags.farray, False)\n assert_equal(self.a.flags.behaved, True)\n assert_equal(self.a.flags.fnc, False)\n assert_equal(self.a.flags.forc, True)\n assert_equal(self.a.flags.owndata, True)\n assert_equal(self.a.flags.writeable, True)\n assert_equal(self.a.flags.aligned, True)\n with assert_warns(DeprecationWarning):\n assert_equal(self.a.flags.updateifcopy, False)\n with assert_warns(DeprecationWarning):\n assert_equal(self.a.flags['U'], False)\n assert_equal(self.a.flags['UPDATEIFCOPY'], False)\n assert_equal(self.a.flags.writebackifcopy, False)\n assert_equal(self.a.flags['X'], False)\n assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)\n\n def test_string_align(self):\n a = np.zeros(4, dtype=np.dtype('|S4'))\n assert_(a.flags.aligned)\n # not power of two are accessed byte-wise and thus considered aligned\n a = np.zeros(5, dtype=np.dtype('|S4'))\n assert_(a.flags.aligned)\n\n def test_void_align(self):\n a = np.zeros(4, dtype=np.dtype([(\"a\", \"i4\"), (\"b\", \"i4\")]))\n assert_(a.flags.aligned)\n\n\nclass TestHash:\n # see #3793\n def test_int(self):\n for st, ut, s in [(np.int8, np.uint8, 8),\n (np.int16, np.uint16, 16),\n (np.int32, np.uint32, 32),\n (np.int64, 
np.uint64, 64)]:\n for i in range(1, s):\n assert_equal(hash(st(-2**i)), hash(-2**i),\n err_msg=\"%r: -2**%d\" % (st, i))\n assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),\n err_msg=\"%r: 2**%d\" % (st, i - 1))\n assert_equal(hash(st(2**i - 1)), hash(2**i - 1),\n err_msg=\"%r: 2**%d - 1\" % (st, i))\n\n i = max(i - 1, 1)\n assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),\n err_msg=\"%r: 2**%d\" % (ut, i - 1))\n assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),\n err_msg=\"%r: 2**%d - 1\" % (ut, i))\n\n\nclass TestAttributes:\n def setup(self):\n self.one = np.arange(10)\n self.two = np.arange(20).reshape(4, 5)\n self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)\n\n def test_attributes(self):\n assert_equal(self.one.shape, (10,))\n assert_equal(self.two.shape, (4, 5))\n assert_equal(self.three.shape, (2, 5, 6))\n self.three.shape = (10, 3, 2)\n assert_equal(self.three.shape, (10, 3, 2))\n self.three.shape = (2, 5, 6)\n assert_equal(self.one.strides, (self.one.itemsize,))\n num = self.two.itemsize\n assert_equal(self.two.strides, (5*num, num))\n num = self.three.itemsize\n assert_equal(self.three.strides, (30*num, 6*num, num))\n assert_equal(self.one.ndim, 1)\n assert_equal(self.two.ndim, 2)\n assert_equal(self.three.ndim, 3)\n num = self.two.itemsize\n assert_equal(self.two.size, 20)\n assert_equal(self.two.nbytes, 20*num)\n assert_equal(self.two.itemsize, self.two.dtype.itemsize)\n assert_equal(self.two.base, np.arange(20))\n\n def test_dtypeattr(self):\n assert_equal(self.one.dtype, np.dtype(np.int_))\n assert_equal(self.three.dtype, np.dtype(np.float_))\n assert_equal(self.one.dtype.char, 'l')\n assert_equal(self.three.dtype.char, 'd')\n assert_(self.three.dtype.str[0] in '<>')\n assert_equal(self.one.dtype.str[1], 'i')\n assert_equal(self.three.dtype.str[1], 'f')\n\n def test_int_subclassing(self):\n # Regression test for https://github.com/numpy/numpy/pull/3526\n\n numpy_int = np.int_(0)\n\n # int_ doesn't inherit from Python int, because it's not fixed-width\n assert_(not isinstance(numpy_int, int))\n\n def test_stridesattr(self):\n x = self.one\n\n def make_array(size, offset, strides):\n return np.ndarray(size, buffer=x, dtype=int,\n offset=offset*x.itemsize,\n strides=strides*x.itemsize)\n\n assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))\n assert_raises(ValueError, make_array, 4, 4, -2)\n assert_raises(ValueError, make_array, 4, 2, -1)\n assert_raises(ValueError, make_array, 8, 3, 1)\n assert_equal(make_array(8, 3, 0), np.array([3]*8))\n # Check behavior reported in gh-2503:\n assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))\n make_array(0, 0, 10)\n\n def test_set_stridesattr(self):\n x = self.one\n\n def make_array(size, offset, strides):\n try:\n r = np.ndarray([size], dtype=int, buffer=x,\n offset=offset*x.itemsize)\n except Exception as e:\n raise RuntimeError(e)\n r.strides = strides = strides*x.itemsize\n return r\n\n assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))\n assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))\n assert_raises(ValueError, make_array, 4, 4, -2)\n assert_raises(ValueError, make_array, 4, 2, -1)\n assert_raises(RuntimeError, make_array, 8, 3, 1)\n # Check that the true extent of the array is used.\n # Test relies on as_strided base not exposing a buffer.\n x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))\n\n def set_strides(arr, strides):\n arr.strides = strides\n\n assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))\n\n # Test for offset 
calculations:\n x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],\n shape=(10,), strides=(-1,))\n assert_raises(ValueError, set_strides, x[::-1], -1)\n a = x[::-1]\n a.strides = 1\n a[::2].strides = 2\n\n def test_fill(self):\n for t in \"?bhilqpBHILQPfdgFDGO\":\n x = np.empty((3, 2, 1), t)\n y = np.empty((3, 2, 1), t)\n x.fill(1)\n y[...] = 1\n assert_equal(x, y)\n\n def test_fill_max_uint64(self):\n x = np.empty((3, 2, 1), dtype=np.uint64)\n y = np.empty((3, 2, 1), dtype=np.uint64)\n value = 2**64 - 1\n y[...] = value\n x.fill(value)\n assert_array_equal(x, y)\n\n def test_fill_struct_array(self):\n # Filling from a scalar\n x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')\n x.fill(x[0])\n assert_equal(x['f1'][1], x['f1'][0])\n # Filling from a tuple that can be converted\n # to a scalar\n x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])\n x.fill((3.5, -2))\n assert_array_equal(x['a'], [3.5, 3.5])\n assert_array_equal(x['b'], [-2, -2])\n\n\nclass TestArrayConstruction:\n def test_array(self):\n d = np.ones(6)\n r = np.array([d, d])\n assert_equal(r, np.ones((2, 6)))\n\n d = np.ones(6)\n tgt = np.ones((2, 6))\n r = np.array([d, d])\n assert_equal(r, tgt)\n tgt[1] = 2\n r = np.array([d, d + 1])\n assert_equal(r, tgt)\n\n d = np.ones(6)\n r = np.array([[d, d]])\n assert_equal(r, np.ones((1, 2, 6)))\n\n d = np.ones(6)\n r = np.array([[d, d], [d, d]])\n assert_equal(r, np.ones((2, 2, 6)))\n\n d = np.ones((6, 6))\n r = np.array([d, d])\n assert_equal(r, np.ones((2, 6, 6)))\n\n d = np.ones((6, ))\n r = np.array([[d, d + 1], d + 2])\n assert_equal(len(r), 2)\n assert_equal(r[0], [d, d + 1])\n assert_equal(r[1], d + 2)\n\n tgt = np.ones((2, 3), dtype=bool)\n tgt[0, 2] = False\n tgt[1, 0:2] = False\n r = np.array([[True, True, False], [False, False, True]])\n assert_equal(r, tgt)\n r = np.array([[True, False], [True, False], [False, True]])\n assert_equal(r, tgt.T)\n\n def test_array_empty(self):\n assert_raises(TypeError, np.array)\n\n def test_array_copy_false(self):\n d = np.array([1, 2, 3])\n e = np.array(d, copy=False)\n d[1] = 3\n assert_array_equal(e, [1, 3, 3])\n e = np.array(d, copy=False, order='F')\n d[1] = 4\n assert_array_equal(e, [1, 4, 3])\n e[2] = 7\n assert_array_equal(d, [1, 4, 7])\n\n def test_array_copy_true(self):\n d = np.array([[1,2,3], [1, 2, 3]])\n e = np.array(d, copy=True)\n d[0, 1] = 3\n e[0, 2] = -7\n assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])\n assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])\n e = np.array(d, copy=True, order='F')\n d[0, 1] = 5\n e[0, 2] = 7\n assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])\n assert_array_equal(d, [[1, 5, 3], [1,2,3]])\n\n def test_array_cont(self):\n d = np.ones(10)[::2]\n assert_(np.ascontiguousarray(d).flags.c_contiguous)\n assert_(np.ascontiguousarray(d).flags.f_contiguous)\n assert_(np.asfortranarray(d).flags.c_contiguous)\n assert_(np.asfortranarray(d).flags.f_contiguous)\n d = np.ones((10, 10))[::2,::2]\n assert_(np.ascontiguousarray(d).flags.c_contiguous)\n assert_(np.asfortranarray(d).flags.f_contiguous)\n\n\nclass TestAssignment:\n def test_assignment_broadcasting(self):\n a = np.arange(6).reshape(2, 3)\n\n # Broadcasting the input to the output\n a[...] = np.arange(3)\n assert_equal(a, [[0, 1, 2], [0, 1, 2]])\n a[...] 
= np.arange(2).reshape(2, 1)\n assert_equal(a, [[0, 0, 0], [1, 1, 1]])\n\n # For compatibility with <= 1.5, a limited version of broadcasting\n # the output to the input.\n #\n # This behavior is inconsistent with NumPy broadcasting\n # in general, because it only uses one of the two broadcasting\n # rules (adding a new \"1\" dimension to the left of the shape),\n # applied to the output instead of an input. In NumPy 2.0, this kind\n # of broadcasting assignment will likely be disallowed.\n a[...] = np.arange(6)[::-1].reshape(1, 2, 3)\n assert_equal(a, [[5, 4, 3], [2, 1, 0]])\n # The other type of broadcasting would require a reduction operation.\n\n def assign(a, b):\n a[...] = b\n\n assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))\n\n def test_assignment_errors(self):\n # Address issue #2276\n class C:\n pass\n a = np.zeros(1)\n\n def assign(v):\n a[0] = v\n\n assert_raises((AttributeError, TypeError), assign, C())\n assert_raises(ValueError, assign, [1])\n\n def test_unicode_assignment(self):\n # gh-5049\n from numpy.core.numeric import set_string_function\n\n @contextmanager\n def inject_str(s):\n \"\"\" replace ndarray.__str__ temporarily \"\"\"\n set_string_function(lambda x: s, repr=False)\n try:\n yield\n finally:\n set_string_function(None, repr=False)\n\n a1d = np.array([u'test'])\n a0d = np.array(u'done')\n with inject_str(u'bad'):\n a1d[0] = a0d # previously this would invoke __str__\n assert_equal(a1d[0], u'done')\n\n # this would crash for the same reason\n np.array([np.array(u'\\xe5\\xe4\\xf6')])\n\n def test_stringlike_empty_list(self):\n # gh-8902\n u = np.array([u'done'])\n b = np.array([b'done'])\n\n class bad_sequence:\n def __getitem__(self): pass\n def __len__(self): raise RuntimeError\n\n assert_raises(ValueError, operator.setitem, u, 0, [])\n assert_raises(ValueError, operator.setitem, b, 0, [])\n\n assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())\n assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())\n\n def test_longdouble_assignment(self):\n # only relevant if longdouble is larger than float\n # we're looking for loss of precision\n\n for dtype in (np.longdouble, np.longcomplex):\n # gh-8902\n tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)\n tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)\n\n # construction\n tiny1d = np.array([tinya])\n assert_equal(tiny1d[0], tinya)\n\n # scalar = scalar\n tiny1d[0] = tinyb\n assert_equal(tiny1d[0], tinyb)\n\n # 0d = scalar\n tiny1d[0, ...] = tinya\n assert_equal(tiny1d[0], tinya)\n\n # 0d = 0d\n tiny1d[0, ...] 
= tinyb[...]\n assert_equal(tiny1d[0], tinyb)\n\n # scalar = 0d\n tiny1d[0] = tinyb[...]\n assert_equal(tiny1d[0], tinyb)\n\n arr = np.array([np.array(tinya)])\n assert_equal(arr[0], tinya)\n\n def test_cast_to_string(self):\n # cast to str should do \"str(scalar)\", not \"str(scalar.item())\"\n # Example: In python2, str(float) is truncated, so we want to avoid\n # str(np.float64(...).item()) as this would incorrectly truncate.\n a = np.zeros(1, dtype='S20')\n a[:] = np.array(['1.12345678901234567890'], dtype='f8')\n assert_equal(a[0], b\"1.1234567890123457\")\n\n\nclass TestDtypedescr:\n def test_construction(self):\n d1 = np.dtype('i4')\n assert_equal(d1, np.dtype(np.int32))\n d2 = np.dtype('f8')\n assert_equal(d2, np.dtype(np.float64))\n\n def test_byteorders(self):\n assert_(np.dtype('<i4') != np.dtype('>i4'))\n assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))\n\n def test_structured_non_void(self):\n fields = [('a', '<i2'), ('b', '<i2')]\n dt_int = np.dtype(('i4', fields))\n assert_equal(str(dt_int), \"(numpy.int32, [('a', '<i2'), ('b', '<i2')])\")\n\n # gh-9821\n arr_int = np.zeros(4, dt_int)\n assert_equal(repr(arr_int),\n \"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))\")\n\n\nclass TestZeroRank:\n def setup(self):\n self.d = np.array(0), np.array('x', object)\n\n def test_ellipsis_subscript(self):\n a, b = self.d\n assert_equal(a[...], 0)\n assert_equal(b[...], 'x')\n assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.\n assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.\n\n def test_empty_subscript(self):\n a, b = self.d\n assert_equal(a[()], 0)\n assert_equal(b[()], 'x')\n assert_(type(a[()]) is a.dtype.type)\n assert_(type(b[()]) is str)\n\n def test_invalid_subscript(self):\n a, b = self.d\n assert_raises(IndexError, lambda x: x[0], a)\n assert_raises(IndexError, lambda x: x[0], b)\n assert_raises(IndexError, lambda x: x[np.array([], int)], a)\n assert_raises(IndexError, lambda x: x[np.array([], int)], b)\n\n def test_ellipsis_subscript_assignment(self):\n a, b = self.d\n a[...] = 42\n assert_equal(a, 42)\n b[...] 
= ''\n assert_equal(b.item(), '')\n\n def test_empty_subscript_assignment(self):\n a, b = self.d\n a[()] = 42\n assert_equal(a, 42)\n b[()] = ''\n assert_equal(b.item(), '')\n\n def test_invalid_subscript_assignment(self):\n a, b = self.d\n\n def assign(x, i, v):\n x[i] = v\n\n assert_raises(IndexError, assign, a, 0, 42)\n assert_raises(IndexError, assign, b, 0, '')\n assert_raises(ValueError, assign, a, (), '')\n\n def test_newaxis(self):\n a, b = self.d\n assert_equal(a[np.newaxis].shape, (1,))\n assert_equal(a[..., np.newaxis].shape, (1,))\n assert_equal(a[np.newaxis, ...].shape, (1,))\n assert_equal(a[..., np.newaxis].shape, (1,))\n assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))\n assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))\n assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))\n assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)\n\n def test_invalid_newaxis(self):\n a, b = self.d\n\n def subscript(x, i):\n x[i]\n\n assert_raises(IndexError, subscript, a, (np.newaxis, 0))\n assert_raises(IndexError, subscript, a, (np.newaxis,)*50)\n\n def test_constructor(self):\n x = np.ndarray(())\n x[()] = 5\n assert_equal(x[()], 5)\n y = np.ndarray((), buffer=x)\n y[()] = 6\n assert_equal(x[()], 6)\n\n def test_output(self):\n x = np.array(2)\n assert_raises(ValueError, np.add, x, [1], x)\n\n def test_real_imag(self):\n # contiguity checks are for gh-11245\n x = np.array(1j)\n xr = x.real\n xi = x.imag\n\n assert_equal(xr, np.array(0))\n assert_(type(xr) is np.ndarray)\n assert_equal(xr.flags.contiguous, True)\n assert_equal(xr.flags.f_contiguous, True)\n\n assert_equal(xi, np.array(1))\n assert_(type(xi) is np.ndarray)\n assert_equal(xi.flags.contiguous, True)\n assert_equal(xi.flags.f_contiguous, True)\n\n\nclass TestScalarIndexing:\n def setup(self):\n self.d = np.array([0, 1])[0]\n\n def test_ellipsis_subscript(self):\n a = self.d\n assert_equal(a[...], 0)\n assert_equal(a[...].shape, ())\n\n def test_empty_subscript(self):\n a = self.d\n assert_equal(a[()], 0)\n assert_equal(a[()].shape, ())\n\n def test_invalid_subscript(self):\n a = self.d\n assert_raises(IndexError, lambda x: x[0], a)\n assert_raises(IndexError, lambda x: x[np.array([], int)], a)\n\n def test_invalid_subscript_assignment(self):\n a = self.d\n\n def assign(x, i, v):\n x[i] = v\n\n assert_raises(TypeError, assign, a, 0, 42)\n\n def test_newaxis(self):\n a = self.d\n assert_equal(a[np.newaxis].shape, (1,))\n assert_equal(a[..., np.newaxis].shape, (1,))\n assert_equal(a[np.newaxis, ...].shape, (1,))\n assert_equal(a[..., np.newaxis].shape, (1,))\n assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))\n assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))\n assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))\n assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)\n\n def test_invalid_newaxis(self):\n a = self.d\n\n def subscript(x, i):\n x[i]\n\n assert_raises(IndexError, subscript, a, (np.newaxis, 0))\n assert_raises(IndexError, subscript, a, (np.newaxis,)*50)\n\n def test_overlapping_assignment(self):\n # With positive strides\n a = np.arange(4)\n a[:-1] = a[1:]\n assert_equal(a, [1, 2, 3, 3])\n\n a = np.arange(4)\n a[1:] = a[:-1]\n assert_equal(a, [0, 0, 1, 2])\n\n # With positive and negative strides\n a = np.arange(4)\n a[:] = a[::-1]\n assert_equal(a, [3, 2, 1, 0])\n\n a = np.arange(6).reshape(2, 3)\n a[::-1,:] = a[:, ::-1]\n assert_equal(a, [[5, 4, 3], [2, 1, 0]])\n\n a = np.arange(6).reshape(2, 3)\n a[::-1, ::-1] = a[:, ::-1]\n assert_equal(a, [[3, 4, 5], [0, 1, 2]])\n\n # With 
just one element overlapping\n a = np.arange(5)\n a[:3] = a[2:]\n assert_equal(a, [2, 3, 4, 3, 4])\n\n a = np.arange(5)\n a[2:] = a[:3]\n assert_equal(a, [0, 1, 0, 1, 2])\n\n a = np.arange(5)\n a[2::-1] = a[2:]\n assert_equal(a, [4, 3, 2, 3, 4])\n\n a = np.arange(5)\n a[2:] = a[2::-1]\n assert_equal(a, [0, 1, 2, 1, 0])\n\n a = np.arange(5)\n a[2::-1] = a[:1:-1]\n assert_equal(a, [2, 3, 4, 3, 4])\n\n a = np.arange(5)\n a[:1:-1] = a[2::-1]\n assert_equal(a, [0, 1, 0, 1, 2])\n\n\nclass TestCreation:\n \"\"\"\n Test the np.array constructor\n \"\"\"\n def test_from_attribute(self):\n class x:\n def __array__(self, dtype=None):\n pass\n\n assert_raises(ValueError, np.array, x())\n\n def test_from_string(self):\n types = np.typecodes['AllInteger'] + np.typecodes['Float']\n nstr = ['123', '123']\n result = np.array([123, 123], dtype=int)\n for type in types:\n msg = 'String conversion for %s' % type\n assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)\n\n def test_void(self):\n arr = np.array([], dtype='V')\n assert_equal(arr.dtype.kind, 'V')\n\n def test_too_big_error(self):\n # 45341 is the smallest integer greater than sqrt(2**31 - 1).\n # 3037000500 is the smallest integer greater than sqrt(2**63 - 1).\n # We want to make sure that the square byte array with those dimensions\n # is too big on 32 or 64 bit systems respectively.\n if np.iinfo('intp').max == 2**31 - 1:\n shape = (46341, 46341)\n elif np.iinfo('intp').max == 2**63 - 1:\n shape = (3037000500, 3037000500)\n else:\n return\n assert_raises(ValueError, np.empty, shape, dtype=np.int8)\n assert_raises(ValueError, np.zeros, shape, dtype=np.int8)\n assert_raises(ValueError, np.ones, shape, dtype=np.int8)\n\n def test_zeros(self):\n types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']\n for dt in types:\n d = np.zeros((13,), dtype=dt)\n assert_equal(np.count_nonzero(d), 0)\n # true for ieee floats\n assert_equal(d.sum(), 0)\n assert_(not d.any())\n\n d = np.zeros(2, dtype='(2,4)i4')\n assert_equal(np.count_nonzero(d), 0)\n assert_equal(d.sum(), 0)\n assert_(not d.any())\n\n d = np.zeros(2, dtype='4i4')\n assert_equal(np.count_nonzero(d), 0)\n assert_equal(d.sum(), 0)\n assert_(not d.any())\n\n d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')\n assert_equal(np.count_nonzero(d), 0)\n\n @pytest.mark.slow\n def test_zeros_big(self):\n # test big array as they might be allocated different by the system\n types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']\n for dt in types:\n d = np.zeros((30 * 1024**2,), dtype=dt)\n assert_(not d.any())\n # This test can fail on 32-bit systems due to insufficient\n # contiguous memory. 
Deallocating the previous array increases the\n # chance of success.\n del(d)\n\n def test_zeros_obj(self):\n # test initialization from PyLong(0)\n d = np.zeros((13,), dtype=object)\n assert_array_equal(d, [0] * 13)\n assert_equal(np.count_nonzero(d), 0)\n\n def test_zeros_obj_obj(self):\n d = np.zeros(10, dtype=[('k', object, 2)])\n assert_array_equal(d['k'], 0)\n\n def test_zeros_like_like_zeros(self):\n # test zeros_like returns the same as zeros\n for c in np.typecodes['All']:\n if c == 'V':\n continue\n d = np.zeros((3,3), dtype=c)\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n # explicitly check some special cases\n d = np.zeros((3,3), dtype='S5')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n d = np.zeros((3,3), dtype='U5')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n\n d = np.zeros((3,3), dtype='<i4')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n d = np.zeros((3,3), dtype='>i4')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n\n d = np.zeros((3,3), dtype='<M8[s]')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n d = np.zeros((3,3), dtype='>M8[s]')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n\n d = np.zeros((3,3), dtype='f4,f4')\n assert_array_equal(np.zeros_like(d), d)\n assert_equal(np.zeros_like(d).dtype, d.dtype)\n\n def test_empty_unicode(self):\n # don't throw decode errors on garbage memory\n for i in range(5, 100, 5):\n d = np.empty(i, dtype='U')\n str(d)\n\n def test_sequence_non_homogenous(self):\n assert_equal(np.array([4, 2**80]).dtype, object)\n assert_equal(np.array([4, 2**80, 4]).dtype, object)\n assert_equal(np.array([2**80, 4]).dtype, object)\n assert_equal(np.array([2**80] * 3).dtype, object)\n assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)\n assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)\n assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)\n\n @pytest.mark.skipif(sys.version_info[0] >= 3, reason=\"Not Python 2\")\n def test_sequence_long(self):\n assert_equal(np.array([long(4), long(4)]).dtype, long)\n assert_equal(np.array([long(4), 2**80]).dtype, object)\n assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)\n assert_equal(np.array([2**80, long(4)]).dtype, object)\n\n def test_non_sequence_sequence(self):\n \"\"\"Should not segfault.\n\n Class Fail breaks the sequence protocol for new style classes, i.e.,\n those derived from object. Class Map is a mapping type indicated by\n raising a ValueError. 
At some point we may raise a warning instead\n of an error in the Fail case.\n\n \"\"\"\n class Fail:\n def __len__(self):\n return 1\n\n def __getitem__(self, index):\n raise ValueError()\n\n class Map:\n def __len__(self):\n return 1\n\n def __getitem__(self, index):\n raise KeyError()\n\n a = np.array([Map()])\n assert_(a.shape == (1,))\n assert_(a.dtype == np.dtype(object))\n assert_raises(ValueError, np.array, [Fail()])\n\n def test_no_len_object_type(self):\n # gh-5100, want object array from iterable object without len()\n class Point2:\n def __init__(self):\n pass\n\n def __getitem__(self, ind):\n if ind in [0, 1]:\n return ind\n else:\n raise IndexError()\n d = np.array([Point2(), Point2(), Point2()])\n assert_equal(d.dtype, np.dtype(object))\n\n def test_false_len_sequence(self):\n # gh-7264, segfault for this example\n class C:\n def __getitem__(self, i):\n raise IndexError\n def __len__(self):\n return 42\n\n assert_raises(ValueError, np.array, C()) # segfault?\n\n def test_failed_len_sequence(self):\n # gh-7393\n class A:\n def __init__(self, data):\n self._data = data\n def __getitem__(self, item):\n return type(self)(self._data[item])\n def __len__(self):\n return len(self._data)\n\n # len(d) should give 3, but len(d[0]) will fail\n d = A([1,2,3])\n assert_equal(len(np.array(d)), 3)\n\n def test_array_too_big(self):\n # Test that array creation succeeds for arrays addressable by intp\n # on the byte level and fails for too large arrays.\n buf = np.zeros(100)\n\n max_bytes = np.iinfo(np.intp).max\n for dtype in [\"intp\", \"S20\", \"b\"]:\n dtype = np.dtype(dtype)\n itemsize = dtype.itemsize\n\n np.ndarray(buffer=buf, strides=(0,),\n shape=(max_bytes//itemsize,), dtype=dtype)\n assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),\n shape=(max_bytes//itemsize + 1,), dtype=dtype)\n\n def test_jagged_ndim_object(self):\n # Lists of mismatching depths are treated as object arrays\n a = np.array([[1], 2, 3])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n a = np.array([1, [2], 3])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n a = np.array([1, 2, [3]])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n def test_jagged_shape_object(self):\n # The jagged dimension of a list is turned into an object array\n a = np.array([[1, 1], [2], [3]])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n a = np.array([[1], [2, 2], [3]])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n a = np.array([[1], [2], [3, 3]])\n assert_equal(a.shape, (3,))\n assert_equal(a.dtype, object)\n\n\nclass TestStructured:\n def test_subarray_field_access(self):\n a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])\n a['a'] = np.arange(60).reshape(3, 5, 2, 2)\n\n # Since the subarray is always in C-order, a transpose\n # does not swap the subarray:\n assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))\n\n # In Fortran order, the subarray gets appended\n # like in all other cases, not prepended as a special case\n b = a.copy(order='F')\n assert_equal(a['a'].shape, b['a'].shape)\n assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)\n\n def test_subarray_comparison(self):\n # Check that comparisons between record arrays with\n # multi-dimensional field types work properly\n a = np.rec.fromrecords(\n [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],\n dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])\n b = a.copy()\n assert_equal(a == b, [True, True])\n assert_equal(a != b, [False, 
False])\n b[1].b = 'c'\n assert_equal(a == b, [True, False])\n assert_equal(a != b, [False, True])\n for i in range(3):\n b[0].a = a[0].a\n b[0].a[i] = 5\n assert_equal(a == b, [False, False])\n assert_equal(a != b, [True, True])\n for i in range(2):\n for j in range(2):\n b = a.copy()\n b[0].c[i, j] = 10\n assert_equal(a == b, [False, True])\n assert_equal(a != b, [True, False])\n\n # Check that broadcasting with a subarray works\n a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])\n b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])\n assert_equal(a == b, [[True, True, False], [False, False, True]])\n assert_equal(b == a, [[True, True, False], [False, False, True]])\n a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])\n b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])\n assert_equal(a == b, [[True, True, False], [False, False, True]])\n assert_equal(b == a, [[True, True, False], [False, False, True]])\n a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])\n b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])\n assert_equal(a == b, [[True, False, False], [False, False, True]])\n assert_equal(b == a, [[True, False, False], [False, False, True]])\n\n # Check that broadcasting Fortran-style arrays with a subarray work\n a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')\n b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])\n assert_equal(a == b, [[True, False, False], [False, False, True]])\n assert_equal(b == a, [[True, False, False], [False, False, True]])\n\n # Check that incompatible sub-array shapes don't result to broadcasting\n x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])\n y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])\n # This comparison invokes deprecated behaviour, and will probably\n # start raising an error eventually. What we really care about in this\n # test is just that it doesn't return True.\n with suppress_warnings() as sup:\n sup.filter(FutureWarning, \"elementwise == comparison failed\")\n assert_equal(x == y, False)\n\n x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])\n y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])\n # This comparison invokes deprecated behaviour, and will probably\n # start raising an error eventually. 
What we really care about in this\n # test is just that it doesn't return True.\n with suppress_warnings() as sup:\n sup.filter(FutureWarning, \"elementwise == comparison failed\")\n assert_equal(x == y, False)\n\n # Check that structured arrays that are different only in\n # byte-order work\n a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])\n b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])\n assert_equal(a == b, [False, True])\n\n def test_casting(self):\n # Check that casting a structured array to change its byte order\n # works\n a = np.array([(1,)], dtype=[('a', '<i4')])\n assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))\n b = a.astype([('a', '>i4')])\n assert_equal(b, a.byteswap().newbyteorder())\n assert_equal(a['a'][0], b['a'][0])\n\n # Check that equality comparison works on structured arrays if\n # they are 'equiv'-castable\n a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])\n b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])\n assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))\n assert_equal(a == b, [True, True])\n\n # Check that 'equiv' casting can change byte order\n assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))\n c = a.astype(b.dtype, casting='equiv')\n assert_equal(a == c, [True, True])\n\n # Check that 'safe' casting can change byte order and up-cast\n # fields\n t = [('a', '<i8'), ('b', '>f8')]\n assert_(np.can_cast(a.dtype, t, casting='safe'))\n c = a.astype(t, casting='safe')\n assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),\n [True, True])\n\n # Check that 'same_kind' casting can change byte order and\n # change field widths within a \"kind\"\n t = [('a', '<i4'), ('b', '>f4')]\n assert_(np.can_cast(a.dtype, t, casting='same_kind'))\n c = a.astype(t, casting='same_kind')\n assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),\n [True, True])\n\n # Check that casting fails if the casting rule should fail on\n # any of the fields\n t = [('a', '>i8'), ('b', '<f4')]\n assert_(not np.can_cast(a.dtype, t, casting='safe'))\n assert_raises(TypeError, a.astype, t, casting='safe')\n t = [('a', '>i2'), ('b', '<f8')]\n assert_(not np.can_cast(a.dtype, t, casting='equiv'))\n assert_raises(TypeError, a.astype, t, casting='equiv')\n t = [('a', '>i8'), ('b', '<i2')]\n assert_(not np.can_cast(a.dtype, t, casting='same_kind'))\n assert_raises(TypeError, a.astype, t, casting='same_kind')\n assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))\n assert_raises(TypeError, a.astype, b.dtype, casting='no')\n\n # Check that non-'unsafe' casting can't change the set of field names\n for casting in ['no', 'safe', 'equiv', 'same_kind']:\n t = [('a', '>i4')]\n assert_(not np.can_cast(a.dtype, t, casting=casting))\n t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]\n assert_(not np.can_cast(a.dtype, t, casting=casting))\n\n def test_objview(self):\n # https://github.com/numpy/numpy/issues/3286\n a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])\n a[['a', 'b']] # TypeError?\n\n # https://github.com/numpy/numpy/issues/3253\n dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])\n dat2[['B', 'A']] # TypeError?\n\n def test_setfield(self):\n # https://github.com/numpy/numpy/issues/3126\n struct_dt = np.dtype([('elem', 'i4', 5),])\n dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])\n x = np.zeros(1, dt)\n x[0]['field'] = np.ones(10, dtype='i4')\n x[0]['struct'] = np.ones(1, dtype=struct_dt)\n assert_equal(x[0]['field'], np.ones(10, dtype='i4'))\n\n def 
test_setfield_object(self):\n # make sure object field assignment with ndarray value\n # on void scalar mimics setitem behavior\n b = np.zeros(1, dtype=[('x', 'O')])\n # next line should work identically to b['x'][0] = np.arange(3)\n b[0]['x'] = np.arange(3)\n assert_equal(b[0]['x'], np.arange(3))\n\n # check that broadcasting check still works\n c = np.zeros(1, dtype=[('x', 'O', 5)])\n\n def testassign():\n c[0]['x'] = np.arange(3)\n\n assert_raises(ValueError, testassign)\n\n def test_zero_width_string(self):\n # Test for PR #6430 / issues #473, #4955, #2585\n\n dt = np.dtype([('I', int), ('S', 'S0')])\n\n x = np.zeros(4, dtype=dt)\n\n assert_equal(x['S'], [b'', b'', b'', b''])\n assert_equal(x['S'].itemsize, 0)\n\n x['S'] = ['a', 'b', 'c', 'd']\n assert_equal(x['S'], [b'', b'', b'', b''])\n assert_equal(x['I'], [0, 0, 0, 0])\n\n # Variation on test case from #4955\n x['S'][x['I'] == 0] = 'hello'\n assert_equal(x['S'], [b'', b'', b'', b''])\n assert_equal(x['I'], [0, 0, 0, 0])\n\n # Variation on test case from #2585\n x['S'] = 'A'\n assert_equal(x['S'], [b'', b'', b'', b''])\n assert_equal(x['I'], [0, 0, 0, 0])\n\n # Allow zero-width dtypes in ndarray constructor\n y = np.ndarray(4, dtype=x['S'].dtype)\n assert_equal(y.itemsize, 0)\n assert_equal(x['S'], y)\n\n # More tests for indexing an array with zero-width fields\n assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),\n ('b', 'u1')])['a'].itemsize, 0)\n assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)\n assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)\n\n xx = x['S'].reshape((2, 2))\n assert_equal(xx.itemsize, 0)\n assert_equal(xx, [[b'', b''], [b'', b'']])\n # check for no uninitialized memory due to viewing S0 array\n assert_equal(xx[:].dtype, xx.dtype)\n assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)\n\n b = io.BytesIO()\n np.save(b, xx)\n\n b.seek(0)\n yy = np.load(b)\n assert_equal(yy.itemsize, 0)\n assert_equal(xx, yy)\n\n with temppath(suffix='.npy') as tmp:\n np.save(tmp, xx)\n yy = np.load(tmp)\n assert_equal(yy.itemsize, 0)\n assert_equal(xx, yy)\n\n def test_base_attr(self):\n a = np.zeros(3, dtype='i4,f4')\n b = a[0]\n assert_(b.base is a)\n\n def test_assignment(self):\n def testassign(arr, v):\n c = arr.copy()\n c[0] = v # assign using setitem\n c[1:] = v # assign using \"dtype_transfer\" code paths\n return c\n\n dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])\n arr = np.ones(2, dt)\n v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])\n v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])\n v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])\n v4 = np.array([(2,)], dtype=[('bar', 'i8')])\n v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])\n w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})\n\n ans = np.array([(2,3),(2,3)], dtype=dt)\n assert_equal(testassign(arr, v1), ans)\n assert_equal(testassign(arr, v2), ans)\n assert_equal(testassign(arr, v3), ans)\n assert_raises(ValueError, lambda: testassign(arr, v4))\n assert_equal(testassign(arr, v5), ans)\n w[:] = 4\n assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))\n\n # test field-reordering, assignment by position, and self-assignment\n a = np.array([(1,2,3)],\n dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])\n a[['foo', 'bar']] = a[['bar', 'foo']]\n assert_equal(a[0].item(), (2,1,3))\n\n # test that this works even for 'simple_unaligned' structs\n # (ie, that PyArray_EquivTypes cares about field order too)\n a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])\n a[['a', 'b']] = 
a[['b', 'a']]\n assert_equal(a[0].item(), (2,1))\n\n def test_structuredscalar_indexing(self):\n # test gh-7262\n x = np.empty(shape=1, dtype=\"(2)3S,(2)3U\")\n assert_equal(x[[\"f0\",\"f1\"]][0], x[0][[\"f0\",\"f1\"]])\n assert_equal(x[0], x[0][()])\n\n def test_multiindex_titles(self):\n a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])\n assert_raises(KeyError, lambda : a[['a','c']])\n assert_raises(KeyError, lambda : a[['a','a']])\n assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated\n a[['b','c']] # no exception\n\n\nclass TestBool:\n def test_test_interning(self):\n a0 = np.bool_(0)\n b0 = np.bool_(False)\n assert_(a0 is b0)\n a1 = np.bool_(1)\n b1 = np.bool_(True)\n assert_(a1 is b1)\n assert_(np.array([True])[0] is a1)\n assert_(np.array(True)[()] is a1)\n\n def test_sum(self):\n d = np.ones(101, dtype=bool)\n assert_equal(d.sum(), d.size)\n assert_equal(d[::2].sum(), d[::2].size)\n assert_equal(d[::-2].sum(), d[::-2].size)\n\n d = np.frombuffer(b'\\xff\\xff' * 100, dtype=bool)\n assert_equal(d.sum(), d.size)\n assert_equal(d[::2].sum(), d[::2].size)\n assert_equal(d[::-2].sum(), d[::-2].size)\n\n def check_count_nonzero(self, power, length):\n powers = [2 ** i for i in range(length)]\n for i in range(2**power):\n l = [(i & x) != 0 for x in powers]\n a = np.array(l, dtype=bool)\n c = builtins.sum(l)\n assert_equal(np.count_nonzero(a), c)\n av = a.view(np.uint8)\n av *= 3\n assert_equal(np.count_nonzero(a), c)\n av *= 4\n assert_equal(np.count_nonzero(a), c)\n av[av != 0] = 0xFF\n assert_equal(np.count_nonzero(a), c)\n\n def test_count_nonzero(self):\n # check all 12 bit combinations in a length 17 array\n # covers most cases of the 16 byte unrolled code\n self.check_count_nonzero(12, 17)\n\n @pytest.mark.slow\n def test_count_nonzero_all(self):\n # check all combinations in a length 17 array\n # covers all cases of the 16 byte unrolled code\n self.check_count_nonzero(17, 17)\n\n def test_count_nonzero_unaligned(self):\n # prevent mistakes as e.g. 
gh-4060\n for o in range(7):\n a = np.zeros((18,), dtype=bool)[o+1:]\n a[:o] = True\n assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))\n a = np.ones((18,), dtype=bool)[o+1:]\n a[:o] = False\n assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))\n\n def _test_cast_from_flexible(self, dtype):\n # empty string -> false\n for n in range(3):\n v = np.array(b'', (dtype, n))\n assert_equal(bool(v), False)\n assert_equal(bool(v[()]), False)\n assert_equal(v.astype(bool), False)\n assert_(isinstance(v.astype(bool), np.ndarray))\n assert_(v[()].astype(bool) is np.False_)\n\n # anything else -> true\n for n in range(1, 4):\n for val in [b'a', b'0', b' ']:\n v = np.array(val, (dtype, n))\n assert_equal(bool(v), True)\n assert_equal(bool(v[()]), True)\n assert_equal(v.astype(bool), True)\n assert_(isinstance(v.astype(bool), np.ndarray))\n assert_(v[()].astype(bool) is np.True_)\n\n def test_cast_from_void(self):\n self._test_cast_from_flexible(np.void)\n\n @pytest.mark.xfail(reason=\"See gh-9847\")\n def test_cast_from_unicode(self):\n self._test_cast_from_flexible(np.unicode_)\n\n @pytest.mark.xfail(reason=\"See gh-9847\")\n def test_cast_from_bytes(self):\n self._test_cast_from_flexible(np.bytes_)\n\n\nclass TestZeroSizeFlexible:\n @staticmethod\n def _zeros(shape, dtype=str):\n dtype = np.dtype(dtype)\n if dtype == np.void:\n return np.zeros(shape, dtype=(dtype, 0))\n\n # not constructable directly\n dtype = np.dtype([('x', dtype, 0)])\n return np.zeros(shape, dtype=dtype)['x']\n\n def test_create(self):\n zs = self._zeros(10, bytes)\n assert_equal(zs.itemsize, 0)\n zs = self._zeros(10, np.void)\n assert_equal(zs.itemsize, 0)\n zs = self._zeros(10, unicode)\n assert_equal(zs.itemsize, 0)\n\n def _test_sort_partition(self, name, kinds, **kwargs):\n # Previously, these would all hang\n for dt in [bytes, np.void, unicode]:\n zs = self._zeros(10, dt)\n sort_method = getattr(zs, name)\n sort_func = getattr(np, name)\n for kind in kinds:\n sort_method(kind=kind, **kwargs)\n sort_func(zs, kind=kind, **kwargs)\n\n def test_sort(self):\n self._test_sort_partition('sort', kinds='qhs')\n\n def test_argsort(self):\n self._test_sort_partition('argsort', kinds='qhs')\n\n def test_partition(self):\n self._test_sort_partition('partition', kinds=['introselect'], kth=2)\n\n def test_argpartition(self):\n self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)\n\n def test_resize(self):\n # previously an error\n for dt in [bytes, np.void, unicode]:\n zs = self._zeros(10, dt)\n zs.resize(25)\n zs.resize((10, 10))\n\n def test_view(self):\n for dt in [bytes, np.void, unicode]:\n zs = self._zeros(10, dt)\n\n # viewing as itself should be allowed\n assert_equal(zs.view(dt).dtype, np.dtype(dt))\n\n # viewing as any non-empty type gives an empty result\n assert_equal(zs.view((dt, 1)).shape, (0,))\n\n def test_dumps(self):\n zs = self._zeros(10, int)\n assert_equal(zs, pickle.loads(zs.dumps()))\n\n def test_pickle(self):\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n for dt in [bytes, np.void, unicode]:\n zs = self._zeros(10, dt)\n p = pickle.dumps(zs, protocol=proto)\n zs2 = pickle.loads(p)\n\n assert_equal(zs.dtype, zs2.dtype)\n\n @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,\n reason=\"requires pickle protocol 5\")\n def test_pickle_with_buffercallback(self):\n array = np.arange(10)\n buffers = []\n bytes_string = pickle.dumps(array, buffer_callback=buffers.append,\n protocol=5)\n array_from_buffer = pickle.loads(bytes_string, buffers=buffers)\n # when using pickle protocol 5 
with buffer callbacks,\n # array_from_buffer is reconstructed from a buffer holding a view\n # to the initial array's data, so modifying an element in array\n # should modify it in array_from_buffer too.\n array[0] = -1\n assert array_from_buffer[0] == -1, array_from_buffer[0]\n\n\nclass TestMethods:\n\n sort_kinds = ['quicksort', 'heapsort', 'stable']\n\n def test_compress(self):\n tgt = [[5, 6, 7, 8, 9]]\n arr = np.arange(10).reshape(2, 5)\n out = arr.compress([0, 1], axis=0)\n assert_equal(out, tgt)\n\n tgt = [[1, 3], [6, 8]]\n out = arr.compress([0, 1, 0, 1, 0], axis=1)\n assert_equal(out, tgt)\n\n tgt = [[1], [6]]\n arr = np.arange(10).reshape(2, 5)\n out = arr.compress([0, 1], axis=1)\n assert_equal(out, tgt)\n\n arr = np.arange(10).reshape(2, 5)\n out = arr.compress([0, 1])\n assert_equal(out, 1)\n\n def test_choose(self):\n x = 2*np.ones((3,), dtype=int)\n y = 3*np.ones((3,), dtype=int)\n x2 = 2*np.ones((2, 3), dtype=int)\n y2 = 3*np.ones((2, 3), dtype=int)\n ind = np.array([0, 0, 1])\n\n A = ind.choose((x, y))\n assert_equal(A, [2, 2, 3])\n\n A = ind.choose((x2, y2))\n assert_equal(A, [[2, 2, 3], [2, 2, 3]])\n\n A = ind.choose((x, y2))\n assert_equal(A, [[2, 2, 3], [2, 2, 3]])\n\n oned = np.ones(1)\n # gh-12031, caused SEGFAULT\n assert_raises(TypeError, oned.choose,np.void(0), [oned])\n\n # gh-6272 check overlap on out\n x = np.arange(5)\n y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')\n assert_equal(y, np.array([0, 1, 2]))\n\n def test_prod(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n\n for ctype in [np.int16, np.uint16, np.int32, np.uint32,\n np.float32, np.float64, np.complex64, np.complex128]:\n a = np.array(ba, ctype)\n a2 = np.array(ba2, ctype)\n if ctype in ['1', 'b']:\n assert_raises(ArithmeticError, a.prod)\n assert_raises(ArithmeticError, a2.prod, axis=1)\n else:\n assert_equal(a.prod(axis=0), 26400)\n assert_array_equal(a2.prod(axis=0),\n np.array([50, 36, 84, 180], ctype))\n assert_array_equal(a2.prod(axis=-1),\n np.array([24, 1890, 600], ctype))\n\n def test_repeat(self):\n m = np.array([1, 2, 3, 4, 5, 6])\n m_rect = m.reshape((2, 3))\n\n A = m.repeat([1, 3, 2, 1, 1, 2])\n assert_equal(A, [1, 2, 2, 2, 3,\n 3, 4, 5, 6, 6])\n\n A = m.repeat(2)\n assert_equal(A, [1, 1, 2, 2, 3, 3,\n 4, 4, 5, 5, 6, 6])\n\n A = m_rect.repeat([2, 1], axis=0)\n assert_equal(A, [[1, 2, 3],\n [1, 2, 3],\n [4, 5, 6]])\n\n A = m_rect.repeat([1, 3, 2], axis=1)\n assert_equal(A, [[1, 2, 2, 2, 3, 3],\n [4, 5, 5, 5, 6, 6]])\n\n A = m_rect.repeat(2, axis=0)\n assert_equal(A, [[1, 2, 3],\n [1, 2, 3],\n [4, 5, 6],\n [4, 5, 6]])\n\n A = m_rect.repeat(2, axis=1)\n assert_equal(A, [[1, 1, 2, 2, 3, 3],\n [4, 4, 5, 5, 6, 6]])\n\n def test_reshape(self):\n arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n\n tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]\n assert_equal(arr.reshape(2, 6), tgt)\n\n tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n assert_equal(arr.reshape(3, 4), tgt)\n\n tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]\n assert_equal(arr.reshape((3, 4), order='F'), tgt)\n\n tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]\n assert_equal(arr.T.reshape((3, 4), order='C'), tgt)\n\n def test_round(self):\n def check_round(arr, expected, *round_args):\n assert_equal(arr.round(*round_args), expected)\n # With output array\n out = np.zeros_like(arr)\n res = arr.round(*round_args, out=out)\n assert_equal(out, expected)\n assert_equal(out, res)\n\n check_round(np.array([1.2, 1.5]), [1, 2])\n 
check_round(np.array(1.5), 2)\n check_round(np.array([12.2, 15.5]), [10, 20], -1)\n check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)\n # Complex rounding\n check_round(np.array([4.5 + 1.5j]), [4 + 2j])\n check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)\n\n def test_squeeze(self):\n a = np.array([[[1], [2], [3]]])\n assert_equal(a.squeeze(), [1, 2, 3])\n assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])\n assert_raises(ValueError, a.squeeze, axis=(1,))\n assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])\n\n def test_transpose(self):\n a = np.array([[1, 2], [3, 4]])\n assert_equal(a.transpose(), [[1, 3], [2, 4]])\n assert_raises(ValueError, lambda: a.transpose(0))\n assert_raises(ValueError, lambda: a.transpose(0, 0))\n assert_raises(ValueError, lambda: a.transpose(0, 1, 2))\n\n def test_sort(self):\n # test ordering for floats and complex containing nans. It is only\n # necessary to check the less-than comparison, so sorts that\n # only follow the insertion sort path are sufficient. We only\n # test doubles and complex doubles as the logic is the same.\n\n # check doubles\n msg = \"Test real sort order with nans\"\n a = np.array([np.nan, 1, 0])\n b = np.sort(a)\n assert_equal(b, a[::-1], msg)\n # check complex\n msg = \"Test complex sort order with nans\"\n a = np.zeros(9, dtype=np.complex128)\n a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]\n a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]\n b = np.sort(a)\n assert_equal(b, a[::-1], msg)\n\n # all c scalar sorts use the same code with different types\n # so it suffices to run a quick check with one type. The number\n # of sorted items must be greater than ~50 to check the actual\n # algorithm because quick and merge sort fall over to insertion\n # sort for small arrays.\n # Test unsigned dtypes and nonnegative numbers\n for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]:\n a = np.arange(101, dtype=dtype)\n b = a[::-1].copy()\n for kind in self.sort_kinds:\n msg = \"scalar sort, kind=%s, dtype=%s\" % (kind, dtype)\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # Test signed dtypes and negative numbers as well\n for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]:\n a = np.arange(-50, 51, dtype=dtype)\n b = a[::-1].copy()\n for kind in self.sort_kinds:\n msg = \"scalar sort, kind=%s, dtype=%s\" % (kind, dtype)\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test complex sorts. 
These use the same code as the scalars\n # but the compare function differs.\n ai = a*1j + 1\n bi = b*1j + 1\n for kind in self.sort_kinds:\n msg = \"complex sort, real part == 1, kind=%s\" % kind\n c = ai.copy()\n c.sort(kind=kind)\n assert_equal(c, ai, msg)\n c = bi.copy()\n c.sort(kind=kind)\n assert_equal(c, ai, msg)\n ai = a + 1j\n bi = b + 1j\n for kind in self.sort_kinds:\n msg = \"complex sort, imag part == 1, kind=%s\" % kind\n c = ai.copy()\n c.sort(kind=kind)\n assert_equal(c, ai, msg)\n c = bi.copy()\n c.sort(kind=kind)\n assert_equal(c, ai, msg)\n\n # test sorting of complex arrays requiring byte-swapping, gh-5441\n for endianness in '<>':\n for dt in np.typecodes['Complex']:\n arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)\n c = arr.copy()\n c.sort()\n msg = 'byte-swapped complex sort, dtype={0}'.format(dt)\n assert_equal(c, arr, msg)\n\n # test string sorts.\n s = 'aaaaaaaa'\n a = np.array([s + chr(i) for i in range(101)])\n b = a[::-1].copy()\n for kind in self.sort_kinds:\n msg = \"string sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test unicode sorts.\n s = 'aaaaaaaa'\n a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)\n b = a[::-1].copy()\n for kind in self.sort_kinds:\n msg = \"unicode sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test object array sorts.\n a = np.empty((101,), dtype=object)\n a[:] = list(range(101))\n b = a[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"object sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test record array sorts.\n dt = np.dtype([('f', float), ('i', int)])\n a = np.array([(i, i) for i in range(101)], dtype=dt)\n b = a[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"object sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test datetime64 sorts.\n a = np.arange(0, 101, dtype='datetime64[D]')\n b = a[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"datetime64 sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # test timedelta64 sorts.\n a = np.arange(0, 101, dtype='timedelta64[D]')\n b = a[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"timedelta64 sort, kind=%s\" % kind\n c = a.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n c = b.copy()\n c.sort(kind=kind)\n assert_equal(c, a, msg)\n\n # check axis handling. 
This should be the same for all type\n # specific sorts, so we only check it for one type and one kind\n a = np.array([[3, 2], [1, 0]])\n b = np.array([[1, 0], [3, 2]])\n c = np.array([[2, 3], [0, 1]])\n d = a.copy()\n d.sort(axis=0)\n assert_equal(d, b, \"test sort with axis=0\")\n d = a.copy()\n d.sort(axis=1)\n assert_equal(d, c, \"test sort with axis=1\")\n d = a.copy()\n d.sort()\n assert_equal(d, c, \"test sort with default axis\")\n\n # check axis handling for multidimensional empty arrays\n a = np.array([])\n a.shape = (3, 2, 1, 0)\n for axis in range(-a.ndim, a.ndim):\n msg = 'test empty array sort with axis={0}'.format(axis)\n assert_equal(np.sort(a, axis=axis), a, msg)\n msg = 'test empty array sort with axis=None'\n assert_equal(np.sort(a, axis=None), a.ravel(), msg)\n\n # test generic class with bogus ordering,\n # should not segfault.\n class Boom:\n def __lt__(self, other):\n return True\n\n a = np.array([Boom()]*100, dtype=object)\n for kind in self.sort_kinds:\n msg = \"bogus comparison object sort, kind=%s\" % kind\n c.sort(kind=kind)\n\n def test_void_sort(self):\n # gh-8210 - previously segfaulted\n for i in range(4):\n rand = np.random.randint(256, size=4000, dtype=np.uint8)\n arr = rand.view('V4')\n arr[::-1].sort()\n\n dt = np.dtype([('val', 'i4', (1,))])\n for i in range(4):\n rand = np.random.randint(256, size=4000, dtype=np.uint8)\n arr = rand.view(dt)\n arr[::-1].sort()\n\n def test_sort_raises(self):\n #gh-9404\n arr = np.array([0, datetime.now(), 1], dtype=object)\n for kind in self.sort_kinds:\n assert_raises(TypeError, arr.sort, kind=kind)\n #gh-3879\n class Raiser:\n def raises_anything(*args, **kwargs):\n raise TypeError(\"SOMETHING ERRORED\")\n __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything\n arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)\n np.random.shuffle(arr)\n for kind in self.sort_kinds:\n assert_raises(TypeError, arr.sort, kind=kind)\n\n def test_sort_degraded(self):\n # test degraded dataset would take minutes to run with normal qsort\n d = np.arange(1000000)\n do = d.copy()\n x = d\n # create a median of 3 killer where each median is the sorted second\n # last element of the quicksort partition\n while x.size > 3:\n mid = x.size // 2\n x[mid], x[-2] = x[-2], x[mid]\n x = x[:-2]\n\n assert_equal(np.sort(d), do)\n assert_equal(d[np.argsort(d)], do)\n\n def test_copy(self):\n def assert_fortran(arr):\n assert_(arr.flags.fortran)\n assert_(arr.flags.f_contiguous)\n assert_(not arr.flags.c_contiguous)\n\n def assert_c(arr):\n assert_(not arr.flags.fortran)\n assert_(not arr.flags.f_contiguous)\n assert_(arr.flags.c_contiguous)\n\n a = np.empty((2, 2), order='F')\n # Test copying a Fortran array\n assert_c(a.copy())\n assert_c(a.copy('C'))\n assert_fortran(a.copy('F'))\n assert_fortran(a.copy('A'))\n\n # Now test starting with a C array.\n a = np.empty((2, 2), order='C')\n assert_c(a.copy())\n assert_c(a.copy('C'))\n assert_fortran(a.copy('F'))\n assert_c(a.copy('A'))\n\n def test_sort_order(self):\n # Test sorting an array with fields\n x1 = np.array([21, 32, 14])\n x2 = np.array(['my', 'first', 'name'])\n x3 = np.array([3.1, 4.5, 6.2])\n r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')\n\n r.sort(order=['id'])\n assert_equal(r.id, np.array([14, 21, 32]))\n assert_equal(r.word, np.array(['name', 'my', 'first']))\n assert_equal(r.number, np.array([6.2, 3.1, 4.5]))\n\n r.sort(order=['word'])\n assert_equal(r.id, np.array([32, 21, 14]))\n assert_equal(r.word, np.array(['first', 'my', 'name']))\n 
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))\n\n r.sort(order=['number'])\n assert_equal(r.id, np.array([21, 32, 14]))\n assert_equal(r.word, np.array(['my', 'first', 'name']))\n assert_equal(r.number, np.array([3.1, 4.5, 6.2]))\n\n assert_raises_regex(ValueError, 'duplicate',\n lambda: r.sort(order=['id', 'id']))\n\n if sys.byteorder == 'little':\n strtype = '>i2'\n else:\n strtype = '<i2'\n mydtype = [('name', strchar + '5'), ('col2', strtype)]\n r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],\n dtype=mydtype)\n r.sort(order='col2')\n assert_equal(r['col2'], [1, 3, 255, 258])\n assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],\n dtype=mydtype))\n\n def test_argsort(self):\n # all c scalar argsorts use the same code with different types\n # so it suffices to run a quick check with one type. The number\n # of sorted items must be greater than ~50 to check the actual\n # algorithm because quick and merge sort fall over to insertion\n # sort for small arrays.\n\n for dtype in [np.int32, np.uint32, np.float32]:\n a = np.arange(101, dtype=dtype)\n b = a[::-1].copy()\n for kind in self.sort_kinds:\n msg = \"scalar argsort, kind=%s, dtype=%s\" % (kind, dtype)\n assert_equal(a.copy().argsort(kind=kind), a, msg)\n assert_equal(b.copy().argsort(kind=kind), b, msg)\n\n # test complex argsorts. These use the same code as the scalars\n # but the compare function differs.\n ai = a*1j + 1\n bi = b*1j + 1\n for kind in self.sort_kinds:\n msg = \"complex argsort, kind=%s\" % kind\n assert_equal(ai.copy().argsort(kind=kind), a, msg)\n assert_equal(bi.copy().argsort(kind=kind), b, msg)\n ai = a + 1j\n bi = b + 1j\n for kind in self.sort_kinds:\n msg = \"complex argsort, kind=%s\" % kind\n assert_equal(ai.copy().argsort(kind=kind), a, msg)\n assert_equal(bi.copy().argsort(kind=kind), b, msg)\n\n # test argsort of complex arrays requiring byte-swapping, gh-5441\n for endianness in '<>':\n for dt in np.typecodes['Complex']:\n arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)\n msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)\n assert_equal(arr.argsort(),\n np.arange(len(arr), dtype=np.intp), msg)\n\n # test string argsorts.\n s = 'aaaaaaaa'\n a = np.array([s + chr(i) for i in range(101)])\n b = a[::-1].copy()\n r = np.arange(101)\n rr = r[::-1]\n for kind in self.sort_kinds:\n msg = \"string argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # test unicode argsorts.\n s = 'aaaaaaaa'\n a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)\n b = a[::-1]\n r = np.arange(101)\n rr = r[::-1]\n for kind in self.sort_kinds:\n msg = \"unicode argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # test object array argsorts.\n a = np.empty((101,), dtype=object)\n a[:] = list(range(101))\n b = a[::-1]\n r = np.arange(101)\n rr = r[::-1]\n for kind in self.sort_kinds:\n msg = \"object argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # test structured array argsorts.\n dt = np.dtype([('f', float), ('i', int)])\n a = np.array([(i, i) for i in range(101)], dtype=dt)\n b = a[::-1]\n r = np.arange(101)\n rr = r[::-1]\n for kind in self.sort_kinds:\n msg = \"structured array argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # 
test datetime64 argsorts.\n a = np.arange(0, 101, dtype='datetime64[D]')\n b = a[::-1]\n r = np.arange(101)\n rr = r[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"datetime64 argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # test timedelta64 argsorts.\n a = np.arange(0, 101, dtype='timedelta64[D]')\n b = a[::-1]\n r = np.arange(101)\n rr = r[::-1]\n for kind in ['q', 'h', 'm']:\n msg = \"timedelta64 argsort, kind=%s\" % kind\n assert_equal(a.copy().argsort(kind=kind), r, msg)\n assert_equal(b.copy().argsort(kind=kind), rr, msg)\n\n # check axis handling. This should be the same for all type\n # specific argsorts, so we only check it for one type and one kind\n a = np.array([[3, 2], [1, 0]])\n b = np.array([[1, 1], [0, 0]])\n c = np.array([[1, 0], [1, 0]])\n assert_equal(a.copy().argsort(axis=0), b)\n assert_equal(a.copy().argsort(axis=1), c)\n assert_equal(a.copy().argsort(), c)\n\n # check axis handling for multidimensional empty arrays\n a = np.array([])\n a.shape = (3, 2, 1, 0)\n for axis in range(-a.ndim, a.ndim):\n msg = 'test empty array argsort with axis={0}'.format(axis)\n assert_equal(np.argsort(a, axis=axis),\n np.zeros_like(a, dtype=np.intp), msg)\n msg = 'test empty array argsort with axis=None'\n assert_equal(np.argsort(a, axis=None),\n np.zeros_like(a.ravel(), dtype=np.intp), msg)\n\n # check that stable argsorts are stable\n r = np.arange(100)\n # scalars\n a = np.zeros(100)\n assert_equal(a.argsort(kind='m'), r)\n # complex\n a = np.zeros(100, dtype=complex)\n assert_equal(a.argsort(kind='m'), r)\n # string\n a = np.array(['aaaaaaaaa' for i in range(100)])\n assert_equal(a.argsort(kind='m'), r)\n # unicode\n a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)\n assert_equal(a.argsort(kind='m'), r)\n\n def test_sort_unicode_kind(self):\n d = np.arange(10)\n k = b'\\xc3\\xa4'.decode(\"UTF8\")\n assert_raises(ValueError, d.sort, kind=k)\n assert_raises(ValueError, d.argsort, kind=k)\n\n def test_searchsorted(self):\n # test for floats and complex containing nans. 
The logic is the\n # same for all float types so only test double types for now.\n # The search sorted routines use the compare functions for the\n # array type, so this checks if that is consistent with the sort\n # order.\n\n # check double\n a = np.array([0, 1, np.nan])\n msg = \"Test real searchsorted with nans, side='l'\"\n b = a.searchsorted(a, side='l')\n assert_equal(b, np.arange(3), msg)\n msg = \"Test real searchsorted with nans, side='r'\"\n b = a.searchsorted(a, side='r')\n assert_equal(b, np.arange(1, 4), msg)\n # check keyword arguments\n a.searchsorted(v=1)\n # check double complex\n a = np.zeros(9, dtype=np.complex128)\n a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]\n a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]\n msg = \"Test complex searchsorted with nans, side='l'\"\n b = a.searchsorted(a, side='l')\n assert_equal(b, np.arange(9), msg)\n msg = \"Test complex searchsorted with nans, side='r'\"\n b = a.searchsorted(a, side='r')\n assert_equal(b, np.arange(1, 10), msg)\n msg = \"Test searchsorted with little endian, side='l'\"\n a = np.array([0, 128], dtype='<i4')\n b = a.searchsorted(np.array(128, dtype='<i4'))\n assert_equal(b, 1, msg)\n msg = \"Test searchsorted with big endian, side='l'\"\n a = np.array([0, 128], dtype='>i4')\n b = a.searchsorted(np.array(128, dtype='>i4'))\n assert_equal(b, 1, msg)\n\n # Check 0 elements\n a = np.ones(0)\n b = a.searchsorted([0, 1, 2], 'l')\n assert_equal(b, [0, 0, 0])\n b = a.searchsorted([0, 1, 2], 'r')\n assert_equal(b, [0, 0, 0])\n a = np.ones(1)\n # Check 1 element\n b = a.searchsorted([0, 1, 2], 'l')\n assert_equal(b, [0, 0, 1])\n b = a.searchsorted([0, 1, 2], 'r')\n assert_equal(b, [0, 1, 1])\n # Check all elements equal\n a = np.ones(2)\n b = a.searchsorted([0, 1, 2], 'l')\n assert_equal(b, [0, 0, 2])\n b = a.searchsorted([0, 1, 2], 'r')\n assert_equal(b, [0, 2, 2])\n\n # Test searching unaligned array\n a = np.arange(10)\n aligned = np.empty(a.itemsize * a.size + 1, 'uint8')\n unaligned = aligned[1:].view(a.dtype)\n unaligned[:] = a\n # Test searching unaligned array\n b = unaligned.searchsorted(a, 'l')\n assert_equal(b, a)\n b = unaligned.searchsorted(a, 'r')\n assert_equal(b, a + 1)\n # Test searching for unaligned keys\n b = a.searchsorted(unaligned, 'l')\n assert_equal(b, a)\n b = a.searchsorted(unaligned, 'r')\n assert_equal(b, a + 1)\n\n # Test smart resetting of binsearch indices\n a = np.arange(5)\n b = a.searchsorted([6, 5, 4], 'l')\n assert_equal(b, [5, 5, 4])\n b = a.searchsorted([6, 5, 4], 'r')\n assert_equal(b, [5, 5, 5])\n\n # Test all type specific binary search functions\n types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],\n np.typecodes['Datetime'], '?O'))\n for dt in types:\n if dt == 'M':\n dt = 'M8[D]'\n if dt == '?':\n a = np.arange(2, dtype=dt)\n out = np.arange(2)\n else:\n a = np.arange(0, 5, dtype=dt)\n out = np.arange(5)\n b = a.searchsorted(a, 'l')\n assert_equal(b, out)\n b = a.searchsorted(a, 'r')\n assert_equal(b, out + 1)\n # Test empty array, use a fresh array to get warnings in\n # valgrind if access happens.\n e = np.ndarray(shape=0, buffer=b'', dtype=dt)\n b = e.searchsorted(a, 'l')\n assert_array_equal(b, np.zeros(len(a), dtype=np.intp))\n b = a.searchsorted(e, 'l')\n assert_array_equal(b, np.zeros(0, dtype=np.intp))\n\n def test_searchsorted_unicode(self):\n # Test searchsorted on unicode strings.\n\n # 1.6.1 contained a string length miscalculation in\n # arraytypes.c.src:UNICODE_compare() which manifested as\n # incorrect/inconsistent results from 
searchsorted.\n a = np.array(['P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100185_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100186_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100187_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100189_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100190_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100191_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100192_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100193_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100194_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100195_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100196_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100197_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100198_1',\n 'P:\\\\20x_dapi_cy3\\\\20x_dapi_cy3_20100199_1'],\n dtype=np.unicode_)\n ind = np.arange(len(a))\n assert_equal([a.searchsorted(v, 'left') for v in a], ind)\n assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)\n assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)\n assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)\n\n def test_searchsorted_with_sorter(self):\n a = np.array([5, 2, 1, 3, 4])\n s = np.argsort(a)\n assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))\n assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])\n assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])\n assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])\n\n # bounds check\n assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])\n assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])\n assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])\n\n a = np.random.rand(300)\n s = a.argsort()\n b = np.sort(a)\n k = np.linspace(0, 1, 20)\n assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))\n\n a = np.array([0, 1, 2, 3, 5]*20)\n s = a.argsort()\n k = [0, 1, 2, 3, 5]\n expected = [0, 20, 40, 60, 80]\n assert_equal(a.searchsorted(k, side='l', sorter=s), expected)\n expected = [20, 40, 60, 80, 100]\n assert_equal(a.searchsorted(k, side='r', sorter=s), expected)\n\n # Test searching unaligned array\n keys = np.arange(10)\n a = keys.copy()\n np.random.shuffle(s)\n s = a.argsort()\n aligned = np.empty(a.itemsize * a.size + 1, 'uint8')\n unaligned = aligned[1:].view(a.dtype)\n # Test searching unaligned array\n unaligned[:] = a\n b = unaligned.searchsorted(keys, 'l', s)\n assert_equal(b, keys)\n b = unaligned.searchsorted(keys, 'r', s)\n assert_equal(b, keys + 1)\n # Test searching for unaligned keys\n unaligned[:] = keys\n b = a.searchsorted(unaligned, 'l', s)\n assert_equal(b, keys)\n b = a.searchsorted(unaligned, 'r', s)\n assert_equal(b, keys + 1)\n\n # Test all type specific indirect binary search functions\n types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],\n np.typecodes['Datetime'], '?O'))\n for dt in types:\n if dt == 'M':\n dt = 'M8[D]'\n if dt == '?':\n a = np.array([1, 0], dtype=dt)\n # We want the sorter array to be of a type that is different\n # from np.intp in all platforms, to check for #4698\n s = np.array([1, 0], dtype=np.int16)\n out = np.array([1, 0])\n else:\n a = np.array([3, 4, 1, 2, 0], dtype=dt)\n # We want the sorter array to be of a type that is different\n # from np.intp in all platforms, to check for #4698\n s = np.array([4, 2, 3, 0, 1], dtype=np.int16)\n out = np.array([3, 4, 1, 2, 0], dtype=np.intp)\n b = a.searchsorted(a, 'l', s)\n assert_equal(b, out)\n b = a.searchsorted(a, 'r', s)\n assert_equal(b, out + 1)\n # Test empty array, 
use a fresh array to get warnings in\n # valgrind if access happens.\n e = np.ndarray(shape=0, buffer=b'', dtype=dt)\n b = e.searchsorted(a, 'l', s[:0])\n assert_array_equal(b, np.zeros(len(a), dtype=np.intp))\n b = a.searchsorted(e, 'l', s)\n assert_array_equal(b, np.zeros(0, dtype=np.intp))\n\n # Test non-contiguous sorter array\n a = np.array([3, 4, 1, 2, 0])\n srt = np.empty((10,), dtype=np.intp)\n srt[1::2] = -1\n srt[::2] = [4, 2, 3, 0, 1]\n s = srt[::2]\n out = np.array([3, 4, 1, 2, 0], dtype=np.intp)\n b = a.searchsorted(a, 'l', s)\n assert_equal(b, out)\n b = a.searchsorted(a, 'r', s)\n assert_equal(b, out + 1)\n\n def test_searchsorted_return_type(self):\n # Functions returning indices should always return base ndarrays\n class A(np.ndarray):\n pass\n a = np.arange(5).view(A)\n b = np.arange(1, 3).view(A)\n s = np.arange(5).view(A)\n assert_(not isinstance(a.searchsorted(b, 'l'), A))\n assert_(not isinstance(a.searchsorted(b, 'r'), A))\n assert_(not isinstance(a.searchsorted(b, 'l', s), A))\n assert_(not isinstance(a.searchsorted(b, 'r', s), A))\n\n def test_argpartition_out_of_range(self):\n # Test out of range values in kth raise an error, gh-5469\n d = np.arange(10)\n assert_raises(ValueError, d.argpartition, 10)\n assert_raises(ValueError, d.argpartition, -11)\n # Test also for generic type argpartition, which uses sorting\n # and used to not bound check kth\n d_obj = np.arange(10, dtype=object)\n assert_raises(ValueError, d_obj.argpartition, 10)\n assert_raises(ValueError, d_obj.argpartition, -11)\n\n def test_partition_out_of_range(self):\n # Test out of range values in kth raise an error, gh-5469\n d = np.arange(10)\n assert_raises(ValueError, d.partition, 10)\n assert_raises(ValueError, d.partition, -11)\n # Test also for generic type partition, which uses sorting\n # and used to not bound check kth\n d_obj = np.arange(10, dtype=object)\n assert_raises(ValueError, d_obj.partition, 10)\n assert_raises(ValueError, d_obj.partition, -11)\n\n def test_argpartition_integer(self):\n # Test non-integer values in kth raise an error/\n d = np.arange(10)\n assert_raises(TypeError, d.argpartition, 9.)\n # Test also for generic type argpartition, which uses sorting\n # and used to not bound check kth\n d_obj = np.arange(10, dtype=object)\n assert_raises(TypeError, d_obj.argpartition, 9.)\n\n def test_partition_integer(self):\n # Test out of range values in kth raise an error, gh-5469\n d = np.arange(10)\n assert_raises(TypeError, d.partition, 9.)\n # Test also for generic type partition, which uses sorting\n # and used to not bound check kth\n d_obj = np.arange(10, dtype=object)\n assert_raises(TypeError, d_obj.partition, 9.)\n\n def test_partition_empty_array(self):\n # check axis handling for multidimensional empty arrays\n a = np.array([])\n a.shape = (3, 2, 1, 0)\n for axis in range(-a.ndim, a.ndim):\n msg = 'test empty array partition with axis={0}'.format(axis)\n assert_equal(np.partition(a, 0, axis=axis), a, msg)\n msg = 'test empty array partition with axis=None'\n assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)\n\n def test_argpartition_empty_array(self):\n # check axis handling for multidimensional empty arrays\n a = np.array([])\n a.shape = (3, 2, 1, 0)\n for axis in range(-a.ndim, a.ndim):\n msg = 'test empty array argpartition with axis={0}'.format(axis)\n assert_equal(np.partition(a, 0, axis=axis),\n np.zeros_like(a, dtype=np.intp), msg)\n msg = 'test empty array argpartition with axis=None'\n assert_equal(np.partition(a, 0, axis=None),\n 
np.zeros_like(a.ravel(), dtype=np.intp), msg)\n\n def test_partition(self):\n d = np.arange(10)\n assert_raises(TypeError, np.partition, d, 2, kind=1)\n assert_raises(ValueError, np.partition, d, 2, kind=\"nonsense\")\n assert_raises(ValueError, np.argpartition, d, 2, kind=\"nonsense\")\n assert_raises(ValueError, d.partition, 2, axis=0, kind=\"nonsense\")\n assert_raises(ValueError, d.argpartition, 2, axis=0, kind=\"nonsense\")\n for k in (\"introselect\",):\n d = np.array([])\n assert_array_equal(np.partition(d, 0, kind=k), d)\n assert_array_equal(np.argpartition(d, 0, kind=k), d)\n d = np.ones(1)\n assert_array_equal(np.partition(d, 0, kind=k)[0], d)\n assert_array_equal(d[np.argpartition(d, 0, kind=k)],\n np.partition(d, 0, kind=k))\n\n # kth not modified\n kth = np.array([30, 15, 5])\n okth = kth.copy()\n np.partition(np.arange(40), kth)\n assert_array_equal(kth, okth)\n\n for r in ([2, 1], [1, 2], [1, 1]):\n d = np.array(r)\n tgt = np.sort(d)\n assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])\n assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])\n assert_array_equal(d[np.argpartition(d, 0, kind=k)],\n np.partition(d, 0, kind=k))\n assert_array_equal(d[np.argpartition(d, 1, kind=k)],\n np.partition(d, 1, kind=k))\n for i in range(d.size):\n d[i:].partition(0, kind=k)\n assert_array_equal(d, tgt)\n\n for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],\n [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):\n d = np.array(r)\n tgt = np.sort(d)\n assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])\n assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])\n assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])\n assert_array_equal(d[np.argpartition(d, 0, kind=k)],\n np.partition(d, 0, kind=k))\n assert_array_equal(d[np.argpartition(d, 1, kind=k)],\n np.partition(d, 1, kind=k))\n assert_array_equal(d[np.argpartition(d, 2, kind=k)],\n np.partition(d, 2, kind=k))\n for i in range(d.size):\n d[i:].partition(0, kind=k)\n assert_array_equal(d, tgt)\n\n d = np.ones(50)\n assert_array_equal(np.partition(d, 0, kind=k), d)\n assert_array_equal(d[np.argpartition(d, 0, kind=k)],\n np.partition(d, 0, kind=k))\n\n # sorted\n d = np.arange(49)\n assert_equal(np.partition(d, 5, kind=k)[5], 5)\n assert_equal(np.partition(d, 15, kind=k)[15], 15)\n assert_array_equal(d[np.argpartition(d, 5, kind=k)],\n np.partition(d, 5, kind=k))\n assert_array_equal(d[np.argpartition(d, 15, kind=k)],\n np.partition(d, 15, kind=k))\n\n # rsorted\n d = np.arange(47)[::-1]\n assert_equal(np.partition(d, 6, kind=k)[6], 6)\n assert_equal(np.partition(d, 16, kind=k)[16], 16)\n assert_array_equal(d[np.argpartition(d, 6, kind=k)],\n np.partition(d, 6, kind=k))\n assert_array_equal(d[np.argpartition(d, 16, kind=k)],\n np.partition(d, 16, kind=k))\n\n assert_array_equal(np.partition(d, -6, kind=k),\n np.partition(d, 41, kind=k))\n assert_array_equal(np.partition(d, -16, kind=k),\n np.partition(d, 31, kind=k))\n assert_array_equal(d[np.argpartition(d, -6, kind=k)],\n np.partition(d, 41, kind=k))\n\n # median of 3 killer, O(n^2) on pure median 3 pivot quickselect\n # exercises the median of median of 5 code used to keep O(n)\n d = np.arange(1000000)\n x = np.roll(d, d.size // 2)\n mid = x.size // 2 + 1\n assert_equal(np.partition(x, mid)[mid], mid)\n d = np.arange(1000001)\n x = np.roll(d, d.size // 2 + 1)\n mid = x.size // 2 + 1\n assert_equal(np.partition(x, mid)[mid], mid)\n\n # max\n d = np.ones(10)\n d[1] = 4\n assert_equal(np.partition(d, (2, -1))[-1], 4)\n assert_equal(np.partition(d, (2, -1))[2], 1)\n 
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)\n assert_equal(d[np.argpartition(d, (2, -1))][2], 1)\n d[1] = np.nan\n assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))\n assert_(np.isnan(np.partition(d, (2, -1))[-1]))\n\n # equal elements\n d = np.arange(47) % 7\n tgt = np.sort(np.arange(47) % 7)\n np.random.shuffle(d)\n for i in range(d.size):\n assert_equal(np.partition(d, i, kind=k)[i], tgt[i])\n assert_array_equal(d[np.argpartition(d, 6, kind=k)],\n np.partition(d, 6, kind=k))\n assert_array_equal(d[np.argpartition(d, 16, kind=k)],\n np.partition(d, 16, kind=k))\n for i in range(d.size):\n d[i:].partition(0, kind=k)\n assert_array_equal(d, tgt)\n\n d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,\n 7, 7, 7, 7, 7, 9])\n kth = [0, 3, 19, 20]\n assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))\n assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))\n\n d = np.array([2, 1])\n d.partition(0, kind=k)\n assert_raises(ValueError, d.partition, 2)\n assert_raises(np.AxisError, d.partition, 3, axis=1)\n assert_raises(ValueError, np.partition, d, 2)\n assert_raises(np.AxisError, np.partition, d, 2, axis=1)\n assert_raises(ValueError, d.argpartition, 2)\n assert_raises(np.AxisError, d.argpartition, 3, axis=1)\n assert_raises(ValueError, np.argpartition, d, 2)\n assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)\n d = np.arange(10).reshape((2, 5))\n d.partition(1, axis=0, kind=k)\n d.partition(4, axis=1, kind=k)\n np.partition(d, 1, axis=0, kind=k)\n np.partition(d, 4, axis=1, kind=k)\n np.partition(d, 1, axis=None, kind=k)\n np.partition(d, 9, axis=None, kind=k)\n d.argpartition(1, axis=0, kind=k)\n d.argpartition(4, axis=1, kind=k)\n np.argpartition(d, 1, axis=0, kind=k)\n np.argpartition(d, 4, axis=1, kind=k)\n np.argpartition(d, 1, axis=None, kind=k)\n np.argpartition(d, 9, axis=None, kind=k)\n assert_raises(ValueError, d.partition, 2, axis=0)\n assert_raises(ValueError, d.partition, 11, axis=1)\n assert_raises(TypeError, d.partition, 2, axis=None)\n assert_raises(ValueError, np.partition, d, 9, axis=1)\n assert_raises(ValueError, np.partition, d, 11, axis=None)\n assert_raises(ValueError, d.argpartition, 2, axis=0)\n assert_raises(ValueError, d.argpartition, 11, axis=1)\n assert_raises(ValueError, np.argpartition, d, 9, axis=1)\n assert_raises(ValueError, np.argpartition, d, 11, axis=None)\n\n td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]\n for s in (9, 16)]\n for dt, s in td:\n aae = assert_array_equal\n at = assert_\n\n d = np.arange(s, dtype=dt)\n np.random.shuffle(d)\n d1 = np.tile(np.arange(s, dtype=dt), (4, 1))\n map(np.random.shuffle, d1)\n d0 = np.transpose(d1)\n for i in range(d.size):\n p = np.partition(d, i, kind=k)\n assert_equal(p[i], i)\n # all before are smaller\n assert_array_less(p[:i], p[i])\n # all after are larger\n assert_array_less(p[i], p[i + 1:])\n aae(p, d[np.argpartition(d, i, kind=k)])\n\n p = np.partition(d1, i, axis=1, kind=k)\n aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))\n # array_less does not seem to work right\n at((p[:, :i].T <= p[:, i]).all(),\n msg=\"%d: %r <= %r\" % (i, p[:, i], p[:, :i].T))\n at((p[:, i + 1:].T > p[:, i]).all(),\n msg=\"%d: %r < %r\" % (i, p[:, i], p[:, i + 1:].T))\n aae(p, d1[np.arange(d1.shape[0])[:, None],\n np.argpartition(d1, i, axis=1, kind=k)])\n\n p = np.partition(d0, i, axis=0, kind=k)\n aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))\n # array_less does not seem to work right\n at((p[:i, :] <= p[i, :]).all(),\n msg=\"%d: %r <= %r\" % (i, p[i, :], 
p[:i, :]))\n at((p[i + 1:, :] > p[i, :]).all(),\n msg=\"%d: %r < %r\" % (i, p[i, :], p[:, i + 1:]))\n aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),\n np.arange(d0.shape[1])[None, :]])\n\n # check inplace\n dc = d.copy()\n dc.partition(i, kind=k)\n assert_equal(dc, np.partition(d, i, kind=k))\n dc = d0.copy()\n dc.partition(i, axis=0, kind=k)\n assert_equal(dc, np.partition(d0, i, axis=0, kind=k))\n dc = d1.copy()\n dc.partition(i, axis=1, kind=k)\n assert_equal(dc, np.partition(d1, i, axis=1, kind=k))\n\n def assert_partitioned(self, d, kth):\n prev = 0\n for k in np.sort(kth):\n assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)\n assert_((d[k:] >= d[k]).all(),\n msg=\"kth %d, %r not greater equal %d\" % (k, d[k:], d[k]))\n prev = k + 1\n\n def test_partition_iterative(self):\n d = np.arange(17)\n kth = (0, 1, 2, 429, 231)\n assert_raises(ValueError, d.partition, kth)\n assert_raises(ValueError, d.argpartition, kth)\n d = np.arange(10).reshape((2, 5))\n assert_raises(ValueError, d.partition, kth, axis=0)\n assert_raises(ValueError, d.partition, kth, axis=1)\n assert_raises(ValueError, np.partition, d, kth, axis=1)\n assert_raises(ValueError, np.partition, d, kth, axis=None)\n\n d = np.array([3, 4, 2, 1])\n p = np.partition(d, (0, 3))\n self.assert_partitioned(p, (0, 3))\n self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))\n\n assert_array_equal(p, np.partition(d, (-3, -1)))\n assert_array_equal(p, d[np.argpartition(d, (-3, -1))])\n\n d = np.arange(17)\n np.random.shuffle(d)\n d.partition(range(d.size))\n assert_array_equal(np.arange(17), d)\n np.random.shuffle(d)\n assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])\n\n # test unsorted kth\n d = np.arange(17)\n np.random.shuffle(d)\n keys = np.array([1, 3, 8, -2])\n np.random.shuffle(d)\n p = np.partition(d, keys)\n self.assert_partitioned(p, keys)\n p = d[np.argpartition(d, keys)]\n self.assert_partitioned(p, keys)\n np.random.shuffle(keys)\n assert_array_equal(np.partition(d, keys), p)\n assert_array_equal(d[np.argpartition(d, keys)], p)\n\n # equal kth\n d = np.arange(20)[::-1]\n self.assert_partitioned(np.partition(d, [5]*4), [5])\n self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),\n [5]*4 + [6, 13])\n self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])\n self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],\n [5]*4 + [6, 13])\n\n d = np.arange(12)\n np.random.shuffle(d)\n d1 = np.tile(np.arange(12), (4, 1))\n map(np.random.shuffle, d1)\n d0 = np.transpose(d1)\n\n kth = (1, 6, 7, -1)\n p = np.partition(d1, kth, axis=1)\n pa = d1[np.arange(d1.shape[0])[:, None],\n d1.argpartition(kth, axis=1)]\n assert_array_equal(p, pa)\n for i in range(d1.shape[0]):\n self.assert_partitioned(p[i,:], kth)\n p = np.partition(d0, kth, axis=0)\n pa = d0[np.argpartition(d0, kth, axis=0),\n np.arange(d0.shape[1])[None,:]]\n assert_array_equal(p, pa)\n for i in range(d0.shape[1]):\n self.assert_partitioned(p[:, i], kth)\n\n def test_partition_cdtype(self):\n d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),\n ('Lancelot', 1.9, 38)],\n dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])\n\n tgt = np.sort(d, order=['age', 'height'])\n assert_array_equal(np.partition(d, range(d.size),\n order=['age', 'height']),\n tgt)\n assert_array_equal(d[np.argpartition(d, range(d.size),\n order=['age', 'height'])],\n tgt)\n for k in range(d.size):\n assert_equal(np.partition(d, k, order=['age', 'height'])[k],\n tgt[k])\n assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],\n 
tgt[k])\n\n d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])\n tgt = np.sort(d)\n assert_array_equal(np.partition(d, range(d.size)), tgt)\n for k in range(d.size):\n assert_equal(np.partition(d, k)[k], tgt[k])\n assert_equal(d[np.argpartition(d, k)][k], tgt[k])\n\n def test_partition_unicode_kind(self):\n d = np.arange(10)\n k = b'\\xc3\\xa4'.decode(\"UTF8\")\n assert_raises(ValueError, d.partition, 2, kind=k)\n assert_raises(ValueError, d.argpartition, 2, kind=k)\n\n def test_partition_fuzz(self):\n # a few rounds of random data testing\n for j in range(10, 30):\n for i in range(1, j - 2):\n d = np.arange(j)\n np.random.shuffle(d)\n d = d % np.random.randint(2, 30)\n idx = np.random.randint(d.size)\n kth = [0, idx, i, i + 1]\n tgt = np.sort(d)[kth]\n assert_array_equal(np.partition(d, kth)[kth], tgt,\n err_msg=\"data: %r\\n kth: %r\" % (d, kth))\n\n def test_argpartition_gh5524(self):\n # A test for functionality of argpartition on lists.\n d = [6,7,3,2,9,0]\n p = np.argpartition(d,1)\n self.assert_partitioned(np.array(d)[p],[1])\n\n def test_flatten(self):\n x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)\n x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)\n y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)\n y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)\n y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)\n y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)\n assert_equal(x0.flatten(), y0)\n assert_equal(x0.flatten('F'), y0f)\n assert_equal(x0.flatten('F'), x0.T.flatten())\n assert_equal(x1.flatten(), y1)\n assert_equal(x1.flatten('F'), y1f)\n assert_equal(x1.flatten('F'), x1.T.flatten())\n\n def test_flatten_invalid_order(self):\n # invalid after gh-14596\n for order in ['Z', 'c', False, True, 0, 8]:\n x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)\n assert_raises(ValueError, x.flatten, {\"order\": order})\n\n @pytest.mark.parametrize('func', (np.dot, np.matmul))\n def test_arr_mult(self, func):\n a = np.array([[1, 0], [0, 1]])\n b = np.array([[0, 1], [1, 0]])\n c = np.array([[9, 1], [1, -9]])\n d = np.arange(24).reshape(4, 6)\n ddt = np.array(\n [[ 55, 145, 235, 325],\n [ 145, 451, 757, 1063],\n [ 235, 757, 1279, 1801],\n [ 325, 1063, 1801, 2539]]\n )\n dtd = np.array(\n [[504, 540, 576, 612, 648, 684],\n [540, 580, 620, 660, 700, 740],\n [576, 620, 664, 708, 752, 796],\n [612, 660, 708, 756, 804, 852],\n [648, 700, 752, 804, 856, 908],\n [684, 740, 796, 852, 908, 964]]\n )\n\n\n # gemm vs syrk optimizations\n for et in [np.float32, np.float64, np.complex64, np.complex128]:\n eaf = a.astype(et)\n assert_equal(func(eaf, eaf), eaf)\n assert_equal(func(eaf.T, eaf), eaf)\n assert_equal(func(eaf, eaf.T), eaf)\n assert_equal(func(eaf.T, eaf.T), eaf)\n assert_equal(func(eaf.T.copy(), eaf), eaf)\n assert_equal(func(eaf, eaf.T.copy()), eaf)\n assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)\n\n # syrk validations\n for et in [np.float32, np.float64, np.complex64, np.complex128]:\n eaf = a.astype(et)\n ebf = b.astype(et)\n assert_equal(func(ebf, ebf), eaf)\n assert_equal(func(ebf.T, ebf), eaf)\n assert_equal(func(ebf, ebf.T), eaf)\n assert_equal(func(ebf.T, ebf.T), eaf)\n\n # syrk - different shape, stride, and view validations\n for et in [np.float32, np.float64, np.complex64, np.complex128]:\n edf = d.astype(et)\n assert_equal(\n func(edf[::-1, :], edf.T),\n func(edf[::-1, :].copy(), edf.T.copy())\n )\n assert_equal(\n func(edf[:, ::-1], edf.T),\n func(edf[:, ::-1].copy(), edf.T.copy())\n )\n assert_equal(\n func(edf, edf[::-1, :].T),\n func(edf, edf[::-1, :].T.copy())\n )\n 
assert_equal(\n func(edf, edf[:, ::-1].T),\n func(edf, edf[:, ::-1].T.copy())\n )\n assert_equal(\n func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),\n func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())\n )\n assert_equal(\n func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),\n func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())\n )\n\n # syrk - different shape\n for et in [np.float32, np.float64, np.complex64, np.complex128]:\n edf = d.astype(et)\n eddtf = ddt.astype(et)\n edtdf = dtd.astype(et)\n assert_equal(func(edf, edf.T), eddtf)\n assert_equal(func(edf.T, edf), edtdf)\n\n @pytest.mark.parametrize('func', (np.dot, np.matmul))\n @pytest.mark.parametrize('dtype', 'ifdFD')\n def test_no_dgemv(self, func, dtype):\n # check vector arg for contiguous before gemv\n # gh-12156\n a = np.arange(8.0, dtype=dtype).reshape(2, 4)\n b = np.broadcast_to(1., (4, 1))\n ret1 = func(a, b)\n ret2 = func(a, b.copy())\n assert_equal(ret1, ret2)\n\n ret1 = func(b.T, a.T)\n ret2 = func(b.T.copy(), a.T)\n assert_equal(ret1, ret2)\n\n # check for unaligned data\n dt = np.dtype(dtype)\n a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)\n a = a.reshape(2, 4)\n b = a[0]\n # make sure it is not aligned\n assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)\n ret1 = func(a, b)\n ret2 = func(a.copy(), b.copy())\n assert_equal(ret1, ret2)\n\n ret1 = func(b.T, a.T)\n ret2 = func(b.T.copy(), a.T.copy())\n assert_equal(ret1, ret2)\n\n def test_dot(self):\n a = np.array([[1, 0], [0, 1]])\n b = np.array([[0, 1], [1, 0]])\n c = np.array([[9, 1], [1, -9]])\n # function versus methods\n assert_equal(np.dot(a, b), a.dot(b))\n assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))\n\n # test passing in an output array\n c = np.zeros_like(a)\n a.dot(b, c)\n assert_equal(c, np.dot(a, b))\n\n # test keyword args\n c = np.zeros_like(a)\n a.dot(b=b, out=c)\n assert_equal(c, np.dot(a, b))\n\n def test_dot_type_mismatch(self):\n c = 1.\n A = np.array((1,1), dtype='i,i')\n\n assert_raises(TypeError, np.dot, c, A)\n assert_raises(TypeError, np.dot, A, c)\n\n def test_dot_out_mem_overlap(self):\n np.random.seed(1)\n\n # Test BLAS and non-BLAS code paths, including all dtypes\n # that dot() supports\n dtypes = [np.dtype(code) for code in np.typecodes['All']\n if code not in 'USVM']\n for dtype in dtypes:\n a = np.random.rand(3, 3).astype(dtype)\n\n # Valid dot() output arrays must be aligned\n b = _aligned_zeros((3, 3), dtype=dtype)\n b[...] 
= np.random.rand(3, 3)\n\n y = np.dot(a, b)\n x = np.dot(a, b, out=b)\n assert_equal(x, y, err_msg=repr(dtype))\n\n # Check invalid output array\n assert_raises(ValueError, np.dot, a, b, out=b[::2])\n assert_raises(ValueError, np.dot, a, b, out=b.T)\n\n def test_dot_matmul_out(self):\n # gh-9641\n class Sub(np.ndarray):\n pass\n a = np.ones((2, 2)).view(Sub)\n b = np.ones((2, 2)).view(Sub)\n out = np.ones((2, 2))\n\n # make sure out can be any ndarray (not only subclass of inputs)\n np.dot(a, b, out=out)\n np.matmul(a, b, out=out)\n\n def test_dot_matmul_inner_array_casting_fails(self):\n\n class A:\n def __array__(self, *args, **kwargs):\n raise NotImplementedError\n\n # Don't override the error from calling __array__()\n assert_raises(NotImplementedError, np.dot, A(), A())\n assert_raises(NotImplementedError, np.matmul, A(), A())\n assert_raises(NotImplementedError, np.inner, A(), A())\n\n def test_matmul_out(self):\n # overlapping memory\n a = np.arange(18).reshape(2, 3, 3)\n b = np.matmul(a, a)\n c = np.matmul(a, a, out=a)\n assert_(c is a)\n assert_equal(c, b)\n a = np.arange(18).reshape(2, 3, 3)\n c = np.matmul(a, a, out=a[::-1, ...])\n assert_(c.base is a.base)\n assert_equal(c, b)\n\n def test_diagonal(self):\n a = np.arange(12).reshape((3, 4))\n assert_equal(a.diagonal(), [0, 5, 10])\n assert_equal(a.diagonal(0), [0, 5, 10])\n assert_equal(a.diagonal(1), [1, 6, 11])\n assert_equal(a.diagonal(-1), [4, 9])\n assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)\n assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)\n assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)\n assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)\n\n b = np.arange(8).reshape((2, 2, 2))\n assert_equal(b.diagonal(), [[0, 6], [1, 7]])\n assert_equal(b.diagonal(0), [[0, 6], [1, 7]])\n assert_equal(b.diagonal(1), [[2], [3]])\n assert_equal(b.diagonal(-1), [[4], [5]])\n assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)\n assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])\n assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])\n assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])\n # Order of axis argument doesn't matter:\n assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])\n\n def test_diagonal_view_notwriteable(self):\n a = np.eye(3).diagonal()\n assert_(not a.flags.writeable)\n assert_(not a.flags.owndata)\n\n a = np.diagonal(np.eye(3))\n assert_(not a.flags.writeable)\n assert_(not a.flags.owndata)\n\n a = np.diag(np.eye(3))\n assert_(not a.flags.writeable)\n assert_(not a.flags.owndata)\n\n def test_diagonal_memleak(self):\n # Regression test for a bug that crept in at one point\n a = np.zeros((100, 100))\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(a) < 50)\n for i in range(100):\n a.diagonal()\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(a) < 50)\n\n def test_size_zero_memleak(self):\n # Regression test for issue 9615\n # Exercises a special-case code path for dot products of length\n # zero in cblasfuncs (making it is specific to floating dtypes).\n a = np.array([], dtype=np.float64)\n x = np.array(2.0)\n for _ in range(100):\n np.dot(a, a, out=x)\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(x) < 50)\n\n def test_trace(self):\n a = np.arange(12).reshape((3, 4))\n assert_equal(a.trace(), 15)\n assert_equal(a.trace(0), 15)\n assert_equal(a.trace(1), 18)\n assert_equal(a.trace(-1), 13)\n\n b = np.arange(8).reshape((2, 2, 2))\n assert_equal(b.trace(), [6, 8])\n assert_equal(b.trace(0), [6, 8])\n assert_equal(b.trace(1), [2, 3])\n assert_equal(b.trace(-1), [4, 5])\n 
assert_equal(b.trace(0, 0, 1), [6, 8])\n assert_equal(b.trace(0, 0, 2), [5, 9])\n assert_equal(b.trace(0, 1, 2), [3, 11])\n assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])\n\n def test_trace_subclass(self):\n # The class would need to overwrite trace to ensure single-element\n # output also has the right subclass.\n class MyArray(np.ndarray):\n pass\n\n b = np.arange(8).reshape((2, 2, 2)).view(MyArray)\n t = b.trace()\n assert_(isinstance(t, MyArray))\n\n def test_put(self):\n icodes = np.typecodes['AllInteger']\n fcodes = np.typecodes['AllFloat']\n for dt in icodes + fcodes + 'O':\n tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)\n\n # test 1-d\n a = np.zeros(6, dtype=dt)\n a.put([1, 3, 5], [1, 3, 5])\n assert_equal(a, tgt)\n\n # test 2-d\n a = np.zeros((2, 3), dtype=dt)\n a.put([1, 3, 5], [1, 3, 5])\n assert_equal(a, tgt.reshape(2, 3))\n\n for dt in '?':\n tgt = np.array([False, True, False, True, False, True], dtype=dt)\n\n # test 1-d\n a = np.zeros(6, dtype=dt)\n a.put([1, 3, 5], [True]*3)\n assert_equal(a, tgt)\n\n # test 2-d\n a = np.zeros((2, 3), dtype=dt)\n a.put([1, 3, 5], [True]*3)\n assert_equal(a, tgt.reshape(2, 3))\n\n # check must be writeable\n a = np.zeros(6)\n a.flags.writeable = False\n assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])\n\n # when calling np.put, make sure a\n # TypeError is raised if the object\n # isn't an ndarray\n bad_array = [1, 2, 3]\n assert_raises(TypeError, np.put, bad_array, [0, 2], 5)\n\n def test_ravel(self):\n a = np.array([[0, 1], [2, 3]])\n assert_equal(a.ravel(), [0, 1, 2, 3])\n assert_(not a.ravel().flags.owndata)\n assert_equal(a.ravel('F'), [0, 2, 1, 3])\n assert_equal(a.ravel(order='C'), [0, 1, 2, 3])\n assert_equal(a.ravel(order='F'), [0, 2, 1, 3])\n assert_equal(a.ravel(order='A'), [0, 1, 2, 3])\n assert_(not a.ravel(order='A').flags.owndata)\n assert_equal(a.ravel(order='K'), [0, 1, 2, 3])\n assert_(not a.ravel(order='K').flags.owndata)\n assert_equal(a.ravel(), a.reshape(-1))\n\n a = np.array([[0, 1], [2, 3]], order='F')\n assert_equal(a.ravel(), [0, 1, 2, 3])\n assert_equal(a.ravel(order='A'), [0, 2, 1, 3])\n assert_equal(a.ravel(order='K'), [0, 2, 1, 3])\n assert_(not a.ravel(order='A').flags.owndata)\n assert_(not a.ravel(order='K').flags.owndata)\n assert_equal(a.ravel(), a.reshape(-1))\n assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))\n\n a = np.array([[0, 1], [2, 3]])[::-1, :]\n assert_equal(a.ravel(), [2, 3, 0, 1])\n assert_equal(a.ravel(order='C'), [2, 3, 0, 1])\n assert_equal(a.ravel(order='F'), [2, 0, 3, 1])\n assert_equal(a.ravel(order='A'), [2, 3, 0, 1])\n # 'K' doesn't reverse the axes of negative strides\n assert_equal(a.ravel(order='K'), [2, 3, 0, 1])\n assert_(a.ravel(order='K').flags.owndata)\n\n # Test simple 1-d copy behaviour:\n a = np.arange(10)[::2]\n assert_(a.ravel('K').flags.owndata)\n assert_(a.ravel('C').flags.owndata)\n assert_(a.ravel('F').flags.owndata)\n\n # Not contiguous and 1-sized axis with non matching stride\n a = np.arange(2**3 * 2)[::2]\n a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)\n strides = list(a.strides)\n strides[1] = 123\n a.strides = strides\n assert_(a.ravel(order='K').flags.owndata)\n assert_equal(a.ravel('K'), np.arange(0, 15, 2))\n\n # contiguous and 1-sized axis with non matching stride works:\n a = np.arange(2**3)\n a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)\n strides = list(a.strides)\n strides[1] = 123\n a.strides = strides\n assert_(np.may_share_memory(a.ravel(order='K'), a))\n assert_equal(a.ravel(order='K'), np.arange(2**3))\n\n # Test negative strides 
(not very interesting since non-contiguous):\n a = np.arange(4)[::-1].reshape(2, 2)\n assert_(a.ravel(order='C').flags.owndata)\n assert_(a.ravel(order='K').flags.owndata)\n assert_equal(a.ravel('C'), [3, 2, 1, 0])\n assert_equal(a.ravel('K'), [3, 2, 1, 0])\n\n # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):\n a = np.array([[1]])\n a.strides = (123, 432)\n # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing\n # them up on purpose:\n if np.ones(1).strides == (8,):\n assert_(np.may_share_memory(a.ravel('K'), a))\n assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))\n\n for order in ('C', 'F', 'A', 'K'):\n # 0-d corner case:\n a = np.array(0)\n assert_equal(a.ravel(order), [0])\n assert_(np.may_share_memory(a.ravel(order), a))\n\n # Test that certain non-inplace ravels work right (mostly) for 'K':\n b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)\n a = b[..., ::2]\n assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])\n assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])\n assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])\n assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])\n\n a = b[::2, ...]\n assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])\n assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])\n assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])\n assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])\n\n def test_ravel_subclass(self):\n class ArraySubclass(np.ndarray):\n pass\n\n a = np.arange(10).view(ArraySubclass)\n assert_(isinstance(a.ravel('C'), ArraySubclass))\n assert_(isinstance(a.ravel('F'), ArraySubclass))\n assert_(isinstance(a.ravel('A'), ArraySubclass))\n assert_(isinstance(a.ravel('K'), ArraySubclass))\n\n a = np.arange(10)[::2].view(ArraySubclass)\n assert_(isinstance(a.ravel('C'), ArraySubclass))\n assert_(isinstance(a.ravel('F'), ArraySubclass))\n assert_(isinstance(a.ravel('A'), ArraySubclass))\n assert_(isinstance(a.ravel('K'), ArraySubclass))\n\n def test_swapaxes(self):\n a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()\n idx = np.indices(a.shape)\n assert_(a.flags['OWNDATA'])\n b = a.copy()\n # check exceptions\n assert_raises(np.AxisError, a.swapaxes, -5, 0)\n assert_raises(np.AxisError, a.swapaxes, 4, 0)\n assert_raises(np.AxisError, a.swapaxes, 0, -5)\n assert_raises(np.AxisError, a.swapaxes, 0, 4)\n\n for i in range(-4, 4):\n for j in range(-4, 4):\n for k, src in enumerate((a, b)):\n c = src.swapaxes(i, j)\n # check shape\n shape = list(src.shape)\n shape[i] = src.shape[j]\n shape[j] = src.shape[i]\n assert_equal(c.shape, shape, str((i, j, k)))\n # check array contents\n i0, i1, i2, i3 = [dim-1 for dim in c.shape]\n j0, j1, j2, j3 = [dim-1 for dim in src.shape]\n assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],\n c[idx[i0], idx[i1], idx[i2], idx[i3]],\n str((i, j, k)))\n # check a view is always returned, gh-5260\n assert_(not c.flags['OWNDATA'], str((i, j, k)))\n # check on non-contiguous input array\n if k == 1:\n b = c\n\n def test_conjugate(self):\n a = np.array([1-1j, 1+1j, 23+23.0j])\n ac = a.conj()\n assert_equal(a.real, ac.real)\n assert_equal(a.imag, -ac.imag)\n assert_equal(ac, a.conjugate())\n assert_equal(ac, np.conjugate(a))\n\n a = np.array([1-1j, 1+1j, 23+23.0j], 'F')\n ac = a.conj()\n assert_equal(a.real, ac.real)\n assert_equal(a.imag, -ac.imag)\n assert_equal(ac, a.conjugate())\n assert_equal(ac, np.conjugate(a))\n\n a = np.array([1, 2, 3])\n ac = a.conj()\n assert_equal(a, ac)\n assert_equal(ac, a.conjugate())\n assert_equal(ac, np.conjugate(a))\n\n a = 
np.array([1.0, 2.0, 3.0])\n        ac = a.conj()\n        assert_equal(a, ac)\n        assert_equal(ac, a.conjugate())\n        assert_equal(ac, np.conjugate(a))\n\n        a = np.array([1-1j, 1+1j, 1, 2.0], object)\n        ac = a.conj()\n        assert_equal(ac, [k.conjugate() for k in a])\n        assert_equal(ac, a.conjugate())\n        assert_equal(ac, np.conjugate(a))\n\n        a = np.array([1-1j, 1, 2.0, 'f'], object)\n        assert_raises(TypeError, lambda: a.conj())\n        assert_raises(TypeError, lambda: a.conjugate())\n\n    def test__complex__(self):\n        dtypes = ['i1', 'i2', 'i4', 'i8',\n                  'u1', 'u2', 'u4', 'u8',\n                  'f', 'd', 'g', 'F', 'D', 'G',\n                  '?', 'O']\n        for dt in dtypes:\n            a = np.array(7, dtype=dt)\n            b = np.array([7], dtype=dt)\n            c = np.array([[[[[7]]]]], dtype=dt)\n\n            msg = 'dtype: {0}'.format(dt)\n            ap = complex(a)\n            assert_equal(ap, a, msg)\n            bp = complex(b)\n            assert_equal(bp, b, msg)\n            cp = complex(c)\n            assert_equal(cp, c, msg)\n\n    def test__complex__should_not_work(self):\n        dtypes = ['i1', 'i2', 'i4', 'i8',\n                  'u1', 'u2', 'u4', 'u8',\n                  'f', 'd', 'g', 'F', 'D', 'G',\n                  '?', 'O']\n        for dt in dtypes:\n            a = np.array([1, 2, 3], dtype=dt)\n            assert_raises(TypeError, complex, a)\n\n        dt = np.dtype([('a', 'f8'), ('b', 'i1')])\n        b = np.array((1.0, 3), dtype=dt)\n        assert_raises(TypeError, complex, b)\n\n        c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)\n        assert_raises(TypeError, complex, c)\n\n        d = np.array('1+1j')\n        assert_raises(TypeError, complex, d)\n\n        e = np.array(['1+1j'], 'U')\n        assert_raises(TypeError, complex, e)\n\nclass TestSequenceMethods:\n    def test_array_contains(self):\n        assert_(4.0 in np.arange(16.).reshape(4,4))\n        assert_(20.0 not in np.arange(16.).reshape(4,4))\n\nclass TestBinop:\n    def test_inplace(self):\n        # test refcount 1 inplace conversion\n        assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),\n                                  [0.5, 1.0])\n\n        d = np.array([0.5, 0.5])[::2]\n        assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),\n                                  [0.25, 0.5])\n\n        a = np.array([0.5])\n        b = np.array([0.5])\n        c = a + b\n        c = a - b\n        c = a * b\n        c = a / b\n        assert_equal(a, b)\n        assert_almost_equal(c, 1.)\n\n        c = a + b * 2. 
/ b * a - a / b\n assert_equal(a, b)\n assert_equal(c, 0.5)\n\n # true divide\n a = np.array([5])\n b = np.array([3])\n c = (a * a) / b\n\n assert_almost_equal(c, 25 / 3)\n assert_equal(a, 5)\n assert_equal(b, 3)\n\n # ndarray.__rop__ always calls ufunc\n # ndarray.__iop__ always calls ufunc\n # ndarray.__op__, __rop__:\n # - defer if other has __array_ufunc__ and it is None\n # or other is not a subclass and has higher array priority\n # - else, call ufunc\n def test_ufunc_binop_interaction(self):\n # Python method name (without underscores)\n # -> (numpy ufunc, has_in_place_version, preferred_dtype)\n ops = {\n 'add': (np.add, True, float),\n 'sub': (np.subtract, True, float),\n 'mul': (np.multiply, True, float),\n 'truediv': (np.true_divide, True, float),\n 'floordiv': (np.floor_divide, True, float),\n 'mod': (np.remainder, True, float),\n 'divmod': (np.divmod, False, float),\n 'pow': (np.power, True, int),\n 'lshift': (np.left_shift, True, int),\n 'rshift': (np.right_shift, True, int),\n 'and': (np.bitwise_and, True, int),\n 'xor': (np.bitwise_xor, True, int),\n 'or': (np.bitwise_or, True, int),\n # 'ge': (np.less_equal, False),\n # 'gt': (np.less, False),\n # 'le': (np.greater_equal, False),\n # 'lt': (np.greater, False),\n # 'eq': (np.equal, False),\n # 'ne': (np.not_equal, False),\n }\n if sys.version_info >= (3, 5):\n ops['matmul'] = (np.matmul, False, float)\n\n class Coerced(Exception):\n pass\n\n def array_impl(self):\n raise Coerced\n\n def op_impl(self, other):\n return \"forward\"\n\n def rop_impl(self, other):\n return \"reverse\"\n\n def iop_impl(self, other):\n return \"in-place\"\n\n def array_ufunc_impl(self, ufunc, method, *args, **kwargs):\n return (\"__array_ufunc__\", ufunc, method, args, kwargs)\n\n # Create an object with the given base, in the given module, with a\n # bunch of placeholder __op__ methods, and optionally a\n # __array_ufunc__ and __array_priority__.\n def make_obj(base, array_priority=False, array_ufunc=False,\n alleged_module=\"__main__\"):\n class_namespace = {\"__array__\": array_impl}\n if array_priority is not False:\n class_namespace[\"__array_priority__\"] = array_priority\n for op in ops:\n class_namespace[\"__{0}__\".format(op)] = op_impl\n class_namespace[\"__r{0}__\".format(op)] = rop_impl\n class_namespace[\"__i{0}__\".format(op)] = iop_impl\n if array_ufunc is not False:\n class_namespace[\"__array_ufunc__\"] = array_ufunc\n eval_namespace = {\"base\": base,\n \"class_namespace\": class_namespace,\n \"__name__\": alleged_module,\n }\n MyType = eval(\"type('MyType', (base,), class_namespace)\",\n eval_namespace)\n if issubclass(MyType, np.ndarray):\n # Use this range to avoid special case weirdnesses around\n # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.\n return np.arange(3, 7).reshape(2, 2).view(MyType)\n else:\n return MyType()\n\n def check(obj, binop_override_expected, ufunc_override_expected,\n inplace_override_expected, check_scalar=True):\n for op, (ufunc, has_inplace, dtype) in ops.items():\n err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'\n % (op, ufunc, has_inplace, dtype))\n check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]\n if check_scalar:\n check_objs.append(check_objs[0][0])\n for arr in check_objs:\n arr_method = getattr(arr, \"__{0}__\".format(op))\n\n def first_out_arg(result):\n if op == \"divmod\":\n assert_(isinstance(result, tuple))\n return result[0]\n else:\n return result\n\n # arr __op__ obj\n if binop_override_expected:\n assert_equal(arr_method(obj), NotImplemented, 
err_msg)\n elif ufunc_override_expected:\n assert_equal(arr_method(obj)[0], \"__array_ufunc__\",\n err_msg)\n else:\n if (isinstance(obj, np.ndarray) and\n (type(obj).__array_ufunc__ is\n np.ndarray.__array_ufunc__)):\n # __array__ gets ignored\n res = first_out_arg(arr_method(obj))\n assert_(res.__class__ is obj.__class__, err_msg)\n else:\n assert_raises((TypeError, Coerced),\n arr_method, obj, err_msg=err_msg)\n # obj __op__ arr\n arr_rmethod = getattr(arr, \"__r{0}__\".format(op))\n if ufunc_override_expected:\n res = arr_rmethod(obj)\n assert_equal(res[0], \"__array_ufunc__\",\n err_msg=err_msg)\n assert_equal(res[1], ufunc, err_msg=err_msg)\n else:\n if (isinstance(obj, np.ndarray) and\n (type(obj).__array_ufunc__ is\n np.ndarray.__array_ufunc__)):\n # __array__ gets ignored\n res = first_out_arg(arr_rmethod(obj))\n assert_(res.__class__ is obj.__class__, err_msg)\n else:\n # __array_ufunc__ = \"asdf\" creates a TypeError\n assert_raises((TypeError, Coerced),\n arr_rmethod, obj, err_msg=err_msg)\n\n # arr __iop__ obj\n # array scalars don't have in-place operators\n if has_inplace and isinstance(arr, np.ndarray):\n arr_imethod = getattr(arr, \"__i{0}__\".format(op))\n if inplace_override_expected:\n assert_equal(arr_method(obj), NotImplemented,\n err_msg=err_msg)\n elif ufunc_override_expected:\n res = arr_imethod(obj)\n assert_equal(res[0], \"__array_ufunc__\", err_msg)\n assert_equal(res[1], ufunc, err_msg)\n assert_(type(res[-1][\"out\"]) is tuple, err_msg)\n assert_(res[-1][\"out\"][0] is arr, err_msg)\n else:\n if (isinstance(obj, np.ndarray) and\n (type(obj).__array_ufunc__ is\n np.ndarray.__array_ufunc__)):\n # __array__ gets ignored\n assert_(arr_imethod(obj) is arr, err_msg)\n else:\n assert_raises((TypeError, Coerced),\n arr_imethod, obj,\n err_msg=err_msg)\n\n op_fn = getattr(operator, op, None)\n if op_fn is None:\n op_fn = getattr(operator, op + \"_\", None)\n if op_fn is None:\n op_fn = getattr(builtins, op)\n assert_equal(op_fn(obj, arr), \"forward\", err_msg)\n if not isinstance(obj, np.ndarray):\n if binop_override_expected:\n assert_equal(op_fn(arr, obj), \"reverse\", err_msg)\n elif ufunc_override_expected:\n assert_equal(op_fn(arr, obj)[0], \"__array_ufunc__\",\n err_msg)\n if ufunc_override_expected:\n assert_equal(ufunc(obj, arr)[0], \"__array_ufunc__\",\n err_msg)\n\n # No array priority, no array_ufunc -> nothing called\n check(make_obj(object), False, False, False)\n # Negative array priority, no array_ufunc -> nothing called\n # (has to be very negative, because scalar priority is -1000000.0)\n check(make_obj(object, array_priority=-2**30), False, False, False)\n # Positive array priority, no array_ufunc -> binops and iops only\n check(make_obj(object, array_priority=1), True, False, True)\n # ndarray ignores array_priority for ndarray subclasses\n check(make_obj(np.ndarray, array_priority=1), False, False, False,\n check_scalar=False)\n # Positive array_priority and array_ufunc -> array_ufunc only\n check(make_obj(object, array_priority=1,\n array_ufunc=array_ufunc_impl), False, True, False)\n check(make_obj(np.ndarray, array_priority=1,\n array_ufunc=array_ufunc_impl), False, True, False)\n # array_ufunc set to None -> defer binops only\n check(make_obj(object, array_ufunc=None), True, False, False)\n check(make_obj(np.ndarray, array_ufunc=None), True, False, False,\n check_scalar=False)\n\n def test_ufunc_override_normalize_signature(self):\n # gh-5674\n class SomeClass:\n def __array_ufunc__(self, ufunc, method, *inputs, **kw):\n return kw\n\n a = 
SomeClass()\n kw = np.add(a, [1])\n assert_('sig' not in kw and 'signature' not in kw)\n kw = np.add(a, [1], sig='ii->i')\n assert_('sig' not in kw and 'signature' in kw)\n assert_equal(kw['signature'], 'ii->i')\n kw = np.add(a, [1], signature='ii->i')\n assert_('sig' not in kw and 'signature' in kw)\n assert_equal(kw['signature'], 'ii->i')\n\n def test_array_ufunc_index(self):\n # Check that index is set appropriately, also if only an output\n # is passed on (latter is another regression tests for github bug 4753)\n # This also checks implicitly that 'out' is always a tuple.\n class CheckIndex:\n def __array_ufunc__(self, ufunc, method, *inputs, **kw):\n for i, a in enumerate(inputs):\n if a is self:\n return i\n # calls below mean we must be in an output.\n for j, a in enumerate(kw['out']):\n if a is self:\n return (j,)\n\n a = CheckIndex()\n dummy = np.arange(2.)\n # 1 input, 1 output\n assert_equal(np.sin(a), 0)\n assert_equal(np.sin(dummy, a), (0,))\n assert_equal(np.sin(dummy, out=a), (0,))\n assert_equal(np.sin(dummy, out=(a,)), (0,))\n assert_equal(np.sin(a, a), 0)\n assert_equal(np.sin(a, out=a), 0)\n assert_equal(np.sin(a, out=(a,)), 0)\n # 1 input, 2 outputs\n assert_equal(np.modf(dummy, a), (0,))\n assert_equal(np.modf(dummy, None, a), (1,))\n assert_equal(np.modf(dummy, dummy, a), (1,))\n assert_equal(np.modf(dummy, out=(a, None)), (0,))\n assert_equal(np.modf(dummy, out=(a, dummy)), (0,))\n assert_equal(np.modf(dummy, out=(None, a)), (1,))\n assert_equal(np.modf(dummy, out=(dummy, a)), (1,))\n assert_equal(np.modf(a, out=(dummy, a)), 0)\n with assert_raises(TypeError):\n # Out argument must be tuple, since there are multiple outputs\n np.modf(dummy, out=a)\n\n assert_raises(ValueError, np.modf, dummy, out=(a,))\n\n # 2 inputs, 1 output\n assert_equal(np.add(a, dummy), 0)\n assert_equal(np.add(dummy, a), 1)\n assert_equal(np.add(dummy, dummy, a), (0,))\n assert_equal(np.add(dummy, a, a), 1)\n assert_equal(np.add(dummy, dummy, out=a), (0,))\n assert_equal(np.add(dummy, dummy, out=(a,)), (0,))\n assert_equal(np.add(a, dummy, out=a), 0)\n\n def test_out_override(self):\n # regression test for github bug 4753\n class OutClass(np.ndarray):\n def __array_ufunc__(self, ufunc, method, *inputs, **kw):\n if 'out' in kw:\n tmp_kw = kw.copy()\n tmp_kw.pop('out')\n func = getattr(ufunc, method)\n kw['out'][0][...] 
= func(*inputs, **tmp_kw)\n\n A = np.array([0]).view(OutClass)\n B = np.array([5])\n C = np.array([6])\n np.multiply(C, B, A)\n assert_equal(A[0], 30)\n assert_(isinstance(A, OutClass))\n A[0] = 0\n np.multiply(C, B, out=A)\n assert_equal(A[0], 30)\n assert_(isinstance(A, OutClass))\n\n def test_pow_override_with_errors(self):\n # regression test for gh-9112\n class PowerOnly(np.ndarray):\n def __array_ufunc__(self, ufunc, method, *inputs, **kw):\n if ufunc is not np.power:\n raise NotImplementedError\n return \"POWER!\"\n # explicit cast to float, to ensure the fast power path is taken.\n a = np.array(5., dtype=np.float64).view(PowerOnly)\n assert_equal(a ** 2.5, \"POWER!\")\n with assert_raises(NotImplementedError):\n a ** 0.5\n with assert_raises(NotImplementedError):\n a ** 0\n with assert_raises(NotImplementedError):\n a ** 1\n with assert_raises(NotImplementedError):\n a ** -1\n with assert_raises(NotImplementedError):\n a ** 2\n\n def test_pow_array_object_dtype(self):\n # test pow on arrays of object dtype\n class SomeClass:\n def __init__(self, num=None):\n self.num = num\n\n # want to ensure a fast pow path is not taken\n def __mul__(self, other):\n raise AssertionError('__mul__ should not be called')\n\n def __div__(self, other):\n raise AssertionError('__div__ should not be called')\n\n def __pow__(self, exp):\n return SomeClass(num=self.num ** exp)\n\n def __eq__(self, other):\n if isinstance(other, SomeClass):\n return self.num == other.num\n\n __rpow__ = __pow__\n\n def pow_for(exp, arr):\n return np.array([x ** exp for x in arr])\n\n obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])\n\n assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))\n assert_equal(obj_arr ** 0, pow_for(0, obj_arr))\n assert_equal(obj_arr ** 1, pow_for(1, obj_arr))\n assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))\n assert_equal(obj_arr ** 2, pow_for(2, obj_arr))\n\n def test_pos_array_ufunc_override(self):\n class A(np.ndarray):\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n return getattr(ufunc, method)(*[i.view(np.ndarray) for\n i in inputs], **kwargs)\n tst = np.array('foo').view(A)\n with assert_raises(TypeError):\n +tst\n\n\nclass TestTemporaryElide:\n # elision is only triggered on relatively large arrays\n\n def test_extension_incref_elide(self):\n # test extension (e.g. 
cython) calling PyNumber_* slots without\n # increasing the reference counts\n #\n # def incref_elide(a):\n # d = input.copy() # refcount 1\n # return d, d + d # PyNumber_Add without increasing refcount\n from numpy.core._multiarray_tests import incref_elide\n d = np.ones(100000)\n orig, res = incref_elide(d)\n d + d\n # the return original should not be changed to an inplace operation\n assert_array_equal(orig, d)\n assert_array_equal(res, d + d)\n\n def test_extension_incref_elide_stack(self):\n # scanning if the refcount == 1 object is on the python stack to check\n # that we are called directly from python is flawed as object may still\n # be above the stack pointer and we have no access to the top of it\n #\n # def incref_elide_l(d):\n # return l[4] + l[4] # PyNumber_Add without increasing refcount\n from numpy.core._multiarray_tests import incref_elide_l\n # padding with 1 makes sure the object on the stack is not overwritten\n l = [1, 1, 1, 1, np.ones(100000)]\n res = incref_elide_l(l)\n # the return original should not be changed to an inplace operation\n assert_array_equal(l[4], np.ones(100000))\n assert_array_equal(res, l[4] + l[4])\n\n def test_temporary_with_cast(self):\n # check that we don't elide into a temporary which would need casting\n d = np.ones(200000, dtype=np.int64)\n assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))\n\n r = ((d + d) / 2)\n assert_equal(r.dtype, np.dtype('f8'))\n\n r = np.true_divide((d + d), 2)\n assert_equal(r.dtype, np.dtype('f8'))\n\n r = ((d + d) / 2.)\n assert_equal(r.dtype, np.dtype('f8'))\n\n r = ((d + d) // 2)\n assert_equal(r.dtype, np.dtype(np.int64))\n\n # commutative elision into the astype result\n f = np.ones(100000, dtype=np.float32)\n assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))\n\n # no elision into lower type\n d = f.astype(np.float64)\n assert_equal(((f + f) + d).dtype, d.dtype)\n l = np.ones(100000, dtype=np.longdouble)\n assert_equal(((d + d) + l).dtype, l.dtype)\n\n # test unary abs with different output dtype\n for dt in (np.complex64, np.complex128, np.clongdouble):\n c = np.ones(100000, dtype=dt)\n r = abs(c * 2.0)\n assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))\n\n def test_elide_broadcast(self):\n # test no elision on broadcast to higher dimension\n # only triggers elision code path in debug mode as triggering it in\n # normal mode needs 256kb large matching dimension, so a lot of memory\n d = np.ones((2000, 1), dtype=int)\n b = np.ones((2000), dtype=bool)\n r = (1 - d) + b\n assert_equal(r, 1)\n assert_equal(r.shape, (2000, 2000))\n\n def test_elide_scalar(self):\n # check inplace op does not create ndarray from scalars\n a = np.bool_()\n assert_(type(~(a & a)) is np.bool_)\n\n def test_elide_scalar_readonly(self):\n # The imaginary part of a real array is readonly. This needs to go\n # through fast_scalar_power which is only called for powers of\n # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for\n # elision which can be gotten for the imaginary part of a real\n # array. 
Should not error.\n a = np.empty(100000, dtype=np.float64)\n a.imag ** 2\n\n def test_elide_readonly(self):\n # don't try to elide readonly temporaries\n r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0\n assert_equal(r, 0)\n\n def test_elide_updateifcopy(self):\n a = np.ones(2**20)[::2]\n b = a.flat.__array__() + 1\n del b\n assert_equal(a, 1)\n\n\nclass TestCAPI:\n def test_IsPythonScalar(self):\n from numpy.core._multiarray_tests import IsPythonScalar\n assert_(IsPythonScalar(b'foobar'))\n assert_(IsPythonScalar(1))\n assert_(IsPythonScalar(2**80))\n assert_(IsPythonScalar(2.))\n assert_(IsPythonScalar(\"a\"))\n\n\nclass TestSubscripting:\n def test_test_zero_rank(self):\n x = np.array([1, 2, 3])\n assert_(isinstance(x[0], np.int_))\n if sys.version_info[0] < 3:\n assert_(isinstance(x[0], int))\n assert_(type(x[0, ...]) is np.ndarray)\n\n\nclass TestPickling:\n @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,\n reason=('this tests the error messages when trying to'\n 'protocol 5 although it is not available'))\n def test_correct_protocol5_error_message(self):\n array = np.arange(10)\n\n if sys.version_info[:2] in ((3, 6), (3, 7)):\n # For the specific case of python3.6 and 3.7, raise a clear import\n # error about the pickle5 backport when trying to use protocol=5\n # without the pickle5 package\n with pytest.raises(ImportError):\n array.__reduce_ex__(5)\n\n elif sys.version_info[:2] < (3, 6):\n # when calling __reduce_ex__ explicitly with protocol=5 on python\n # raise a ValueError saying that protocol 5 is not available for\n # this python version\n with pytest.raises(ValueError):\n array.__reduce_ex__(5)\n\n def test_record_array_with_object_dtype(self):\n my_object = object()\n\n arr_with_object = np.array(\n [(my_object, 1, 2.0)],\n dtype=[('a', object), ('b', int), ('c', float)])\n arr_without_object = np.array(\n [('xxx', 1, 2.0)],\n dtype=[('a', str), ('b', int), ('c', float)])\n\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n depickled_arr_with_object = pickle.loads(\n pickle.dumps(arr_with_object, protocol=proto))\n depickled_arr_without_object = pickle.loads(\n pickle.dumps(arr_without_object, protocol=proto))\n\n assert_equal(arr_with_object.dtype,\n depickled_arr_with_object.dtype)\n assert_equal(arr_without_object.dtype,\n depickled_arr_without_object.dtype)\n\n @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,\n reason=\"requires pickle protocol 5\")\n def test_f_contiguous_array(self):\n f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')\n buffers = []\n\n # When using pickle protocol 5, Fortran-contiguous arrays can be\n # serialized using out-of-band buffers\n bytes_string = pickle.dumps(f_contiguous_array, protocol=5,\n buffer_callback=buffers.append)\n\n assert len(buffers) > 0\n\n depickled_f_contiguous_array = pickle.loads(bytes_string,\n buffers=buffers)\n\n assert_equal(f_contiguous_array, depickled_f_contiguous_array)\n\n def test_non_contiguous_array(self):\n non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]\n assert not non_contiguous_array.flags.c_contiguous\n assert not non_contiguous_array.flags.f_contiguous\n\n # make sure non-contiguous arrays can be pickled-depickled\n # using any protocol\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n depickled_non_contiguous_array = pickle.loads(\n pickle.dumps(non_contiguous_array, protocol=proto))\n\n assert_equal(non_contiguous_array, depickled_non_contiguous_array)\n\n def test_roundtrip(self):\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n carray = 
np.array([[2, 9], [7, 0], [3, 8]])\n DATA = [\n carray,\n np.transpose(carray),\n np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),\n ('c', float)])\n ]\n\n refs = [weakref.ref(a) for a in DATA]\n for a in DATA:\n assert_equal(\n a, pickle.loads(pickle.dumps(a, protocol=proto)),\n err_msg=\"%r\" % a)\n del a, DATA, carray\n break_cycles()\n # check for reference leaks (gh-12793)\n for ref in refs:\n assert ref() is None\n\n def _loads(self, obj):\n if sys.version_info[0] >= 3:\n return pickle.loads(obj, encoding='latin1')\n else:\n return pickle.loads(obj)\n\n # version 0 pickles, using protocol=2 to pickle\n # version 0 doesn't have a version field\n def test_version0_int8(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x04\\x85cnumpy\\ndtype\\nq\\x04U\\x02i1K\\x00K\\x01\\x87Rq\\x05(U\\x01|NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89U\\x04\\x01\\x02\\x03\\x04tb.'\n a = np.array([1, 2, 3, 4], dtype=np.int8)\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_version0_float32(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x04\\x85cnumpy\\ndtype\\nq\\x04U\\x02f4K\\x00K\\x01\\x87Rq\\x05(U\\x01<NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89U\\x10\\x00\\x00\\x80?\\x00\\x00\\x00@\\x00\\x00@@\\x00\\x00\\x80@tb.'\n a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_version0_object(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x02\\x85cnumpy\\ndtype\\nq\\x04U\\x02O8K\\x00K\\x01\\x87Rq\\x05(U\\x01|NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89]q\\x06(}q\\x07U\\x01aK\\x01s}q\\x08U\\x01bK\\x02setb.'\n a = np.array([{'a': 1}, {'b': 2}])\n p = self._loads(s)\n assert_equal(a, p)\n\n # version 1 pickles, using protocol=2 to pickle\n def test_version1_int8(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x01K\\x04\\x85cnumpy\\ndtype\\nq\\x04U\\x02i1K\\x00K\\x01\\x87Rq\\x05(K\\x01U\\x01|NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89U\\x04\\x01\\x02\\x03\\x04tb.'\n a = np.array([1, 2, 3, 4], dtype=np.int8)\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_version1_float32(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x01K\\x04\\x85cnumpy\\ndtype\\nq\\x04U\\x02f4K\\x00K\\x01\\x87Rq\\x05(K\\x01U\\x01<NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89U\\x10\\x00\\x00\\x80?\\x00\\x00\\x00@\\x00\\x00@@\\x00\\x00\\x80@tb.'\n a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_version1_object(self):\n s = b'\\x80\\x02cnumpy.core._internal\\n_reconstruct\\nq\\x01cnumpy\\nndarray\\nq\\x02K\\x00\\x85U\\x01b\\x87Rq\\x03(K\\x01K\\x02\\x85cnumpy\\ndtype\\nq\\x04U\\x02O8K\\x00K\\x01\\x87Rq\\x05(K\\x01U\\x01|NNJ\\xff\\xff\\xff\\xffJ\\xff\\xff\\xff\\xfftb\\x89]q\\x06(}q\\x07U\\x01aK\\x01s}q\\x08U\\x01bK\\x02setb.'\n a = np.array([{'a': 1}, {'b': 2}])\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_subarray_int_shape(self):\n s = 
b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'V6'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'a'\\np12\\ng3\\ntp13\\n(dp14\\ng12\\n(g7\\n(S'V4'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'|'\\np18\\n(g7\\n(S'i1'\\np19\\nI0\\nI1\\ntp20\\nRp21\\n(I3\\nS'|'\\np22\\nNNNI-1\\nI-1\\nI0\\ntp23\\nb(I2\\nI2\\ntp24\\ntp25\\nNNI4\\nI1\\nI0\\ntp26\\nbI0\\ntp27\\nsg3\\n(g7\\n(S'V2'\\np28\\nI0\\nI1\\ntp29\\nRp30\\n(I3\\nS'|'\\np31\\n(g21\\nI2\\ntp32\\nNNI2\\nI1\\nI0\\ntp33\\nbI4\\ntp34\\nsI6\\nI1\\nI0\\ntp35\\nbI00\\nS'\\\\x01\\\\x01\\\\x01\\\\x01\\\\x01\\\\x02'\\np36\\ntp37\\nb.\"\n a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])\n p = self._loads(s)\n assert_equal(a, p)\n\n def test_datetime64_byteorder(self):\n original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')\n\n original_byte_reversed = original.copy(order='K')\n original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')\n original_byte_reversed.byteswap(inplace=True)\n\n new = pickle.loads(pickle.dumps(original_byte_reversed))\n\n assert_equal(original.dtype, new.dtype)\n\n\nclass TestFancyIndexing:\n def test_list(self):\n x = np.ones((1, 1))\n x[:, [0]] = 2.0\n assert_array_equal(x, np.array([[2.0]]))\n\n x = np.ones((1, 1, 1))\n x[:, :, [0]] = 2.0\n assert_array_equal(x, np.array([[[2.0]]]))\n\n def test_tuple(self):\n x = np.ones((1, 1))\n x[:, (0,)] = 2.0\n assert_array_equal(x, np.array([[2.0]]))\n x = np.ones((1, 1, 1))\n x[:, :, (0,)] = 2.0\n assert_array_equal(x, np.array([[[2.0]]]))\n\n def test_mask(self):\n x = np.array([1, 2, 3, 4])\n m = np.array([0, 1, 0, 0], bool)\n assert_array_equal(x[m], np.array([2]))\n\n def test_mask2(self):\n x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\n m = np.array([0, 1], bool)\n m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)\n m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)\n assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))\n assert_array_equal(x[m2], np.array([2, 5]))\n assert_array_equal(x[m3], np.array([2]))\n\n def test_assign_mask(self):\n x = np.array([1, 2, 3, 4])\n m = np.array([0, 1, 0, 0], bool)\n x[m] = 5\n assert_array_equal(x, np.array([1, 5, 3, 4]))\n\n def test_assign_mask2(self):\n xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\n m = np.array([0, 1], bool)\n m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)\n m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)\n x = xorig.copy()\n x[m] = 10\n assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))\n x = xorig.copy()\n x[m2] = 10\n assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))\n x = xorig.copy()\n x[m3] = 10\n assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))\n\n\nclass TestStringCompare:\n def test_string(self):\n g1 = np.array([\"This\", \"is\", \"example\"])\n g2 = np.array([\"This\", \"was\", \"example\"])\n assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])\n\n def test_mixed(self):\n g1 = np.array([\"spam\", \"spa\", \"spammer\", \"and eggs\"])\n g2 = \"spam\"\n assert_array_equal(g1 == g2, [x == g2 for x in g1])\n assert_array_equal(g1 != g2, [x != g2 for x in 
g1])\n assert_array_equal(g1 < g2, [x < g2 for x in g1])\n assert_array_equal(g1 > g2, [x > g2 for x in g1])\n assert_array_equal(g1 <= g2, [x <= g2 for x in g1])\n assert_array_equal(g1 >= g2, [x >= g2 for x in g1])\n\n def test_unicode(self):\n g1 = np.array([u\"This\", u\"is\", u\"example\"])\n g2 = np.array([u\"This\", u\"was\", u\"example\"])\n assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])\n assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])\n\n\nclass TestArgmax:\n\n nan_arr = [\n ([0, 1, 2, 3, np.nan], 4),\n ([0, 1, 2, np.nan, 3], 3),\n ([np.nan, 0, 1, 2, 3], 0),\n ([np.nan, 0, np.nan, 2, 3], 0),\n ([0, 1, 2, 3, complex(0, np.nan)], 4),\n ([0, 1, 2, 3, complex(np.nan, 0)], 4),\n ([0, 1, 2, complex(np.nan, 0), 3], 3),\n ([0, 1, 2, complex(0, np.nan), 3], 3),\n ([complex(0, np.nan), 0, 1, 2, 3], 0),\n ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),\n ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),\n ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),\n ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),\n\n ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),\n ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),\n ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),\n\n ([np.datetime64('1923-04-14T12:43:12'),\n np.datetime64('1994-06-21T14:43:15'),\n np.datetime64('2001-10-15T04:10:32'),\n np.datetime64('1995-11-25T16:02:16'),\n np.datetime64('2005-01-04T03:14:12'),\n np.datetime64('2041-12-03T14:05:03')], 5),\n ([np.datetime64('1935-09-14T04:40:11'),\n np.datetime64('1949-10-12T12:32:11'),\n np.datetime64('2010-01-03T05:14:12'),\n np.datetime64('2015-11-20T12:20:59'),\n np.datetime64('1932-09-23T10:10:13'),\n np.datetime64('2014-10-10T03:50:30')], 3),\n # Assorted tests with NaTs\n ([np.datetime64('NaT'),\n np.datetime64('NaT'),\n np.datetime64('2010-01-03T05:14:12'),\n np.datetime64('NaT'),\n np.datetime64('2015-09-23T10:10:13'),\n np.datetime64('1932-10-10T03:50:30')], 0),\n ([np.datetime64('2059-03-14T12:43:12'),\n np.datetime64('1996-09-21T14:43:15'),\n np.datetime64('NaT'),\n np.datetime64('2022-12-25T16:02:16'),\n np.datetime64('1963-10-04T03:14:12'),\n np.datetime64('2013-05-08T18:15:23')], 2),\n ([np.timedelta64(2, 's'),\n np.timedelta64(1, 's'),\n np.timedelta64('NaT', 's'),\n np.timedelta64(3, 's')], 2),\n ([np.timedelta64('NaT', 's')] * 3, 0),\n\n ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),\n timedelta(days=-1, seconds=23)], 0),\n ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),\n timedelta(days=5, seconds=14)], 1),\n ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),\n timedelta(days=10, seconds=43)], 2),\n\n ([False, False, False, False, True], 4),\n ([False, False, False, True, False], 3),\n ([True, False, False, False, False], 0),\n ([True, False, True, False, False], 0),\n ]\n\n def test_all(self):\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\n for i in range(a.ndim):\n amax = a.max(i)\n aargmax = a.argmax(i)\n axes = list(range(a.ndim))\n axes.remove(i)\n assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))\n\n def test_combinations(self):\n for arr, pos in self.nan_arr:\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning,\n \"invalid value 
encountered in reduce\")\n max_val = np.max(arr)\n\n assert_equal(np.argmax(arr), pos, err_msg=\"%r\" % arr)\n assert_equal(arr[np.argmax(arr)], max_val, err_msg=\"%r\" % arr)\n\n def test_output_shape(self):\n # see also gh-616\n a = np.ones((10, 5))\n # Check some simple shape mismatches\n out = np.ones(11, dtype=np.int_)\n assert_raises(ValueError, a.argmax, -1, out)\n\n out = np.ones((2, 5), dtype=np.int_)\n assert_raises(ValueError, a.argmax, -1, out)\n\n # these could be relaxed possibly (used to allow even the previous)\n out = np.ones((1, 10), dtype=np.int_)\n assert_raises(ValueError, a.argmax, -1, out)\n\n out = np.ones(10, dtype=np.int_)\n a.argmax(-1, out=out)\n assert_equal(out, a.argmax(-1))\n\n def test_argmax_unicode(self):\n d = np.zeros(6031, dtype='<U9')\n d[5942] = \"as\"\n assert_equal(d.argmax(), 5942)\n\n def test_np_vs_ndarray(self):\n # make sure both ndarray.argmax and numpy.argmax support out/axis args\n a = np.random.normal(size=(2,3))\n\n # check positional args\n out1 = np.zeros(2, dtype=int)\n out2 = np.zeros(2, dtype=int)\n assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))\n assert_equal(out1, out2)\n\n # check keyword args\n out1 = np.zeros(3, dtype=int)\n out2 = np.zeros(3, dtype=int)\n assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))\n assert_equal(out1, out2)\n\n @pytest.mark.leaks_references(reason=\"replaces None with NULL.\")\n def test_object_argmax_with_NULLs(self):\n # See gh-6032\n a = np.empty(4, dtype='O')\n ctypes.memset(a.ctypes.data, 0, a.nbytes)\n assert_equal(a.argmax(), 0)\n a[3] = 10\n assert_equal(a.argmax(), 3)\n a[1] = 30\n assert_equal(a.argmax(), 1)\n\n\nclass TestArgmin:\n\n nan_arr = [\n ([0, 1, 2, 3, np.nan], 4),\n ([0, 1, 2, np.nan, 3], 3),\n ([np.nan, 0, 1, 2, 3], 0),\n ([np.nan, 0, np.nan, 2, 3], 0),\n ([0, 1, 2, 3, complex(0, np.nan)], 4),\n ([0, 1, 2, 3, complex(np.nan, 0)], 4),\n ([0, 1, 2, complex(np.nan, 0), 3], 3),\n ([0, 1, 2, complex(0, np.nan), 3], 3),\n ([complex(0, np.nan), 0, 1, 2, 3], 0),\n ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),\n ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),\n ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),\n ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),\n\n ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),\n ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),\n ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),\n\n ([np.datetime64('1923-04-14T12:43:12'),\n np.datetime64('1994-06-21T14:43:15'),\n np.datetime64('2001-10-15T04:10:32'),\n np.datetime64('1995-11-25T16:02:16'),\n np.datetime64('2005-01-04T03:14:12'),\n np.datetime64('2041-12-03T14:05:03')], 0),\n ([np.datetime64('1935-09-14T04:40:11'),\n np.datetime64('1949-10-12T12:32:11'),\n np.datetime64('2010-01-03T05:14:12'),\n np.datetime64('2014-11-20T12:20:59'),\n np.datetime64('2015-09-23T10:10:13'),\n np.datetime64('1932-10-10T03:50:30')], 5),\n # Assorted tests with NaTs\n ([np.datetime64('NaT'),\n np.datetime64('NaT'),\n np.datetime64('2010-01-03T05:14:12'),\n np.datetime64('NaT'),\n np.datetime64('2015-09-23T10:10:13'),\n np.datetime64('1932-10-10T03:50:30')], 0),\n ([np.datetime64('2059-03-14T12:43:12'),\n np.datetime64('1996-09-21T14:43:15'),\n np.datetime64('NaT'),\n np.datetime64('2022-12-25T16:02:16'),\n np.datetime64('1963-10-04T03:14:12'),\n np.datetime64('2013-05-08T18:15:23')], 2),\n ([np.timedelta64(2, 's'),\n np.timedelta64(1, 's'),\n np.timedelta64('NaT', 's'),\n np.timedelta64(3, 's')], 2),\n ([np.timedelta64('NaT', 
's')] * 3, 0),\n\n ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),\n timedelta(days=-1, seconds=23)], 2),\n ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),\n timedelta(days=5, seconds=14)], 0),\n ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),\n timedelta(days=10, seconds=43)], 1),\n\n ([True, True, True, True, False], 4),\n ([True, True, True, False, True], 3),\n ([False, True, True, True, True], 0),\n ([False, True, False, True, True], 0),\n ]\n\n def test_all(self):\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\n for i in range(a.ndim):\n amin = a.min(i)\n aargmin = a.argmin(i)\n axes = list(range(a.ndim))\n axes.remove(i)\n assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))\n\n def test_combinations(self):\n for arr, pos in self.nan_arr:\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning,\n \"invalid value encountered in reduce\")\n min_val = np.min(arr)\n\n assert_equal(np.argmin(arr), pos, err_msg=\"%r\" % arr)\n assert_equal(arr[np.argmin(arr)], min_val, err_msg=\"%r\" % arr)\n\n def test_minimum_signed_integers(self):\n\n a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)\n assert_equal(np.argmin(a), 1)\n\n a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)\n assert_equal(np.argmin(a), 1)\n\n a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)\n assert_equal(np.argmin(a), 1)\n\n a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)\n assert_equal(np.argmin(a), 1)\n\n def test_output_shape(self):\n # see also gh-616\n a = np.ones((10, 5))\n # Check some simple shape mismatches\n out = np.ones(11, dtype=np.int_)\n assert_raises(ValueError, a.argmin, -1, out)\n\n out = np.ones((2, 5), dtype=np.int_)\n assert_raises(ValueError, a.argmin, -1, out)\n\n # these could be relaxed possibly (used to allow even the previous)\n out = np.ones((1, 10), dtype=np.int_)\n assert_raises(ValueError, a.argmin, -1, out)\n\n out = np.ones(10, dtype=np.int_)\n a.argmin(-1, out=out)\n assert_equal(out, a.argmin(-1))\n\n def test_argmin_unicode(self):\n d = np.ones(6031, dtype='<U9')\n d[6001] = \"0\"\n assert_equal(d.argmin(), 6001)\n\n def test_np_vs_ndarray(self):\n # make sure both ndarray.argmin and numpy.argmin support out/axis args\n a = np.random.normal(size=(2, 3))\n\n # check positional args\n out1 = np.zeros(2, dtype=int)\n out2 = np.ones(2, dtype=int)\n assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))\n assert_equal(out1, out2)\n\n # check keyword args\n out1 = np.zeros(3, dtype=int)\n out2 = np.ones(3, dtype=int)\n assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))\n assert_equal(out1, out2)\n\n @pytest.mark.leaks_references(reason=\"replaces None with NULL.\")\n def test_object_argmin_with_NULLs(self):\n # See gh-6032\n a = np.empty(4, dtype='O')\n ctypes.memset(a.ctypes.data, 0, a.nbytes)\n assert_equal(a.argmin(), 0)\n a[3] = 30\n assert_equal(a.argmin(), 3)\n a[1] = 10\n assert_equal(a.argmin(), 1)\n\n\nclass TestMinMax:\n\n def test_scalar(self):\n assert_raises(np.AxisError, np.amax, 1, 1)\n assert_raises(np.AxisError, np.amin, 1, 1)\n\n assert_equal(np.amax(1, axis=0), 1)\n assert_equal(np.amin(1, axis=0), 1)\n assert_equal(np.amax(1, axis=None), 1)\n assert_equal(np.amin(1, axis=None), 1)\n\n def test_axis(self):\n assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)\n assert_equal(np.amax([[1, 2, 3]], axis=1), 3)\n\n def test_datetime(self):\n # Do not ignore NaT\n for dtype in ('m8[s]', 'm8[Y]'):\n a = np.arange(10).astype(dtype)\n assert_equal(np.amin(a), a[0])\n 
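# [editor's illustrative sketch - not part of the original suite] before any NaT is\n            # inserted the usual ordering holds; once a[3] is set to 'NaT' below, both amin\n            # and amax propagate NaT rather than skipping it (unlike np.nanmin/np.nanmax\n            # for floating-point NaN).\n            assert_(np.amin(a) <= np.amax(a))\n            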
assert_equal(np.amax(a), a[9])\n a[3] = 'NaT'\n assert_equal(np.amin(a), a[3])\n assert_equal(np.amax(a), a[3])\n\n\nclass TestNewaxis:\n def test_basic(self):\n sk = np.array([0, -0.1, 0.1])\n res = 250*sk[:, np.newaxis]\n assert_almost_equal(res.ravel(), 250*sk)\n\n\nclass TestClip:\n def _check_range(self, x, cmin, cmax):\n assert_(np.all(x >= cmin))\n assert_(np.all(x <= cmax))\n\n def _clip_type(self, type_group, array_max,\n clip_min, clip_max, inplace=False,\n expected_min=None, expected_max=None):\n if expected_min is None:\n expected_min = clip_min\n if expected_max is None:\n expected_max = clip_max\n\n for T in np.sctypes[type_group]:\n if sys.byteorder == 'little':\n byte_orders = ['=', '>']\n else:\n byte_orders = ['<', '=']\n\n for byteorder in byte_orders:\n dtype = np.dtype(T).newbyteorder(byteorder)\n\n x = (np.random.random(1000) * array_max).astype(dtype)\n if inplace:\n # The tests that call us pass clip_min and clip_max that\n # might not fit in the destination dtype. They were written\n # assuming the previous unsafe casting, which now must be\n # passed explicitly to avoid a warning.\n x.clip(clip_min, clip_max, x, casting='unsafe')\n else:\n x = x.clip(clip_min, clip_max)\n byteorder = '='\n\n if x.dtype.byteorder == '|':\n byteorder = '|'\n assert_equal(x.dtype.byteorder, byteorder)\n self._check_range(x, expected_min, expected_max)\n return x\n\n def test_basic(self):\n for inplace in [False, True]:\n self._clip_type(\n 'float', 1024, -12.8, 100.2, inplace=inplace)\n self._clip_type(\n 'float', 1024, 0, 0, inplace=inplace)\n\n self._clip_type(\n 'int', 1024, -120, 100, inplace=inplace)\n self._clip_type(\n 'int', 1024, 0, 0, inplace=inplace)\n\n self._clip_type(\n 'uint', 1024, 0, 0, inplace=inplace)\n self._clip_type(\n 'uint', 1024, -120, 100, inplace=inplace, expected_min=0)\n\n def test_record_array(self):\n rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],\n dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])\n y = rec['x'].clip(-0.3, 0.5)\n self._check_range(y, -0.3, 0.5)\n\n def test_max_or_min(self):\n val = np.array([0, 1, 2, 3, 4, 5, 6, 7])\n x = val.clip(3)\n assert_(np.all(x >= 3))\n x = val.clip(min=3)\n assert_(np.all(x >= 3))\n x = val.clip(max=4)\n assert_(np.all(x <= 4))\n\n def test_nan(self):\n input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])\n result = input_arr.clip(-1, 1)\n expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])\n assert_array_equal(result, expected)\n\n\nclass TestCompress:\n def test_axis(self):\n tgt = [[5, 6, 7, 8, 9]]\n arr = np.arange(10).reshape(2, 5)\n out = np.compress([0, 1], arr, axis=0)\n assert_equal(out, tgt)\n\n tgt = [[1, 3], [6, 8]]\n out = np.compress([0, 1, 0, 1, 0], arr, axis=1)\n assert_equal(out, tgt)\n\n def test_truncate(self):\n tgt = [[1], [6]]\n arr = np.arange(10).reshape(2, 5)\n out = np.compress([0, 1], arr, axis=1)\n assert_equal(out, tgt)\n\n def test_flatten(self):\n arr = np.arange(10).reshape(2, 5)\n out = np.compress([0, 1], arr)\n assert_equal(out, 1)\n\n\nclass TestPutmask:\n def tst_basic(self, x, T, mask, val):\n np.putmask(x, mask, val)\n assert_equal(x[mask], T(val))\n assert_equal(x.dtype, T)\n\n def test_ip_types(self):\n unchecked_types = [bytes, unicode, np.void, object]\n\n x = np.random.random(1000)*100\n mask = x < 40\n\n for val in [-100, 0, 15]:\n for types in np.sctypes.values():\n for T in types:\n if T not in unchecked_types:\n self.tst_basic(x.copy().astype(T), T, mask, val)\n\n def test_mask_size(self):\n assert_raises(ValueError, np.putmask, np.array([1, 2, 
3]), [True], 5)\n\n @pytest.mark.parametrize('dtype', ('>i4', '<i4'))\n def test_byteorder(self, dtype):\n x = np.array([1, 2, 3], dtype)\n np.putmask(x, [True, False, True], -1)\n assert_array_equal(x, [-1, 2, -1])\n\n def test_record_array(self):\n # Note mixed byteorder.\n rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],\n dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])\n np.putmask(rec['x'], [True, False], 10)\n assert_array_equal(rec['x'], [10, 5])\n assert_array_equal(rec['y'], [2, 4])\n assert_array_equal(rec['z'], [3, 3])\n np.putmask(rec['y'], [True, False], 11)\n assert_array_equal(rec['x'], [10, 5])\n assert_array_equal(rec['y'], [11, 4])\n assert_array_equal(rec['z'], [3, 3])\n\n def test_overlaps(self):\n # gh-6272 check overlap\n x = np.array([True, False, True, False])\n np.putmask(x[1:4], [True, True, True], x[:3])\n assert_equal(x, np.array([True, True, False, True]))\n\n x = np.array([True, False, True, False])\n np.putmask(x[1:4], x[:3], [True, False, True])\n assert_equal(x, np.array([True, True, True, True]))\n\n\nclass TestTake:\n def tst_basic(self, x):\n ind = list(range(x.shape[0]))\n assert_array_equal(x.take(ind, axis=0), x)\n\n def test_ip_types(self):\n unchecked_types = [bytes, unicode, np.void, object]\n\n x = np.random.random(24)*100\n x.shape = 2, 3, 4\n for types in np.sctypes.values():\n for T in types:\n if T not in unchecked_types:\n self.tst_basic(x.copy().astype(T))\n\n def test_raise(self):\n x = np.random.random(24)*100\n x.shape = 2, 3, 4\n assert_raises(IndexError, x.take, [0, 1, 2], axis=0)\n assert_raises(IndexError, x.take, [-3], axis=0)\n assert_array_equal(x.take([-1], axis=0)[0], x[1])\n\n def test_clip(self):\n x = np.random.random(24)*100\n x.shape = 2, 3, 4\n assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])\n assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])\n\n def test_wrap(self):\n x = np.random.random(24)*100\n x.shape = 2, 3, 4\n assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])\n assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])\n assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])\n\n @pytest.mark.parametrize('dtype', ('>i4', '<i4'))\n def test_byteorder(self, dtype):\n x = np.array([1, 2, 3], dtype)\n assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])\n\n def test_record_array(self):\n # Note mixed byteorder.\n rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],\n dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])\n rec1 = rec.take([1])\n assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)\n\n def test_out_overlap(self):\n # gh-6272 check overlap on out\n x = np.arange(5)\n y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')\n assert_equal(y, np.array([1, 2, 3]))\n\nclass TestLexsort:\n @pytest.mark.parametrize('dtype',[\n np.uint8, np.uint16, np.uint32, np.uint64,\n np.int8, np.int16, np.int32, np.int64,\n np.float16, np.float32, np.float64\n ])\n def test_basic(self, dtype):\n a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)\n b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)\n idx = np.lexsort((b, a))\n expected_idx = np.array([0, 4, 2, 1, 3, 5])\n assert_array_equal(idx, expected_idx)\n assert_array_equal(a[idx], np.sort(a))\n\n def test_mixed(self):\n a = np.array([1, 2, 1, 3, 1, 5])\n b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')\n\n idx = np.lexsort((b, a))\n expected_idx = np.array([0, 4, 2, 1, 3, 5])\n assert_array_equal(idx, expected_idx)\n\n def test_datetime(self):\n a = np.array([0,0,0], dtype='datetime64[D]')\n b = np.array([2,1,0], dtype='datetime64[D]')\n idx 
= np.lexsort((b, a))\n expected_idx = np.array([2, 1, 0])\n assert_array_equal(idx, expected_idx)\n\n a = np.array([0,0,0], dtype='timedelta64[D]')\n b = np.array([2,1,0], dtype='timedelta64[D]')\n idx = np.lexsort((b, a))\n expected_idx = np.array([2, 1, 0])\n assert_array_equal(idx, expected_idx)\n\n def test_object(self): # gh-6312\n a = np.random.choice(10, 1000)\n b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)\n\n for u in a, b:\n left = np.lexsort((u.astype('O'),))\n right = np.argsort(u, kind='mergesort')\n assert_array_equal(left, right)\n\n for u, v in (a, b), (b, a):\n idx = np.lexsort((u, v))\n assert_array_equal(idx, np.lexsort((u.astype('O'), v)))\n assert_array_equal(idx, np.lexsort((u, v.astype('O'))))\n u, v = np.array(u, dtype='object'), np.array(v, dtype='object')\n assert_array_equal(idx, np.lexsort((u, v)))\n\n def test_invalid_axis(self): # gh-7528\n x = np.linspace(0., 1., 42*3).reshape(42, 3)\n assert_raises(np.AxisError, np.lexsort, x, axis=2)\n\nclass TestIO:\n \"\"\"Test tofile, fromfile, tobytes, and fromstring\"\"\"\n\n def setup(self):\n shape = (2, 4, 3)\n rand = np.random.random\n self.x = rand(shape) + rand(shape).astype(complex)*1j\n self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]\n self.dtype = self.x.dtype\n self.tempdir = tempfile.mkdtemp()\n self.filename = tempfile.mktemp(dir=self.tempdir)\n\n def teardown(self):\n shutil.rmtree(self.tempdir)\n\n def test_nofile(self):\n # this should probably be supported as a file\n # but for now test for proper errors\n b = io.BytesIO()\n assert_raises(IOError, np.fromfile, b, np.uint8, 80)\n d = np.ones(7)\n assert_raises(IOError, lambda x: x.tofile(b), d)\n\n def test_bool_fromstring(self):\n v = np.array([True, False, True, False], dtype=np.bool_)\n y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)\n assert_array_equal(v, y)\n\n def test_uint64_fromstring(self):\n d = np.fromstring(\"9923372036854775807 104783749223640\",\n dtype=np.uint64, sep=' ')\n e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)\n assert_array_equal(d, e)\n\n def test_int64_fromstring(self):\n d = np.fromstring(\"-25041670086757 104783749223640\",\n dtype=np.int64, sep=' ')\n e = np.array([-25041670086757, 104783749223640], dtype=np.int64)\n assert_array_equal(d, e)\n\n def test_empty_files_binary(self):\n f = open(self.filename, 'w')\n f.close()\n y = np.fromfile(self.filename)\n assert_(y.size == 0, \"Array not empty\")\n\n def test_empty_files_text(self):\n f = open(self.filename, 'w')\n f.close()\n y = np.fromfile(self.filename, sep=\" \")\n assert_(y.size == 0, \"Array not empty\")\n\n def test_roundtrip_file(self):\n f = open(self.filename, 'wb')\n self.x.tofile(f)\n f.close()\n # NB. 
doesn't work with flush+seek, due to use of C stdio\n f = open(self.filename, 'rb')\n y = np.fromfile(f, dtype=self.dtype)\n f.close()\n assert_array_equal(y, self.x.flat)\n\n def test_roundtrip_filename(self):\n self.x.tofile(self.filename)\n y = np.fromfile(self.filename, dtype=self.dtype)\n assert_array_equal(y, self.x.flat)\n\n @pytest.mark.skipif(pathlib is None, reason=\"pathlib not found\")\n def test_roundtrip_pathlib(self):\n p = pathlib.Path(self.filename)\n self.x.tofile(p)\n y = np.fromfile(p, dtype=self.dtype)\n assert_array_equal(y, self.x.flat)\n\n @pytest.mark.skipif(pathlib is None, reason=\"pathlib not found\")\n def test_roundtrip_dump_pathlib(self):\n p = pathlib.Path(self.filename)\n self.x.dump(p)\n y = np.load(p, allow_pickle=True)\n assert_array_equal(y, self.x)\n\n def test_roundtrip_binary_str(self):\n s = self.x.tobytes()\n y = np.frombuffer(s, dtype=self.dtype)\n assert_array_equal(y, self.x.flat)\n\n s = self.x.tobytes('F')\n y = np.frombuffer(s, dtype=self.dtype)\n assert_array_equal(y, self.x.flatten('F'))\n\n def test_roundtrip_str(self):\n x = self.x.real.ravel()\n s = \"@\".join(map(str, x))\n y = np.fromstring(s, sep=\"@\")\n # NB. str imbues less precision\n nan_mask = ~np.isfinite(x)\n assert_array_equal(x[nan_mask], y[nan_mask])\n assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)\n\n def test_roundtrip_repr(self):\n x = self.x.real.ravel()\n s = \"@\".join(map(repr, x))\n y = np.fromstring(s, sep=\"@\")\n assert_array_equal(x, y)\n\n def test_unseekable_fromfile(self):\n # gh-6246\n self.x.tofile(self.filename)\n\n def fail(*args, **kwargs):\n raise IOError('Can not tell or seek')\n\n with io.open(self.filename, 'rb', buffering=0) as f:\n f.seek = fail\n f.tell = fail\n assert_raises(IOError, np.fromfile, f, dtype=self.dtype)\n\n def test_io_open_unbuffered_fromfile(self):\n # gh-6632\n self.x.tofile(self.filename)\n with io.open(self.filename, 'rb', buffering=0) as f:\n y = np.fromfile(f, dtype=self.dtype)\n assert_array_equal(y, self.x.flat)\n\n def test_largish_file(self):\n # check the fallocate path on files > 16MB\n d = np.zeros(4 * 1024 ** 2)\n d.tofile(self.filename)\n assert_equal(os.path.getsize(self.filename), d.nbytes)\n assert_array_equal(d, np.fromfile(self.filename))\n # check offset\n with open(self.filename, \"r+b\") as f:\n f.seek(d.nbytes)\n d.tofile(f)\n assert_equal(os.path.getsize(self.filename), d.nbytes * 2)\n # check append mode (gh-8329)\n open(self.filename, \"w\").close() # delete file contents\n with open(self.filename, \"ab\") as f:\n d.tofile(f)\n assert_array_equal(d, np.fromfile(self.filename))\n with open(self.filename, \"ab\") as f:\n d.tofile(f)\n assert_equal(os.path.getsize(self.filename), d.nbytes * 2)\n\n def test_io_open_buffered_fromfile(self):\n # gh-6632\n self.x.tofile(self.filename)\n with io.open(self.filename, 'rb', buffering=-1) as f:\n y = np.fromfile(f, dtype=self.dtype)\n assert_array_equal(y, self.x.flat)\n\n def test_file_position_after_fromfile(self):\n # gh-4118\n sizes = [io.DEFAULT_BUFFER_SIZE//8,\n io.DEFAULT_BUFFER_SIZE,\n io.DEFAULT_BUFFER_SIZE*8]\n\n for size in sizes:\n f = open(self.filename, 'wb')\n f.seek(size-1)\n f.write(b'\\0')\n f.close()\n\n for mode in ['rb', 'r+b']:\n err_msg = \"%d %s\" % (size, mode)\n\n f = open(self.filename, mode)\n f.read(2)\n np.fromfile(f, dtype=np.float64, count=1)\n pos = f.tell()\n f.close()\n assert_equal(pos, 10, err_msg=err_msg)\n\n def test_file_position_after_tofile(self):\n # gh-4118\n sizes = [io.DEFAULT_BUFFER_SIZE//8,\n 
io.DEFAULT_BUFFER_SIZE,\n io.DEFAULT_BUFFER_SIZE*8]\n\n for size in sizes:\n err_msg = \"%d\" % (size,)\n\n f = open(self.filename, 'wb')\n f.seek(size-1)\n f.write(b'\\0')\n f.seek(10)\n f.write(b'12')\n np.array([0], dtype=np.float64).tofile(f)\n pos = f.tell()\n f.close()\n assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)\n\n f = open(self.filename, 'r+b')\n f.read(2)\n f.seek(0, 1) # seek between read&write required by ANSI C\n np.array([0], dtype=np.float64).tofile(f)\n pos = f.tell()\n f.close()\n assert_equal(pos, 10, err_msg=err_msg)\n\n def test_load_object_array_fromfile(self):\n # gh-12300\n with open(self.filename, 'w') as f:\n # Ensure we have a file with consistent contents\n pass\n\n with open(self.filename, 'rb') as f:\n assert_raises_regex(ValueError, \"Cannot read into object array\",\n np.fromfile, f, dtype=object)\n\n assert_raises_regex(ValueError, \"Cannot read into object array\",\n np.fromfile, self.filename, dtype=object)\n\n def test_fromfile_offset(self):\n with open(self.filename, 'wb') as f:\n self.x.tofile(f)\n\n with open(self.filename, 'rb') as f:\n y = np.fromfile(f, dtype=self.dtype, offset=0)\n assert_array_equal(y, self.x.flat)\n\n with open(self.filename, 'rb') as f:\n count_items = len(self.x.flat) // 8\n offset_items = len(self.x.flat) // 4\n offset_bytes = self.dtype.itemsize * offset_items\n y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes)\n assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items])\n\n # subsequent seeks should stack\n offset_bytes = self.dtype.itemsize\n z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes)\n assert_array_equal(z, self.x.flat[offset_items+count_items+1:])\n\n with open(self.filename, 'wb') as f:\n self.x.tofile(f, sep=\",\")\n\n with open(self.filename, 'rb') as f:\n assert_raises_regex(\n TypeError,\n \"'offset' argument only permitted for binary files\",\n np.fromfile, self.filename, dtype=self.dtype,\n sep=\",\", offset=1)\n\n def _check_from(self, s, value, **kw):\n if 'sep' not in kw:\n y = np.frombuffer(s, **kw)\n else:\n y = np.fromstring(s, **kw)\n assert_array_equal(y, value)\n\n f = open(self.filename, 'wb')\n f.write(s)\n f.close()\n y = np.fromfile(self.filename, **kw)\n assert_array_equal(y, value)\n\n def test_nan(self):\n self._check_from(\n b\"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)\",\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n sep=' ')\n\n def test_inf(self):\n self._check_from(\n b\"inf +inf -inf infinity -Infinity iNfInItY -inF\",\n [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],\n sep=' ')\n\n def test_numbers(self):\n self._check_from(b\"1.234 -1.234 .3 .3e55 -123133.1231e+133\",\n [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')\n\n def test_binary(self):\n self._check_from(b'\\x00\\x00\\x80?\\x00\\x00\\x00@\\x00\\x00@@\\x00\\x00\\x80@',\n np.array([1, 2, 3, 4]),\n dtype='<f4')\n\n @pytest.mark.slow # takes > 1 minute on mechanical hard drive\n def test_big_binary(self):\n \"\"\"Test workarounds for 32-bit limited fwrite, fseek, and ftell\n calls in windows. 
These normally would hang doing something like this.\n See http://projects.scipy.org/numpy/ticket/1660\"\"\"\n if sys.platform != 'win32':\n return\n try:\n # before workarounds, only up to 2**32-1 worked\n fourgbplus = 2**32 + 2**16\n testbytes = np.arange(8, dtype=np.int8)\n n = len(testbytes)\n flike = tempfile.NamedTemporaryFile()\n f = flike.file\n np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)\n flike.seek(0)\n a = np.fromfile(f, dtype=np.int8)\n flike.close()\n assert_(len(a) == fourgbplus)\n # check only start and end for speed:\n assert_((a[:n] == testbytes).all())\n assert_((a[-n:] == testbytes).all())\n except (MemoryError, ValueError):\n pass\n\n def test_string(self):\n self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')\n\n def test_counted_string(self):\n self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')\n self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')\n self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')\n\n def test_string_with_ws(self):\n self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')\n\n def test_counted_string_with_ws(self):\n self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,\n sep=' ')\n\n def test_ascii(self):\n self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')\n self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')\n\n def test_malformed(self):\n with assert_warns(DeprecationWarning):\n self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')\n\n def test_long_sep(self):\n self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')\n\n def test_dtype(self):\n v = np.array([1, 2, 3, 4], dtype=np.int_)\n self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)\n\n def test_dtype_bool(self):\n # can't use _check_from because fromstring can't handle True/False\n v = np.array([True, False, True, False], dtype=np.bool_)\n s = b'1,0,-2.3,0'\n f = open(self.filename, 'wb')\n f.write(s)\n f.close()\n y = np.fromfile(self.filename, sep=',', dtype=np.bool_)\n assert_(y.dtype == '?')\n assert_array_equal(y, v)\n\n def test_tofile_sep(self):\n x = np.array([1.51, 2, 3.51, 4], dtype=float)\n f = open(self.filename, 'w')\n x.tofile(f, sep=',')\n f.close()\n f = open(self.filename, 'r')\n s = f.read()\n f.close()\n #assert_equal(s, '1.51,2.0,3.51,4.0')\n y = np.array([float(p) for p in s.split(',')])\n assert_array_equal(x,y)\n\n def test_tofile_format(self):\n x = np.array([1.51, 2, 3.51, 4], dtype=float)\n f = open(self.filename, 'w')\n x.tofile(f, sep=',', format='%.2f')\n f.close()\n f = open(self.filename, 'r')\n s = f.read()\n f.close()\n assert_equal(s, '1.51,2.00,3.51,4.00')\n\n def test_locale(self):\n with CommaDecimalPointLocale():\n self.test_numbers()\n self.test_nan()\n self.test_inf()\n self.test_counted_string()\n self.test_ascii()\n self.test_malformed()\n self.test_tofile_sep()\n self.test_tofile_format()\n\n def test_fromfile_subarray_binary(self):\n # Test subarray dtypes which are absorbed into the shape\n x = np.arange(24, dtype=\"i4\").reshape(2, 3, 4)\n x.tofile(self.filename)\n res = np.fromfile(self.filename, dtype=\"(3,4)i4\")\n assert_array_equal(x, res)\n\n x_str = x.tobytes()\n with assert_warns(DeprecationWarning):\n # binary fromstring is deprecated\n res = np.fromstring(x_str, dtype=\"(3,4)i4\")\n assert_array_equal(x, res)\n\n\nclass TestFromBuffer:\n @pytest.mark.parametrize('byteorder', ['<', '>'])\n @pytest.mark.parametrize('dtype', [float, int, complex])\n def test_basic(self, byteorder, dtype):\n dt = 
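# --- Illustrative sketch (not part of the test module above). np.frombuffer
# --- reinterprets an existing bytes-like object without copying; the dtype,
# --- including an explicit byte order, decides how the raw bytes are viewed.
# --- The values below are arbitrary.
import numpy as np

x = np.array([1, 2, 3], dtype=np.dtype(np.int32).newbyteorder(">"))
buf = x.tobytes()

y = np.frombuffer(buf, dtype=">i4")
assert np.array_equal(y, [1, 2, 3])
# The result borrows the buffer's memory, so it is read-only.
assert not y.flags.writeable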
np.dtype(dtype).newbyteorder(byteorder)\n x = (np.random.random((4, 7)) * 5).astype(dt)\n buf = x.tobytes()\n assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)\n\n def test_empty(self):\n assert_array_equal(np.frombuffer(b''), np.array([]))\n\n\nclass TestFlat:\n def setup(self):\n a0 = np.arange(20.0)\n a = a0.reshape(4, 5)\n a0.shape = (4, 5)\n a.flags.writeable = False\n self.a = a\n self.b = a[::2, ::2]\n self.a0 = a0\n self.b0 = a0[::2, ::2]\n\n def test_contiguous(self):\n testpassed = False\n try:\n self.a.flat[12] = 100.0\n except ValueError:\n testpassed = True\n assert_(testpassed)\n assert_(self.a.flat[12] == 12.0)\n\n def test_discontiguous(self):\n testpassed = False\n try:\n self.b.flat[4] = 100.0\n except ValueError:\n testpassed = True\n assert_(testpassed)\n assert_(self.b.flat[4] == 12.0)\n\n def test___array__(self):\n c = self.a.flat.__array__()\n d = self.b.flat.__array__()\n e = self.a0.flat.__array__()\n f = self.b0.flat.__array__()\n\n assert_(c.flags.writeable is False)\n assert_(d.flags.writeable is False)\n # for 1.14 all are set to non-writeable on the way to replacing the\n # UPDATEIFCOPY array returned for non-contiguous arrays.\n assert_(e.flags.writeable is True)\n assert_(f.flags.writeable is False)\n with assert_warns(DeprecationWarning):\n assert_(c.flags.updateifcopy is False)\n with assert_warns(DeprecationWarning):\n assert_(d.flags.updateifcopy is False)\n with assert_warns(DeprecationWarning):\n assert_(e.flags.updateifcopy is False)\n with assert_warns(DeprecationWarning):\n # UPDATEIFCOPY is removed.\n assert_(f.flags.updateifcopy is False)\n assert_(c.flags.writebackifcopy is False)\n assert_(d.flags.writebackifcopy is False)\n assert_(e.flags.writebackifcopy is False)\n assert_(f.flags.writebackifcopy is False)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_refcount(self):\n # includes regression test for reference count error gh-13165\n inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]\n indtype = np.dtype(np.intp)\n rc_indtype = sys.getrefcount(indtype)\n for ind in inds:\n rc_ind = sys.getrefcount(ind)\n for _ in range(100):\n try:\n self.a.flat[ind]\n except IndexError:\n pass\n assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)\n assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)\n\n\nclass TestResize:\n\n @_no_tracing\n def test_basic(self):\n x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n if IS_PYPY:\n x.resize((5, 5), refcheck=False)\n else:\n x.resize((5, 5))\n assert_array_equal(x.flat[:9],\n np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)\n assert_array_equal(x[9:].flat, 0)\n\n def test_check_reference(self):\n x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n y = x\n assert_raises(ValueError, x.resize, (5, 1))\n del y # avoid pyflakes unused variable warning.\n\n @_no_tracing\n def test_int_shape(self):\n x = np.eye(3)\n if IS_PYPY:\n x.resize(3, refcheck=False)\n else:\n x.resize(3)\n assert_array_equal(x, np.eye(3)[0,:])\n\n def test_none_shape(self):\n x = np.eye(3)\n x.resize(None)\n assert_array_equal(x, np.eye(3))\n x.resize()\n assert_array_equal(x, np.eye(3))\n\n def test_0d_shape(self):\n # to it multiple times to test it does not break alloc cache gh-9216\n for i in range(10):\n x = np.empty((1,))\n x.resize(())\n assert_equal(x.shape, ())\n assert_equal(x.size, 1)\n x = np.empty(())\n x.resize((1,))\n assert_equal(x.shape, (1,))\n assert_equal(x.size, 1)\n\n def test_invalid_arguments(self):\n assert_raises(TypeError, np.eye(3).resize, 'hi')\n 
assert_raises(ValueError, np.eye(3).resize, -1)\n assert_raises(TypeError, np.eye(3).resize, order=1)\n assert_raises(TypeError, np.eye(3).resize, refcheck='hi')\n\n @_no_tracing\n def test_freeform_shape(self):\n x = np.eye(3)\n if IS_PYPY:\n x.resize(3, 2, 1, refcheck=False)\n else:\n x.resize(3, 2, 1)\n assert_(x.shape == (3, 2, 1))\n\n @_no_tracing\n def test_zeros_appended(self):\n x = np.eye(3)\n if IS_PYPY:\n x.resize(2, 3, 3, refcheck=False)\n else:\n x.resize(2, 3, 3)\n assert_array_equal(x[0], np.eye(3))\n assert_array_equal(x[1], np.zeros((3, 3)))\n\n @_no_tracing\n def test_obj_obj(self):\n # check memory is initialized on resize, gh-4857\n a = np.ones(10, dtype=[('k', object, 2)])\n if IS_PYPY:\n a.resize(15, refcheck=False)\n else:\n a.resize(15,)\n assert_equal(a.shape, (15,))\n assert_array_equal(a['k'][-5:], 0)\n assert_array_equal(a['k'][:-5], 1)\n\n def test_empty_view(self):\n # check that sizes containing a zero don't trigger a reallocate for\n # already empty arrays\n x = np.zeros((10, 0), int)\n x_view = x[...]\n x_view.resize((0, 10))\n x_view.resize((0, 100))\n\n def test_check_weakref(self):\n x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n xref = weakref.ref(x)\n assert_raises(ValueError, x.resize, (5, 1))\n del xref # avoid pyflakes unused variable warning.\n\n\nclass TestRecord:\n def test_field_rename(self):\n dt = np.dtype([('f', float), ('i', int)])\n dt.names = ['p', 'q']\n assert_equal(dt.names, ['p', 'q'])\n\n def test_multiple_field_name_occurrence(self):\n def test_dtype_init():\n np.dtype([(\"A\", \"f8\"), (\"B\", \"f8\"), (\"A\", \"f8\")])\n\n # Error raised when multiple fields have the same name\n assert_raises(ValueError, test_dtype_init)\n\n @pytest.mark.skipif(sys.version_info[0] < 3, reason=\"Not Python 3\")\n def test_bytes_fields(self):\n # Bytes are not allowed in field names and not recognized in titles\n # on Py3\n assert_raises(TypeError, np.dtype, [(b'a', int)])\n assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])\n\n dt = np.dtype([((b'a', 'b'), int)])\n assert_raises(TypeError, dt.__getitem__, b'a')\n\n x = np.array([(1,), (2,), (3,)], dtype=dt)\n assert_raises(IndexError, x.__getitem__, b'a')\n\n y = x[0]\n assert_raises(IndexError, y.__getitem__, b'a')\n\n @pytest.mark.skipif(sys.version_info[0] < 3, reason=\"Not Python 3\")\n def test_multiple_field_name_unicode(self):\n def test_dtype_unicode():\n np.dtype([(\"\\u20B9\", \"f8\"), (\"B\", \"f8\"), (\"\\u20B9\", \"f8\")])\n\n # Error raised when multiple fields have the same name(unicode included)\n assert_raises(ValueError, test_dtype_unicode)\n\n @pytest.mark.skipif(sys.version_info[0] >= 3, reason=\"Not Python 2\")\n def test_unicode_field_titles(self):\n # Unicode field titles are added to field dict on Py2\n title = u'b'\n dt = np.dtype([((title, 'a'), int)])\n dt[title]\n dt['a']\n x = np.array([(1,), (2,), (3,)], dtype=dt)\n x[title]\n x['a']\n y = x[0]\n y[title]\n y['a']\n\n @pytest.mark.skipif(sys.version_info[0] >= 3, reason=\"Not Python 2\")\n def test_unicode_field_names(self):\n # Unicode field names are converted to ascii on Python 2:\n encodable_name = u'b'\n assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')\n assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')\n\n # But raises UnicodeEncodeError if it can't be encoded:\n nonencodable_name = u'\\uc3bc'\n assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])\n assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])\n\n def 
test_fromarrays_unicode(self):\n # A single name string provided to fromarrays() is allowed to be unicode\n # on both Python 2 and 3:\n x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')\n assert_equal(x['a'][0], 0)\n assert_equal(x['b'][0], 1)\n\n def test_unicode_order(self):\n # Test that we can sort with order as a unicode field name in both Python 2 and\n # 3:\n name = u'b'\n x = np.array([1, 3, 2], dtype=[(name, int)])\n x.sort(order=name)\n assert_equal(x[u'b'], np.array([1, 2, 3]))\n\n def test_field_names(self):\n # Test unicode and 8-bit / byte strings can be used\n a = np.zeros((1,), dtype=[('f1', 'i4'),\n ('f2', 'i4'),\n ('f3', [('sf1', 'i4')])])\n is_py3 = sys.version_info[0] >= 3\n if is_py3:\n funcs = (str,)\n # byte string indexing fails gracefully\n assert_raises(IndexError, a.__setitem__, b'f1', 1)\n assert_raises(IndexError, a.__getitem__, b'f1')\n assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)\n assert_raises(IndexError, a['f1'].__getitem__, b'sf1')\n else:\n funcs = (str, unicode)\n for func in funcs:\n b = a.copy()\n fn1 = func('f1')\n b[fn1] = 1\n assert_equal(b[fn1], 1)\n fnn = func('not at all')\n assert_raises(ValueError, b.__setitem__, fnn, 1)\n assert_raises(ValueError, b.__getitem__, fnn)\n b[0][fn1] = 2\n assert_equal(b[fn1], 2)\n # Subfield\n assert_raises(ValueError, b[0].__setitem__, fnn, 1)\n assert_raises(ValueError, b[0].__getitem__, fnn)\n # Subfield\n fn3 = func('f3')\n sfn1 = func('sf1')\n b[fn3][sfn1] = 1\n assert_equal(b[fn3][sfn1], 1)\n assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)\n assert_raises(ValueError, b[fn3].__getitem__, fnn)\n # multiple subfields\n fn2 = func('f2')\n b[fn2] = 3\n\n assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))\n assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))\n assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))\n\n # non-ascii unicode field indexing is well behaved\n if not is_py3:\n pytest.skip('non ascii unicode field indexing skipped; '\n 'raises segfault on python 2.x')\n else:\n assert_raises(ValueError, a.__setitem__, u'\\u03e0', 1)\n assert_raises(ValueError, a.__getitem__, u'\\u03e0')\n\n def test_record_hash(self):\n a = np.array([(1, 2), (1, 2)], dtype='i1,i2')\n a.flags.writeable = False\n b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])\n b.flags.writeable = False\n c = np.array([(1, 2), (3, 4)], dtype='i1,i2')\n c.flags.writeable = False\n assert_(hash(a[0]) == hash(a[1]))\n assert_(hash(a[0]) == hash(b[0]))\n assert_(hash(a[0]) != hash(b[1]))\n assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])\n\n def test_record_no_hash(self):\n a = np.array([(1, 2), (1, 2)], dtype='i1,i2')\n assert_raises(TypeError, hash, a[0])\n\n def test_empty_structure_creation(self):\n # make sure these do not raise errors (gh-5631)\n np.array([()], dtype={'names': [], 'formats': [],\n 'offsets': [], 'itemsize': 12})\n np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],\n 'offsets': [], 'itemsize': 12})\n\n def test_multifield_indexing_view(self):\n a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])\n v = a[['a', 'c']]\n assert_(v.base is a)\n assert_(v.dtype == np.dtype({'names': ['a', 'c'],\n 'formats': ['i4', 'u4'],\n 'offsets': [0, 8]}))\n v[:] = (4,5)\n assert_equal(a[0].item(), (4, 1, 5))\n\nclass TestView:\n def test_basic(self):\n x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],\n dtype=[('r', np.int8), ('g', np.int8),\n ('b', np.int8), ('a', np.int8)])\n # We must be specific about the endianness here:\n y = x.view(dtype='<i4')\n # 
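# --- Illustrative sketch (not part of the test module above) of the
# --- structured-array behaviour TestRecord checks: fields are addressed by
# --- str name, and dtype.names can be reassigned to rename them.
# --- Field names and values are arbitrary.
import numpy as np

a = np.zeros(3, dtype=[("f1", "i4"), ("f2", "f8")])
a["f1"] = [1, 2, 3]
assert a["f1"].tolist() == [1, 2, 3]

dt = np.dtype([("f", float), ("i", int)])
dt.names = ["p", "q"]                  # cf. test_field_rename
assert dt.names == ("p", "q")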
... and again without the keyword.\n z = x.view('<i4')\n assert_array_equal(y, z)\n assert_array_equal(y, [67305985, 134678021])\n\n\ndef _mean(a, **args):\n return a.mean(**args)\n\n\ndef _var(a, **args):\n return a.var(**args)\n\n\ndef _std(a, **args):\n return a.std(**args)\n\n\nclass TestStats:\n\n funcs = [_mean, _var, _std]\n\n def setup(self):\n np.random.seed(range(3))\n self.rmat = np.random.random((4, 5))\n self.cmat = self.rmat + 1j * self.rmat\n self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])\n self.omat = self.omat.reshape(4, 5)\n\n def test_python_type(self):\n for x in (np.float16(1.), 1, 1., 1+0j):\n assert_equal(np.mean([x]), 1.)\n assert_equal(np.std([x]), 0.)\n assert_equal(np.var([x]), 0.)\n\n def test_keepdims(self):\n mat = np.eye(3)\n for f in self.funcs:\n for axis in [0, 1]:\n res = f(mat, axis=axis, keepdims=True)\n assert_(res.ndim == mat.ndim)\n assert_(res.shape[axis] == 1)\n for axis in [None]:\n res = f(mat, axis=axis, keepdims=True)\n assert_(res.shape == (1, 1))\n\n def test_out(self):\n mat = np.eye(3)\n for f in self.funcs:\n out = np.zeros(3)\n tgt = f(mat, axis=1)\n res = f(mat, axis=1, out=out)\n assert_almost_equal(res, out)\n assert_almost_equal(res, tgt)\n out = np.empty(2)\n assert_raises(ValueError, f, mat, axis=1, out=out)\n out = np.empty((2, 2))\n assert_raises(ValueError, f, mat, axis=1, out=out)\n\n def test_dtype_from_input(self):\n\n icodes = np.typecodes['AllInteger']\n fcodes = np.typecodes['AllFloat']\n\n # object type\n for f in self.funcs:\n mat = np.array([[Decimal(1)]*3]*3)\n tgt = mat.dtype.type\n res = f(mat, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n res = type(f(mat, axis=None))\n assert_(res is Decimal)\n\n # integer types\n for f in self.funcs:\n for c in icodes:\n mat = np.eye(3, dtype=c)\n tgt = np.float64\n res = f(mat, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n res = f(mat, axis=None).dtype.type\n assert_(res is tgt)\n\n # mean for float types\n for f in [_mean]:\n for c in fcodes:\n mat = np.eye(3, dtype=c)\n tgt = mat.dtype.type\n res = f(mat, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n res = f(mat, axis=None).dtype.type\n assert_(res is tgt)\n\n # var, std for float types\n for f in [_var, _std]:\n for c in fcodes:\n mat = np.eye(3, dtype=c)\n # deal with complex types\n tgt = mat.real.dtype.type\n res = f(mat, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n res = f(mat, axis=None).dtype.type\n assert_(res is tgt)\n\n def test_dtype_from_dtype(self):\n mat = np.eye(3)\n\n # stats for integer types\n # FIXME:\n # this needs definition as there are lots places along the line\n # where type casting may take place.\n\n # for f in self.funcs:\n # for c in np.typecodes['AllInteger']:\n # tgt = np.dtype(c).type\n # res = f(mat, axis=1, dtype=c).dtype.type\n # assert_(res is tgt)\n # # scalar case\n # res = f(mat, axis=None, dtype=c).dtype.type\n # assert_(res is tgt)\n\n # stats for float types\n for f in self.funcs:\n for c in np.typecodes['AllFloat']:\n tgt = np.dtype(c).type\n res = f(mat, axis=1, dtype=c).dtype.type\n assert_(res is tgt)\n # scalar case\n res = f(mat, axis=None, dtype=c).dtype.type\n assert_(res is tgt)\n\n def test_ddof(self):\n for f in [_var]:\n for ddof in range(3):\n dim = self.rmat.shape[1]\n tgt = f(self.rmat, axis=1) * dim\n res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)\n for f in [_std]:\n for ddof in range(3):\n dim = self.rmat.shape[1]\n tgt = f(self.rmat, axis=1) * np.sqrt(dim)\n res = f(self.rmat, axis=1, 
ddof=ddof) * np.sqrt(dim - ddof)\n assert_almost_equal(res, tgt)\n assert_almost_equal(res, tgt)\n\n def test_ddof_too_big(self):\n dim = self.rmat.shape[1]\n for f in [_var, _std]:\n for ddof in range(dim, dim + 2):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(self.rmat, axis=1, ddof=ddof)\n assert_(not (res < 0).any())\n assert_(len(w) > 0)\n assert_(issubclass(w[0].category, RuntimeWarning))\n\n def test_empty(self):\n A = np.zeros((0, 3))\n for f in self.funcs:\n for axis in [0, None]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_(np.isnan(f(A, axis=axis)).all())\n assert_(len(w) > 0)\n assert_(issubclass(w[0].category, RuntimeWarning))\n for axis in [1]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_equal(f(A, axis=axis), np.zeros([]))\n\n def test_mean_values(self):\n for mat in [self.rmat, self.cmat, self.omat]:\n for axis in [0, 1]:\n tgt = mat.sum(axis=axis)\n res = _mean(mat, axis=axis) * mat.shape[axis]\n assert_almost_equal(res, tgt)\n for axis in [None]:\n tgt = mat.sum(axis=axis)\n res = _mean(mat, axis=axis) * np.prod(mat.shape)\n assert_almost_equal(res, tgt)\n\n def test_mean_float16(self):\n # This fail if the sum inside mean is done in float16 instead\n # of float32.\n assert_(_mean(np.ones(100000, dtype='float16')) == 1)\n\n def test_var_values(self):\n for mat in [self.rmat, self.cmat, self.omat]:\n for axis in [0, 1, None]:\n msqr = _mean(mat * mat.conj(), axis=axis)\n mean = _mean(mat, axis=axis)\n tgt = msqr - mean * mean.conjugate()\n res = _var(mat, axis=axis)\n assert_almost_equal(res, tgt)\n\n def test_std_values(self):\n for mat in [self.rmat, self.cmat, self.omat]:\n for axis in [0, 1, None]:\n tgt = np.sqrt(_var(mat, axis=axis))\n res = _std(mat, axis=axis)\n assert_almost_equal(res, tgt)\n\n def test_subclass(self):\n class TestArray(np.ndarray):\n def __new__(cls, data, info):\n result = np.array(data)\n result = result.view(cls)\n result.info = info\n return result\n\n def __array_finalize__(self, obj):\n self.info = getattr(obj, \"info\", '')\n\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\n res = dat.mean(1)\n assert_(res.info == dat.info)\n res = dat.std(1)\n assert_(res.info == dat.info)\n res = dat.var(1)\n assert_(res.info == dat.info)\n\nclass TestVdot:\n def test_basic(self):\n dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']\n dt_complex = np.typecodes['Complex']\n\n # test real\n a = np.eye(3)\n for dt in dt_numeric + 'O':\n b = a.astype(dt)\n res = np.vdot(b, b)\n assert_(np.isscalar(res))\n assert_equal(np.vdot(b, b), 3)\n\n # test complex\n a = np.eye(3) * 1j\n for dt in dt_complex + 'O':\n b = a.astype(dt)\n res = np.vdot(b, b)\n assert_(np.isscalar(res))\n assert_equal(np.vdot(b, b), 3)\n\n # test boolean\n b = np.eye(3, dtype=bool)\n res = np.vdot(b, b)\n assert_(np.isscalar(res))\n assert_equal(np.vdot(b, b), True)\n\n def test_vdot_array_order(self):\n a = np.array([[1, 2], [3, 4]], order='C')\n b = np.array([[1, 2], [3, 4]], order='F')\n res = np.vdot(a, a)\n\n # integer arrays are exact\n assert_equal(np.vdot(a, b), res)\n assert_equal(np.vdot(b, a), res)\n assert_equal(np.vdot(b, b), res)\n\n def test_vdot_uncontiguous(self):\n for size in [2, 1000]:\n # Different sizes match different branches in vdot.\n a = np.zeros((size, 2, 2))\n b = np.zeros((size, 2, 2))\n a[:, 0, 0] = np.arange(size)\n b[:, 0, 0] = np.arange(size) + 1\n # Make a and b uncontiguous:\n a = 
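# --- Illustrative sketch (not part of the test module above). np.vdot
# --- flattens its arguments and conjugates the first one, which is what
# --- separates it from np.dot on complex inputs. Values are arbitrary.
import numpy as np

a = np.array([1 + 2j, 3 + 4j])
b = np.array([5 + 6j, 7 + 8j])

assert np.vdot(a, b) == np.sum(a.conj() * b)
assert np.vdot(a, b) != np.dot(a, b)   # dot does not conjugate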
a[..., 0]\n b = b[..., 0]\n\n assert_equal(np.vdot(a, b),\n np.vdot(a.flatten(), b.flatten()))\n assert_equal(np.vdot(a, b.copy()),\n np.vdot(a.flatten(), b.flatten()))\n assert_equal(np.vdot(a.copy(), b),\n np.vdot(a.flatten(), b.flatten()))\n assert_equal(np.vdot(a.copy('F'), b),\n np.vdot(a.flatten(), b.flatten()))\n assert_equal(np.vdot(a, b.copy('F')),\n np.vdot(a.flatten(), b.flatten()))\n\n\nclass TestDot:\n def setup(self):\n np.random.seed(128)\n self.A = np.random.rand(4, 2)\n self.b1 = np.random.rand(2, 1)\n self.b2 = np.random.rand(2)\n self.b3 = np.random.rand(1, 2)\n self.b4 = np.random.rand(4)\n self.N = 7\n\n def test_dotmatmat(self):\n A = self.A\n res = np.dot(A.transpose(), A)\n tgt = np.array([[1.45046013, 0.86323640],\n [0.86323640, 0.84934569]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotmatvec(self):\n A, b1 = self.A, self.b1\n res = np.dot(A, b1)\n tgt = np.array([[0.32114320], [0.04889721],\n [0.15696029], [0.33612621]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotmatvec2(self):\n A, b2 = self.A, self.b2\n res = np.dot(A, b2)\n tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecmat(self):\n A, b4 = self.A, self.b4\n res = np.dot(b4, A)\n tgt = np.array([1.23495091, 1.12222648])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecmat2(self):\n b3, A = self.b3, self.A\n res = np.dot(b3, A.transpose())\n tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecmat3(self):\n A, b4 = self.A, self.b4\n res = np.dot(A.transpose(), b4)\n tgt = np.array([1.23495091, 1.12222648])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecvecouter(self):\n b1, b3 = self.b1, self.b3\n res = np.dot(b1, b3)\n tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecvecinner(self):\n b1, b3 = self.b1, self.b3\n res = np.dot(b3, b1)\n tgt = np.array([[ 0.23129668]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotcolumnvect1(self):\n b1 = np.ones((3, 1))\n b2 = [5.3]\n res = np.dot(b1, b2)\n tgt = np.array([5.3, 5.3, 5.3])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotcolumnvect2(self):\n b1 = np.ones((3, 1)).transpose()\n b2 = [6.2]\n res = np.dot(b2, b1)\n tgt = np.array([6.2, 6.2, 6.2])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecscalar(self):\n np.random.seed(100)\n b1 = np.random.rand(1, 1)\n b2 = np.random.rand(1, 4)\n res = np.dot(b1, b2)\n tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_dotvecscalar2(self):\n np.random.seed(100)\n b1 = np.random.rand(4, 1)\n b2 = np.random.rand(1, 1)\n res = np.dot(b1, b2)\n tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_all(self):\n dims = [(), (1,), (1, 1)]\n dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]\n for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):\n b1 = np.zeros(dim1)\n b2 = np.zeros(dim2)\n res = np.dot(b1, b2)\n tgt = np.zeros(dim)\n assert_(res.shape == tgt.shape)\n assert_almost_equal(res, tgt, decimal=self.N)\n\n def test_vecobject(self):\n class Vec:\n def __init__(self, sequence=None):\n if sequence is None:\n sequence = []\n self.array = np.array(sequence)\n\n def 
__add__(self, other):\n out = Vec()\n out.array = self.array + other.array\n return out\n\n def __sub__(self, other):\n out = Vec()\n out.array = self.array - other.array\n return out\n\n def __mul__(self, other): # with scalar\n out = Vec(self.array.copy())\n out.array *= other\n return out\n\n def __rmul__(self, other):\n return self*other\n\n U_non_cont = np.transpose([[1., 1.], [1., 2.]])\n U_cont = np.ascontiguousarray(U_non_cont)\n x = np.array([Vec([1., 0.]), Vec([0., 1.])])\n zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])\n zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)\n assert_equal(zeros[0].array, zeros_test[0].array)\n assert_equal(zeros[1].array, zeros_test[1].array)\n\n def test_dot_2args(self):\n from numpy.core.multiarray import dot\n\n a = np.array([[1, 2], [3, 4]], dtype=float)\n b = np.array([[1, 0], [1, 1]], dtype=float)\n c = np.array([[3, 2], [7, 4]], dtype=float)\n\n d = dot(a, b)\n assert_allclose(c, d)\n\n def test_dot_3args(self):\n from numpy.core.multiarray import dot\n\n np.random.seed(22)\n f = np.random.random_sample((1024, 16))\n v = np.random.random_sample((16, 32))\n\n r = np.empty((1024, 32))\n for i in range(12):\n dot(f, v, r)\n if HAS_REFCOUNT:\n assert_equal(sys.getrefcount(r), 2)\n r2 = dot(f, v, out=None)\n assert_array_equal(r2, r)\n assert_(r is dot(f, v, out=r))\n\n v = v[:, 0].copy() # v.shape == (16,)\n r = r[:, 0].copy() # r.shape == (1024,)\n r2 = dot(f, v)\n assert_(r is dot(f, v, r))\n assert_array_equal(r2, r)\n\n def test_dot_3args_errors(self):\n from numpy.core.multiarray import dot\n\n np.random.seed(22)\n f = np.random.random_sample((1024, 16))\n v = np.random.random_sample((16, 32))\n\n r = np.empty((1024, 31))\n assert_raises(ValueError, dot, f, v, r)\n\n r = np.empty((1024,))\n assert_raises(ValueError, dot, f, v, r)\n\n r = np.empty((32,))\n assert_raises(ValueError, dot, f, v, r)\n\n r = np.empty((32, 1024))\n assert_raises(ValueError, dot, f, v, r)\n assert_raises(ValueError, dot, f, v, r.T)\n\n r = np.empty((1024, 64))\n assert_raises(ValueError, dot, f, v, r[:, ::2])\n assert_raises(ValueError, dot, f, v, r[:, :32])\n\n r = np.empty((1024, 32), dtype=np.float32)\n assert_raises(ValueError, dot, f, v, r)\n\n r = np.empty((1024, 32), dtype=int)\n assert_raises(ValueError, dot, f, v, r)\n\n def test_dot_array_order(self):\n a = np.array([[1, 2], [3, 4]], order='C')\n b = np.array([[1, 2], [3, 4]], order='F')\n res = np.dot(a, a)\n\n # integer arrays are exact\n assert_equal(np.dot(a, b), res)\n assert_equal(np.dot(b, a), res)\n assert_equal(np.dot(b, b), res)\n\n def test_accelerate_framework_sgemv_fix(self):\n\n def aligned_array(shape, align, dtype, order='C'):\n d = dtype(0)\n N = np.prod(shape)\n tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)\n address = tmp.__array_interface__[\"data\"][0]\n for offset in range(align):\n if (address + offset) % align == 0:\n break\n tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)\n return tmp.reshape(shape, order=order)\n\n def as_aligned(arr, align, dtype, order='C'):\n aligned = aligned_array(arr.shape, align, dtype, order)\n aligned[:] = arr[:]\n return aligned\n\n def assert_dot_close(A, X, desired):\n assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)\n\n m = aligned_array(100, 15, np.float32)\n s = aligned_array((100, 100), 15, np.float32)\n np.dot(s, m) # this will always segfault if the bug is present\n\n testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))\n for align, m, n, a_order in testdata:\n # Calculation in double precision\n 
A_d = np.random.rand(m, n)\n X_d = np.random.rand(n)\n desired = np.dot(A_d, X_d)\n # Calculation with aligned single precision\n A_f = as_aligned(A_d, align, np.float32, order=a_order)\n X_f = as_aligned(X_d, align, np.float32)\n assert_dot_close(A_f, X_f, desired)\n # Strided A rows\n A_d_2 = A_d[::2]\n desired = np.dot(A_d_2, X_d)\n A_f_2 = A_f[::2]\n assert_dot_close(A_f_2, X_f, desired)\n # Strided A columns, strided X vector\n A_d_22 = A_d_2[:, ::2]\n X_d_2 = X_d[::2]\n desired = np.dot(A_d_22, X_d_2)\n A_f_22 = A_f_2[:, ::2]\n X_f_2 = X_f[::2]\n assert_dot_close(A_f_22, X_f_2, desired)\n # Check the strides are as expected\n if a_order == 'F':\n assert_equal(A_f_22.strides, (8, 8 * m))\n else:\n assert_equal(A_f_22.strides, (8 * n, 8))\n assert_equal(X_f_2.strides, (8,))\n # Strides in A rows + cols only\n X_f_2c = as_aligned(X_f_2, align, np.float32)\n assert_dot_close(A_f_22, X_f_2c, desired)\n # Strides just in A cols\n A_d_12 = A_d[:, ::2]\n desired = np.dot(A_d_12, X_d_2)\n A_f_12 = A_f[:, ::2]\n assert_dot_close(A_f_12, X_f_2c, desired)\n # Strides in A cols and X\n assert_dot_close(A_f_12, X_f_2, desired)\n\n\nclass MatmulCommon:\n \"\"\"Common tests for '@' operator and numpy.matmul.\n\n \"\"\"\n # Should work with these types. Will want to add\n # \"O\" at some point\n types = \"?bhilqBHILQefdgFDGO\"\n\n def test_exceptions(self):\n dims = [\n ((1,), (2,)), # mismatched vector vector\n ((2, 1,), (2,)), # mismatched matrix vector\n ((2,), (1, 2)), # mismatched vector matrix\n ((1, 2), (3, 1)), # mismatched matrix matrix\n ((1,), ()), # vector scalar\n ((), (1)), # scalar vector\n ((1, 1), ()), # matrix scalar\n ((), (1, 1)), # scalar matrix\n ((2, 2, 1), (3, 1, 2)), # cannot broadcast\n ]\n\n for dt, (dm1, dm2) in itertools.product(self.types, dims):\n a = np.ones(dm1, dtype=dt)\n b = np.ones(dm2, dtype=dt)\n assert_raises(ValueError, self.matmul, a, b)\n\n def test_shapes(self):\n dims = [\n ((1, 1), (2, 1, 1)), # broadcast first argument\n ((2, 1, 1), (1, 1)), # broadcast second argument\n ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match\n ]\n\n for dt, (dm1, dm2) in itertools.product(self.types, dims):\n a = np.ones(dm1, dtype=dt)\n b = np.ones(dm2, dtype=dt)\n res = self.matmul(a, b)\n assert_(res.shape == (2, 1, 1))\n\n # vector vector returns scalars.\n for dt in self.types:\n a = np.ones((2,), dtype=dt)\n b = np.ones((2,), dtype=dt)\n c = self.matmul(a, b)\n assert_(np.array(c).shape == ())\n\n def test_result_types(self):\n mat = np.ones((1,1))\n vec = np.ones((1,))\n for dt in self.types:\n m = mat.astype(dt)\n v = vec.astype(dt)\n for arg in [(m, v), (v, m), (m, m)]:\n res = self.matmul(*arg)\n assert_(res.dtype == dt)\n\n # vector vector returns scalars\n if dt != \"O\":\n res = self.matmul(v, v)\n assert_(type(res) is np.dtype(dt).type)\n\n def test_scalar_output(self):\n vec1 = np.array([2])\n vec2 = np.array([3, 4]).reshape(1, -1)\n tgt = np.array([6, 8])\n for dt in self.types[1:]:\n v1 = vec1.astype(dt)\n v2 = vec2.astype(dt)\n res = self.matmul(v1, v2)\n assert_equal(res, tgt)\n res = self.matmul(v2.T, v1)\n assert_equal(res, tgt)\n\n # boolean type\n vec = np.array([True, True], dtype='?').reshape(1, -1)\n res = self.matmul(vec[:, 0], vec)\n assert_equal(res, True)\n\n def test_vector_vector_values(self):\n vec1 = np.array([1, 2])\n vec2 = np.array([3, 4]).reshape(-1, 1)\n tgt1 = np.array([11])\n tgt2 = np.array([[3, 6], [4, 8]])\n for dt in self.types[1:]:\n v1 = vec1.astype(dt)\n v2 = vec2.astype(dt)\n res = self.matmul(v1, v2)\n assert_equal(res, 
tgt1)\n # no broadcast, we must make v1 into a 2d ndarray\n res = self.matmul(v2, v1.reshape(1, -1))\n assert_equal(res, tgt2)\n\n # boolean type\n vec = np.array([True, True], dtype='?')\n res = self.matmul(vec, vec)\n assert_equal(res, True)\n\n def test_vector_matrix_values(self):\n vec = np.array([1, 2])\n mat1 = np.array([[1, 2], [3, 4]])\n mat2 = np.stack([mat1]*2, axis=0)\n tgt1 = np.array([7, 10])\n tgt2 = np.stack([tgt1]*2, axis=0)\n for dt in self.types[1:]:\n v = vec.astype(dt)\n m1 = mat1.astype(dt)\n m2 = mat2.astype(dt)\n res = self.matmul(v, m1)\n assert_equal(res, tgt1)\n res = self.matmul(v, m2)\n assert_equal(res, tgt2)\n\n # boolean type\n vec = np.array([True, False])\n mat1 = np.array([[True, False], [False, True]])\n mat2 = np.stack([mat1]*2, axis=0)\n tgt1 = np.array([True, False])\n tgt2 = np.stack([tgt1]*2, axis=0)\n\n res = self.matmul(vec, mat1)\n assert_equal(res, tgt1)\n res = self.matmul(vec, mat2)\n assert_equal(res, tgt2)\n\n def test_matrix_vector_values(self):\n vec = np.array([1, 2])\n mat1 = np.array([[1, 2], [3, 4]])\n mat2 = np.stack([mat1]*2, axis=0)\n tgt1 = np.array([5, 11])\n tgt2 = np.stack([tgt1]*2, axis=0)\n for dt in self.types[1:]:\n v = vec.astype(dt)\n m1 = mat1.astype(dt)\n m2 = mat2.astype(dt)\n res = self.matmul(m1, v)\n assert_equal(res, tgt1)\n res = self.matmul(m2, v)\n assert_equal(res, tgt2)\n\n # boolean type\n vec = np.array([True, False])\n mat1 = np.array([[True, False], [False, True]])\n mat2 = np.stack([mat1]*2, axis=0)\n tgt1 = np.array([True, False])\n tgt2 = np.stack([tgt1]*2, axis=0)\n\n res = self.matmul(vec, mat1)\n assert_equal(res, tgt1)\n res = self.matmul(vec, mat2)\n assert_equal(res, tgt2)\n\n def test_matrix_matrix_values(self):\n mat1 = np.array([[1, 2], [3, 4]])\n mat2 = np.array([[1, 0], [1, 1]])\n mat12 = np.stack([mat1, mat2], axis=0)\n mat21 = np.stack([mat2, mat1], axis=0)\n tgt11 = np.array([[7, 10], [15, 22]])\n tgt12 = np.array([[3, 2], [7, 4]])\n tgt21 = np.array([[1, 2], [4, 6]])\n tgt12_21 = np.stack([tgt12, tgt21], axis=0)\n tgt11_12 = np.stack((tgt11, tgt12), axis=0)\n tgt11_21 = np.stack((tgt11, tgt21), axis=0)\n for dt in self.types[1:]:\n m1 = mat1.astype(dt)\n m2 = mat2.astype(dt)\n m12 = mat12.astype(dt)\n m21 = mat21.astype(dt)\n\n # matrix @ matrix\n res = self.matmul(m1, m2)\n assert_equal(res, tgt12)\n res = self.matmul(m2, m1)\n assert_equal(res, tgt21)\n\n # stacked @ matrix\n res = self.matmul(m12, m1)\n assert_equal(res, tgt11_21)\n\n # matrix @ stacked\n res = self.matmul(m1, m12)\n assert_equal(res, tgt11_12)\n\n # stacked @ stacked\n res = self.matmul(m12, m21)\n assert_equal(res, tgt12_21)\n\n # boolean type\n m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)\n m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)\n m12 = np.stack([m1, m2], axis=0)\n m21 = np.stack([m2, m1], axis=0)\n tgt11 = m1\n tgt12 = m1\n tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)\n tgt12_21 = np.stack([tgt12, tgt21], axis=0)\n tgt11_12 = np.stack((tgt11, tgt12), axis=0)\n tgt11_21 = np.stack((tgt11, tgt21), axis=0)\n\n # matrix @ matrix\n res = self.matmul(m1, m2)\n assert_equal(res, tgt12)\n res = self.matmul(m2, m1)\n assert_equal(res, tgt21)\n\n # stacked @ matrix\n res = self.matmul(m12, m1)\n assert_equal(res, tgt11_21)\n\n # matrix @ stacked\n res = self.matmul(m1, m12)\n assert_equal(res, tgt11_12)\n\n # stacked @ stacked\n res = self.matmul(m12, m21)\n assert_equal(res, tgt12_21)\n\n\nclass TestMatmul(MatmulCommon):\n matmul = np.matmul\n\n def test_out_arg(self):\n a = np.ones((5, 2), dtype=float)\n b 
= np.array([[1, 3], [5, 7]], dtype=float)\n tgt = np.dot(a, b)\n\n # test as positional argument\n msg = \"out positional argument\"\n out = np.zeros((5, 2), dtype=float)\n self.matmul(a, b, out)\n assert_array_equal(out, tgt, err_msg=msg)\n\n # test as keyword argument\n msg = \"out keyword argument\"\n out = np.zeros((5, 2), dtype=float)\n self.matmul(a, b, out=out)\n assert_array_equal(out, tgt, err_msg=msg)\n\n # test out with not allowed type cast (safe casting)\n msg = \"Cannot cast ufunc .* output\"\n out = np.zeros((5, 2), dtype=np.int32)\n assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)\n\n # test out with type upcast to complex\n out = np.zeros((5, 2), dtype=np.complex128)\n c = self.matmul(a, b, out=out)\n assert_(c is out)\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning, '')\n c = c.astype(tgt.dtype)\n assert_array_equal(c, tgt)\n\n def test_out_contiguous(self):\n a = np.ones((5, 2), dtype=float)\n b = np.array([[1, 3], [5, 7]], dtype=float)\n v = np.array([1, 3], dtype=float)\n tgt = np.dot(a, b)\n tgt_mv = np.dot(a, v)\n\n # test out non-contiguous\n out = np.ones((5, 2, 2), dtype=float)\n c = self.matmul(a, b, out=out[..., 0])\n assert c.base is out\n assert_array_equal(c, tgt)\n c = self.matmul(a, v, out=out[:, 0, 0])\n assert_array_equal(c, tgt_mv)\n c = self.matmul(v, a.T, out=out[:, 0, 0])\n assert_array_equal(c, tgt_mv)\n\n # test out contiguous in only last dim\n out = np.ones((10, 2), dtype=float)\n c = self.matmul(a, b, out=out[::2, :])\n assert_array_equal(c, tgt)\n\n # test transposes of out, args\n out = np.ones((5, 2), dtype=float)\n c = self.matmul(b.T, a.T, out=out.T)\n assert_array_equal(out, tgt)\n\n m1 = np.arange(15.).reshape(5, 3)\n m2 = np.arange(21.).reshape(3, 7)\n m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous\n vc = np.arange(10.)\n vr = np.arange(6.)\n m0 = np.zeros((3, 0))\n @pytest.mark.parametrize('args', (\n # matrix-matrix\n (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),\n # matrix-matrix-transpose, contiguous and non\n (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),\n (m3, m3.T), (m3.T, m3),\n # matrix-matrix non-contiguous\n (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),\n # vector-matrix, matrix-vector, contiguous\n (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),\n # vector-matrix, matrix-vector, vector non-contiguous\n (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),\n # vector-matrix, matrix-vector, matrix non-contiguous\n (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),\n # vector-matrix, matrix-vector, both non-contiguous\n (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),\n # size == 0\n (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),\n ))\n def test_dot_equivalent(self, args):\n r1 = np.matmul(*args)\n r2 = np.dot(*args)\n assert_equal(r1, r2)\n\n r3 = np.matmul(args[0].copy(), args[1].copy())\n assert_equal(r1, r3)\n\n def test_matmul_object(self):\n import fractions\n\n f = np.vectorize(fractions.Fraction)\n def random_ints():\n return np.random.randint(1, 1000, size=(10, 3, 3))\n M1 = f(random_ints(), random_ints())\n M2 = f(random_ints(), random_ints())\n\n M3 = self.matmul(M1, M2)\n\n [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]\n\n assert_allclose(N3, self.matmul(N1, N2))\n\n def test_matmul_object_type_scalar(self):\n from fractions import Fraction as F\n v = np.array([F(2,3), F(5,7)])\n res = self.matmul(v, v)\n assert_(type(res) is F)\n\n def test_matmul_empty(self):\n a = np.empty((3, 0), 
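# --- Illustrative sketch (not part of the test module above). The out=
# --- keyword of np.matmul writes into a preallocated array, and the @
# --- operator dispatches to the same machinery; for 2-D inputs the result
# --- matches np.dot. Shapes and values are arbitrary.
import numpy as np

a = np.ones((5, 2))
b = np.array([[1.0, 3.0], [5.0, 7.0]])

out = np.zeros((5, 2))
res = np.matmul(a, b, out=out)
assert res is out
assert np.array_equal(out, a @ b)
assert np.array_equal(a @ b, np.dot(a, b))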
dtype=object)\n b = np.empty((0, 3), dtype=object)\n c = np.zeros((3, 3))\n assert_array_equal(np.matmul(a, b), c)\n\n def test_matmul_exception_multiply(self):\n # test that matmul fails if `__mul__` is missing\n class add_not_multiply():\n def __add__(self, other):\n return self\n a = np.full((3,3), add_not_multiply())\n with assert_raises(TypeError):\n b = np.matmul(a, a)\n\n def test_matmul_exception_add(self):\n # test that matmul fails if `__add__` is missing\n class multiply_not_add():\n def __mul__(self, other):\n return self\n a = np.full((3,3), multiply_not_add())\n with assert_raises(TypeError):\n b = np.matmul(a, a)\n\n def test_matmul_bool(self):\n # gh-14439\n a = np.array([[1, 0],[1, 1]], dtype=bool)\n assert np.max(a.view(np.uint8)) == 1\n b = np.matmul(a, a)\n # matmul with boolean output should always be 0, 1\n assert np.max(b.view(np.uint8)) == 1\n\n rg = np.random.default_rng(np.random.PCG64(43))\n d = rg.integers(2, size=4*5, dtype=np.int8)\n d = d.reshape(4, 5) > 0\n out1 = np.matmul(d, d.reshape(5, 4))\n out2 = np.dot(d, d.reshape(5, 4))\n assert_equal(out1, out2)\n\n c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))\n assert not np.any(c)\n\n\nif sys.version_info[:2] >= (3, 5):\n class TestMatmulOperator(MatmulCommon):\n import operator\n matmul = operator.matmul\n\n def test_array_priority_override(self):\n\n class A:\n __array_priority__ = 1000\n\n def __matmul__(self, other):\n return \"A\"\n\n def __rmatmul__(self, other):\n return \"A\"\n\n a = A()\n b = np.ones(2)\n assert_equal(self.matmul(a, b), \"A\")\n assert_equal(self.matmul(b, a), \"A\")\n\n def test_matmul_raises(self):\n assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))\n assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))\n assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))\n\n def test_matmul_inplace():\n # It would be nice to support in-place matmul eventually, but for now\n # we don't have a working implementation, so better just to error out\n # and nudge people to writing \"a = a @ b\".\n a = np.eye(3)\n b = np.eye(3)\n assert_raises(TypeError, a.__imatmul__, b)\n import operator\n assert_raises(TypeError, operator.imatmul, a, b)\n # we avoid writing the token `exec` so as not to crash python 2's\n # parser\n exec_ = getattr(builtins, \"exec\")\n assert_raises(TypeError, exec_, \"a @= b\", globals(), locals())\n\n def test_matmul_axes():\n a = np.arange(3*4*5).reshape(3, 4, 5)\n c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])\n assert c.shape == (3, 4, 4)\n d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])\n assert d.shape == (4, 4, 3)\n e = np.swapaxes(d, 0, 2)\n assert_array_equal(e, c)\n f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])\n assert f.shape == (4, 5)\n\n\nclass TestInner:\n\n def test_inner_type_mismatch(self):\n c = 1.\n A = np.array((1,1), dtype='i,i')\n\n assert_raises(TypeError, np.inner, c, A)\n assert_raises(TypeError, np.inner, A, c)\n\n def test_inner_scalar_and_vector(self):\n for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':\n sca = np.array(3, dtype=dt)[()]\n vec = np.array([1, 2], dtype=dt)\n desired = np.array([3, 6], dtype=dt)\n assert_equal(np.inner(vec, sca), desired)\n assert_equal(np.inner(sca, vec), desired)\n\n def test_vecself(self):\n # Ticket 844.\n # Inner product of a vector with itself segfaults or give\n # meaningless result\n a = np.zeros(shape=(1, 80), dtype=np.float64)\n p = np.inner(a, a)\n assert_almost_equal(p, 0, decimal=14)\n\n def 
test_inner_product_with_various_contiguities(self):\n # github issue 6532\n for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':\n # check an inner product involving a matrix transpose\n A = np.array([[1, 2], [3, 4]], dtype=dt)\n B = np.array([[1, 3], [2, 4]], dtype=dt)\n C = np.array([1, 1], dtype=dt)\n desired = np.array([4, 6], dtype=dt)\n assert_equal(np.inner(A.T, C), desired)\n assert_equal(np.inner(C, A.T), desired)\n assert_equal(np.inner(B, C), desired)\n assert_equal(np.inner(C, B), desired)\n # check a matrix product\n desired = np.array([[7, 10], [15, 22]], dtype=dt)\n assert_equal(np.inner(A, B), desired)\n # check the syrk vs. gemm paths\n desired = np.array([[5, 11], [11, 25]], dtype=dt)\n assert_equal(np.inner(A, A), desired)\n assert_equal(np.inner(A, A.copy()), desired)\n # check an inner product involving an aliased and reversed view\n a = np.arange(5).astype(dt)\n b = a[::-1]\n desired = np.array(10, dtype=dt).item()\n assert_equal(np.inner(b, a), desired)\n\n def test_3d_tensor(self):\n for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':\n a = np.arange(24).reshape(2,3,4).astype(dt)\n b = np.arange(24, 48).reshape(2,3,4).astype(dt)\n desired = np.array(\n [[[[ 158, 182, 206],\n [ 230, 254, 278]],\n\n [[ 566, 654, 742],\n [ 830, 918, 1006]],\n\n [[ 974, 1126, 1278],\n [1430, 1582, 1734]]],\n\n [[[1382, 1598, 1814],\n [2030, 2246, 2462]],\n\n [[1790, 2070, 2350],\n [2630, 2910, 3190]],\n\n [[2198, 2542, 2886],\n [3230, 3574, 3918]]]],\n dtype=dt\n )\n assert_equal(np.inner(a, b), desired)\n assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)\n\n\nclass TestAlen:\n def test_basic(self):\n with pytest.warns(DeprecationWarning):\n m = np.array([1, 2, 3])\n assert_equal(np.alen(m), 3)\n\n m = np.array([[1, 2, 3], [4, 5, 7]])\n assert_equal(np.alen(m), 2)\n\n m = [1, 2, 3]\n assert_equal(np.alen(m), 3)\n\n m = [[1, 2, 3], [4, 5, 7]]\n assert_equal(np.alen(m), 2)\n\n def test_singleton(self):\n with pytest.warns(DeprecationWarning):\n assert_equal(np.alen(5), 1)\n\n\nclass TestChoose:\n def setup(self):\n self.x = 2*np.ones((3,), dtype=int)\n self.y = 3*np.ones((3,), dtype=int)\n self.x2 = 2*np.ones((2, 3), dtype=int)\n self.y2 = 3*np.ones((2, 3), dtype=int)\n self.ind = [0, 0, 1]\n\n def test_basic(self):\n A = np.choose(self.ind, (self.x, self.y))\n assert_equal(A, [2, 2, 3])\n\n def test_broadcast1(self):\n A = np.choose(self.ind, (self.x2, self.y2))\n assert_equal(A, [[2, 2, 3], [2, 2, 3]])\n\n def test_broadcast2(self):\n A = np.choose(self.ind, (self.x, self.y2))\n assert_equal(A, [[2, 2, 3], [2, 2, 3]])\n\n @pytest.mark.parametrize(\"ops\",\n [(1000, np.array([1], dtype=np.uint8)),\n (-1, np.array([1], dtype=np.uint8)),\n (1., np.float32(3)),\n (1., np.array([3], dtype=np.float32))],)\n def test_output_dtype(self, ops):\n expected_dt = np.result_type(*ops)\n assert(np.choose([0], ops).dtype == expected_dt)\n\n\nclass TestRepeat:\n def setup(self):\n self.m = np.array([1, 2, 3, 4, 5, 6])\n self.m_rect = self.m.reshape((2, 3))\n\n def test_basic(self):\n A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])\n assert_equal(A, [1, 2, 2, 2, 3,\n 3, 4, 5, 6, 6])\n\n def test_broadcast1(self):\n A = np.repeat(self.m, 2)\n assert_equal(A, [1, 1, 2, 2, 3, 3,\n 4, 4, 5, 5, 6, 6])\n\n def test_axis_spec(self):\n A = np.repeat(self.m_rect, [2, 1], axis=0)\n assert_equal(A, [[1, 2, 3],\n [1, 2, 3],\n [4, 5, 6]])\n\n A = np.repeat(self.m_rect, [1, 3, 2], axis=1)\n assert_equal(A, [[1, 2, 2, 2, 3, 3],\n [4, 5, 5, 5, 6, 6]])\n\n def 
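# --- Illustrative sketch (not part of the test module above) of the two
# --- selection/replication APIs tested nearby: np.choose picks, per
# --- position, from candidate arrays via an index array, and np.repeat
# --- repeats elements, optionally along an axis. Values are arbitrary.
import numpy as np

ind = [0, 0, 1]
x = 2 * np.ones(3, dtype=int)
y = 3 * np.ones(3, dtype=int)
assert np.choose(ind, (x, y)).tolist() == [2, 2, 3]

m = np.array([[1, 2, 3], [4, 5, 6]])
assert np.repeat(m, [2, 1], axis=0).tolist() == [[1, 2, 3], [1, 2, 3], [4, 5, 6]]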
test_broadcast2(self):\n A = np.repeat(self.m_rect, 2, axis=0)\n assert_equal(A, [[1, 2, 3],\n [1, 2, 3],\n [4, 5, 6],\n [4, 5, 6]])\n\n A = np.repeat(self.m_rect, 2, axis=1)\n assert_equal(A, [[1, 1, 2, 2, 3, 3],\n [4, 4, 5, 5, 6, 6]])\n\n\n# TODO: test for multidimensional\nNEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}\n\n\[email protected]('dt', [float, Decimal], ids=['float', 'object'])\nclass TestNeighborhoodIter:\n # Simple, 2d tests\n def test_simple2d(self, dt):\n # Test zero and one padding for simple data type\n x = np.array([[0, 1], [2, 3]], dtype=dt)\n r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),\n np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),\n np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),\n np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),\n np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),\n np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),\n np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])\n assert_array_equal(l, r)\n\n r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),\n np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),\n np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),\n np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])\n assert_array_equal(l, r)\n\n def test_mirror2d(self, dt):\n x = np.array([[0, 1], [2, 3]], dtype=dt)\n r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),\n np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),\n np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),\n np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])\n assert_array_equal(l, r)\n\n # Simple, 1d tests\n def test_simple(self, dt):\n # Test padding with constant values\n x = np.linspace(1, 5, 5).astype(dt)\n r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 1], x[0], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 1], x[0], NEIGH_MODE['one'])\n assert_array_equal(l, r)\n\n r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-1, 1], x[4], NEIGH_MODE['constant'])\n assert_array_equal(l, r)\n\n # Test mirror modes\n def test_mirror(self, dt):\n x = np.linspace(1, 5, 5).astype(dt)\n r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],\n [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-2, 2], x[1], NEIGH_MODE['mirror'])\n assert_([i.dtype == dt for i in l])\n assert_array_equal(l, r)\n\n # Circular mode\n def test_circular(self, dt):\n x = np.linspace(1, 5, 5).astype(dt)\n r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],\n [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)\n l = _multiarray_tests.test_neighborhood_iterator(\n x, [-2, 2], x[0], NEIGH_MODE['circular'])\n assert_array_equal(l, r)\n\n\n# Test stacking neighborhood iterators\nclass TestStackedNeighborhoodIter:\n # Simple, 1d test: stacking 2 constant-padded neigh iterators\n def test_simple_const(self):\n dt = np.float64\n # Test zero and one padding for simple 
data type\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([0], dtype=dt),\n np.array([0], dtype=dt),\n np.array([1], dtype=dt),\n np.array([2], dtype=dt),\n np.array([3], dtype=dt),\n np.array([0], dtype=dt),\n np.array([0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n r = [np.array([1, 0, 1], dtype=dt),\n np.array([0, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 0], dtype=dt),\n np.array([3, 0, 1], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])\n assert_array_equal(l, r)\n\n # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and\n # mirror padding\n def test_simple_mirror(self):\n dt = np.float64\n # Stacking zero on top of mirror\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([0, 1, 1], dtype=dt),\n np.array([1, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 3], dtype=dt),\n np.array([3, 3, 0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([1, 0, 0], dtype=dt),\n np.array([0, 0, 1], dtype=dt),\n np.array([0, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero: 2nd\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([0, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 0], dtype=dt),\n np.array([3, 0, 0], dtype=dt),\n np.array([0, 0, 3], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero: 3rd\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([1, 0, 0, 1, 2], dtype=dt),\n np.array([0, 0, 1, 2, 3], dtype=dt),\n np.array([0, 1, 2, 3, 0], dtype=dt),\n np.array([1, 2, 3, 0, 0], dtype=dt),\n np.array([2, 3, 0, 0, 3], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])\n assert_array_equal(l, r)\n\n # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and\n # circular padding\n def test_simple_circular(self):\n dt = np.float64\n # Stacking zero on top of mirror\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([0, 3, 1], dtype=dt),\n np.array([3, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 1], dtype=dt),\n np.array([3, 1, 0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([3, 0, 0], dtype=dt),\n np.array([0, 0, 1], dtype=dt),\n np.array([0, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 3, 0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero: 2nd\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([0, 1, 2], dtype=dt),\n np.array([1, 2, 3], dtype=dt),\n np.array([2, 
3, 0], dtype=dt),\n np.array([3, 0, 0], dtype=dt),\n np.array([0, 0, 1], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero: 3rd\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([3, 0, 0, 1, 2], dtype=dt),\n np.array([0, 0, 1, 2, 3], dtype=dt),\n np.array([0, 1, 2, 3, 0], dtype=dt),\n np.array([1, 2, 3, 0, 0], dtype=dt),\n np.array([2, 3, 0, 0, 1], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])\n assert_array_equal(l, r)\n\n # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator\n # being strictly within the array\n def test_simple_strict_within(self):\n dt = np.float64\n # Stacking zero on top of zero, first neighborhood strictly inside the\n # array\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([1, 2, 3, 0], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero, first neighborhood strictly inside the\n # array\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([1, 2, 3, 3], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])\n assert_array_equal(l, r)\n\n # Stacking mirror on top of zero, first neighborhood strictly inside the\n # array\n x = np.array([1, 2, 3], dtype=dt)\n r = [np.array([1, 2, 3, 1], dtype=dt)]\n l = _multiarray_tests.test_neighborhood_iterator_oob(\n x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])\n assert_array_equal(l, r)\n\nclass TestWarnings:\n\n def test_complex_warning(self):\n x = np.array([1, 2])\n y = np.array([1-2j, 1+2j])\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", np.ComplexWarning)\n assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)\n assert_equal(x, [1, 2])\n\n\nclass TestMinScalarType:\n\n def test_usigned_shortshort(self):\n dt = np.min_scalar_type(2**8-1)\n wanted = np.dtype('uint8')\n assert_equal(wanted, dt)\n\n def test_usigned_short(self):\n dt = np.min_scalar_type(2**16-1)\n wanted = np.dtype('uint16')\n assert_equal(wanted, dt)\n\n def test_usigned_int(self):\n dt = np.min_scalar_type(2**32-1)\n wanted = np.dtype('uint32')\n assert_equal(wanted, dt)\n\n def test_usigned_longlong(self):\n dt = np.min_scalar_type(2**63-1)\n wanted = np.dtype('uint64')\n assert_equal(wanted, dt)\n\n def test_object(self):\n dt = np.min_scalar_type(2**64)\n wanted = np.dtype('O')\n assert_equal(wanted, dt)\n\n\nfrom numpy.core._internal import _dtype_from_pep3118\n\n\nclass TestPEP3118Dtype:\n def _check(self, spec, wanted):\n dt = np.dtype(wanted)\n actual = _dtype_from_pep3118(spec)\n assert_equal(actual, dt,\n err_msg=\"spec %r != dtype %r\" % (spec, wanted))\n\n def test_native_padding(self):\n align = np.dtype('i').alignment\n for j in range(8):\n if j == 0:\n s = 'bi'\n else:\n s = 'b%dxi' % j\n self._check('@'+s, {'f0': ('i1', 0),\n 'f1': ('i', align*(1 + j//align))})\n self._check('='+s, {'f0': ('i1', 0),\n 'f1': ('i', 1+j)})\n\n def test_native_padding_2(self):\n # Native padding should work also for structs and sub-arrays\n self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})\n self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})\n\n def test_trailing_padding(self):\n # Trailing padding should be included, 
*and*, the item size\n # should match the alignment if in aligned mode\n align = np.dtype('i').alignment\n size = np.dtype('i').itemsize\n\n def aligned(n):\n return align*(1 + (n-1)//align)\n\n base = dict(formats=['i'], names=['f0'])\n\n self._check('ix', dict(itemsize=aligned(size + 1), **base))\n self._check('ixx', dict(itemsize=aligned(size + 2), **base))\n self._check('ixxx', dict(itemsize=aligned(size + 3), **base))\n self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))\n self._check('i7x', dict(itemsize=aligned(size + 7), **base))\n\n self._check('^ix', dict(itemsize=size + 1, **base))\n self._check('^ixx', dict(itemsize=size + 2, **base))\n self._check('^ixxx', dict(itemsize=size + 3, **base))\n self._check('^ixxxx', dict(itemsize=size + 4, **base))\n self._check('^i7x', dict(itemsize=size + 7, **base))\n\n def test_native_padding_3(self):\n dt = np.dtype(\n [('a', 'b'), ('b', 'i'),\n ('sub', np.dtype('b,i')), ('c', 'i')],\n align=True)\n self._check(\"T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}\", dt)\n\n dt = np.dtype(\n [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),\n ('e', 'b'), ('sub', np.dtype('b,i', align=True))])\n self._check(\"T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}\", dt)\n\n def test_padding_with_array_inside_struct(self):\n dt = np.dtype(\n [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),\n ('d', 'i')],\n align=True)\n self._check(\"T{b:a:xxxi:b:3b:c:xi:d:}\", dt)\n\n def test_byteorder_inside_struct(self):\n # The byte order after @T{=i} should be '=', not '@'.\n # Check this by noting the absence of native alignment.\n self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),\n 'f1': ('i', 5)})\n\n def test_intra_padding(self):\n # Natively aligned sub-arrays may require some internal padding\n align = np.dtype('i').alignment\n size = np.dtype('i').itemsize\n\n def aligned(n):\n return (align*(1 + (n-1)//align))\n\n self._check('(3)T{ix}', (dict(\n names=['f0'],\n formats=['i'],\n offsets=[0],\n itemsize=aligned(size + 1)\n ), (3,)))\n\n def test_char_vs_string(self):\n dt = np.dtype('c')\n self._check('c', dt)\n\n dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])\n self._check('4c4s', dt)\n\n def test_field_order(self):\n # gh-9053 - previously, we relied on dictionary key order\n self._check(\"(0)I:a:f:b:\", [('a', 'I', (0,)), ('b', 'f')])\n self._check(\"(0)I:b:f:a:\", [('b', 'I', (0,)), ('a', 'f')])\n\n def test_unnamed_fields(self):\n self._check('ii', [('f0', 'i'), ('f1', 'i')])\n self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])\n\n self._check('i', 'i')\n self._check('i:f0:', [('f0', 'i')])\n\n\nclass TestNewBufferProtocol:\n \"\"\" Test PEP3118 buffers \"\"\"\n\n def _check_roundtrip(self, obj):\n obj = np.asarray(obj)\n x = memoryview(obj)\n y = np.asarray(x)\n y2 = np.array(x)\n assert_(not y.flags.owndata)\n assert_(y2.flags.owndata)\n\n assert_equal(y.dtype, obj.dtype)\n assert_equal(y.shape, obj.shape)\n assert_array_equal(obj, y)\n\n assert_equal(y2.dtype, obj.dtype)\n assert_equal(y2.shape, obj.shape)\n assert_array_equal(obj, y2)\n\n def test_roundtrip(self):\n x = np.array([1, 2, 3, 4, 5], dtype='i4')\n self._check_roundtrip(x)\n\n x = np.array([[1, 2], [3, 4]], dtype=np.float64)\n self._check_roundtrip(x)\n\n x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]\n self._check_roundtrip(x)\n\n dt = [('a', 'b'),\n ('b', 'h'),\n ('c', 'i'),\n ('d', 'l'),\n ('dx', 'q'),\n ('e', 'B'),\n ('f', 'H'),\n ('g', 'I'),\n ('h', 'L'),\n ('hx', 'Q'),\n ('i', np.single),\n ('j', np.double),\n ('k', np.longdouble),\n ('ix', np.csingle),\n ('jx', np.cdouble),\n 
('kx', np.clongdouble),\n ('l', 'S4'),\n ('m', 'U4'),\n ('n', 'V3'),\n ('o', '?'),\n ('p', np.half),\n ]\n x = np.array(\n [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n b'aaaa', 'bbbb', b'xxx', True, 1.0)],\n dtype=dt)\n self._check_roundtrip(x)\n\n x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])\n self._check_roundtrip(x)\n\n x = np.array([1, 2, 3], dtype='>i2')\n self._check_roundtrip(x)\n\n x = np.array([1, 2, 3], dtype='<i2')\n self._check_roundtrip(x)\n\n x = np.array([1, 2, 3], dtype='>i4')\n self._check_roundtrip(x)\n\n x = np.array([1, 2, 3], dtype='<i4')\n self._check_roundtrip(x)\n\n # check long long can be represented as non-native\n x = np.array([1, 2, 3], dtype='>q')\n self._check_roundtrip(x)\n\n # Native-only data types can be passed through the buffer interface\n # only in native byte order\n if sys.byteorder == 'little':\n x = np.array([1, 2, 3], dtype='>g')\n assert_raises(ValueError, self._check_roundtrip, x)\n x = np.array([1, 2, 3], dtype='<g')\n self._check_roundtrip(x)\n else:\n x = np.array([1, 2, 3], dtype='>g')\n self._check_roundtrip(x)\n x = np.array([1, 2, 3], dtype='<g')\n assert_raises(ValueError, self._check_roundtrip, x)\n\n def test_roundtrip_half(self):\n half_list = [\n 1.0,\n -2.0,\n 6.5504 * 10**4, # (max half precision)\n 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)\n 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)\n 0.0,\n -0.0,\n float('+inf'),\n float('-inf'),\n 0.333251953125, # ~= 1/3\n ]\n\n x = np.array(half_list, dtype='>e')\n self._check_roundtrip(x)\n x = np.array(half_list, dtype='<e')\n self._check_roundtrip(x)\n\n def test_roundtrip_single_types(self):\n for typ in np.typeDict.values():\n dtype = np.dtype(typ)\n\n if dtype.char in 'Mm':\n # datetimes cannot be used in buffers\n continue\n if dtype.char == 'V':\n # skip void\n continue\n\n x = np.zeros(4, dtype=dtype)\n self._check_roundtrip(x)\n\n if dtype.char not in 'qQgG':\n dt = dtype.newbyteorder('<')\n x = np.zeros(4, dtype=dt)\n self._check_roundtrip(x)\n\n dt = dtype.newbyteorder('>')\n x = np.zeros(4, dtype=dt)\n self._check_roundtrip(x)\n\n def test_roundtrip_scalar(self):\n # Issue #4015.\n self._check_roundtrip(0)\n\n def test_invalid_buffer_format(self):\n # datetime64 cannot be used fully in a buffer yet\n # Should be fixed in the next Numpy major release\n dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])\n a = np.empty(3, dt)\n assert_raises((ValueError, BufferError), memoryview, a)\n assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))\n\n def test_export_simple_1d(self):\n x = np.array([1, 2, 3, 4, 5], dtype='i')\n y = memoryview(x)\n assert_equal(y.format, 'i')\n assert_equal(y.shape, (5,))\n assert_equal(y.ndim, 1)\n assert_equal(y.strides, (4,))\n assert_equal(y.suboffsets, EMPTY)\n assert_equal(y.itemsize, 4)\n\n def test_export_simple_nd(self):\n x = np.array([[1, 2], [3, 4]], dtype=np.float64)\n y = memoryview(x)\n assert_equal(y.format, 'd')\n assert_equal(y.shape, (2, 2))\n assert_equal(y.ndim, 2)\n assert_equal(y.strides, (16, 8))\n assert_equal(y.suboffsets, EMPTY)\n assert_equal(y.itemsize, 8)\n\n def test_export_discontiguous(self):\n x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]\n y = memoryview(x)\n assert_equal(y.format, 'f')\n assert_equal(y.shape, (3, 3))\n assert_equal(y.ndim, 2)\n assert_equal(y.strides, (36, 4))\n assert_equal(y.suboffsets, EMPTY)\n assert_equal(y.itemsize, 4)\n\n def test_export_record(self):\n dt = [('a', 'b'),\n ('b', 'h'),\n ('c', 'i'),\n ('d', 
'l'),\n ('dx', 'q'),\n ('e', 'B'),\n ('f', 'H'),\n ('g', 'I'),\n ('h', 'L'),\n ('hx', 'Q'),\n ('i', np.single),\n ('j', np.double),\n ('k', np.longdouble),\n ('ix', np.csingle),\n ('jx', np.cdouble),\n ('kx', np.clongdouble),\n ('l', 'S4'),\n ('m', 'U4'),\n ('n', 'V3'),\n ('o', '?'),\n ('p', np.half),\n ]\n x = np.array(\n [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n b'aaaa', 'bbbb', b' ', True, 1.0)],\n dtype=dt)\n y = memoryview(x)\n assert_equal(y.shape, (1,))\n assert_equal(y.ndim, 1)\n assert_equal(y.suboffsets, EMPTY)\n\n sz = sum([np.dtype(b).itemsize for a, b in dt])\n if np.dtype('l').itemsize == 4:\n assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')\n else:\n assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides\n if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):\n assert_equal(y.strides, (sz,))\n assert_equal(y.itemsize, sz)\n\n def test_export_subarray(self):\n x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])\n y = memoryview(x)\n assert_equal(y.format, 'T{(2,2)i:a:}')\n assert_equal(y.shape, EMPTY)\n assert_equal(y.ndim, 0)\n assert_equal(y.strides, EMPTY)\n assert_equal(y.suboffsets, EMPTY)\n assert_equal(y.itemsize, 16)\n\n def test_export_endian(self):\n x = np.array([1, 2, 3], dtype='>i')\n y = memoryview(x)\n if sys.byteorder == 'little':\n assert_equal(y.format, '>i')\n else:\n assert_equal(y.format, 'i')\n\n x = np.array([1, 2, 3], dtype='<i')\n y = memoryview(x)\n if sys.byteorder == 'little':\n assert_equal(y.format, 'i')\n else:\n assert_equal(y.format, '<i')\n\n def test_export_flags(self):\n # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)\n assert_raises(ValueError,\n _multiarray_tests.get_buffer_info,\n np.arange(5)[::2], ('SIMPLE',))\n\n def test_padding(self):\n for j in range(8):\n x = np.array([(1,), (2,)], dtype={'f0': (int, j)})\n self._check_roundtrip(x)\n\n def test_reference_leak(self):\n if HAS_REFCOUNT:\n count_1 = sys.getrefcount(np.core._internal)\n a = np.zeros(4)\n b = memoryview(a)\n c = np.asarray(b)\n if HAS_REFCOUNT:\n count_2 = sys.getrefcount(np.core._internal)\n assert_equal(count_1, count_2)\n del c # avoid pyflakes unused variable warning.\n\n def test_padded_struct_array(self):\n dt1 = np.dtype(\n [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],\n align=True)\n x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)\n self._check_roundtrip(x1)\n\n dt2 = np.dtype(\n [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],\n align=True)\n x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)\n self._check_roundtrip(x2)\n\n dt3 = np.dtype(\n [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),\n ('e', 'b'), ('sub', np.dtype('b,i', align=True))])\n x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)\n self._check_roundtrip(x3)\n\n def test_relaxed_strides(self):\n # Test that relaxed strides are converted to non-relaxed\n c = np.ones((1, 10, 10), dtype='i8')\n\n # Check for NPY_RELAXED_STRIDES_CHECKING:\n if np.ones((10, 1), order=\"C\").flags.f_contiguous:\n c.strides = (-1, 80, 8)\n\n assert_(memoryview(c).strides == (800, 80, 8))\n\n # Writing C-contiguous data to a BytesIO buffer should work\n fd = io.BytesIO()\n fd.write(c.data)\n\n fortran = c.T\n assert_(memoryview(fortran).strides == (8, 80, 800))\n\n arr = np.ones((1, 10))\n if 
arr.flags.f_contiguous:\n shape, strides = _multiarray_tests.get_buffer_info(\n arr, ['F_CONTIGUOUS'])\n assert_(strides[0] == 8)\n arr = np.ones((10, 1), order='F')\n shape, strides = _multiarray_tests.get_buffer_info(\n arr, ['C_CONTIGUOUS'])\n assert_(strides[-1] == 8)\n\n def test_out_of_order_fields(self):\n dt = np.dtype(dict(\n formats=['<i4', '<i4'],\n names=['one', 'two'],\n offsets=[4, 0],\n itemsize=8\n ))\n\n # overlapping fields cannot be represented by PEP3118\n arr = np.empty(1, dt)\n with assert_raises(ValueError):\n memoryview(arr)\n\n def test_max_dims(self):\n a = np.empty((1,) * 32)\n self._check_roundtrip(a)\n\n @pytest.mark.skipif(sys.version_info < (2, 7, 7), reason=\"See gh-11115\")\n def test_error_too_many_dims(self):\n def make_ctype(shape, scalar_type):\n t = scalar_type\n for dim in shape[::-1]:\n t = dim * t\n return t\n\n # construct a memoryview with 33 dimensions\n c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)\n m = memoryview(c_u8_33d())\n assert_equal(m.ndim, 33)\n\n assert_raises_regex(\n RuntimeError, \"ndim\",\n np.array, m)\n\n # The above seems to create some deep cycles, clean them up for\n # easier reference count debugging:\n del c_u8_33d, m\n for i in range(33):\n if gc.collect() == 0:\n break\n\n def test_error_pointer_type(self):\n # gh-6741\n m = memoryview(ctypes.pointer(ctypes.c_uint8()))\n assert_('&' in m.format)\n\n assert_raises_regex(\n ValueError, \"format string\",\n np.array, m)\n\n def test_error_message_unsupported(self):\n # wchar has no corresponding numpy type - if this changes in future, we\n # need a better way to construct an invalid memoryview format.\n t = ctypes.c_wchar * 4\n with assert_raises(ValueError) as cm:\n np.array(t())\n\n exc = cm.exception\n if sys.version_info.major > 2:\n with assert_raises_regex(\n NotImplementedError,\n r\"Unrepresentable .* 'u' \\(UCS-2 strings\\)\"\n ):\n raise exc.__cause__\n\n def test_ctypes_integer_via_memoryview(self):\n # gh-11150, due to bpo-10746\n for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:\n value = c_integer(42)\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings('always', r'.*\\bctypes\\b', RuntimeWarning)\n np.asarray(value)\n\n def test_ctypes_struct_via_memoryview(self):\n # gh-10528\n class foo(ctypes.Structure):\n _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]\n f = foo(a=1, b=2)\n\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings('always', r'.*\\bctypes\\b', RuntimeWarning)\n arr = np.asarray(f)\n\n assert_equal(arr['a'], 1)\n assert_equal(arr['b'], 2)\n f.a = 3\n assert_equal(arr['a'], 3)\n\n\nclass TestArrayAttributeDeletion:\n\n def test_multiarray_writable_attributes_deletion(self):\n # ticket #2046, should not seqfault, raise AttributeError\n a = np.ones(2)\n attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"Assigning the 'data' attribute\")\n for s in attr:\n assert_raises(AttributeError, delattr, a, s)\n\n def test_multiarray_not_writable_attributes_deletion(self):\n a = np.ones(2)\n attr = [\"ndim\", \"flags\", \"itemsize\", \"size\", \"nbytes\", \"base\",\n \"ctypes\", \"T\", \"__array_interface__\", \"__array_struct__\",\n \"__array_priority__\", \"__array_finalize__\"]\n for s in attr:\n assert_raises(AttributeError, delattr, a, s)\n\n def test_multiarray_flags_writable_attribute_deletion(self):\n a = np.ones(2).flags\n attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']\n for s in 
attr:\n assert_raises(AttributeError, delattr, a, s)\n\n def test_multiarray_flags_not_writable_attribute_deletion(self):\n a = np.ones(2).flags\n attr = [\"contiguous\", \"c_contiguous\", \"f_contiguous\", \"fortran\",\n \"owndata\", \"fnc\", \"forc\", \"behaved\", \"carray\", \"farray\",\n \"num\"]\n for s in attr:\n assert_raises(AttributeError, delattr, a, s)\n\n\nclass TestArrayInterface():\n class Foo:\n def __init__(self, value):\n self.value = value\n self.iface = {'typestr': 'f8'}\n\n def __float__(self):\n return float(self.value)\n\n @property\n def __array_interface__(self):\n return self.iface\n\n\n f = Foo(0.5)\n\n @pytest.mark.parametrize('val, iface, expected', [\n (f, {}, 0.5),\n ([f], {}, [0.5]),\n ([f, f], {}, [0.5, 0.5]),\n (f, {'shape': ()}, 0.5),\n (f, {'shape': None}, TypeError),\n (f, {'shape': (1, 1)}, [[0.5]]),\n (f, {'shape': (2,)}, ValueError),\n (f, {'strides': ()}, 0.5),\n (f, {'strides': (2,)}, ValueError),\n (f, {'strides': 16}, TypeError),\n ])\n def test_scalar_interface(self, val, iface, expected):\n # Test scalar coercion within the array interface\n self.f.iface = {'typestr': 'f8'}\n self.f.iface.update(iface)\n if HAS_REFCOUNT:\n pre_cnt = sys.getrefcount(np.dtype('f8'))\n if isinstance(expected, type):\n assert_raises(expected, np.array, val)\n else:\n result = np.array(val)\n assert_equal(np.array(val), expected)\n assert result.dtype == 'f8'\n del result\n if HAS_REFCOUNT:\n post_cnt = sys.getrefcount(np.dtype('f8'))\n assert_equal(pre_cnt, post_cnt)\n\ndef test_interface_no_shape():\n class ArrayLike:\n array = np.array(1)\n __array_interface__ = array.__array_interface__\n assert_equal(np.array(ArrayLike()), 1)\n\n\ndef test_array_interface_itemsize():\n # See gh-6361\n my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],\n 'offsets': [0, 8], 'itemsize': 16})\n a = np.ones(10, dtype=my_dtype)\n descr_t = np.dtype(a.__array_interface__['descr'])\n typestr_t = np.dtype(a.__array_interface__['typestr'])\n assert_equal(descr_t.itemsize, typestr_t.itemsize)\n\n\ndef test_array_interface_empty_shape():\n # See gh-7994\n arr = np.array([1, 2, 3])\n interface1 = dict(arr.__array_interface__)\n interface1['shape'] = ()\n\n class DummyArray1:\n __array_interface__ = interface1\n\n # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting\n # the interface data to bytes would invoke the bug this tests for, that\n # __array_interface__ with shape=() is not allowed if the data is an object\n # exposing the buffer interface\n interface2 = dict(interface1)\n interface2['data'] = arr[0].tobytes()\n\n class DummyArray2:\n __array_interface__ = interface2\n\n arr1 = np.asarray(DummyArray1())\n arr2 = np.asarray(DummyArray2())\n arr3 = arr[:1].reshape(())\n assert_equal(arr1, arr2)\n assert_equal(arr1, arr3)\n\ndef test_array_interface_offset():\n arr = np.array([1, 2, 3], dtype='int32')\n interface = dict(arr.__array_interface__)\n interface['data'] = memoryview(arr)\n interface['shape'] = (2,)\n interface['offset'] = 4\n\n\n class DummyArray:\n __array_interface__ = interface\n\n arr1 = np.asarray(DummyArray())\n assert_equal(arr1, arr[1:])\n\ndef test_flat_element_deletion():\n it = np.ones(3).flat\n try:\n del it[1]\n del it[1:2]\n except TypeError:\n pass\n except Exception:\n raise AssertionError\n\n\ndef test_scalar_element_deletion():\n a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])\n assert_raises(ValueError, a[0].__delitem__, 'x')\n\n\nclass TestMemEventHook:\n def test_mem_seteventhook(self):\n # The actual tests are 
within the C code in\n # multiarray/_multiarray_tests.c.src\n _multiarray_tests.test_pydatamem_seteventhook_start()\n # force an allocation and free of a numpy array\n # needs to be larger then limit of small memory cacher in ctors.c\n a = np.zeros(1000)\n del a\n break_cycles()\n _multiarray_tests.test_pydatamem_seteventhook_end()\n\nclass TestMapIter:\n def test_mapiter(self):\n # The actual tests are within the C code in\n # multiarray/_multiarray_tests.c.src\n\n a = np.arange(12).reshape((3, 4)).astype(float)\n index = ([1, 1, 2, 0],\n [0, 0, 2, 3])\n vals = [50, 50, 30, 16]\n\n _multiarray_tests.test_inplace_increment(a, index, vals)\n assert_equal(a, [[0.00, 1., 2.0, 19.],\n [104., 5., 6.0, 7.0],\n [8.00, 9., 40., 11.]])\n\n b = np.arange(6).astype(float)\n index = (np.array([1, 2, 0]),)\n vals = [50, 4, 100.1]\n _multiarray_tests.test_inplace_increment(b, index, vals)\n assert_equal(b, [100.1, 51., 6., 3., 4., 5.])\n\n\nclass TestAsCArray:\n def test_1darray(self):\n array = np.arange(24, dtype=np.double)\n from_c = _multiarray_tests.test_as_c_array(array, 3)\n assert_equal(array[3], from_c)\n\n def test_2darray(self):\n array = np.arange(24, dtype=np.double).reshape(3, 8)\n from_c = _multiarray_tests.test_as_c_array(array, 2, 4)\n assert_equal(array[2, 4], from_c)\n\n def test_3darray(self):\n array = np.arange(24, dtype=np.double).reshape(2, 3, 4)\n from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)\n assert_equal(array[1, 2, 3], from_c)\n\n\nclass TestConversion:\n def test_array_scalar_relational_operation(self):\n # All integer\n for dt1 in np.typecodes['AllInteger']:\n assert_(1 > np.array(0, dtype=dt1), \"type %s failed\" % (dt1,))\n assert_(not 1 < np.array(0, dtype=dt1), \"type %s failed\" % (dt1,))\n\n for dt2 in np.typecodes['AllInteger']:\n assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n\n # Unsigned integers\n for dt1 in 'BHILQP':\n assert_(-1 < np.array(1, dtype=dt1), \"type %s failed\" % (dt1,))\n assert_(not -1 > np.array(1, dtype=dt1), \"type %s failed\" % (dt1,))\n assert_(-1 != np.array(1, dtype=dt1), \"type %s failed\" % (dt1,))\n\n # Unsigned vs signed\n for dt2 in 'bhilqp':\n assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n\n # Signed integers and floats\n for dt1 in 'bhlqp' + np.typecodes['Float']:\n assert_(1 > np.array(-1, dtype=dt1), \"type %s failed\" % (dt1,))\n assert_(not 1 < np.array(-1, dtype=dt1), \"type %s failed\" % (dt1,))\n assert_(-1 == np.array(-1, dtype=dt1), \"type %s failed\" % (dt1,))\n\n for dt2 in 'bhlqp' + np.typecodes['Float']:\n assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),\n \"type %s and %s failed\" % (dt1, dt2))\n\n def test_to_bool_scalar(self):\n assert_equal(bool(np.array([False])), False)\n assert_equal(bool(np.array([True])), True)\n assert_equal(bool(np.array([[42]])), True)\n assert_raises(ValueError, bool, np.array([1, 2]))\n\n class NotConvertible:\n def __bool__(self):\n raise 
NotImplementedError\n __nonzero__ = __bool__ # python 2\n\n assert_raises(NotImplementedError, bool, np.array(NotConvertible()))\n assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))\n\n self_containing = np.array([None])\n self_containing[0] = self_containing\n try:\n Error = RecursionError\n except NameError:\n Error = RuntimeError # python < 3.5\n assert_raises(Error, bool, self_containing) # previously stack overflow\n self_containing[0] = None # resolve circular reference\n\n def test_to_int_scalar(self):\n # gh-9972 means that these aren't always the same\n int_funcs = (int, lambda x: x.__int__())\n for int_func in int_funcs:\n assert_equal(int_func(np.array(0)), 0)\n assert_equal(int_func(np.array([1])), 1)\n assert_equal(int_func(np.array([[42]])), 42)\n assert_raises(TypeError, int_func, np.array([1, 2]))\n\n # gh-9972\n assert_equal(4, int_func(np.array('4')))\n assert_equal(5, int_func(np.bytes_(b'5')))\n assert_equal(6, int_func(np.unicode_(u'6')))\n\n class HasTrunc:\n def __trunc__(self):\n return 3\n assert_equal(3, int_func(np.array(HasTrunc())))\n assert_equal(3, int_func(np.array([HasTrunc()])))\n\n class NotConvertible:\n def __int__(self):\n raise NotImplementedError\n assert_raises(NotImplementedError,\n int_func, np.array(NotConvertible()))\n assert_raises(NotImplementedError,\n int_func, np.array([NotConvertible()]))\n\n\nclass TestWhere:\n def test_basic(self):\n dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,\n np.longdouble, np.clongdouble]\n for dt in dts:\n c = np.ones(53, dtype=bool)\n assert_equal(np.where( c, dt(0), dt(1)), dt(0))\n assert_equal(np.where(~c, dt(0), dt(1)), dt(1))\n assert_equal(np.where(True, dt(0), dt(1)), dt(0))\n assert_equal(np.where(False, dt(0), dt(1)), dt(1))\n d = np.ones_like(c).astype(dt)\n e = np.zeros_like(d)\n r = d.astype(dt)\n c[7] = False\n r[7] = e[7]\n assert_equal(np.where(c, e, e), e)\n assert_equal(np.where(c, d, e), r)\n assert_equal(np.where(c, d, e[0]), r)\n assert_equal(np.where(c, d[0], e), r)\n assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])\n assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])\n assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])\n assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])\n assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])\n assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])\n assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])\n\n def test_exotic(self):\n # object\n assert_array_equal(np.where(True, None, None), np.array(None))\n # zero sized\n m = np.array([], dtype=bool).reshape(0, 3)\n b = np.array([], dtype=np.float64).reshape(0, 3)\n assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))\n\n # object cast\n d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,\n 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,\n 1.267, 0.229, -1.39, 0.487])\n nan = float('NaN')\n e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,\n 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],\n dtype=object)\n m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,\n 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)\n\n r = e[:]\n r[np.where(m)] = d[np.where(m)]\n assert_array_equal(np.where(m, d, e), r)\n\n r = e[:]\n r[np.where(~m)] = d[np.where(~m)]\n assert_array_equal(np.where(m, e, d), r)\n\n assert_array_equal(np.where(m, e, e), e)\n\n # minimal dtype result with NaN scalar (e.g required by pandas)\n d = np.array([1., 2.], dtype=np.float32)\n e = float('NaN')\n 
assert_equal(np.where(True, d, e).dtype, np.float32)\n e = float('Infinity')\n assert_equal(np.where(True, d, e).dtype, np.float32)\n e = float('-Infinity')\n assert_equal(np.where(True, d, e).dtype, np.float32)\n # also check upcast\n e = float(1e150)\n assert_equal(np.where(True, d, e).dtype, np.float64)\n\n def test_ndim(self):\n c = [True, False]\n a = np.zeros((2, 25))\n b = np.ones((2, 25))\n r = np.where(np.array(c)[:,np.newaxis], a, b)\n assert_array_equal(r[0], a[0])\n assert_array_equal(r[1], b[0])\n\n a = a.T\n b = b.T\n r = np.where(c, a, b)\n assert_array_equal(r[:,0], a[:,0])\n assert_array_equal(r[:,1], b[:,0])\n\n def test_dtype_mix(self):\n c = np.array([False, True, False, False, False, False, True, False,\n False, False, True, False])\n a = np.uint32(1)\n b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],\n dtype=np.float64)\n r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],\n dtype=np.float64)\n assert_equal(np.where(c, a, b), r)\n\n a = a.astype(np.float32)\n b = b.astype(np.int64)\n assert_equal(np.where(c, a, b), r)\n\n # non bool mask\n c = c.astype(int)\n c[c != 0] = 34242324\n assert_equal(np.where(c, a, b), r)\n # invert\n tmpmask = c != 0\n c[c == 0] = 41247212\n c[tmpmask] = 0\n assert_equal(np.where(c, b, a), r)\n\n def test_foreign(self):\n c = np.array([False, True, False, False, False, False, True, False,\n False, False, True, False])\n r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],\n dtype=np.float64)\n a = np.ones(1, dtype='>i4')\n b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],\n dtype=np.float64)\n assert_equal(np.where(c, a, b), r)\n\n b = b.astype('>f8')\n assert_equal(np.where(c, a, b), r)\n\n a = a.astype('<i4')\n assert_equal(np.where(c, a, b), r)\n\n c = c.astype('>i4')\n assert_equal(np.where(c, a, b), r)\n\n def test_error(self):\n c = [True, True]\n a = np.ones((4, 5))\n b = np.ones((5, 5))\n assert_raises(ValueError, np.where, c, a, a)\n assert_raises(ValueError, np.where, c[0], a, b)\n\n def test_string(self):\n # gh-4778 check strings are properly filled with nulls\n a = np.array(\"abc\")\n b = np.array(\"x\" * 753)\n assert_equal(np.where(True, a, b), \"abc\")\n assert_equal(np.where(False, b, a), \"abc\")\n\n # check native datatype sized strings\n a = np.array(\"abcd\")\n b = np.array(\"x\" * 8)\n assert_equal(np.where(True, a, b), \"abcd\")\n assert_equal(np.where(False, b, a), \"abcd\")\n\n def test_empty_result(self):\n # pass empty where result through an assignment which reads the data of\n # empty arrays, error detectable with valgrind, see gh-8922\n x = np.zeros((1, 1))\n ibad = np.vstack(np.where(x == 99.))\n assert_array_equal(ibad,\n np.atleast_2d(np.array([[],[]], dtype=np.intp)))\n\n def test_largedim(self):\n # invalid read regression gh-9304\n shape = [10, 2, 3, 4, 5, 6]\n np.random.seed(2)\n array = np.random.rand(*shape)\n\n for i in range(10):\n benchmark = array.nonzero()\n result = array.nonzero()\n assert_array_equal(benchmark, result)\n\n\nif not IS_PYPY:\n # sys.getsizeof() is not valid on PyPy\n class TestSizeOf:\n\n def test_empty_array(self):\n x = np.array([])\n assert_(sys.getsizeof(x) > 0)\n\n def check_array(self, dtype):\n elem_size = dtype(0).itemsize\n\n for length in [10, 50, 100, 500]:\n x = np.arange(length, dtype=dtype)\n assert_(sys.getsizeof(x) > length * elem_size)\n\n def test_array_int32(self):\n self.check_array(np.int32)\n\n def test_array_int64(self):\n self.check_array(np.int64)\n\n def test_array_float32(self):\n 
self.check_array(np.float32)\n\n def test_array_float64(self):\n self.check_array(np.float64)\n\n def test_view(self):\n d = np.ones(100)\n assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))\n\n def test_reshape(self):\n d = np.ones(100)\n assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))\n\n @_no_tracing\n def test_resize(self):\n d = np.ones(100)\n old = sys.getsizeof(d)\n d.resize(50)\n assert_(old > sys.getsizeof(d))\n d.resize(150)\n assert_(old < sys.getsizeof(d))\n\n def test_error(self):\n d = np.ones(100)\n assert_raises(TypeError, d.__sizeof__, \"a\")\n\n\nclass TestHashing:\n\n def test_arrays_not_hashable(self):\n x = np.ones(3)\n assert_raises(TypeError, hash, x)\n\n def test_collections_hashable(self):\n x = np.array([])\n assert_(not isinstance(x, collections_abc.Hashable))\n\n\nclass TestArrayPriority:\n # This will go away when __array_priority__ is settled, meanwhile\n # it serves to check unintended changes.\n op = operator\n binary_ops = [\n op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,\n op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,\n op.ge, op.lt, op.le, op.ne, op.eq\n ]\n\n # See #7949. Don't use \"/\" operator With -3 switch, since python reports it\n # as a DeprecationWarning\n if sys.version_info[0] < 3 and not sys.py3kwarning:\n binary_ops.append(op.div)\n\n class Foo(np.ndarray):\n __array_priority__ = 100.\n\n def __new__(cls, *args, **kwargs):\n return np.array(*args, **kwargs).view(cls)\n\n class Bar(np.ndarray):\n __array_priority__ = 101.\n\n def __new__(cls, *args, **kwargs):\n return np.array(*args, **kwargs).view(cls)\n\n class Other:\n __array_priority__ = 1000.\n\n def _all(self, other):\n return self.__class__()\n\n __add__ = __radd__ = _all\n __sub__ = __rsub__ = _all\n __mul__ = __rmul__ = _all\n __pow__ = __rpow__ = _all\n __div__ = __rdiv__ = _all\n __mod__ = __rmod__ = _all\n __truediv__ = __rtruediv__ = _all\n __floordiv__ = __rfloordiv__ = _all\n __and__ = __rand__ = _all\n __xor__ = __rxor__ = _all\n __or__ = __ror__ = _all\n __lshift__ = __rlshift__ = _all\n __rshift__ = __rrshift__ = _all\n __eq__ = _all\n __ne__ = _all\n __gt__ = _all\n __ge__ = _all\n __lt__ = _all\n __le__ = _all\n\n def test_ndarray_subclass(self):\n a = np.array([1, 2])\n b = self.Bar([1, 2])\n for f in self.binary_ops:\n msg = repr(f)\n assert_(isinstance(f(a, b), self.Bar), msg)\n assert_(isinstance(f(b, a), self.Bar), msg)\n\n def test_ndarray_other(self):\n a = np.array([1, 2])\n b = self.Other()\n for f in self.binary_ops:\n msg = repr(f)\n assert_(isinstance(f(a, b), self.Other), msg)\n assert_(isinstance(f(b, a), self.Other), msg)\n\n def test_subclass_subclass(self):\n a = self.Foo([1, 2])\n b = self.Bar([1, 2])\n for f in self.binary_ops:\n msg = repr(f)\n assert_(isinstance(f(a, b), self.Bar), msg)\n assert_(isinstance(f(b, a), self.Bar), msg)\n\n def test_subclass_other(self):\n a = self.Foo([1, 2])\n b = self.Other()\n for f in self.binary_ops:\n msg = repr(f)\n assert_(isinstance(f(a, b), self.Other), msg)\n assert_(isinstance(f(b, a), self.Other), msg)\n\n\nclass TestBytestringArrayNonzero:\n\n def test_empty_bstring_array_is_falsey(self):\n assert_(not np.array([''], dtype=str))\n\n def test_whitespace_bstring_array_is_falsey(self):\n a = np.array(['spam'], dtype=str)\n a[0] = ' \\0\\0'\n assert_(not a)\n\n def test_all_null_bstring_array_is_falsey(self):\n a = np.array(['spam'], dtype=str)\n a[0] = '\\0\\0\\0\\0'\n assert_(not a)\n\n def test_null_inside_bstring_array_is_truthy(self):\n a = 
np.array(['spam'], dtype=str)\n a[0] = ' \\0 \\0'\n assert_(a)\n\n\nclass TestUnicodeArrayNonzero:\n\n def test_empty_ustring_array_is_falsey(self):\n assert_(not np.array([''], dtype=np.unicode_))\n\n def test_whitespace_ustring_array_is_falsey(self):\n a = np.array(['eggs'], dtype=np.unicode_)\n a[0] = ' \\0\\0'\n assert_(not a)\n\n def test_all_null_ustring_array_is_falsey(self):\n a = np.array(['eggs'], dtype=np.unicode_)\n a[0] = '\\0\\0\\0\\0'\n assert_(not a)\n\n def test_null_inside_ustring_array_is_truthy(self):\n a = np.array(['eggs'], dtype=np.unicode_)\n a[0] = ' \\0 \\0'\n assert_(a)\n\n\nclass TestFormat:\n\n def test_0d(self):\n a = np.array(np.pi)\n assert_equal('{:0.3g}'.format(a), '3.14')\n assert_equal('{:0.3g}'.format(a[()]), '3.14')\n\n def test_1d_no_format(self):\n a = np.array([np.pi])\n assert_equal('{}'.format(a), str(a))\n\n def test_1d_format(self):\n # until gh-5543, ensure that the behaviour matches what it used to be\n a = np.array([np.pi])\n if sys.version_info[:2] >= (3, 4):\n assert_raises(TypeError, '{:30}'.format, a)\n else:\n with suppress_warnings() as sup:\n sup.filter(PendingDeprecationWarning)\n res = '{:30}'.format(a)\n dst = object.__format__(a, '30')\n assert_equal(res, dst)\n\nfrom numpy.testing import IS_PYPY\n\nclass TestCTypes:\n\n def test_ctypes_is_available(self):\n test_arr = np.array([[1, 2, 3], [4, 5, 6]])\n\n assert_equal(ctypes, test_arr.ctypes._ctypes)\n assert_equal(tuple(test_arr.ctypes.shape), (2, 3))\n\n def test_ctypes_is_not_available(self):\n from numpy.core import _internal\n _internal.ctypes = None\n try:\n test_arr = np.array([[1, 2, 3], [4, 5, 6]])\n\n assert_(isinstance(test_arr.ctypes._ctypes,\n _internal._missing_ctypes))\n assert_equal(tuple(test_arr.ctypes.shape), (2, 3))\n finally:\n _internal.ctypes = ctypes\n\n def _make_readonly(x):\n x.flags.writeable = False\n return x\n\n @pytest.mark.parametrize('arr', [\n np.array([1, 2, 3]),\n np.array([['one', 'two'], ['three', 'four']]),\n np.array((1, 2), dtype='i4,i4'),\n np.zeros((2,), dtype=\n np.dtype(dict(\n formats=['<i4', '<i4'],\n names=['a', 'b'],\n offsets=[0, 2],\n itemsize=6\n ))\n ),\n np.array([None], dtype=object),\n np.array([]),\n np.empty((0, 0)),\n _make_readonly(np.array([1, 2, 3])),\n ], ids=[\n '1d',\n '2d',\n 'structured',\n 'overlapping',\n 'object',\n 'empty',\n 'empty-2d',\n 'readonly'\n ])\n def test_ctypes_data_as_holds_reference(self, arr):\n # gh-9647\n # create a copy to ensure that pytest does not mess with the refcounts\n arr = arr.copy()\n\n arr_ref = weakref.ref(arr)\n\n ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)\n\n # `ctypes_ptr` should hold onto `arr`\n del arr\n break_cycles()\n assert_(arr_ref() is not None, \"ctypes pointer did not hold onto a reference\")\n\n # but when the `ctypes_ptr` object dies, so should `arr`\n del ctypes_ptr\n if IS_PYPY:\n # Pypy does not recycle arr objects immediately. Trigger gc to\n # release arr. Cpython uses refcounts. 
An explicit call to gc\n # should not be needed here.\n break_cycles()\n assert_(arr_ref() is None, \"unknowable whether ctypes pointer holds a reference\")\n\n def test_ctypes_as_parameter_holds_reference(self):\n arr = np.array([None]).copy()\n\n arr_ref = weakref.ref(arr)\n\n ctypes_ptr = arr.ctypes._as_parameter_\n\n # `ctypes_ptr` should hold onto `arr`\n del arr\n break_cycles()\n assert_(arr_ref() is not None, \"ctypes pointer did not hold onto a reference\")\n\n # but when the `ctypes_ptr` object dies, so should `arr`\n del ctypes_ptr\n if IS_PYPY:\n break_cycles()\n assert_(arr_ref() is None, \"unknowable whether ctypes pointer holds a reference\")\n\n\nclass TestWritebackIfCopy:\n # all these tests use the WRITEBACKIFCOPY mechanism\n def test_argmax_with_out(self):\n mat = np.eye(5)\n out = np.empty(5, dtype='i2')\n res = np.argmax(mat, 0, out=out)\n assert_equal(res, range(5))\n\n def test_argmin_with_out(self):\n mat = -np.eye(5)\n out = np.empty(5, dtype='i2')\n res = np.argmin(mat, 0, out=out)\n assert_equal(res, range(5))\n\n def test_insert_noncontiguous(self):\n a = np.arange(6).reshape(2,3).T # force non-c-contiguous\n # uses arr_insert\n np.place(a, a>2, [44, 55])\n assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))\n # hit one of the failing paths\n assert_raises(ValueError, np.place, a, a>20, [])\n\n def test_put_noncontiguous(self):\n a = np.arange(6).reshape(2,3).T # force non-c-contiguous\n np.put(a, [0, 2], [44, 55])\n assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))\n\n def test_putmask_noncontiguous(self):\n a = np.arange(6).reshape(2,3).T # force non-c-contiguous\n # uses arr_putmask\n np.putmask(a, a>2, a**2)\n assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))\n\n def test_take_mode_raise(self):\n a = np.arange(6, dtype='int')\n out = np.empty(2, dtype='int')\n np.take(a, [0, 2], out=out, mode='raise')\n assert_equal(out, np.array([0, 2]))\n\n def test_choose_mod_raise(self):\n a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])\n out = np.empty((3,3), dtype='int')\n choices = [-10, 10]\n np.choose(a, choices, out=out, mode='raise')\n assert_equal(out, np.array([[ 10, -10, 10],\n [-10, 10, -10],\n [ 10, -10, 10]]))\n\n def test_flatiter__array__(self):\n a = np.arange(9).reshape(3,3)\n b = a.T.flat\n c = b.__array__()\n # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics\n del c\n\n def test_dot_out(self):\n # if HAVE_CBLAS, will use WRITEBACKIFCOPY\n a = np.arange(9, dtype=float).reshape(3,3)\n b = np.dot(a, a, out=a)\n assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))\n\n def test_view_assign(self):\n from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve\n\n arr = np.arange(9).reshape(3, 3).T\n arr_wb = npy_create_writebackifcopy(arr)\n assert_(arr_wb.flags.writebackifcopy)\n assert_(arr_wb.base is arr)\n arr_wb[...] = -100\n npy_resolve(arr_wb)\n # arr changes after resolve, even though we assigned to arr_wb\n assert_equal(arr, -100)\n # after resolve, the two arrays no longer reference each other\n assert_(arr_wb.ctypes.data != 0)\n assert_equal(arr_wb.base, None)\n # assigning to arr_wb does not get transferred to arr\n arr_wb[...] 
= 100\n assert_equal(arr, -100)\n\n @pytest.mark.leaks_references(\n reason=\"increments self in dealloc; ignore since deprecated path.\")\n def test_dealloc_warning(self):\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning)\n arr = np.arange(9).reshape(3, 3)\n v = arr.T\n _multiarray_tests.npy_abuse_writebackifcopy(v)\n assert len(sup.log) == 1\n\n def test_view_discard_refcount(self):\n from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard\n\n arr = np.arange(9).reshape(3, 3).T\n orig = arr.copy()\n if HAS_REFCOUNT:\n arr_cnt = sys.getrefcount(arr)\n arr_wb = npy_create_writebackifcopy(arr)\n assert_(arr_wb.flags.writebackifcopy)\n assert_(arr_wb.base is arr)\n arr_wb[...] = -100\n npy_discard(arr_wb)\n # arr remains unchanged after discard\n assert_equal(arr, orig)\n # after discard, the two arrays no longer reference each other\n assert_(arr_wb.ctypes.data != 0)\n assert_equal(arr_wb.base, None)\n if HAS_REFCOUNT:\n assert_equal(arr_cnt, sys.getrefcount(arr))\n # assigning to arr_wb does not get transferred to arr\n arr_wb[...] = 100\n assert_equal(arr, orig)\n\n\nclass TestArange:\n def test_infinite(self):\n assert_raises_regex(\n ValueError, \"size exceeded\",\n np.arange, 0, np.inf\n )\n\n def test_nan_step(self):\n assert_raises_regex(\n ValueError, \"cannot compute length\",\n np.arange, 0, 1, np.nan\n )\n\n def test_zero_step(self):\n assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)\n assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)\n\n # empty range\n assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)\n assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)\n\n\nclass TestArrayFinalize:\n \"\"\" Tests __array_finalize__ \"\"\"\n\n def test_receives_base(self):\n # gh-11237\n class SavesBase(np.ndarray):\n def __array_finalize__(self, obj):\n self.saved_base = self.base\n\n a = np.array(1).view(SavesBase)\n assert_(a.saved_base is a.base)\n\n def test_lifetime_on_error(self):\n # gh-11237\n class RaisesInFinalize(np.ndarray):\n def __array_finalize__(self, obj):\n # crash, but keep this object alive\n raise Exception(self)\n\n # a plain object can't be weakref'd\n class Dummy: pass\n\n # get a weak reference to an object within an array\n obj_arr = np.array(Dummy())\n obj_ref = weakref.ref(obj_arr[()])\n\n # get an array that crashed in __array_finalize__\n with assert_raises(Exception) as e:\n obj_arr.view(RaisesInFinalize)\n\n obj_subarray = e.exception.args[0]\n del e\n assert_(isinstance(obj_subarray, RaisesInFinalize))\n\n # reference should still be held by obj_arr\n break_cycles()\n assert_(obj_ref() is not None, \"object should not already be dead\")\n\n del obj_arr\n break_cycles()\n assert_(obj_ref() is not None, \"obj_arr should not hold the last reference\")\n\n del obj_subarray\n break_cycles()\n assert_(obj_ref() is None, \"no references should remain\")\n\n\ndef test_orderconverter_with_nonASCII_unicode_ordering():\n # gh-7475\n a = np.arange(5)\n assert_raises(ValueError, a.flatten, order=u'\\xe2')\n\n\ndef test_equal_override():\n # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which\n # did not respect overrides with __array_priority__ or __array_ufunc__.\n # The PR fixed this for __array_priority__ and __array_ufunc__ = None.\n class MyAlwaysEqual:\n def __eq__(self, other):\n return \"eq\"\n\n def __ne__(self, other):\n return \"ne\"\n\n class MyAlwaysEqualOld(MyAlwaysEqual):\n __array_priority__ = 10000\n\n class MyAlwaysEqualNew(MyAlwaysEqual):\n __array_ufunc__ = None\n\n 
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')\n for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:\n my_always_equal = my_always_equal_cls()\n assert_equal(my_always_equal == array, 'eq')\n assert_equal(array == my_always_equal, 'eq')\n assert_equal(my_always_equal != array, 'ne')\n assert_equal(array != my_always_equal, 'ne')\n\n\ndef test_npymath_complex():\n # Smoketest npymath functions\n from numpy.core._multiarray_tests import (\n npy_cabs, npy_carg)\n\n funcs = {npy_cabs: np.absolute,\n npy_carg: np.angle}\n vals = (1, np.inf, -np.inf, np.nan)\n types = (np.complex64, np.complex128, np.clongdouble)\n\n for fun, npfun in funcs.items():\n for x, y in itertools.product(vals, vals):\n for t in types:\n z = t(complex(x, y))\n got = fun(z)\n expected = npfun(z)\n assert_allclose(got, expected)\n\n\ndef test_npymath_real():\n # Smoketest npymath functions\n from numpy.core._multiarray_tests import (\n npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)\n\n funcs = {npy_log10: np.log10,\n npy_cosh: np.cosh,\n npy_sinh: np.sinh,\n npy_tan: np.tan,\n npy_tanh: np.tanh}\n vals = (1, np.inf, -np.inf, np.nan)\n types = (np.float32, np.float64, np.longdouble)\n\n with np.errstate(all='ignore'):\n for fun, npfun in funcs.items():\n for x, t in itertools.product(vals, types):\n z = t(x)\n got = fun(z)\n expected = npfun(z)\n assert_allclose(got, expected)\n\ndef test_uintalignment_and_alignment():\n # alignment code needs to satisfy these requirements:\n # 1. numpy structs match C struct layout\n # 2. ufuncs/casting is safe wrt to aligned access\n # 3. copy code is safe wrt to \"uint alidned\" access\n #\n # Complex types are the main problem, whose alignment may not be the same\n # as their \"uint alignment\".\n #\n # This test might only fail on certain platforms, where uint64 alignment is\n # not equal to complex64 alignment. 
The second 2 tests will only fail\n # for DEBUG=1.\n\n d1 = np.dtype('u1,c8', align=True)\n d2 = np.dtype('u4,c8', align=True)\n d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)\n\n assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)\n assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)\n assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)\n\n # check that C struct matches numpy struct size\n s = _multiarray_tests.get_struct_alignments()\n for d, (alignment, size) in zip([d1,d2,d3], s):\n assert_equal(d.alignment, alignment)\n assert_equal(d.itemsize, size)\n\n # check that ufuncs don't complain in debug mode\n # (this is probably OK if the aligned flag is true above)\n src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often\n np.exp(src) # assert fails?\n\n # check that copy code doesn't complain in debug mode\n dst = np.zeros((2,2), dtype='c8')\n dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?\n\nclass TestAlignment:\n # adapted from scipy._lib.tests.test__util.test__aligned_zeros\n # Checks that unusual memory alignments don't trip up numpy.\n # In particular, check RELAXED_STRIDES don't trip alignment assertions in\n # NDEBUG mode for size-0 arrays (gh-12503)\n\n def check(self, shape, dtype, order, align):\n err_msg = repr((shape, dtype, order, align))\n x = _aligned_zeros(shape, dtype, order, align=align)\n if align is None:\n align = np.dtype(dtype).alignment\n assert_equal(x.__array_interface__['data'][0] % align, 0)\n if hasattr(shape, '__len__'):\n assert_equal(x.shape, shape, err_msg)\n else:\n assert_equal(x.shape, (shape,), err_msg)\n assert_equal(x.dtype, dtype)\n if order == \"C\":\n assert_(x.flags.c_contiguous, err_msg)\n elif order == \"F\":\n if x.size > 0:\n assert_(x.flags.f_contiguous, err_msg)\n elif order is None:\n assert_(x.flags.c_contiguous, err_msg)\n else:\n raise ValueError()\n\n def test_various_alignments(self):\n for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:\n for n in [0, 1, 3, 11]:\n for order in [\"C\", \"F\", None]:\n for dtype in list(np.typecodes[\"All\"]) + ['i4,i4,i4']:\n if dtype == 'O':\n # object dtype can't be misaligned\n continue\n for shape in [n, (1, 2, 3, n)]:\n self.check(shape, np.dtype(dtype), order, align)\n\n def test_strided_loop_alignments(self):\n # particularly test that complex64 and float128 use right alignment\n # code-paths, since these are particularly problematic. 
It is useful to\n # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.\n for align in [1, 2, 4, 8, 12, 16, None]:\n xf64 = _aligned_zeros(3, np.float64)\n\n xc64 = _aligned_zeros(3, np.complex64, align=align)\n xf128 = _aligned_zeros(3, np.longdouble, align=align)\n\n # test casting, both to and from misaligned\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning, \"Casting complex values\")\n xc64.astype('f8')\n xf64.astype(np.complex64)\n test = xc64 + xf64\n\n xf128.astype('f8')\n xf64.astype(np.longdouble)\n test = xf128 + xf64\n\n test = xf128 + xc64\n\n # test copy, both to and from misaligned\n # contig copy\n xf64[:] = xf64.copy()\n xc64[:] = xc64.copy()\n xf128[:] = xf128.copy()\n # strided copy\n xf64[::2] = xf64[::2].copy()\n xc64[::2] = xc64[::2].copy()\n xf128[::2] = xf128[::2].copy()\n\ndef test_getfield():\n a = np.arange(32, dtype='uint16')\n if sys.byteorder == 'little':\n i = 0\n j = 1\n else:\n i = 1\n j = 0\n b = a.getfield('int8', i)\n assert_equal(b, a)\n b = a.getfield('int8', j)\n assert_equal(b, 0)\n pytest.raises(ValueError, a.getfield, 'uint8', -1)\n pytest.raises(ValueError, a.getfield, 'uint8', 16)\n pytest.raises(ValueError, a.getfield, 'uint64', 0)\n" ]
[ [ "numpy.sqrt", "numpy.core._multiarray_tests.test_as_c_array", "numpy.all", "numpy.min_scalar_type", "numpy.exp", "numpy.testing.break_cycles", "numpy.where", "numpy.place", "numpy.matmul", "numpy.core._multiarray_tests.test_pydatamem_seteventhook_end", "numpy.core._multiarray_tests.get_buffer_info", "numpy.sin", "numpy.frombuffer", "numpy.core.records.fromstring", "numpy.zeros", "numpy.testing.assert_raises_regex", "numpy.core._multiarray_tests.get_c_wrapping_array", "numpy.multiply", "numpy.random.choice", "numpy.sctypes.values", "numpy.core._multiarray_tests.incref_elide", "numpy.testing.assert_raises", "numpy.array", "numpy.fromfile", "numpy.inner", "numpy.void", "numpy.indices", "numpy.random.shuffle", "numpy.datetime64", "numpy.testing.assert_array_equal", "numpy.testing.assert_array_less", "numpy.add", "numpy.asarray", "numpy.core._multiarray_tests.IsPythonScalar", "numpy.core._multiarray_tests.npy_abuse_writebackifcopy", "numpy.ndarray", "numpy.random.random_sample", "numpy.iinfo", "numpy.core.records.fromarrays", "numpy.var", "numpy.core._multiarray_tests.test_neighborhood_iterator_oob", "numpy.uint32", "numpy.testing.suppress_warnings", "numpy.float16", "numpy.core.numeric.set_string_function", "numpy.save", "numpy.std", "numpy.argmax", "numpy.float32", "numpy.putmask", "numpy.core.tests._locales.CommaDecimalPointLocale", "numpy.min", "numpy.amin", "numpy.alen", "numpy.asfortranarray", "numpy.core._internal._dtype_from_pep3118", "numpy.modf", "numpy.timedelta64", "numpy.testing.temppath", "numpy.random.rand", "numpy.testing.assert_", "numpy.errstate", "numpy.testing.assert_warns", "numpy.rec.fromarrays", "numpy.ones", "numpy.vectorize", "numpy.isscalar", "numpy.empty", "numpy.can_cast", "numpy.take", "numpy.linspace", "numpy.longdouble", "numpy.typeDict.values", "numpy.core.multiarray.dot", "numpy.zeros_like", "numpy.argmin", "numpy.mean", "numpy.bool_", "numpy.roll", "numpy.conjugate", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.swapaxes", "numpy.core._multiarray_tests.test_inplace_increment", "numpy.eye", "numpy.int8", "numpy.lexsort", "numpy.choose", "numpy.argpartition", "numpy.count_nonzero", "numpy.repeat", "numpy.random.PCG64", "numpy.testing.assert_array_almost_equal", "numpy.unicode_", "numpy.core._multiarray_tests.test_neighborhood_iterator", "numpy.transpose", "numpy.testing.assert_allclose", "numpy.argsort", "numpy.vdot", "numpy.tile", "numpy.core._multiarray_tests.test_pydatamem_seteventhook_start", "numpy.broadcast_to", "numpy.dot", "numpy.true_divide", "numpy.amax", "numpy.dtype", "numpy.core._multiarray_tests.get_struct_alignments", "numpy.max", "numpy.any", "numpy.core._multiarray_tests.npy_resolve", "numpy.partition", "numpy.ones_like", "numpy.core._multiarray_tests.npy_discard", "numpy.arange", "numpy.stack", "numpy.testing.assert_almost_equal", "numpy.core._multiarray_tests.npy_create_writebackifcopy", "numpy.bytes_", "numpy.load", "numpy.ascontiguousarray", "numpy.int_", "numpy.rec.fromrecords", "numpy.random.random", "numpy.random.seed", "numpy.isfinite", "numpy.core._multiarray_tests.incref_elide_l", "numpy.put", "numpy.intp", "numpy.compress", "numpy.sort", "numpy.result_type", "numpy.random.normal", "numpy.fromstring", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kaymes/terracotta
[ "30730e78e204b573d1c3eb755a0107e3f73021f0" ]
[ "terracotta/image.py" ]
[ "\"\"\"image.py\n\nUtilities to create and manipulate images.\n\"\"\"\n\nfrom typing import Sequence, Tuple, TypeVar, Union\nfrom typing.io import BinaryIO\n\nfrom io import BytesIO\n\nimport numpy as np\nfrom PIL import Image\n\nfrom terracotta.profile import trace\nfrom terracotta import exceptions, get_settings\n\nNumber = TypeVar('Number', int, float)\nRGBA = Tuple[Number, Number, Number, Number]\nPalette = Sequence[RGBA]\nArray = Union[np.ndarray, np.ma.MaskedArray]\n\n\n@trace('array_to_png')\ndef array_to_png(img_data: Array,\n colormap: Union[str, Palette, None] = None) -> BinaryIO:\n \"\"\"Encode an 8bit array as PNG\"\"\"\n from terracotta.cmaps import get_cmap\n\n transparency: Union[Tuple[int, int, int], int, bytes]\n\n settings = get_settings()\n compress_level = settings.PNG_COMPRESS_LEVEL\n\n if img_data.ndim == 3: # encode RGB image\n if img_data.shape[-1] != 3:\n raise ValueError('3D input arrays must have three bands')\n\n if colormap is not None:\n raise ValueError('Colormap argument cannot be given for multi-band data')\n\n mode = 'RGB'\n transparency = (0, 0, 0)\n palette = None\n\n elif img_data.ndim == 2: # encode paletted image\n mode = 'L'\n\n if colormap is None:\n palette = None\n transparency = 0\n else:\n if isinstance(colormap, str):\n # get and apply colormap by name\n try:\n cmap_vals = get_cmap(colormap)\n except ValueError as exc:\n raise exceptions.InvalidArgumentsError(\n f'Encountered invalid color map {colormap}') from exc\n palette = np.concatenate((\n np.zeros(3, dtype='uint8'),\n cmap_vals[:, :-1].flatten()\n ))\n transparency_arr = np.concatenate((\n np.zeros(1, dtype='uint8'),\n cmap_vals[:, -1]\n ))\n else:\n # explicit mapping\n if len(colormap) > 255:\n raise exceptions.InvalidArgumentsError(\n 'Explicit color map must contain less than 256 values'\n )\n\n colormap_array = np.asarray(colormap, dtype='uint8')\n if colormap_array.ndim != 2 or colormap_array.shape[1] != 4:\n raise ValueError('Explicit color mapping must have shape (n, 4)')\n\n rgb, alpha = colormap_array[:, :-1], colormap_array[:, -1]\n palette = np.concatenate((\n np.zeros(3, dtype='uint8'),\n rgb.flatten(),\n np.zeros(3 * (256 - len(colormap) - 1), dtype='uint8')\n ))\n\n # PIL expects paletted transparency as raw bytes\n transparency_arr = np.concatenate((\n np.zeros(1, dtype='uint8'),\n alpha,\n np.zeros(256 - len(colormap) - 1, dtype='uint8')\n ))\n\n assert transparency_arr.shape == (256,)\n assert transparency_arr.dtype == 'uint8'\n transparency = transparency_arr.tobytes()\n\n assert palette.shape == (3 * 256,), palette.shape\n else:\n raise ValueError('Input array must have 2 or 3 dimensions')\n\n if isinstance(img_data, np.ma.MaskedArray):\n img_data = img_data.filled(0)\n\n img = Image.fromarray(img_data, mode=mode)\n\n if palette is not None:\n img.putpalette(palette)\n\n sio = BytesIO()\n img.save(sio, 'png', compress_level=compress_level, transparency=transparency)\n sio.seek(0)\n return sio\n\n\ndef empty_image(size: Tuple[int, int]) -> BinaryIO:\n \"\"\"Return a fully transparent PNG image of given size\"\"\"\n settings = get_settings()\n compress_level = settings.PNG_COMPRESS_LEVEL\n\n img = Image.new(mode='P', size=size, color=0)\n\n sio = BytesIO()\n img.save(sio, 'png', compress_level=compress_level, transparency=0)\n sio.seek(0)\n return sio\n\n\n@trace('contrast_stretch')\ndef contrast_stretch(data: Array,\n in_range: Sequence[Number],\n out_range: Sequence[Number],\n clip: bool = True) -> Array:\n \"\"\"Normalize input array from in_range to 
out_range\"\"\"\n lower_bound_in, upper_bound_in = in_range\n lower_bound_out, upper_bound_out = out_range\n\n out_data = data.astype('float64', copy=True)\n out_data -= lower_bound_in\n norm = upper_bound_in - lower_bound_in\n if abs(norm) > 1e-8: # prevent division by 0\n out_data *= (upper_bound_out - lower_bound_out) / norm\n out_data += lower_bound_out\n\n if clip:\n np.clip(out_data, lower_bound_out, upper_bound_out, out=out_data)\n\n return out_data\n\n\ndef to_uint8(data: Array, lower_bound: Number, upper_bound: Number) -> Array:\n \"\"\"Re-scale an array to [1, 255] and cast to uint8 (0 is used for transparency)\"\"\"\n rescaled = contrast_stretch(data, (lower_bound, upper_bound), (1, 255), clip=True)\n return rescaled.astype(np.uint8)\n\n\ndef label(data: Array, labels: Sequence[Number]) -> Array:\n \"\"\"Create a labelled uint8 version of data, with output values starting at 1.\n\n Values not found in labels are replaced by 0.\n\n Example:\n\n >>> data = np.array([15, 16, 17])\n >>> label(data, [17, 15])\n np.array([2, 0, 1])\n\n \"\"\"\n if len(labels) > 255:\n raise ValueError('Cannot fit more than 255 labels')\n\n out_data = np.zeros(data.shape, dtype='uint8')\n for i, label in enumerate(labels, 1):\n out_data[data == label] = i\n\n return out_data\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexstaley/machine-learning
[ "15a880638e45da64a4d52654c2757684ddd11cac" ]
[ "kMeans/Getters.py" ]
[ "\"\"\"\n Alex Staley -- Student ID: 919519311\n Assignment 4 -- March 2020\n\n This file defines the low-level functionality of the K-clusters\n algorithm. Implemented below are the following functions:\n\n # getRandomCenters() picks NUM_CLUSTERS random training\n objects to use as initial cluster centers.\n # getCenter() calculates the mean (centroid) of a given cluster.\n # getClosestIndex() returns the index of the cluster center\n closest to an object.\n # getAvgMSE() computes the average mean square error over all clusters.\n # getMeanSqSep() computes the mean square separation of the clusters.\n # getMeanEntropy() computes the mean entropy of the clusters.\n # getEntropy() computes the entropy of a given cluster.\n # getConvergence() determines if the K-means algorithm has converged.\n # getConfused() processes prediction accuracy data into a\n confusion matrix.\n\"\"\"\n\nimport numpy as np\nimport random as rand\nfrom kMeans.Specs import *\n\n\n# Pick NUM_CLUSTERS objects at random to act as initial centers\ndef getRandomCenters(features):\n \"\"\"\n :param features: training data set\n\n :return: centers: mean values per cluster per attribute\n \"\"\"\n unique = False\n centers = []\n centerIDs = np.array([])\n\n # Get NUM_CLUSTERS unique random indices:\n while not unique:\n centerIDs = np.random.randint(low=0, high=NUM_TRAINING_ROWS, size=NUM_CLUSTERS)\n unique = True\n for i in range(NUM_CLUSTERS):\n for j in range(NUM_CLUSTERS):\n if centerIDs[i] == centerIDs[j] and not i == j:\n unique = False\n\n # Make an array of the corresponding features:\n for i in range(NUM_CLUSTERS):\n centers.append(features[centerIDs[i], 0:-1])\n\n return centers\n\n\n# Compute the mean values of a single cluster:\ndef getCenter(cluster):\n \"\"\"\n :param cluster: training objects assigned to a cluster\n\n :return: array of mean values of each attribute for a cluster\n \"\"\"\n numObjects = np.size(cluster, axis=0)\n means = np.zeros(NUM_ATTRIBUTES)\n if numObjects == 0:\n return means\n\n # Get mean of each attribute over the cluster:\n for i in range(NUM_ATTRIBUTES):\n for j in range(numObjects):\n means[i] += (cluster[j][i])\n means[i] = means[i] / numObjects\n\n return means\n\n\n# Get the index of the closest center to an object\ndef getClosestIndex(distances):\n \"\"\"\n :param distances: array of distances from an object to all cluster centers\n :return: closestIndex: index of the closest cluster center to the object\n \"\"\"\n size = np.size(distances)\n closest = LARGE_NUMBER\n closestIndex = 0\n tied = [0]\n\n # Find the minimum distance:\n for i in range(size):\n if distances[i] <= closest:\n closest = distances[i]\n closestIndex = i\n\n # Check for ties:\n tied[0] = closestIndex\n for i in range(size):\n if not i == closestIndex:\n if distances[i] == closest:\n tied.append(i)\n\n # If there is a tie, pick a winner at random:\n if not len(tied) == 1:\n winner = rand.randint(0, len(tied) - 1)\n return tied[winner]\n\n return closestIndex\n\n\n# Calculate the average mean square error over all clusters\ndef getAvgMSE(centers, clusters):\n \"\"\"\n :param centers: mean values per cluster per attribute\n :param clusters: nested list of clustered training objects\n\n :return: avgMSE: average mean square error over all clusters\n \"\"\"\n mse = 0.\n avgMSE = [x for x in range(NUM_CLUSTERS)]\n\n for i in range(NUM_CLUSTERS):\n clusterSize = len(clusters[i])\n # Get the mean square error of each cluster:\n for j in range(clusterSize):\n # Omit ground truth labels:\n blindCluster = 
np.copy(clusters[i][j])\n blindCluster = np.delete(blindCluster, -1)\n mse += np.sum(np.square(np.subtract(blindCluster, centers[i])))\n mse = mse / clusterSize\n avgMSE[i] = mse\n avgMSE = np.mean(avgMSE)\n return avgMSE\n\n\n# Calculate the mean square separation for the set of clusters\ndef getMeanSqSep(centers):\n \"\"\"\n :param centers: mean values per cluster per attribute\n\n :return: meanSqSep: mean square separation\n \"\"\"\n mss = 0.\n for i in range(NUM_CLUSTERS-1):\n for j in range(i+1, NUM_CLUSTERS):\n mss += np.sum(np.square(np.subtract(centers[i], centers[j])))\n mss = mss / (0.5*NUM_CLUSTERS*(NUM_CLUSTERS-1))\n return mss\n\n\n# Calculate the mean entropy of the set of clusters\ndef getMeanEntropy(clusters):\n \"\"\"\n :param clusters: nested list of clustered training data\n\n :return: meanEntropy: measure of disorder of cluster distribution\n \"\"\"\n meanEntropy = 0.\n\n # Sum the entropy of each cluster:\n for i in range(NUM_CLUSTERS):\n clusterSize = len(clusters[i])\n meanEntropy += clusterSize * getEntropy(clusters[i])\n\n return meanEntropy / NUM_TRAINING_ROWS\n\n\n# Calculate the entropy of a cluster\ndef getEntropy(cluster):\n \"\"\"\n :param cluster: a cluster of training data (including ground truth label)\n\n :return: entropy: measure of disorder within the cluster\n \"\"\"\n clusterSize = len(cluster)\n classCount = np.zeros(NUM_CLASSES, dtype=int)\n entropy = 0.\n\n # Count the instances of each class:\n for i in range(clusterSize):\n classCount[cluster[i][-1]] += 1\n\n # Calculate entropy:\n for i in range(NUM_CLASSES):\n if not classCount[i] == 0:\n prob = classCount[i]/NUM_TRAINING_ROWS\n entropy += (prob * np.log2(prob))\n return -entropy\n\n\n# Check if the clusters have converged\ndef getConvergence(nClust, oClust):\n \"\"\"\n :param nClust: new set of clustered training data\n :param oClust: old set of clustered training data\n\n :return: True if the clusters match. False otherwise\n \"\"\"\n # Compare sizes of corresponding clusters:\n for i in range(NUM_CLUSTERS):\n if not len(oClust[i]) == len(nClust[i]):\n return False\n return True\n\n\n# Generate the confusion matrix\ndef getConfused(labels, predictions):\n \"\"\"\n :param labels: ground truth labels\n :param predictions: predicted class assignments\n\n :return: confusionMatrix: actual vs predicted classes\n \"\"\"\n confusionMatrix = np.zeros((NUM_CLASSES, NUM_CLASSES), dtype=int)\n\n # Increment the corresponding entry in each (actual, predicted) pair\n for i in range(np.size(labels)):\n confusionMatrix[labels[i], predictions[i]] += 1\n return confusionMatrix\n" ]
[ [ "numpy.log2", "numpy.subtract", "numpy.size", "numpy.copy", "numpy.mean", "numpy.delete", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MohammadAminAlamalhoda/Deep-Object
[ "1f96e7f2d31e1d45d6ed9240b23cae5000276381" ]
[ "datas.py" ]
[ "import torch\nfrom torch.utils.data import DataLoader, Dataset, random_split\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport mat73\nimport numpy as np\nimport os\nimport random\nimport utils\n\nimport torchvision.transforms.functional as TF\n\nclass ODD_Dataset(Dataset):\n\n '''\n This class loads images, depths, and labels from .mat file.\n \n init inputs = path to .mat file, torch vision transfomr(optional).\n '''\n\n def __init__(self, root_folder, num_datas, transform=None):\n \n self.root_folder = root_folder\n self.transforms = transform\n self.num_datas = num_datas\n\n self.image_ids = ['imgNo{0}.png'.format(i) for i in range(self.num_datas)]\n self.label_ids = ['labelNo{0}.png'.format(i) for i in range(self.num_datas)]\n self.depth_ids = ['depthNo{0}.png'.format(i) for i in range(self.num_datas)]\n\n def __len__(self):\n return self.num_datas\n\n def __getitem__(self, index):\n img_id = self.image_ids[index]\n label_id = self.label_ids[index]\n depth_id = self.depth_ids[index]\n\n images = Image.open(os.path.join(self.root_folder, img_id)).convert(\"RGB\")\n mask = Image.open(os.path.join(self.root_folder, label_id)).convert(\"RGB\")\n depths = Image.open(os.path.join(self.root_folder, depth_id)).convert(\"RGB\")\n\n mask = np.array(mask)[:,:,1]\n obj_ids = np.unique(mask)\n obj_ids = obj_ids[1:]\n masks = mask == obj_ids[:, None, None]\n num_objs = len(obj_ids)\n boxes = []\n for i in range(num_objs):\n pos = np.where(masks[i])\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n boxes.append([xmin, ymin, xmax, ymax])\n\n obj_ids = torch.as_tensor(obj_ids, dtype=torch.int64)\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n image_id = torch.tensor([index])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n \n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = obj_ids\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n\n if self.transforms is not None:\n images, depths, target = self.transforms(images, depths, target)\n\n return images, depths, target\n\n\n\ndef get_loader(\n root_folder, transform=None, batch_size=32, num_datas=50, \n num_workers=0, shuffle=True, pin_memory=True, train_test_ratio=1.0):\n '''\n This function returns train and test data loaders.\n inputs = path to .mat file, torch vision transfomr(optional), batch_size, num_workers.\n outputs = returns train dataloader, dataset train, test dataloader, dataset test\n '''\n dataset = ODD_Dataset(root_folder, num_datas=num_datas, transform=transform)\n\n dataset_train, dataset_test = random_split(dataset, \n [int(len(dataset)*train_test_ratio), \n len(dataset)-int(len(dataset)*train_test_ratio)],\n generator=torch.Generator().manual_seed(42))\n\n train_loader = DataLoader(\n dataset = dataset_train,\n batch_size = batch_size,\n num_workers = num_workers,\n shuffle = shuffle,\n pin_memory = pin_memory,\n collate_fn=utils.collate_fn,\n )\n\n test_loader = DataLoader(\n dataset = dataset_test,\n batch_size = batch_size,\n num_workers = num_workers,\n shuffle = shuffle,\n pin_memory = pin_memory,\n collate_fn=utils.collate_fn,\n ) \n\n return train_loader , test_loader" ]
[ [ "torch.Generator", "torch.zeros", "numpy.unique", "numpy.min", "torch.utils.data.DataLoader", "torch.tensor", "numpy.max", "numpy.array", "numpy.where", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
axb2035/keras-gym
[ "076ebbca022f4dbdcae2a14967f824652fe473c3" ]
[ "keras_gym/caching/test_experience_replay.py" ]
[ "import gym\nimport numpy as np\nfrom .experience_replay import ExperienceReplayBuffer\n\n\nclass MockEnv:\n action_space = gym.spaces.Discrete(7)\n\n def __init__(self, num_frames):\n self.num_frames = num_frames\n\n\nclass TestExperienceReplayBuffer:\n N = 7\n S = np.expand_dims(np.arange(N), axis=1)\n A = S[:, 0] % 100\n R = S[:, 0]\n D = np.zeros(N, dtype='bool')\n D[-1] = True\n EPISODE = list(zip(S, A, R, D))\n\n def test_add(self):\n buffer = ExperienceReplayBuffer(MockEnv(1), capacity=17)\n for i, (s, a, r, done) in enumerate(self.EPISODE, 1):\n buffer.add(s + 100, a, r + 100, done, episode_id=1)\n assert len(buffer) == max(0, i - buffer.bootstrap_n)\n\n np.testing.assert_array_equal(\n buffer._e[:7],\n [1, 1, 1, 1, 1, 1, 1])\n np.testing.assert_array_equal(\n buffer._d[:7].astype('int'),\n [0, 0, 0, 0, 0, 0, 1])\n\n for i, (s, a, r, done) in enumerate(self.EPISODE, i + 1):\n buffer.add(s + 200, a, r + 200, done, episode_id=2)\n assert len(buffer) == max(0, i - buffer.bootstrap_n)\n\n np.testing.assert_array_equal(\n buffer._e[:14],\n [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2])\n np.testing.assert_array_equal(\n buffer._d[:14].astype('int'),\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])\n\n for i, (s, a, r, done) in enumerate(self.EPISODE, i + 1):\n buffer.add(s + 300, a, r + 300, done, episode_id=3)\n assert len(buffer) == np.clip(i - buffer.bootstrap_n, 0, 17)\n\n # buffer wraps around and overwrites oldest transitions\n np.testing.assert_array_equal(\n buffer._e,\n [3, 3, 3, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3])\n np.testing.assert_array_equal(\n buffer._d.astype('int'),\n [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n np.testing.assert_array_equal(\n buffer._p,\n [\n [0, 0, 0, 0, 1, 0, 0], # a=4\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [0, 0, 0, 1, 0, 0, 0], # a=3\n [0, 0, 0, 0, 1, 0, 0], # a=4\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [1, 0, 0, 0, 0, 0, 0], # a=0\n [0, 1, 0, 0, 0, 0, 0], # a=1\n [0, 0, 1, 0, 0, 0, 0], # a=2\n [0, 0, 0, 1, 0, 0, 0], # a=3\n [0, 0, 0, 0, 1, 0, 0], # a=4\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [1, 0, 0, 0, 0, 0, 0], # a=0\n [0, 1, 0, 0, 0, 0, 0], # a=1\n [0, 0, 1, 0, 0, 0, 0], # a=2\n [0, 0, 0, 1, 0, 0, 0], # a=3\n ])\n\n def test_sample(self):\n buffer = ExperienceReplayBuffer(\n env=MockEnv(num_frames=3), capacity=17, random_seed=13,\n batch_size=16, bootstrap_n=2)\n\n for ep in (1, 2, 3):\n for s, a, r, done in self.EPISODE:\n buffer.add(\n s[[0, 0, 0]] + ep * 100, a, r + ep * 100, done,\n episode_id=ep)\n\n # quickly check content, just to be safe\n np.testing.assert_array_equal(\n buffer._p,\n [\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [0, 0, 1, 0, 0, 0, 0], # a=2\n [0, 0, 0, 1, 0, 0, 0], # a=3\n [0, 0, 0, 0, 1, 0, 0], # a=4\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [1, 0, 0, 0, 0, 0, 0], # a=0\n [0, 1, 0, 0, 0, 0, 0], # a=1\n [0, 0, 1, 0, 0, 0, 0], # a=2\n [0, 0, 0, 1, 0, 0, 0], # a=3\n [0, 0, 0, 0, 1, 0, 0], # a=4\n [0, 0, 0, 0, 0, 1, 0], # a=5\n [0, 0, 0, 0, 0, 0, 1], # a=6\n [1, 0, 0, 0, 0, 0, 0], # a=0\n [0, 1, 0, 0, 0, 0, 0], # a=1\n [0, 0, 1, 0, 0, 0, 0], # a=2\n [0, 0, 0, 1, 0, 0, 0], # a=3\n [0, 0, 0, 0, 1, 0, 0], # a=4\n ])\n\n transitions = [[[102, 103, 104], 0.9801, [104, 105, 106]], # bootstrap\n [[203, 204, 205], 0.0000, [205, 206, 300]],\n [[200, 200, 201], 0.9801, [201, 202, 203]], # bootstrap\n [[102, 103, 104], 0.9801, [104, 105, 106]], # bootstrap\n [[104, 105, 106], 0.0000, [106, 200, 
201]],\n [[202, 203, 204], 0.9801, [204, 205, 206]], # bootstrap\n [[300, 300, 300], 0.9801, [300, 301, 302]], # bootstrap\n [[203, 204, 205], 0.0000, [205, 206, 300]],\n [[103, 104, 105], 0.0000, [105, 106, 200]],\n [[203, 204, 205], 0.0000, [205, 206, 300]],\n [[104, 105, 106], 0.0000, [106, 200, 201]],\n [[102, 103, 104], 0.9801, [104, 105, 106]], # bootstrap\n [[200, 200, 201], 0.9801, [201, 202, 203]], # bootstrap\n [[200, 200, 200], 0.9801, [200, 201, 202]], # bootstrap\n [[300, 300, 301], 0.9801, [301, 302, 303]], # bootstrap\n [[203, 204, 205], 0.0000, [205, 206, 300]]]\n\n S, P, Rn, In, S_next, P_next = buffer.sample()\n np.testing.assert_array_equal(In, [tr[1] for tr in transitions])\n np.testing.assert_array_equal(\n S[:, 0, :], [tr[0] for tr in transitions])\n np.testing.assert_array_equal(\n S_next[:, 0, :], [tr[2] for tr in transitions])\n\n # check if actions are separate by bootstrap_n steps\n A = np.argmax(P, axis=1)\n A_next = np.argmax(P_next, axis=1)\n for a, i_next, a_next in zip(A, In, A_next):\n if i_next != 0:\n assert a_next - a == buffer.bootstrap_n\n\n # check if states and actions are aligned\n np.testing.assert_array_equal(S[:, 0, -1] % 100, A)\n np.testing.assert_array_equal(S_next[:, 0, -1] % 100, A_next)\n\n def test_shape(self):\n buffer = ExperienceReplayBuffer(\n env=MockEnv(num_frames=3), capacity=17, batch_size=5,\n random_seed=13)\n\n for ep in (1, 2, 3):\n for i, (_, a, r, done) in enumerate(self.EPISODE):\n s = 100 * ep + i * np.ones((11, 13, 3), dtype='int')\n buffer.add(s, a, r, done, ep)\n\n S, A, Rn, In, S_next, A_next = buffer.sample()\n assert S.shape == (5, 11, 13, 3)\n\n # check if all frames come from the same episode\n np.testing.assert_array_equal(\n S[:, 0, 0, :], # look at upper-left pixel only\n [[304, 305, 306],\n [203, 204, 205],\n [304, 305, 306],\n [200, 200, 201], # note: first frame is repeated\n [104, 105, 106]])\n" ]
[ [ "numpy.clip", "numpy.arange", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.argmax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spongebob03/PyTorch-tutorials-kr
[ "efe11ebede0d3384aacd1bdad5881ea8794223c8" ]
[ "intermediate_source/speech_command_classification_with_torchaudio_tutorial.py" ]
[ "\"\"\"\nSpeech Command Classification with torchaudio\n******************************************\n\nThis tutorial will show you how to correctly format an audio dataset and\nthen train/test an audio classifier network on the dataset.\n\nColab has GPU option available. In the menu tabs, select “Runtime” then\n“Change runtime type”. In the pop-up that follows, you can choose GPU.\nAfter the change, your runtime should automatically restart (which means\ninformation from executed cells disappear).\n\nFirst, let’s import the common torch packages such as\n`torchaudio <https://github.com/pytorch/audio>`__ that can be installed\nby following the instructions on the website.\n\n\"\"\"\n\n# Uncomment the line corresponding to your \"runtime type\" to run in Google Colab\n\n# CPU:\n# !pip install pydub torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html\n\n# GPU:\n# !pip install pydub torch==1.7.0+cu101 torchvision==0.8.1+cu101 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchaudio\nimport sys\n\nimport matplotlib.pyplot as plt\nimport IPython.display as ipd\n\nfrom tqdm import tqdm\n\n\n######################################################################\n# Let’s check if a CUDA GPU is available and select our device. Running\n# the network on a GPU will greatly decrease the training/testing runtime.\n#\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\n\n######################################################################\n# Importing the Dataset\n# ---------------------\n#\n# We use torchaudio to download and represent the dataset. Here we use\n# `SpeechCommands <https://arxiv.org/abs/1804.03209>`__, which is a\n# datasets of 35 commands spoken by different people. The dataset\n# ``SPEECHCOMMANDS`` is a ``torch.utils.data.Dataset`` version of the\n# dataset. In this dataset, all audio files are about 1 second long (and\n# so about 16000 time frames long).\n#\n# The actual loading and formatting steps happen when a data point is\n# being accessed, and torchaudio takes care of converting the audio files\n# to tensors. If one wants to load an audio file directly instead,\n# ``torchaudio.load()`` can be used. It returns a tuple containing the\n# newly created tensor along with the sampling frequency of the audio file\n# (16kHz for SpeechCommands).\n#\n# Going back to the dataset, here we create a subclass that splits it into\n# standard training, validation, testing subsets.\n#\n\nfrom torchaudio.datasets import SPEECHCOMMANDS\nimport os\n\n\nclass SubsetSC(SPEECHCOMMANDS):\n def __init__(self, subset: str = None):\n super().__init__(\"./\", download=True)\n\n def load_list(filename):\n filepath = os.path.join(self._path, filename)\n with open(filepath) as fileobj:\n return [os.path.join(self._path, line.strip()) for line in fileobj]\n\n if subset == \"validation\":\n self._walker = load_list(\"validation_list.txt\")\n elif subset == \"testing\":\n self._walker = load_list(\"testing_list.txt\")\n elif subset == \"training\":\n excludes = load_list(\"validation_list.txt\") + load_list(\"testing_list.txt\")\n excludes = set(excludes)\n self._walker = [w for w in self._walker if w not in excludes]\n\n\n# Create training and testing split of the data. 
We do not use validation in this tutorial.\ntrain_set = SubsetSC(\"training\")\ntest_set = SubsetSC(\"testing\")\n\nwaveform, sample_rate, label, speaker_id, utterance_number = train_set[0]\n\n\n######################################################################\n# A data point in the SPEECHCOMMANDS dataset is a tuple made of a waveform\n# (the audio signal), the sample rate, the utterance (label), the ID of\n# the speaker, the number of the utterance.\n#\n\nprint(\"Shape of waveform: {}\".format(waveform.size()))\nprint(\"Sample rate of waveform: {}\".format(sample_rate))\n\nplt.plot(waveform.t().numpy());\n\n\n######################################################################\n# Let’s find the list of labels available in the dataset.\n#\n\nlabels = sorted(list(set(datapoint[2] for datapoint in train_set)))\nlabels\n\n\n######################################################################\n# The 35 audio labels are commands that are said by users. The first few\n# files are people saying “marvin”.\n#\n\nwaveform_first, *_ = train_set[0]\nipd.Audio(waveform_first.numpy(), rate=sample_rate)\n\nwaveform_second, *_ = train_set[1]\nipd.Audio(waveform_second.numpy(), rate=sample_rate)\n\n\n######################################################################\n# The last file is someone saying “visual”.\n#\n\nwaveform_last, *_ = train_set[-1]\nipd.Audio(waveform_last.numpy(), rate=sample_rate)\n\n\n######################################################################\n# Formatting the Data\n# -------------------\n#\n# This is a good place to apply transformations to the data. For the\n# waveform, we downsample the audio for faster processing without losing\n# too much of the classification power.\n#\n# We don’t need to apply other transformations here. It is common for some\n# datasets though to have to reduce the number of channels (say from\n# stereo to mono) by either taking the mean along the channel dimension,\n# or simply keeping only one of the channels. Since SpeechCommands uses a\n# single channel for audio, this is not needed here.\n#\n\nnew_sample_rate = 8000\ntransform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=new_sample_rate)\ntransformed = transform(waveform)\n\nipd.Audio(transformed.numpy(), rate=new_sample_rate)\n\n\n######################################################################\n# We are encoding each word using its index in the list of labels.\n#\n\n\ndef label_to_index(word):\n # Return the position of the word in labels\n return torch.tensor(labels.index(word))\n\n\ndef index_to_label(index):\n # Return the word corresponding to the index in labels\n # This is the inverse of label_to_index\n return labels[index]\n\n\nword_start = \"yes\"\nindex = label_to_index(word_start)\nword_recovered = index_to_label(index)\n\nprint(word_start, \"-->\", index, \"-->\", word_recovered)\n\n\n######################################################################\n# To turn a list of data point made of audio recordings and utterances\n# into two batched tensors for the model, we implement a collate function\n# which is used by the PyTorch DataLoader that allows us to iterate over a\n# dataset by batches. 
Please see `the\n# documentation <https://pytorch.org/docs/stable/data.html#working-with-collate-fn>`__\n# for more information about working with a collate function.\n#\n# In the collate function, we also apply the resampling, and the text\n# encoding.\n#\n\n\ndef pad_sequence(batch):\n # Make all tensor in a batch the same length by padding with zeros\n batch = [item.t() for item in batch]\n batch = torch.nn.utils.rnn.pad_sequence(batch, batch_first=True, padding_value=0.)\n return batch.permute(0, 2, 1)\n\n\ndef collate_fn(batch):\n\n # A data tuple has the form:\n # waveform, sample_rate, label, speaker_id, utterance_number\n\n tensors, targets = [], []\n\n # Gather in lists, and encode labels as indices\n for waveform, _, label, *_ in batch:\n tensors += [waveform]\n targets += [label_to_index(label)]\n\n # Group the list of tensors into a batched tensor\n tensors = pad_sequence(tensors)\n targets = torch.stack(targets)\n\n return tensors, targets\n\n\nbatch_size = 256\n\nif device == \"cuda\":\n num_workers = 1\n pin_memory = True\nelse:\n num_workers = 0\n pin_memory = False\n\ntrain_loader = torch.utils.data.DataLoader(\n train_set,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=collate_fn,\n num_workers=num_workers,\n pin_memory=pin_memory,\n)\ntest_loader = torch.utils.data.DataLoader(\n test_set,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n collate_fn=collate_fn,\n num_workers=num_workers,\n pin_memory=pin_memory,\n)\n\n\n######################################################################\n# Define the Network\n# ------------------\n#\n# For this tutorial we will use a convolutional neural network to process\n# the raw audio data. Usually more advanced transforms are applied to the\n# audio data, however CNNs can be used to accurately process the raw data.\n# The specific architecture is modeled after the M5 network architecture\n# described in `this paper <https://arxiv.org/pdf/1610.00087.pdf>`__. An\n# important aspect of models processing raw audio data is the receptive\n# field of their first layer’s filters. Our model’s first filter is length\n# 80 so when processing audio sampled at 8kHz the receptive field is\n# around 10ms (and at 4kHz, around 20 ms). 
This size is similar to speech\n# processing applications that often use receptive fields ranging from\n# 20ms to 40ms.\n#\n\n\nclass M5(nn.Module):\n def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):\n super().__init__()\n self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)\n self.bn1 = nn.BatchNorm1d(n_channel)\n self.pool1 = nn.MaxPool1d(4)\n self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)\n self.bn2 = nn.BatchNorm1d(n_channel)\n self.pool2 = nn.MaxPool1d(4)\n self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)\n self.bn3 = nn.BatchNorm1d(2 * n_channel)\n self.pool3 = nn.MaxPool1d(4)\n self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)\n self.bn4 = nn.BatchNorm1d(2 * n_channel)\n self.pool4 = nn.MaxPool1d(4)\n self.fc1 = nn.Linear(2 * n_channel, n_output)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(self.bn1(x))\n x = self.pool1(x)\n x = self.conv2(x)\n x = F.relu(self.bn2(x))\n x = self.pool2(x)\n x = self.conv3(x)\n x = F.relu(self.bn3(x))\n x = self.pool3(x)\n x = self.conv4(x)\n x = F.relu(self.bn4(x))\n x = self.pool4(x)\n x = F.avg_pool1d(x, x.shape[-1])\n x = x.permute(0, 2, 1)\n x = self.fc1(x)\n return F.log_softmax(x, dim=2)\n\n\nmodel = M5(n_input=transformed.shape[0], n_output=len(labels))\nmodel.to(device)\nprint(model)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\nn = count_parameters(model)\nprint(\"Number of parameters: %s\" % n)\n\n\n######################################################################\n# We will use the same optimization technique used in the paper, an Adam\n# optimizer with weight decay set to 0.0001. At first, we will train with\n# a learning rate of 0.01, but we will use a ``scheduler`` to decrease it\n# to 0.001 during training after 20 epochs.\n#\n\noptimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0001)\nscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1) # reduce the learning after 20 epochs by a factor of 10\n\n\n######################################################################\n# Training and Testing the Network\n# --------------------------------\n#\n# Now let’s define a training function that will feed our training data\n# into the model and perform the backward pass and optimization steps. For\n# training, the loss we will use is the negative log-likelihood. The\n# network will then be tested after each epoch to see how the accuracy\n# varies during the training.\n#\n\n\ndef train(model, epoch, log_interval):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n\n data = data.to(device)\n target = target.to(device)\n\n # apply transform and model on whole batch directly on device\n data = transform(data)\n output = model(data)\n\n # negative log-likelihood for a tensor of size (batch x 1 x n_output)\n loss = F.nll_loss(output.squeeze(), target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # print training stats\n if batch_idx % log_interval == 0:\n print(f\"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\\tLoss: {loss.item():.6f}\")\n\n # update progress bar\n pbar.update(pbar_update)\n # record loss\n losses.append(loss.item())\n\n\n######################################################################\n# Now that we have a training function, we need to make one for testing\n# the networks accuracy. 
We will set the model to ``eval()`` mode and then\n# run inference on the test dataset. Calling ``eval()`` sets the training\n# variable in all modules in the network to false. Certain layers like\n# batch normalization and dropout layers behave differently during\n# training so this step is crucial for getting correct results.\n#\n\n\ndef number_of_correct(pred, target):\n # count number of correct predictions\n return pred.squeeze().eq(target).sum().item()\n\n\ndef get_likely_index(tensor):\n # find most likely label index for each element in the batch\n return tensor.argmax(dim=-1)\n\n\ndef test(model, epoch):\n model.eval()\n correct = 0\n for data, target in test_loader:\n\n data = data.to(device)\n target = target.to(device)\n\n # apply transform and model on whole batch directly on device\n data = transform(data)\n output = model(data)\n\n pred = get_likely_index(output)\n correct += number_of_correct(pred, target)\n\n # update progress bar\n pbar.update(pbar_update)\n\n print(f\"\\nTest Epoch: {epoch}\\tAccuracy: {correct}/{len(test_loader.dataset)} ({100. * correct / len(test_loader.dataset):.0f}%)\\n\")\n\n\n######################################################################\n# Finally, we can train and test the network. We will train the network\n# for ten epochs then reduce the learn rate and train for ten more epochs.\n# The network will be tested after each epoch to see how the accuracy\n# varies during the training.\n#\n\nlog_interval = 20\nn_epoch = 2\n\npbar_update = 1 / (len(train_loader) + len(test_loader))\nlosses = []\n\n# The transform needs to live on the same device as the model and the data.\ntransform = transform.to(device)\nwith tqdm(total=n_epoch) as pbar:\n for epoch in range(1, n_epoch + 1):\n train(model, epoch, log_interval)\n test(model, epoch)\n scheduler.step()\n\n# Let's plot the training loss versus the number of iteration.\n# plt.plot(losses);\n# plt.title(\"training loss\");\n\n\n######################################################################\n# The network should be more than 65% accurate on the test set after 2\n# epochs, and 85% after 21 epochs. Let’s look at the last words in the\n# train set, and see how the model did on it.\n#\n\n\ndef predict(tensor):\n # Use the model to predict the label of the waveform\n tensor = tensor.to(device)\n tensor = transform(tensor)\n tensor = model(tensor.unsqueeze(0))\n tensor = get_likely_index(tensor)\n tensor = index_to_label(tensor.squeeze())\n return tensor\n\n\nwaveform, sample_rate, utterance, *_ = train_set[-1]\nipd.Audio(waveform.numpy(), rate=sample_rate)\n\nprint(f\"Expected: {utterance}. Predicted: {predict(waveform)}.\")\n\n\n######################################################################\n# Let’s find an example that isn’t classified correctly, if there is one.\n#\n\nfor i, (waveform, sample_rate, utterance, *_) in enumerate(test_set):\n output = predict(waveform)\n if output != utterance:\n ipd.Audio(waveform.numpy(), rate=sample_rate)\n print(f\"Data point #{i}. Expected: {utterance}. Predicted: {output}.\")\n break\nelse:\n print(\"All examples in this dataset were correctly classified!\")\n print(\"In this case, let's just look at the last data point\")\n ipd.Audio(waveform.numpy(), rate=sample_rate)\n print(f\"Data point #{i}. Expected: {utterance}. 
Predicted: {output}.\")\n\n\n######################################################################\n# Feel free to try with one of your own recordings of one of the labels!\n# For example, using Colab, say “Go” while executing the cell below. This\n# will record one second of audio and try to classify it.\n#\n\n\ndef record(seconds=1):\n\n from google.colab import output as colab_output\n from base64 import b64decode\n from io import BytesIO\n from pydub import AudioSegment\n\n RECORD = (\n b\"const sleep = time => new Promise(resolve => setTimeout(resolve, time))\\n\"\n b\"const b2text = blob => new Promise(resolve => {\\n\"\n b\" const reader = new FileReader()\\n\"\n b\" reader.onloadend = e => resolve(e.srcElement.result)\\n\"\n b\" reader.readAsDataURL(blob)\\n\"\n b\"})\\n\"\n b\"var record = time => new Promise(async resolve => {\\n\"\n b\" stream = await navigator.mediaDevices.getUserMedia({ audio: true })\\n\"\n b\" recorder = new MediaRecorder(stream)\\n\"\n b\" chunks = []\\n\"\n b\" recorder.ondataavailable = e => chunks.push(e.data)\\n\"\n b\" recorder.start()\\n\"\n b\" await sleep(time)\\n\"\n b\" recorder.onstop = async ()=>{\\n\"\n b\" blob = new Blob(chunks)\\n\"\n b\" text = await b2text(blob)\\n\"\n b\" resolve(text)\\n\"\n b\" }\\n\"\n b\" recorder.stop()\\n\"\n b\"})\"\n )\n RECORD = RECORD.decode(\"ascii\")\n\n print(f\"Recording started for {seconds} seconds.\")\n display(ipd.Javascript(RECORD))\n s = colab_output.eval_js(\"record(%d)\" % (seconds * 1000))\n print(\"Recording ended.\")\n b = b64decode(s.split(\",\")[1])\n\n fileformat = \"wav\"\n filename = f\"_audio.{fileformat}\"\n AudioSegment.from_file(BytesIO(b)).export(filename, format=fileformat)\n return torchaudio.load(filename)\n\n\n# Detect whether notebook runs in google colab\nif \"google.colab\" in sys.modules:\n waveform, sample_rate = record()\n print(f\"Predicted: {predict(waveform)}.\")\n ipd.Audio(waveform.numpy(), rate=sample_rate)\n\n\n######################################################################\n# Conclusion\n# ----------\n#\n# In this tutorial, we used torchaudio to load a dataset and resample the\n# signal. We have then defined a neural network that we trained to\n# recognize a given command. There are also other data preprocessing\n# methods, such as finding the mel frequency cepstral coefficients (MFCC),\n# that can reduce the size of the dataset. This transform is also\n# available in torchaudio as ``torchaudio.transforms.MFCC``.\n#\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.functional.log_softmax", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "torch.nn.MaxPool1d", "torch.nn.Linear", "torch.cuda.is_available", "torch.nn.Conv1d", "torch.stack", "torch.nn.functional.avg_pool1d", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snakeztc/NeuralDialog-CVAE
[ "c8b61f23938ac7c95fc129f63ad921a310c6d425" ]
[ "models/decoder_fn_lib.py" ]
[ "# Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nimport tensorflow as tf\n\n\ndef context_decoder_fn_inference(output_fn, encoder_state, embeddings,\n start_of_sequence_id, end_of_sequence_id,\n maximum_length, num_decoder_symbols, context_vector,\n dtype=dtypes.int32, name=None, decode_type='greedy'):\n \"\"\" Simple decoder function for a sequence-to-sequence model used in the\n `dynamic_rnn_decoder`.\n\n Args:\n output_fn: An output function to project your `cell_output` onto class\n logits.\n\n If `None` is supplied it will act as an identity function, which\n might be wanted when using the RNNCell `OutputProjectionWrapper`.\n\n encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.\n embeddings: The embeddings matrix used for the decoder sized\n `[num_decoder_symbols, embedding_size]`.\n start_of_sequence_id: The start of sequence ID in the decoder embeddings.\n end_of_sequence_id: The end of sequence ID in the decoder embeddings.\n maximum_length: The maximum allowed of time steps to decode.\n num_decoder_symbols: The number of classes to decode at each time step.\n context_vector: an extra vector that should be appended to the input embedding\n dtype: (default: `dtypes.int32`) The default data type to use when\n handling integer objects.\n name: (default: `None`) NameScope for the decoder function;\n defaults to \"simple_decoder_fn_inference\"\n\n Returns:\n A decoder function with the required interface of `dynamic_rnn_decoder`\n intended for inference.\n \"\"\"\n with ops.name_scope(name, \"simple_decoder_fn_inference\",\n [output_fn, encoder_state, embeddings,\n start_of_sequence_id, end_of_sequence_id,\n maximum_length, num_decoder_symbols, dtype]):\n start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)\n end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)\n maxium_length_int = maximum_length + 1\n maximum_length = ops.convert_to_tensor(maximum_length, dtype)\n num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)\n encoder_info = nest.flatten(encoder_state)[0]\n batch_size = encoder_info.get_shape()[0].value\n if output_fn is None:\n output_fn = lambda x: x\n if batch_size is None:\n batch_size = array_ops.shape(encoder_info)[0]\n\n def decoder_fn(time, cell_state, cell_input, cell_output, context_state):\n \"\"\" Decoder function used in the `dynamic_rnn_decoder` with the purpose of\n inference.\n\n The main difference between this decoder function and the `decoder_fn` in\n `simple_decoder_fn_train` is how `next_cell_input` is calculated. In this\n decoder function we calculate the next input by applying an argmax across\n the feature dimension of the output from the decoder. This is a\n greedy-search approach. 
(Bahdanau et al., 2014) & (Sutskever et al., 2014)\n use beam-search instead.\n\n Args:\n time: positive integer constant reflecting the current timestep.\n cell_state: state of RNNCell.\n cell_input: input provided by `dynamic_rnn_decoder`.\n cell_output: output of RNNCell.\n context_state: context state provided by `dynamic_rnn_decoder`.\n\n Returns:\n A tuple (done, next state, next input, emit output, next context state)\n where:\n\n done: A boolean vector to indicate which sentences has reached a\n `end_of_sequence_id`. This is used for early stopping by the\n `dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with\n all elements as `true` is returned.\n\n next state: `cell_state`, this decoder function does not modify the\n given state.\n\n next input: The embedding from argmax of the `cell_output` is used as\n `next_input`.\n\n emit output: If `output_fn is None` the supplied `cell_output` is\n returned, else the `output_fn` is used to update the `cell_output`\n before calculating `next_input` and returning `cell_output`.\n\n next context state: `context_state`, this decoder function does not\n modify the given context state. The context state could be modified when\n applying e.g. beam search.\n \"\"\"\n with ops.name_scope(name, \"simple_decoder_fn_inference\",\n [time, cell_state, cell_input, cell_output,\n context_state]):\n if cell_input is not None:\n raise ValueError(\"Expected cell_input to be None, but saw: %s\" %\n cell_input)\n if cell_output is None:\n # invariant that this is time == 0\n next_input_id = array_ops.ones([batch_size, ], dtype=dtype) * (\n start_of_sequence_id)\n done = array_ops.zeros([batch_size, ], dtype=dtypes.bool)\n cell_state = encoder_state\n cell_output = array_ops.zeros([num_decoder_symbols],\n dtype=dtypes.float32)\n context_state = tf.zeros((batch_size, maxium_length_int), dtype=tf.int32)\n else:\n cell_output = output_fn(cell_output)\n\n if decode_type == 'sample':\n matrix_U = -1.0 * tf.log(\n -1.0 * tf.log(tf.random_uniform(tf.shape(cell_output), minval=0.0, maxval=1.0)))\n next_input_id = math_ops.cast(\n tf.argmax(tf.subtract(cell_output, matrix_U), dimension=1), dtype=dtype)\n elif decode_type == 'greedy':\n next_input_id = math_ops.cast(\n math_ops.argmax(cell_output, 1), dtype=dtype)\n else:\n raise ValueError(\"unknown decode type\")\n\n done = math_ops.equal(next_input_id, end_of_sequence_id)\n # save the results into context state\n expanded_next_input = tf.expand_dims(next_input_id, axis=1)\n sliced_context_state = tf.slice(context_state, [0, 0], [-1, maxium_length_int - 1])\n context_state = tf.concat([expanded_next_input, sliced_context_state], axis=1)\n context_state = tf.reshape(context_state, [batch_size, maxium_length_int])\n\n next_input = array_ops.gather(embeddings, next_input_id)\n if context_vector is not None:\n next_input = tf.concat([next_input, context_vector], axis=1)\n # if time > maxlen, return all true vector\n done = control_flow_ops.cond(math_ops.greater(time, maximum_length),\n lambda: array_ops.ones([batch_size, ], dtype=dtypes.bool),\n lambda: done)\n return (done, cell_state, next_input, cell_output, context_state)\n\n return decoder_fn\n\n\ndef context_decoder_fn_train(encoder_state, context_vector, name=None):\n with ops.name_scope(name, \"simple_decoder_fn_train\", [encoder_state]):\n pass\n\n def decoder_fn(time, cell_state, cell_input, cell_output, context_state):\n \"\"\" Decoder function used in the `dynamic_rnn_decoder` with the purpose of\n training.\n\n Args:\n time: positive 
integer constant reflecting the current timestep.\n cell_state: state of RNNCell.\n cell_input: input provided by `dynamic_rnn_decoder`.\n cell_output: output of RNNCell.\n context_state: context state provided by `dynamic_rnn_decoder`.\n\n Returns:\n A tuple (done, next state, next input, emit output, next context state)\n where:\n\n done: `None`, which is used by the `dynamic_rnn_decoder` to indicate\n that `sequence_lengths` in `dynamic_rnn_decoder` should be used.\n\n next state: `cell_state`, this decoder function does not modify the\n given state.\n\n next input: `cell_input`, this decoder function does not modify the\n given input. The input could be modified when applying e.g. attention.\n\n emit output: `cell_output`, this decoder function does not modify the\n given output.\n\n next context state: `context_state`, this decoder function does not\n modify the given context state. The context state could be modified when\n applying e.g. beam search.\n \"\"\"\n with ops.name_scope(name, \"simple_decoder_fn_train\",\n [time, cell_state, cell_input, cell_output,\n context_state]):\n if context_vector is not None:\n cell_input = tf.concat([cell_input, context_vector], axis=1)\n if cell_state is None: # first call, return encoder_state\n return (None, encoder_state, cell_input, cell_output, context_state)\n else:\n return (None, cell_state, cell_input, cell_output, context_state)\n\n return decoder_fn\n" ]
[ [ "tensorflow.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.zeros", "tensorflow.slice", "tensorflow.python.ops.math_ops.argmax", "tensorflow.shape", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.math_ops.greater", "tensorflow.subtract", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.util.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "1.0", "1.2" ] } ]
eduguerdex/BRT_ACTION
[ "01fc214e5fc97e871a379cd85052664a5b404e8a" ]
[ "src/BraccioDEV.py" ]
[ "import numpy as np\r\nfrom copy import copy\r\n#import rbdl\r\ncos=np.cos; sin=np.sin; pi=np.pi\r\n\r\ndef dh(d, theta, a, alpha):\r\n \"\"\"\r\n Matriz de transformacion homogenea asociada a los parametros DH.\r\n Retorna una matriz 4x4\r\n \"\"\"\r\n sth = np.sin(theta)\r\n cth = np.cos(theta)\r\n sa = np.sin(alpha)\r\n ca = np.cos(alpha)\r\n T = np.array([[cth, -ca*sth, sa*sth, a*cth],\r\n [sth, ca*cth, -sa*cth, a*sth],\r\n [0.0, sa, ca, d],\r\n [0.0, 0.0, 0.0, 1.0]])\r\n return T\r\n \r\n \r\ndef fkine_BRT(q):\r\n \"\"\"\r\n Calcular la cinematica directa del robot UR5 dados sus valores articulares. \r\n q es un vector numpy de la forma [q1, q2, q3, q4, q5, q6]\r\n \"\"\"\r\n # Longitudes (en metros)\r\n # Matrices DH (completar), emplear la funcion dh con los parametros DH para cada articulacion\r\n m=100\r\n l1=(7.5/m)\r\n l2=(12.5/m)\r\n l3=(12.5/m)\r\n l4=(6.5/m)\r\n l5=(13/m)\r\n # Matrices DH\r\n T1=dh(l1 ,-q[0]-pi/2 ,0,-pi/2)#listo\r\n T2=dh(0,q[1]+pi,l2,0)\r\n T3=dh(0,q[2]-pi/2,l3,0)\r\n T4=dh(0,q[3],0 ,pi/2) #listo\r\n T5=dh(l4+l5,-q[4] ,0 ,0 )#listo\r\n # Efector final con respecto a la base\r\n T = T1.dot(T2).dot(T3).dot(T4).dot(T5)\r\n return T\r\n\r\ndef jacobian_BRT(q, delta=0.0001):\r\n # Crear una matriz 3x5\r\n J = np.zeros((3,5))\r\n # Transformacion homogenea inicial (usando q)\r\n T = fkine_BRT(q) \r\n # Iteracion para la derivada de cada columna\r\n for i in range(5):\r\n # Copiar la configuracion articular inicial\r\n dq = copy(q)\r\n # Incrementar la articulacion i-esima usando un delta\r\n dq[i]=dq[i]+delta\r\n # Transformacion homogenea luego del incremento (q+delta)\r\n Ti=fkine_BRT(dq)\r\n # Aproximacion del Jacobiano de posicion usando diferencias finitas\r\n J[:,i]= 1/delta*(Ti[0:3,3]-T[0:3,3]) \r\n return J\r\n\r\ndef ikine_BRT(xdes, q0):\r\n \"\"\"\r\n Calcular la cinematica inversa de UR5 numericamente a partir de la configuracion articular inicial de q0. \r\n \"\"\" \r\n epsilon = 0.0001\r\n max_iter = 1000\r\n delta=0.0001\r\n \r\n q = copy(q0)\r\n for i in range(max_iter):\r\n # Main loop\r\n J=jacobian_BRT(q,delta) \r\n f=fkine_BRT(q)\r\n e=xdes-f[0:3,3]\r\n q=q+np.dot(np.linalg.pinv(J), e)\r\n #Condicion de final\r\n if (np.linalg.norm(e)<epsilon):\r\n break \r\n pass\r\n return q\r\n\r\ndef ik_gradient_BRT(xdes, q0):\r\n\r\n \"\"\"\r\n Calcular la cinematica inversa de UR5 numericamente a partir de la configuracion articular inicial de q0. \r\n Emplear el metodo gradiente\r\n \"\"\"\r\n epsilon = 0.001\r\n max_iter = 1000\r\n delta = 0.00001\r\n alpha = 0.5\r\n q = copy(q0)\r\n\r\n for i in range(max_iter):\r\n # Main loop\r\n #Matriz Jacobiana\r\n J=jacobian_BRT(q,delta)\r\n #Matriz Actual\r\n Td=fkine_BRT(q)\r\n #Posicion Actual\r\n xact=Td[0:3,3]\r\n # Error entre pos deseada y pos actual\r\n e=xdes-xact\r\n # Metodo de Newton\r\n q=q+alpha*np.dot(J.T,e)\r\n #Condicion de termino\r\n if(np.linalg.norm(e)<epsilon):\r\n break\r\n pass\r\n return q\r\n" ]
[ [ "numpy.dot", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.linalg.pinv", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lucidrains/memorizing-transformers-pytorch
[ "83fa1479d6f7881dd977fbff55681e709e3b250e" ]
[ "memorizing_transformers_pytorch/knn_memory.py" ]
[ "import os\nimport math\nimport torch\nimport faiss\nimport numpy as np\nfrom pathlib import Path\nfrom functools import wraps\n\nfrom contextlib import ExitStack, contextmanager\n\nfrom einops import rearrange\nfrom einops_exts import rearrange_with_anon_dims, check_shape\n\n# multiprocessing\n\nfrom joblib import Parallel, delayed, cpu_count\n\n# constants\n\nFAISS_INDEX_GPU_ID = int(os.getenv('FAISS_INDEX_GPU_ID', 0))\n\nDEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY = './.tmp/knn.memories'\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef cast_list(val):\n return val if isinstance(val, list) else [val]\n\ndef all_el_unique(arr):\n return len(set(arr)) == len(arr)\n\n@contextmanager\ndef multi_context(*cms):\n with ExitStack() as stack:\n yield [stack.enter_context(cls) for cls in cms]\n\ndef count_intersect(x, y):\n # returns an array that shows how many times an element in x is contained in tensor y\n return np.sum(rearrange(x, 'i -> i 1') == rearrange(y, 'j -> 1 j'), axis = -1)\n\n# a wrapper around faiss IndexIVFFlat\n# taking care of expiring old keys automagically\n\nclass KNN():\n def __init__(\n self,\n dim,\n max_num_entries,\n cap_num_entries = False,\n M = 15,\n keep_stats = False\n ):\n index = faiss.IndexHNSWFlat(dim, M, faiss.METRIC_INNER_PRODUCT)\n self.index = index\n self.max_num_entries = max_num_entries\n self.cap_num_entries = cap_num_entries\n self.is_trained = False\n self.keep_stats = keep_stats\n\n self.reset()\n\n def __del__(self):\n if hasattr(self, 'index'):\n del self.index\n\n def reset(self):\n self.ids = np.empty((0,), dtype = np.int32)\n\n if self.keep_stats:\n self.hits = np.empty((0,), dtype = np.int32)\n self.age_num_iterations = np.empty((0,), dtype = np.int32)\n self.ages_since_last_hit = np.empty((0,), dtype = np.int32)\n\n self.index.reset()\n self.is_trained = False\n\n def train(self, x):\n self.index.train(x)\n self.is_trained = True\n\n def add(self, x, ids):\n if not self.is_trained:\n self.train(x)\n\n self.ids = np.concatenate((ids, self.ids))\n\n if self.keep_stats:\n self.hits = np.concatenate((np.zeros_like(ids), self.hits))\n self.age_num_iterations = np.concatenate((np.zeros_like(ids), self.age_num_iterations))\n self.ages_since_last_hit = np.concatenate((np.zeros_like(ids), self.ages_since_last_hit))\n\n if self.cap_num_entries and len(self.ids) > self.max_num_entries:\n self.reset()\n\n return self.index.add(x)\n\n def search(\n self,\n x,\n topk,\n nprobe = 8,\n return_distances = False,\n increment_hits = False,\n increment_age = True\n ):\n if not self.is_trained:\n return np.full((x.shape[0], topk), -1)\n\n distances, indices = self.index.search(x, k = topk)\n\n if increment_hits and self.keep_stats:\n hits = count_intersect(self.ids, rearrange(indices, '... 
-> (...)'))\n self.hits += hits\n\n self.ages_since_last_hit += 1\n self.ages_since_last_hit *= (hits == 0)\n\n if increment_age and self.keep_stats:\n self.age_num_iterations += 1\n\n if return_distances:\n return indices, distances\n\n return indices\n\n# KNN memory layer, where one can store key / value memories\n# can automatically take care of a collection of faiss indices (across batch dimension)\n\nclass KNNMemory():\n def __init__(\n self,\n dim,\n max_memories = 16000,\n num_indices = 1,\n memmap_filename = './knn.memory.memmap',\n multiprocessing = True\n ):\n self.dim = dim\n self.num_indices = num_indices\n self.scoped_indices = list(range(num_indices))\n\n self.max_memories = max_memories\n self.shape = (num_indices, max_memories, 2, dim)\n self.db_offsets = np.zeros(num_indices, dtype = np.int32)\n\n self.db = np.memmap(memmap_filename, mode = 'w+', dtype = np.float32, shape = self.shape)\n self.knns = [KNN(dim = dim, max_num_entries = max_memories, cap_num_entries = True) for _ in range(num_indices)]\n \n self.n_jobs = cpu_count() if multiprocessing else 1\n\n def set_scoped_indices(self, indices):\n indices = list(indices)\n assert all_el_unique(indices), f'all scoped batch indices must be unique, received: {indices}'\n assert all([0 <= i < self.num_indices for i in indices]), f'each batch index must be between 0 and less than {self.num_indices}: received {indices}'\n self.scoped_indices = indices\n\n @contextmanager\n def at_batch_indices(self, indices):\n prev_indices = self.scoped_indices\n self.set_scoped_indices(indices)\n yield self\n self.set_scoped_indices(prev_indices)\n\n def clear(self, batch_indices = None):\n if not exists(batch_indices):\n batch_indices = list(range(self.num_indices))\n\n batch_indices = cast_list(batch_indices)\n\n for index in batch_indices:\n knn = self.knns[index]\n knn.reset()\n\n self.db_offsets[batch_indices] = 0\n\n def add(self, memories):\n check_shape(memories, 'b n kv d', d = self.dim, kv = 2, b = len(self.scoped_indices))\n\n memories = memories.detach().cpu().numpy()\n memories = memories[:, -self.max_memories:]\n num_memories = memories.shape[1]\n\n knn_insert_ids = np.arange(num_memories)\n\n keys = np.ascontiguousarray(memories[..., 0, :])\n knns = [self.knns[i] for i in self.scoped_indices]\n db_offsets = [self.db_offsets[i] for i in self.scoped_indices]\n\n # use joblib to insert new key / value memories into faiss index\n\n @delayed\n def knn_add(knn, key, db_offset):\n knn.add(key, ids = knn_insert_ids + db_offset)\n\n Parallel(n_jobs = self.n_jobs)(knn_add(*args) for args in zip(knns, keys, db_offsets))\n\n # add the new memories to the memmap \"database\"\n\n add_indices = (rearrange(np.arange(num_memories), 'j -> 1 j') + rearrange(self.db_offsets[list(self.scoped_indices)], 'i -> i 1')) % self.max_memories\n self.db[rearrange(np.array(self.scoped_indices), 'i -> i 1'), add_indices] = memories\n self.db.flush()\n\n self.db_offsets += num_memories\n\n def search(\n self,\n queries,\n topk,\n nprobe = 8,\n increment_hits = True,\n increment_age = True\n ):\n _, *prec_dims, _ = queries.shape\n check_shape(queries, 'b ... d', d = self.dim, b = len(self.scoped_indices))\n queries = rearrange(queries, 'b ... d -> b (...) 
d')\n\n device = queries.device\n queries = queries.detach().cpu().numpy()\n\n all_masks = []\n all_key_values = []\n\n knns = [self.knns[i] for i in self.scoped_indices]\n\n # parallelize faiss search\n\n @delayed\n def knn_search(knn, query):\n return knn.search(query, topk, nprobe, increment_hits = increment_hits, increment_age = increment_age)\n\n fetched_indices = Parallel(n_jobs = self.n_jobs)(knn_search(*args) for args in zip(knns, queries))\n\n # get all the memory key / values from memmap 'database'\n # todo - remove for loop below\n\n for batch_index, indices in zip(self.scoped_indices, fetched_indices):\n mask = indices != -1\n db_indices = np.where(mask, indices, 0)\n\n all_masks.append(torch.from_numpy(mask))\n\n key_values = self.db[batch_index, db_indices % self.max_memories]\n all_key_values.append(torch.from_numpy(key_values))\n\n all_masks = torch.stack(all_masks)\n all_key_values = torch.stack(all_key_values)\n all_key_values = all_key_values.masked_fill(~rearrange(all_masks, '... -> ... 1 1'), 0.)\n\n all_key_values = rearrange_with_anon_dims(all_key_values, 'b (...p) ... -> b ...p ...', p = prec_dims)\n all_masks = rearrange_with_anon_dims(all_masks, 'b (...p) ... -> b ...p ...', p = prec_dims)\n\n return all_key_values.to(device), all_masks.to(device)\n\n def __del__(self):\n if hasattr(self, 'knns'):\n for knn in self.knns:\n del knn\n del self.db\n\n# extends list with some extra methods for collections of KNN memories\n\nclass KNNMemoryList(list):\n def cleanup(self):\n for memory in self:\n del memory\n\n @classmethod\n def create_memories(\n self,\n *,\n batch_size,\n num_memory_layers,\n memories_directory = DEFAULT_KNN_MEMORY_MEMMAP_DIRECTORY\n ):\n memories_path = Path(memories_directory)\n memories_path.mkdir(exist_ok = True, parents = True)\n\n def inner(*args, **kwargs):\n return self([KNNMemory(*args, num_indices = batch_size, memmap_filename = str(memories_path / f'knn.memory.layer.{ind + 1}.memmap'), **kwargs) for ind in range(num_memory_layers)])\n return inner\n\n @contextmanager\n def at_batch_indices(\n self,\n indices\n ):\n knn_batch_indices_contexts = [memory.at_batch_indices(indices) for memory in self]\n with multi_context(*knn_batch_indices_contexts):\n yield\n\n def clear_memory(\n self,\n batch_indices = None,\n memory_indices = None\n ):\n memory_indices = default(memory_indices, tuple(range(len(self))))\n\n for memory_index in memory_indices:\n memory = self[memory_index]\n memory.clear(batch_indices)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.arange", "numpy.memmap", "torch.from_numpy", "numpy.full", "numpy.concatenate", "numpy.zeros_like", "torch.stack", "numpy.array", "numpy.zeros", "numpy.where", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hermawanmulyono/ml
[ "bf617ee864134fb0eb9f3cb68c88d6efa8acd150" ]
[ "assignment1/utils/plots.py" ]
[ "import copy\nimport logging\nimport multiprocessing\nimport time\nfrom typing import List, Iterable, Optional, Union, Tuple\n\nimport numpy as np\nimport plotly.figure_factory as ff\nfrom plotly import graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.svm import SVC\nfrom sklearn.utils import shuffle\nfrom sklearn.utils._testing import ignore_warnings\n\nfrom utils.grid_search import GridSearchResults, ModelType\nfrom utils.models import get_svm\nfrom utils.nnestimator import train_nn_multiple\nfrom utils.output_grabber import OutputGrabber\n\n\ndef visualize_2d_data(x_data: np.ndarray,\n y_data: np.ndarray,\n title: str = None) -> go.Figure:\n \"\"\"Visualizes 2D data\n\n Args:\n x_data: A (num_examples, 2) array.\n y_data: A (num_examples, ) array of labels corresponding to `x_data`.\n title: If given, will add plot title\n\n Returns:\n A plotly figure object\n\n \"\"\"\n\n fig = go.Figure()\n fig = _add_scatter_dataset2d(fig,\n x_data,\n y_data,\n scatter_alpha=0.2,\n scatter_size=7)\n fig.update_layout({\n 'xaxis_title': 'x1',\n 'yaxis_title': 'x2',\n 'width': 960,\n 'height': 540\n })\n\n if title is not None:\n fig.update_layout({'title': title})\n\n return fig\n\n\ndef visualize_2d_decision_boundary(model,\n x1_max: float,\n x2_max: float,\n x_data,\n y_data,\n title: str = None,\n scatter_size=2) -> go.Figure:\n \"\"\"Visualizes the decision boundary of a trained model\n\n This function only works with model trained with 2D data.\n\n A meshgrid [0, x1_max] x [0, x2_max] is created.\n\n Args:\n x_data: Dataset2D features\n y_data: Dataset2D labels\n model: A trained model which implements predict_proba() or predict()\n x1_max: Maximum value of first axis\n x2_max: Maximum value of second axis\n title: If given, will add plot title\n scatter_size: Dataset scatter size\n\n Returns:\n A plotly figure object\n \"\"\"\n\n # Implementation inspired by scikit-learn example\n # https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html\n\n x1_max = np.maximum(np.max(x_data[:, 0]), x1_max)\n x2_max = np.maximum(np.max(x_data[:, 1]), x2_max)\n\n x1_min = np.minimum(np.min(x_data[:, 0]), 0)\n x2_min = np.minimum(np.min(x_data[:, 1]), 0)\n\n step = min((x1_max - x1_min) / 100, (x2_max - x2_min) / 100)\n\n x1 = np.arange(x1_min, x1_max, step)\n x2 = np.arange(x2_min, x2_max, step)\n xx1, xx2 = np.meshgrid(x1, x2)\n\n x_grid = np.stack([xx1.flatten(), xx2.flatten()], axis=-1)\n\n if type(model) == SVC:\n model: SVC\n z = model.decision_function(x_grid)\n z = (z > 0).astype(np.float)\n\n else:\n # Grab the second class\n z = model.predict_proba(x_grid)[:, 1]\n\n z = z.reshape(xx1.shape)\n colorscale = [[0, 'rgba(128,128,200,0.3)'], [1.0, 'rgba(200,128,128,0.3)']]\n fig = go.Figure()\n fig.add_trace(\n go.Contour(z=z,\n x=x1,\n y=x2,\n colorscale=colorscale,\n contours={'showlines': False},\n colorbar={\n 'len': 0.8,\n 'nticks': 10\n }))\n\n fig = _add_scatter_dataset2d(fig,\n x_data,\n y_data,\n scatter_alpha=0.5,\n scatter_size=scatter_size)\n fig.update_layout({'xaxis_title': 'x1', 'yaxis_title': 'x2'})\n\n if title:\n fig.update_layout({'title': title})\n\n fig.update_layout({\n 'width': 960,\n 'height': 540,\n })\n\n # fig.update_layout(legend={\n # 'yanchor': 'top',\n # 'y': 0.99,\n # 'xanchor': 'left',\n # 'x': 0.01\n # })\n\n return fig\n\n\ndef _add_scatter_dataset2d(fig: go.Figure, 
x_data, y_data, scatter_alpha: float,\n scatter_size: int):\n \"\"\"Adds Dataset2D scatter plot\n\n Args:\n fig: A Figure object\n x_data: Dataset2D features\n y_data: Dataset2D labels\n scatter_alpha: Transparency\n scatter_size: Scatter size\n\n Returns:\n A Figure object with scatter\n\n \"\"\"\n positive_indices = y_data == 1\n\n # Sanity check\n assert np.all(y_data[np.logical_not(positive_indices)] == 0)\n\n x_positive = x_data[positive_indices]\n x_negative = x_data[np.logical_not(positive_indices)]\n\n fig.add_trace(\n go.Scatter(x=x_positive[:, 0],\n y=x_positive[:, 1],\n mode='markers',\n name='positive',\n marker={\n 'color': f'rgba(255,0,0,{scatter_alpha})',\n 'size': scatter_size\n }))\n fig.add_trace(\n go.Scatter(x=x_negative[:, 0],\n y=x_negative[:, 1],\n mode='markers',\n name='negative',\n marker={\n 'color': f'rgba(0,0,255,{scatter_alpha})',\n 'size': scatter_size\n }))\n return fig\n\n\ndef training_size_curve(model: ModelType, x_train: np.ndarray,\n y_train: np.ndarray, x_val: np.ndarray,\n y_val: np.ndarray, sizes: List[float],\n title: str) -> go.Figure:\n \"\"\"Produces a training curve with respect to training size\n\n This function is responsible for:\n - Training the given `model` with all training sizes\n in the `sizes` parameter.\n - Generates the corresponding training size curve\n\n Args:\n model: An untrained model\n x_train: Training set features (n_train, n_features)\n y_train: Training set labels (n_train, )\n x_val: Validation set features (n_train, n_features)\n y_val: Validation set labels (n_train, )\n sizes: List of training sizes as fractions e.g.\n `[0.1, 0.25, 0.5, 0.75, 1.0]`.\n title: Plot title\n\n Returns:\n A Figure object\n\n \"\"\"\n\n if len(x_train) != len(y_train):\n raise ValueError\n\n if len(x_val) != len(y_val):\n raise ValueError\n\n if x_train.shape[1] != x_val.shape[1]:\n raise ValueError\n\n if not all([0 <= s <= 1.0 for s in sizes]):\n raise ValueError\n\n x_concat = np.concatenate([x_train, x_val], axis=0)\n y_concat = np.concatenate([y_train, y_val], axis=0)\n\n assert len(x_concat) == len(y_concat)\n\n n_train = len(x_train)\n n_concat = len(x_concat)\n\n cv = [(np.arange(n_train), np.arange(n_train, n_concat))]\n\n lc_results = learning_curve(model,\n x_concat,\n y_concat,\n train_sizes=sizes,\n cv=cv,\n shuffle=True,\n return_times=True)\n\n train_sizes_ags, train_accs_, val_accs_, fit_times, score_times = lc_results\n fit_times: np.ndarray = fit_times.flatten()\n score_times: np.ndarray = score_times.flatten()\n\n train_accs = train_accs_.flatten()\n val_accs = val_accs_.flatten()\n\n fig = _generate_training_size_curves(sizes, train_accs, val_accs, title,\n fit_times, score_times)\n\n return fig\n\n\ndef training_size_curve_nn(gs: GridSearchResults, x_train: np.ndarray,\n y_train: np.ndarray, x_val: np.ndarray,\n y_val: np.ndarray, sizes: List[float],\n title: str) -> go.Figure:\n \"\"\"Plots a neural network training curve\n\n The figure has the following properties:\n - x-axis is training sizes\n - y-axis is accuracy\n - Training and validation sets are plotted in the same\n figure.\n\n Args:\n gs: GridSearchResults object which contains the parameters,\n and the model trained with the full dataset.\n x_train: Training features\n y_train: Training labels\n x_val: Validation features\n y_val: Validation labels\n sizes: Training sizes\n title: Plot title\n\n Returns:\n A Figure object\n\n \"\"\"\n\n if len(x_train) != len(y_train):\n raise ValueError\n\n if len(x_val) != len(y_val):\n raise ValueError\n\n if 
x_train.shape[1] != x_val.shape[1]:\n raise ValueError\n\n if not all([0 <= s <= 1.0 for s in sizes]):\n raise ValueError\n\n params = gs.best_kwargs\n\n num_examples = len(x_train)\n\n x_train, y_train = shuffle(x_train, y_train)\n\n lengths = [int(s * num_examples) for s in sizes]\n\n train_accs: List[float] = []\n val_accs: List[float] = []\n\n train_times_: List[float] = []\n predict_times_: List[float] = []\n\n for idx, length in enumerate(lengths):\n x_train_sampled = x_train[:length]\n y_train_sampled = y_train[:length]\n\n if idx < len(lengths) - 1:\n nn, fit_time = train_nn_multiple(x_train_sampled, y_train_sampled,\n x_val, y_val, **params)\n else:\n # Lookup in the grid search results gs\n fit_time = None\n for kwargs, results in gs.table:\n if kwargs != params:\n continue\n fit_time = results['fit_time']\n\n assert fit_time is not None, 'Cannot match best params to table'\n\n nn = gs.best_model\n\n train_times_.append(fit_time)\n\n y_pred = nn.predict(x_train_sampled)\n train_accs.append(accuracy_score(y_train_sampled, y_pred))\n\n start = time.time()\n y_pred = nn.predict(x_val)\n val_accs.append(accuracy_score(y_val, y_pred))\n finish = time.time()\n predict_times_.append(finish - start)\n\n train_times = np.array(train_times_)\n predict_times = np.array(predict_times_)\n\n fig = _generate_training_size_curves(sizes, train_accs, val_accs, title,\n train_times, predict_times)\n\n return fig\n\n\ndef _generate_training_size_curves(sizes: Iterable[float],\n train_accs: Iterable[float],\n val_accs: Iterable[float], title: str,\n train_times, predict_times):\n \"\"\"Generates training size curves\n\n There are two curves:\n 1. Accuracy with respect to training sizes\n 2. Training/prediction times with respect to training sizes\n\n Args:\n sizes: Fractions of training sizes i.e. x-axis values\n train_accs: Training accuracy values\n val_accs: Validation accuracy values\n title: Plot title\n train_times: Training times\n predict_times: Prediction times\n\n Returns:\n A Figure object\n\n \"\"\"\n fig = make_subplots(rows=2, cols=1)\n fig.add_trace(go.Scatter(x=sizes,\n y=train_accs,\n mode='lines',\n name='train_acc'),\n row=1,\n col=1)\n fig.add_trace(go.Scatter(x=sizes, y=val_accs, mode='lines', name='val_acc'),\n row=1,\n col=1)\n\n fig.add_trace(go.Scatter(x=sizes,\n y=train_times,\n mode='lines',\n name='train_times'),\n row=2,\n col=1)\n fig.add_trace(go.Scatter(x=sizes,\n y=predict_times,\n mode='lines',\n name='prediction_times'),\n row=2,\n col=1)\n\n fig.update_xaxes(title_text='Training size')\n fig.update_yaxes(title_text='Accuracy', row=1, col=1)\n fig.update_yaxes(title_text='Time (s)', row=2, col=1)\n\n # fig.update_layout({'title': title})\n\n fig.update_layout(\n legend={\n 'orientation': 'h',\n 'yanchor': 'bottom',\n 'y': 1.02,\n 'xanchor': 'right',\n 'x': 1\n })\n\n return fig\n\n\n# Decorator trick from the following Stack Overflow post\n# https://stackoverflow.com/questions/53784971/how-to-disable-convergencewarning-using-sklearn\n@ignore_warnings(category=ConvergenceWarning)\ndef svm_training_curve_iteration(best_params: dict, x_train: np.ndarray,\n y_train: np.ndarray, x_val: np.ndarray,\n y_val: np.ndarray):\n \"\"\"Generates an SVM training curve\n\n The curve has the following characteristics:\n 1. x-axis is number of iterations\n 2. y-axis is accuracy\n 3. 
Train and Val sets are plotted\n\n Args:\n best_params: Best parameters for the SVM constructor\n x_train: Training features\n y_train: Training labels\n x_val: Validation features\n y_val: Validation labels\n\n Returns:\n A graph object with a curve described above\n\n \"\"\"\n\n params = copy.deepcopy(best_params)\n\n def equal(model1: SVC, model2: SVC):\n \"\"\"Tests equality of two SVC models\"\"\"\n cond1 = np.array_equal(model1.intercept_, model2.intercept_)\n cond2 = np.array_equal(model1.support_, model2.support_)\n cond3 = np.array_equal(model1.dual_coef_, model2.dual_coef_)\n return cond1 and cond2 and cond3\n\n last_model: Optional[SVC] = None\n params['verbose'] = True\n out = OutputGrabber()\n with out:\n model = get_svm(**params)\n model.fit(x_train, y_train)\n\n # Capture the max_iter from all SVMs\n s: str = out.capturedtext\n split: List[str] = [s_ for s_ in s.split('\\n') if '#iter = ' in s_]\n all_iters = [int(s_.split()[-1]) for s_ in split]\n max_iter = np.max(all_iters)\n interval = max(1, max_iter // 100)\n\n params['verbose'] = False\n\n iters = []\n train_accs = []\n val_accs = []\n iter_ = 0\n\n while True:\n iter_ += interval\n\n logging.info(f'SVM training curve iteration {iter_}')\n\n params['max_iter'] = iter_\n model = get_svm(**params)\n model.fit(x_train, y_train)\n\n # Update iters\n iters.append(iter_)\n\n # Update train_accs\n y_pred = model.predict(x_train)\n train_accs.append(accuracy_score(y_train, y_pred))\n\n # Update val_accs\n y_pred = model.predict(x_val)\n val_accs.append(accuracy_score(y_val, y_pred))\n\n # If converges, then break\n if (last_model is not None) and equal(model, last_model):\n break\n\n last_model = model\n continue\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=iters, y=train_accs, mode='lines', name='train'))\n fig.add_trace(go.Scatter(x=iters, y=val_accs, mode='lines', name='val'))\n fig.update_layout({'xaxis_title': 'Iterations', 'yaxis_title': 'Accuracy'})\n\n return fig\n\n\ndef gs_results_validation_curve(gs: GridSearchResults,\n param_name: str,\n plot_title,\n log_scale: bool = True,\n other_params: Union[str, dict] = 'best'):\n \"\"\"Generates a validation curve\n\n The GridSearchResults contains a table with information\n about the train and val accuracy scores. The generated\n plot has `param_name` on the x-axis and the corresponding\n accuracy on the y-axis. The other parameters are derived\n from the best parameters in the `gs` object.\n\n Args:\n plot_title:\n gs: A GridSearchResults object\n param_name: Parameter name to plot\n log_scale: If True, use log scale x-axis.\n other_params: Keyword arguments for parameters\n other than `param_name`. 
If 'best' is given,\n they will be inferred from `gs.best_kwargs`.\n\n Returns:\n A graph object\n\n \"\"\"\n\n if other_params == 'best':\n best_params = gs.best_kwargs\n other_params = {k: v for k, v in best_params.items() if k != param_name}\n else:\n if param_name in other_params.keys():\n raise ValueError\n\n param_values = []\n val_acc = []\n train_acc = []\n\n for params, d in gs.table:\n _other_params = {k: v for k, v in params.items() if k != param_name}\n\n if other_params != _other_params:\n continue\n\n train_acc.append(d['train_accuracy'])\n val_acc.append(d['val_accuracy'])\n param_values.append(params[param_name])\n\n sort_indices = np.argsort(param_values)\n sorted_param_values = [param_values[i] for i in sort_indices]\n sorted_val_acc = [val_acc[i] for i in sort_indices]\n sorted_train_acc = [train_acc[i] for i in sort_indices]\n\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(x=sorted_param_values,\n y=sorted_train_acc,\n mode='lines',\n name='train'))\n fig.add_trace(\n go.Scatter(x=sorted_param_values,\n y=sorted_val_acc,\n mode='lines',\n name='val'))\n\n fig.update_layout({\n 'xaxis_title': param_name,\n 'yaxis_title': 'Accuracy',\n # 'title': plot_title,\n 'width': 640,\n 'height': 480\n })\n\n fig.update_layout(\n legend={\n 'orientation': 'h',\n 'yanchor': 'bottom',\n 'y': 1.02,\n 'xanchor': 'right',\n 'x': 1\n })\n\n if log_scale:\n fig.update_xaxes(type='log')\n\n return fig\n\n\ndef model_confusion_matrix(cm, labels: List[str], plot_title: str):\n \"\"\"Generates a model confusion matrix\n\n Args:\n cm: Confusion matrix. `cm[i, j]` corresponds to\n ground truth `i` and predicted `j`.\n labels: Ordered label strings\n plot_title: Plot title string\n\n Returns:\n A figure object\n\n \"\"\"\n\n # Code inspired from\n # https://stackoverflow.com/questions/60860121/plotly-how-to-make-an-annotated-confusion-matrix-using-a-heatmap\n z_text = [[str(y) for y in x] for x in cm]\n fig = ff.create_annotated_heatmap(cm,\n labels,\n labels,\n annotation_text=z_text,\n colorscale='blues',\n reversescale=True)\n\n fig['layout']['yaxis']['autorange'] = 'reversed'\n\n fig.update_layout({\n 'xaxis_title': 'predicted',\n 'yaxis_title': 'ground truth',\n # 'title': plot_title\n })\n\n dims = len(labels) * 50 + 200\n fig.update_layout({'width': dims, 'height': dims})\n\n for i in range(len(fig.layout.annotations)):\n fig.layout.annotations[i].font.size = len(labels) + 10\n\n return fig\n\n\ndef sample_mnist_dataset(x_data: np.ndarray, y_data: np.ndarray,\n samples_per_label: int) -> np.ndarray:\n \"\"\"Picks some samples of the Fashion-MNIST dataset\n\n A grid of 10 columns will be made, where each column\n represents a class. 
The number of rows is determined\n by the `samples_per_label` argument.\n\n Args:\n x_data: Features\n y_data: Labels\n samples_per_label: Number of samples per label\n\n Returns:\n A numpy array of shape\n `(samples_per_label * 28, 10 * 28)`.\n\n \"\"\"\n if len(x_data.shape) != 2:\n raise ValueError\n\n if x_data.shape[1] != 28 * 28:\n raise ValueError\n\n if len(x_data) != len(y_data):\n raise ValueError\n\n labels = list(range(10))\n\n n_samples = len(x_data)\n\n columns = []\n for label in labels:\n # Sample `samples_per_label`\n indices = np.random.choice(np.arange(n_samples)[y_data == label],\n samples_per_label,\n replace=False)\n x_sampled = x_data[indices]\n\n # Reshape 28 by 28\n x_reshaped = [np.reshape(x, (28, 28)) for x in x_sampled]\n\n # Concatenate to make a column\n x_concat = np.concatenate(x_reshaped, axis=0)\n\n # Append `columns`\n columns.append(x_concat)\n\n x_all = np.concatenate(columns, axis=1)\n\n return x_all\n" ]
[ [ "numpy.logical_not", "numpy.array_equal", "numpy.min", "sklearn.utils._testing.ignore_warnings", "numpy.arange", "sklearn.utils.shuffle", "numpy.reshape", "sklearn.model_selection.learning_curve", "numpy.concatenate", "numpy.max", "numpy.argsort", "numpy.array", "numpy.meshgrid", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
laranjma/robmooc
[ "e0dfb04117ad7fc69cd871c0e7e00fca85572164" ]
[ "py_scripts/roblib.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import mean,pi,cos,sin,sqrt,tan,arctan2,exp,dot,array,log,inf, eye, zeros, ones, arange,reshape,concatenate,diag\r\nfrom matplotlib.pyplot import *\r\nfrom numpy.random import uniform as rand\r\nfrom numpy.random import randn as randn\r\nfrom numpy.linalg import inv, det, norm, eig\r\nfrom scipy.linalg import sqrtm,expm,norm,block_diag\r\nfrom scipy.signal import place_poles\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport random\r\n\r\nimport numpy.random as rnd\r\nfrom matplotlib.patches import Ellipse,Rectangle,Circle, Wedge, Polygon\r\n\r\nfrom matplotlib.collections import PatchCollection\r\n\r\n\r\n# Unicode https://en.wikipedia.org/wiki/List_of_Unicode_characters\r\n# for instance to get θ : shift + ctr + U03B8 \r\n# U+03B1 α alpha; U+03B2 β beta; U+03B3;\t Gamma \t0419; U+03B4 δ Delta;\r\n#U+03B5 Epsilon; U+03B6 Zeta; U+03B7 Eta; U+03B8 θ Theta;\r\n#U+03BB Lambda; U+03BC Mu; U+03BD Nu; U+03BE Xi; U+03C0 Pi; U+03C1 Rho;\r\n# U+03C3 Sigma; U+03C4 Tau; U+03C6 φ Phi; U+03C8 ψ Psi; U+03C9 Omega\r\n# U+0393 Γ\r\n\r\n\r\n\r\ndef eulermat(φ,θ,ψ):\r\n Ad_i = array([[0, 0, 0],[0,0,-1],[0,1,0]])\r\n Ad_j = array([[0,0,1],[0,0,0],[-1,0,0]])\r\n Ad_k = array([[0,-1,0],[1,0,0],[0,0,0]])\r\n M = expm(ψ*Ad_k) @ expm(θ*Ad_j) @ expm(φ*Ad_i)\r\n return(M) \r\n \r\ndef move_motif(M,x,y,θ):\r\n M1=ones((1,len(M[1,:])))\r\n M2=concatenate((M, M1), axis=0)\r\n R = array([[cos(θ),-sin(θ),x], [sin(θ),cos(θ),y]])\r\n return(R @ M2) \r\n \r\n \r\ndef draw_tank(x):\r\n x=x.flatten()\r\n M = array([[1,-1,0,0,-1,-1,0,0,-1,1,0,0,3,3,0], [-2,-2,-2,-1,-1,1,1,2,2,2,2,1,0.5,-0.5,-1]])\r\n M=move_motif(M,x[0],x[1],x[2])\r\n plot(M[0],M[1],\"darkblue\",2)\r\n \r\n \r\n\r\ndef draw_ellipse(c,Γ,η,ax,col): # Gaussian confidence ellipse with artist\r\n #draw_ellipse_artist(array([[1],[2]]),eye(2),0.9,ax,[1,0.8-0.3*i,0.8-0.3*i])\r\n if (norm(Γ)==0):\r\n Γ=Γ+0.001*eye(len(Γ[1,:]))\r\n A=sqrtm(-2*log(1-η)*Γ) \r\n w, v = eig(A) \r\n v1=array([[v[0,0]],[v[1,0]]])\r\n v2=array([[v[0,1]],[v[1,1]]]) \r\n f1=A @ v1\r\n f2=A @ v2 \r\n phi = (arctan2(v1 [1,0],v1[0,0]))\r\n alpha=phi*180/3.14\r\n e = Ellipse(xy=c, width=2*norm(f1), height=2*norm(f2), angle=alpha) \r\n ax.add_artist(e)\r\n e.set_clip_box(ax.bbox)\r\n e.set_alpha(0.7)\r\n e.set_facecolor(col)\r\n \r\n \r\n\r\ndef draw_disk(c,d,ax,col): \r\n #draw_disk(array([[1],[2]]),0.5,ax,\"blue\")\r\n e = Ellipse(xy=c, width=2*d, height=2*d, angle=0) \r\n ax.add_artist(e)\r\n e.set_clip_box(ax.bbox)\r\n e.set_alpha(0.7)\r\n e.set_facecolor(col)\r\n \r\n\r\ndef draw_box(x1,x2,y1,y2,ax,col): \r\n c=array([[x1],[y1]]) \r\n rect = Rectangle(c, width=x2-x1, height=y2-y1, angle=0)\r\n rect.set_facecolor(array([0.4,0.3,0.6])) \r\n ax.add_patch(rect)\r\n rect.set_clip_box(ax.bbox)\r\n rect.set_alpha(0.7)\r\n rect.set_facecolor(col) \r\n\r\ndef draw_polygon(P,ax,col): \r\n patches = [] \r\n patches.append(Polygon(P, True)) \r\n p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, color=col)\r\n ax.add_collection(p)\r\n\r\n \r\n \r\n \t\r\ndef draw_car(x):\r\n x=x.flatten();\r\n M = array([ [-1, 4, 5, 5, 4, -1, -1, -1, 0, 0, -1, 1, 0, 0, -1, 1, 0, 0, 3, 3, 3], \r\n [-2, -2, -1, 1, 2, 2, -2, -2, -2, -3, -3, -3, -3, 3, 3, 3, 3, 2, 2, 3, -3],])\r\n \r\n M=move_motif(M,x[0],x[1],x[2])\r\n plot(M[0],M[1],\"blue\",2)\r\n \r\n W = array([[-1, 1], [0, 0]]) #Front Wheel \r\n# Wr = move2Dmat(x[0],x[1],x[2]) @ move2Dmat(3,3,x[4]) @ W\r\n Wr=move_motif(W,3,3,x[4])\r\n Wr=move_motif(Wr,x[0],x[1],x[2])\r\n\r\n\r\n# Wl = 
move2Dmat(x[0],x[1],x[2]) @ move2Dmat(3,-3,x[4]) @ W\r\n Wl=move_motif(W,3,-3,x[4])\r\n Wl=move_motif(Wl,x[0],x[1],x[2])\r\n\r\n plot(Wr[0, :], Wr[1, :], 'magenta', linewidth = 2)\r\n plot(Wl[0, :], Wl[1, :], 'magenta', linewidth = 2) \r\n\r\n\r\ndef tondarray(M):\r\n if type(M)==float:\r\n return array([[M]])\r\n elif type(M)==int:\r\n return array([[M]]) \r\n else:\r\n return M \r\n\r\n\r\n\r\ndef mvnrnd2(x,G): \r\n n=len(x)\r\n x1=x.reshape(n)\r\n y = np.random.multivariate_normal(x1,G).reshape(n,1)\r\n return(y) \r\n\r\ndef mvnrnd1(G):\r\n G=tondarray(G)\r\n n=len(G)\r\n x=array([[0]] * n)\r\n return(mvnrnd2(x,G)) \r\n \r\n\r\ndef kalman_predict(xup,Gup,u,Γα,A):\r\n Γ1 = A @ Gup @ A.T + Γα\r\n x1 = A @ xup + u \r\n return(x1,Γ1) \r\n\r\ndef kalman_correc(x0,Γ0,y,Γβ,C):\r\n S = C @ Γ0 @ C.T + Γβ \r\n K = Γ0 @ C.T @ inv(S) \r\n ytilde = y - C @ x0 \r\n Gup = (eye(len(x0))-K @ C) @ Γ0 \r\n xup = x0 + K@ytilde\r\n return(xup,Gup) \r\n \r\ndef kalman(x0,Γ0,u,y,Γα,Γβ,A,C):\r\n xup,Gup = kalman_correc(x0,Γ0,y,Γβ,C)\r\n x1,Γ1=kalman_predict(xup,Gup,u,Γα,A)\r\n return(x1,Γ1) \r\n\r\n \r\ndef demo_draw(): \r\n fig = figure(0)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n ax.set_xlim(-10, 10)\r\n ax.set_ylim(-10, 10)\r\n\r\n \r\n\r\n\r\n \r\n c=array([[5],[0]])\r\n e = Ellipse(xy=c, width=13.0, height=2.0, angle=45) \r\n ax.add_artist(e)\r\n e.set_clip_box(ax.bbox)\r\n e.set_alpha(0.9)\r\n e.set_facecolor(array([0.7,0.3,0.6])) \r\n \r\n rect = Rectangle( (1,1), width=5, height=3)\r\n rect.set_facecolor(array([0.4,0.3,0.6])) \r\n ax.add_patch(rect) \r\n \r\n pause(0.2) \r\n draw_tank(array([[-7],[5],[1]]))\r\n \r\n draw_car(array([[1],[2],[3],[4],[0.5]])) \r\n \r\n c = array([[-2],[-3]])\r\n G = array([[2,-1],[-1,4]])\r\n draw_ellipse(c,G,0.9,ax,[0.8,0.8,1])\r\n P=array([[5,-3],[9,-10],[7,-4],[7,-6]])\r\n draw_polygon(P,ax,'green')\r\n show() # only at the end. Otherwize, it closes the figure in a terminal mode\r\n\r\n\r\n\r\ndef demo_animation(): \r\n fig = figure(0)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n for t in arange(0,5,0.1) :\r\n pause(0.01) #needed. 
Otherwize, draws only at the end \r\n cla()\r\n ax.set_xlim(-15,15)\r\n ax.set_ylim(-15,15)\r\n draw_car(array([[t],[2],[3+t],[4],[5+t]])) \r\n c = array([[-2+2*t],[-3]])\r\n G = array([[2+t,-1],[-1,4+t]])\r\n draw_ellipse(c,G,0.9,ax,[0.8,0.8,1])\r\n show()\r\n\r\n\r\ndef demo_random(): \r\n N=1000\r\n xbar = array([[1],[2]])\r\n Γx = array([[3,1],[1,3]])\r\n X=randn(2,N)\r\n X = (xbar @ ones((1,N))) + sqrtm(Γx) @ X\r\n xbar_ = mean(X,axis=1)\r\n Xtilde = X - xbar @ ones((1,N))\r\n Γx_ = (Xtilde @ Xtilde.T)/N\r\n fig = figure(0) \r\n ax = fig.add_subplot(111, aspect='equal')\r\n cla()\r\n ax.set_xlim(-20,20)\r\n ax.set_ylim(-20,20)\r\n draw_ellipse(xbar,Γx,0.9,ax,[1,0.8,0.8])\r\n pause(0.5) \r\n ax.scatter(X[0],X[1]) \r\n pause(0.3)\r\n plot() \r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n\r\n \r\n demo_draw()\r\n \r\n# demo_animation() \r\n# demo_random()\r\n\r\n \r\n\r\n \r\n\r\n# M = array([ [1, 2], [5, 6], [9, 10]])\r\n# print(M)\r\n# x=array([[1], [2]]) \r\n# x2= M@x #multiplication dans Python 3\r\n#\r\n# G = [[1, 0], [0, 1]]\r\n# x3=mvnrnd2(x,G)\r\n# print(x3)\r\n# \r\n# x4=mvnrnd1(G)\r\n# print(x4)\r\n# \r\n# draw_box(-15,15,-15,15,'blue',4)\r\n# x=array([[2], [3], [1], [0], [0.5]]) \r\n# draw_car(x)\r\n# axis ('equal')\r\n# draw_tank(-2,-3,-1) \r\n# print(randn())\r\n# \r\n# A = array([[0,0,1,0],[0,0,0,1],[0,2,0,0],[0,3,0,0]])\r\n# B = array([[0,0,4,5]]).T\r\n# poles = [-2,-2.1,-2.2,-2.3]\r\n# K = place_poles(A,B,poles).gain_matrix\r\n# print(K)\r\n# \r\n# \r\n" ]
[ [ "numpy.random.multivariate_normal", "numpy.concatenate", "numpy.arctan2", "numpy.random.randn", "numpy.mean", "matplotlib.patches.Polygon", "numpy.linalg.eig", "numpy.arange", "numpy.sin", "scipy.linalg.norm", "numpy.log", "numpy.linalg.inv", "matplotlib.patches.Rectangle", "numpy.array", "matplotlib.patches.Ellipse", "matplotlib.collections.PatchCollection", "numpy.cos", "scipy.linalg.expm", "numpy.ones", "scipy.linalg.sqrtm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
AhmedHani/Kaggle-Machine-Learning-Competitions
[ "b306816463affb0595618844ae479243e505418d" ]
[ "Easy/What's Cooking/logistic_regression.py" ]
[ "__author__ = 'Ahmed Hani Ibrahim'\n\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn import svm\nfrom get_data import *\nfrom get_data_2 import *\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport scipy.sparse\nimport csv\n\nlr = LogisticRegression()\nlabels, training_data_matrix, unique_ingredients = get_training_data_matrix(get_train_data())\nlr = lr.fit(training_data_matrix, labels)\n\nprint(\"Training Done\")\n\ntest_data, ids = get_test_data()\ntest_data_matrix = get_test_data_matrix(test_data, unique_ingredients)\n\nres = lr.predict(test_data_matrix)\nprint(\"Predicting Done\")\nsubmission = dict(zip(ids, res))\n\nwr = csv.writer(open('Logistics_Regression_Result.csv', 'wb'))\nwr.writerow(['id', 'cuisine'])\n\nfor first, second in submission.items():\n wr.writerow([first, second])\n\nprint(\"done\")\n\n#print(cross_val_score(lr, training_data_matrix, labels, cv=5).mean())\n\n\n\n" ]
[ [ "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bbueno5000/sc2_agents
[ "ea7b610667c37e528597c23589b91f167a1ea1fe" ]
[ "sc2_agents/agents/collect_mineral_shards.py" ]
[ "# MIT License\n#\n# Copyright (c) 2018 Benjamin Bueno (bbueno5000)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nA collection of agents for collecting mineral shards.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom numpy import array as np_array\nfrom numpy import linalg as np_linalg\nfrom pysc2.agents.scripted_agent import CollectMineralShards\nfrom pysc2.lib import actions\n\n\nclass CollectMineralShardsAgent(CollectMineralShards):\n \"\"\"\n Generic agent for collecting mineral shards.\n \"\"\"\n\n def __init__(self):\n super(CollectMineralShardsAgent, self).reset()\n self.functions = actions.FUNCTIONS\n self.not_queued = [0]\n self.player_friendly = 1\n self.player_neutral = 3 # beacon/minerals\n self.results = {}\n self.results['agent_id'] = self.__class__.__name__\n self.results['episode_data'] = {'episode_lengths': [], 'episode_rewards': []}\n self.select_all = [0]\n self.select_worker_all = [2]\n self.terran_commandcenter = 18\n self.terran_scv = 45\n\n def reset(self):\n super(CollectMineralShardsAgent, self).reset()\n self.mean_reward = 0\n self.results['episode_data']['episode_lengths'].append(self.steps)\n self.results['episode_data']['episode_rewards'].append(self.reward)\n self.reward = 0\n self.steps = 0\n\n\nclass CollectMineralShardsAgent001(CollectMineralShardsAgent):\n \"\"\"\n Scripted agent for collecting mineral shards.\n \"\"\"\n\n def step(self, timestep):\n super(CollectMineralShardsAgent001, self).step(timestep)\n if self.functions.Move_screen.id in timestep.observation.available_actions:\n player_relative = timestep.observation.feature_screen.player_relative\n neutral_y, neutral_x = (player_relative == self.player_neutral).nonzero()\n player_y, player_x = (player_relative == self.player_friendly).nonzero()\n player = [int(player_x.mean()), int(player_y.mean())]\n closest, min_dist = None, None\n for p in zip(neutral_x, neutral_y):\n dist = np_linalg.norm(np_array(player) - np_array(p))\n if not min_dist or dist < min_dist:\n closest, min_dist = p, dist\n return actions.FunctionCall(self.functions.Move_screen.id, [self.not_queued, closest])\n else:\n return actions.FunctionCall(self.functions.select_army.id, [self.select_all])\n return actions.FunctionCall(self.functions.no_op.id, [])\n\n\nclass CollectMineralShardsAgent002(CollectMineralShardsAgent):\n \"\"\"\n DeepQ agent for collecting mineral shards.\n \"\"\"\n\n def __init__(self, act_x, act_y):\n 
super(CollectMineralShardsAgent002, self).__init__()\n self.act_x = act_x\n self.act_y = act_y\n self.mean_reward = 0\n self.x_coord = 0\n self.y_coord = 0\n\n def screen(self, observation):\n player_relative = observation.feature_screen.player_relative\n return (player_relative == self.player_neutral).astype(int)\n\n def step(self, timestep):\n super(CollectMineralShardsAgent002, self).step(timestep)\n screen = self.screen(timestep.observation)\n self.x_coord = self.act_x(np_array(screen)[None])[0]\n self.y_coord = self.act_y(np_array(screen)[None])[0]\n if self.functions.Move_screen.id in timestep.observation.available_actions:\n return actions.FunctionCall(self.functions.Move_screen.id, [self.not_queued, [self.x_coord, self.y_coord]])\n elif self.functions.select_army.id in timestep.observation.available_actions:\n return actions.FunctionCall(self.functions.select_army.id, [self.select_all])\n return actions.FunctionCall(self.functions.no_op.id, [])\n\n def training_step(self, timestep, **kwargs):\n super(CollectMineralShardsAgent002, self).step(timestep)\n screen = self.screen(timestep.observation)\n update_eps = kwargs.pop('update_eps', \"Key not found.\")\n self.x_coord = self.act_x(np_array(screen)[None], update_eps, **kwargs)[0]\n self.y_coord = self.act_y(np_array(screen)[None], update_eps, **kwargs)[0]\n if self.functions.Move_screen.id in timestep.observation.available_actions:\n return actions.FunctionCall(self.functions.Move_screen.id, [self.not_queued, [self.x_coord, self.y_coord]])\n elif self.functions.select_army.id in timestep.observation.available_actions:\n return actions.FunctionCall(self.functions.select_army.id, [self.select_all])\n return actions.FunctionCall(self.functions.no_op.id, [])\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaolongkzz/ROS
[ "52c70d9d22fe1714c438312fde61214920a4dc3c" ]
[ "BIRL_modular_robot/mr_setup_detect/scripts/setup_detector.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nlinkeway\nJan. 2017\n\nPARAMETERS:\n + /robot_name ~ default: 'robot'\nSUBSCRIPTIONS:\n + ar_pose_marker (ar_track_alvar_msgs/AlvarMarkers) ~ pose of all markers detected by ar_track_alvar\nOUTPUT:\nAutomatically generate robot.urdf.xacro to [PATH_TO_MR_DESCRIPTION_PACKAGE]/robot/robot.urdf.xacro\nAutomatically generate robot_display.launch to [PATH_TO_MR_DESCRIPTION_PACKAGE]/lauch/robot_display.launch\n\n\"\"\"\n\nimport rospy\nimport copy\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nimport numpy as np\nimport math\nimport tf\nimport rospkg\nimport sys \nimport roslaunch\nfrom sensor_msgs.msg import JointState\n\nThreshold= 0.3\ndatabase_dic={} \nmarkers=[]\n\n# load from a database that keeps all modules information to database_dic\ndef load_module_data(file_name):\n database = open(file_name, \"r\")\n for line in database:\n [tagID,type] = line.strip('\\n').split(': ',1)\n database_dic[int(tagID)]=type\n database.close()\n\n# topic Subscriber. TODO: message pre-processing \ndef receive_marker_msg(topic):\n detected_tag_id=[]\n msg = rospy.wait_for_message(topic, AlvarMarkers)\n for marker in msg.markers:\n id = marker.id\n if id not in detected_tag_id:\n if id in database_dic.keys():\n detected_tag_id.append(id)\n markers.append( copy.deepcopy(marker) )\n\n# connect_angle has only 4 alternatives due to mechanical pin hole\ndef descretize_connect_angle(angle): \n while angle < 0:\n angle = angle + math.pi *2\n while angle > math.pi *2:\n angle = angle - math.pi*2\n\n if math.fabs(angle) < math.pi/4.0:\n return 0\n if math.fabs(angle-math.pi/2) < math.pi/4.0:\n return math.pi/2\n if math.fabs(angle- math.pi) < math.pi/4.0:\n return math.pi\n if math.fabs(angle - math.pi*3/2 ) < math.pi/4.0:\n return math.pi*3/2\n return 0\n\n# INPUT: child_marker and its state, unchecked markers\n# OUTPUT: state of parent module_state\n# TODO: use combinatorial optimization methods instead of hardcoded geometrical constraints to achevie generality of the solution\ndef find_parent_module(child_marker,child_inversion,child_joint_angle,candidate_parent_markers):\n parent_module_marker = []\n connect_angle =0 \n parent_joint_angle =0\n \n if not candidate_parent_markers: # if no unchecked markers\n return [0,0,0]\n\n # find parent_module and its inversion\n for candidate_parent_marker in candidate_parent_markers:\n vector= np.array([\n child_marker.pose.pose.position.x - candidate_parent_marker.pose.pose.position.x,\n child_marker.pose.pose.position.y - candidate_parent_marker.pose.pose.position.y,\n child_marker.pose.pose.position.z - candidate_parent_marker.pose.pose.position.z,\n ])\n vector_mag = np.linalg.norm(vector)\n if vector_mag > Threshold: # if candicate marker too far from child-marker\n continue\n\n unit_vector= vector / vector_mag\n \n matrix= tf.transformations.quaternion_matrix( [child_marker.pose.pose.orientation.x,\n child_marker.pose.pose.orientation.y,\n child_marker.pose.pose.orientation.z,\n child_marker.pose.pose.orientation.w])\n y_axis= np.array([matrix[0][1],matrix[1][1] ,matrix[2][1]])\n x_axis= np.array([matrix[0][0],matrix[1][0] ,matrix[2][0]])\n z_axis= np.array([matrix[0][2],matrix[1][2] ,matrix[2][2]])\n matrix= tf.transformations.quaternion_matrix([candidate_parent_marker.pose.pose.orientation.x,\n candidate_parent_marker.pose.pose.orientation.y,\n candidate_parent_marker.pose.pose.orientation.z,\n candidate_parent_marker.pose.pose.orientation.w])\n candidate_y_axis= np.array([matrix[0][1], matrix[1][1], matrix[2][1]])\n 
candidate_x_axis= np.array([matrix[0][0], matrix[1][0], matrix[2][0]])\n candidate_z_axis= np.array([matrix[0][2], matrix[1][2], matrix[2][2]])\n\n if database_dic[child_marker.id] in ['T', 't'] and child_inversion== 'inverted':\n if np.dot(unit_vector, candidate_y_axis) < -math.cos(26.0*math.pi/180.0):\n parent_module_marker= candidate_parent_marker # parent found\n parent_module_inversion= 'inverted'\n elif database_dic[candidate_parent_marker.id] in ['T','t']:\n parent_module_marker= candidate_parent_marker # parent found\n parent_module_inversion= 'upright'\n elif np.dot( unit_vector, candidate_y_axis) > math.cos(26.0*math.pi/180.0):\n parent_module_marker= candidate_parent_marker # parent found\n parent_module_inversion= 'upright'\n\n else: # for markers of these kind, parent should locate on y_axis\n # if candicate marker not on y_axis of child with err of 26.0 degree \n if np.dot( unit_vector, y_axis) < -math.cos(26.0*math.pi/180.0) and child_inversion == 'inverted':\n # in this case child belongs to non-Tt type inverted\n parent_module_marker= candidate_parent_marker # parent found\n\n # find out inversion direction\n angle_y = math.acos(np.dot(y_axis,candidate_y_axis))\n if math.fabs(angle_y) < 26.0*math.pi/180.0: # if 2 y-axes are almost the same\n parent_module_inversion= 'inverted'\n else:\n parent_module_inversion= 'upright'\n break\n\n # if candicate marker not on y_axis of child with err of 26.0 degree \n # print math.acos(np.dot( unit_vector, y_axis))*180/math.pi\n if np.dot( unit_vector, y_axis) > math.cos(26.0*math.pi/180.0) and child_inversion == 'upright':\n # in this case child is upright\n parent_module_marker= candidate_parent_marker # parent found\n\n # find out inversion direction\n angle_y = math.acos(np.dot(y_axis,candidate_y_axis))\n if math.fabs(angle_y - math.pi) < 26.0*math.pi/180.0: #if 2 y-axes are almost opposite\n parent_module_inversion= 'inverted'\n else:\n parent_module_inversion= 'upright'\n\n if parent_module_marker == []:\n rospy.logerr( 'can\\'t find parent marker for marker id: {}'.format(child_marker.id))\n return [0,0,0,0]\n\n # calculate connect_angle, which is the angle between two z axes of markers\n z_axes_angle = math.acos( np.clip( np.dot(z_axis,candidate_z_axis), -1, 1))\n if np.dot(unit_vector, np.cross(z_axis, candidate_z_axis) ) > 0: # use unit_vector to determine positive direction of connect_angle \n connect_angle = descretize_connect_angle( z_axes_angle )\n else:\n connect_angle = descretize_connect_angle( - z_axes_angle )\n \n\n\n return [parent_module_marker, parent_module_inversion, connect_angle] \n\n# This function recomputes joint angles for T-type module\ndef compute_T_joint_angle(chain):\n child = {}\n grandchild = {}\n new_chain = []\n\n for module in chain:\n if child:\n child['joint_angle']=0\n\n if child and child['type'] in ['t','T']:\n matrix= tf.transformations.quaternion_matrix([ child['pose'].orientation.x,\n child['pose'].orientation.y,\n child['pose'].orientation.z,\n child['pose'].orientation.w])\n y_axis= np.array([matrix[0][1], matrix[1][1], matrix[2][1]])\n z_axis= np.array([matrix[0][2], matrix[1][2], matrix[2][2]]) \n\n vec_child2module = np.array([ module['pose'].position.x - child['pose'].position.x,\n module['pose'].position.y - child['pose'].position.y,\n module['pose'].position.z - child['pose'].position.z]) \n uni_child2module = vec_child2module / np.linalg.norm(vec_child2module)\n\n vec_child2grand = np.array([ grandchild['pose'].position.x - child['pose'].position.x,\n grandchild['pose'].position.y - 
child['pose'].position.y,\n grandchild['pose'].position.z - child['pose'].position.z]) \n uni_child2grand = vec_child2grand / np.linalg.norm(vec_child2grand)\n\n if grandchild['type'] == {} and child['inversion'] == 'upright':\n child['joint_angle'] = 0\n elif child['inversion'] == 'upright': \n child['joint_angle'] = math.acos(np.dot(y_axis,uni_child2grand))\n if np.dot(z_axis,np.cross(y_axis,uni_child2grand) ) < 0:\n child['joint_angle'] = - child['joint_angle']\n\n if child['inversion'] == 'inverted': \n child['joint_angle'] = math.acos(np.dot(y_axis,uni_child2module))\n if np.dot(z_axis,np.cross(y_axis,uni_child2module) ) < 0:\n child['joint_angle'] = - child['joint_angle'] \n\n if child:\n new_chain.append(child)\n\n grandchild = child\n child = module\n\n module['joint_angle'] = 0\n if module['type'] in ['t','T']:\n if module['inversion'] =='upright':\n matrix= tf.transformations.quaternion_matrix([ module['pose'].orientation.x,\n module['pose'].orientation.y,\n module['pose'].orientation.z,\n module['pose'].orientation.w])\n y_axis= np.array([matrix[0][1], matrix[1][1], matrix[2][1]])\n z_axis= np.array([matrix[0][2], matrix[1][2], matrix[2][2]]) \n vec_module2child = np.array([ child['pose'].position.x - module['pose'].position.x,\n child['pose'].position.y - module['pose'].position.y,\n child['pose'].position.z - module['pose'].position.z]) \n uni_vec = np.linalg.norm(vec_module2child) \n module['joint_angle'] = math.acos(np.dot(uni_vec,y_axis))\n if np.dot(z_axis,np.cross(y_axis,uni_vec) ) < 0:\n module['joint_angle'] = - module['joint_angle']\n\n new_chain.append(module)\n return new_chain\n\n# writes xacros to urdf_file_path according to template file specified by template_file_path \ndef create_urdf_file(chain_list,urdf_file_path,template_file_path):\n with open(urdf_file_path, \"w\") as urdf_file:\n\n with open(template_file_path) as template_file:\n for line in template_file:\n urdf_file.write(line)\n template_file.close()\n cnt=1\n urdf_file.write(\" <link name=\\\"base_link\\\"/>\\n\\n\")\n\n for module in chain_list:\n \n if module['inversion'] == 'upright':\n inversion = 'module'\n else:\n inversion = 'invert'\n\n if cnt == 1:\n parent_link = 'base_link'\n \n type = module['type']\n \n if parent_link[0] in ['G','I','T','sl'] and type in ['g','i','t']:\n urdf_file.write(\" <xacro:connect_link_100_85 name=\\\"cl{}\\\" parent=\\\"{}\\\">\\n\".format( \"{}-{}\".format(cnt,cnt+1), parent_link) )\n urdf_file.write(\" <origin xyz=\\\"0 0 0\\\" rpy=\\\"0 0 0\\\" />\\n\")\n urdf_file.write(\" </xacro:connect_link_100_85>\\n\\n\")\n\n if parent_link[0] in ['g','i','t'] and type in ['G','I','T','sl']:\n urdf_file.write(\" <xacro:connect_link_85_100 name=\\\"cl{}\\\" parent=\\\"{}\\\">\\n\".format( \"{}-{}\".format(cnt,cnt+1), parent_link) )\n urdf_file.write(\" <origin xyz=\\\"0 0 0\\\" rpy=\\\"0 0 0\\\" />\\n\")\n urdf_file.write(\" </xacro:connect_link_85_100>\\n\\n\")\n\n if type == 'sl':\n urdf_file.write(\" <xacro:sleeve_link name=\\\"sl{}\\\" parent=\\\"{}\\\">\\n\".format(cnt,parent_link) )\n urdf_file.write(\" <origin xyz=\\\"0 0 0\\\" rpy=\\\"0 0 0\\\" />\\n\")\n urdf_file.write(\" </xacro:sleeve_link>\\n\\n\")\n else:\n urdf_file.write(\" <xacro:{}_{} name=\\\"{}{}\\\" parent=\\\"{}\\\">\\n\".format(type,inversion,type,cnt,parent_link) )\n urdf_file.write(\" <origin xyz=\\\"0 0 0\\\" rpy=\\\"0 0 {}\\\" />\\n\".format(module['connect_angle']))\n urdf_file.write(\" </xacro:{}_{}>\\n\\n\".format(type,inversion))\n\n parent_link = '{}{}_Link'.format(type,cnt)\n cnt = cnt 
+1\n\n urdf_file.write(\"\\n</robot>\")\n urdf_file.close()\n\n# create a .launch file and a .rviz(optionally) in order to display or visualize generated xacro_file\ndef create_launch_file(launch_file, xacro_file, chain, rviz_conf_file = [],publish_joint_states = True):\n with open(launch_file, \"w\") as file:\n file.write('<launch>\\n\\n')\n file.write(' <arg name=\"gui\" default=\"true\" />\\n\\n')\n file.write(' <param name=\\\"robot_description\\\" command=\\\"$(find xacro)/xacro \\'{}\\' \\\"/>\\n'.format(xacro_file))\n if publish_joint_states:\n file.write(' <node name=\\\"module_state_publisher\\\" pkg=\\\"joint_state_publisher\\\" type=\\\"joint_state_publisher\\\">\\n')\n file.write(' <param name=\\\"use_gui\\\" value=\\\"true\\\"/>\\n')\n file.write(' </node>\\n')\n file.write(' <node name=\\\"robot_state_publisher\\\" pkg=\\\"robot_state_publisher\\\" type=\\\"state_publisher\\\" />\\n')\n if rviz_conf_file == []:\n file.write(' <node name=\\\"rviz\\\" pkg=\\\"rviz\\\" type=\\\"rviz\\\" args=\\\"-f base_link\\\" if=\\\"$(arg gui)\\\"/>\\n\\n')\n else:\n file.write(' <node name=\\\"rviz\\\" pkg=\\\"rviz\\\" type=\\\"rviz\\\" args=\\\"-d {} -f base_link\\\" if=\\\"$(arg gui)\\\"/>\\n\\n'.format(rviz_conf_file))\n \n # write dependent_joints parameters of gripper module\n cnt=1\n for module in chain:\n if module['type'] in ['g', 'G']:\n module_name = '{}{}'.format(module['type'],cnt)\n file.write(' <rosparam>\\n')\n file.write(' dependent_joints:\\n')\n file.write(' {0}_Joint1: {1} parent: {0}_Joint, factor: 1 {2}\\n'.format(module_name, '{', '}') )\n file.write(' </rosparam>\\n\\n')\n cnt = cnt+1\n\n file.write('</launch>')\n file.close()\n\n# show string in GREEN color\ndef colorize(str):\n return \"\\033[1;32;40m\" + str + \"\\033[0m\"\n\nif __name__ == \"__main__\":\n rospy.init_node(\"setup_detector\", log_level=rospy.INFO)\n\n robot_name = \"robot\"\n if len(sys.argv) > 1:\n robot_name = sys.argv[1]\n\n database_path = rospkg.RosPack().get_path('mr_setup_detect') + '/module_database/modules.dat'\n load_module_data(database_path)\n\n # receive message from /ar_pose_marker topic\n receive_marker_msg(\"/ar_pose_marker\")\n \n chain= [] # list that stores module type, inversion, assembly parameter from end-effector to base\n for marker in markers:\n if database_dic[marker.id] in ['g', 'G', 'S', 'W']: # if is marker on end-effector\n inversion= 'upright'\n joint_angle= 0\n \n while markers: # while not empty \n markers.remove(marker)\n [parent_module_marker, parent_module_inversion, connect_angle]=find_parent_module(marker,inversion,joint_angle,markers)\n chain.append( { 'pose': marker.pose.pose,\n 'type': database_dic[marker.id],\n 'inversion': inversion,\n 'connect_angle': connect_angle } )\n \n marker=copy.deepcopy(parent_module_marker)\n inversion= parent_module_inversion\n\n chain = compute_T_joint_angle(chain)\n break\n chain.reverse() # from base to end-effector\n\n # TODO: do it in a recursive way instead of a ugly return in find_parent_module()\n # chain= []\n # def find_chain(child_marker, unchecked_markers):\n # if not unchecked_markers:\n # return\n # ...find out parent_marker...\n # unchecked_markers.remove(parent_marker)\n # find_chain(parent_marker, unchecked_markers)\n # chain.append(parent_marker) #chain stores markers from base to end-effector\n \n for node in chain:\n rospy.loginfo( \"module_type:{}; direction:{}; connection:{}; joint_angle:{}\"\n .format(node['type'],node['inversion'],node['connect_angle'],node['joint_angle']) )\n\n package_path = 
rospkg.RosPack().get_path('mr_description')\n xacro_file = package_path + '/robots/{}.urdf.xacro'.format(robot_name)\n template_file = package_path + '/robots/mr_xacro_template.urdf.xacro'\n create_urdf_file(chain, xacro_file, template_file)\n\n launch_file = package_path + '/launch/{}_display.launch'.format(robot_name)\n rviz_conf_file = package_path + '/rviz/{}.rviz'.format(robot_name)\n create_launch_file(launch_file, xacro_file, chain, rviz_conf_file, False)\n \n rospy.loginfo( colorize(\"Files automatically generated!\") )\n rospy.loginfo( colorize(\"Launching {}_display.launch...\".format(robot_name)) )\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n roslaunch.configure_logging(uuid)\n launch = roslaunch.parent.ROSLaunchParent(uuid, [launch_file]) \n launch.start()\n \n # publish joint states\n pub = rospy.Publisher('joint_states', JointState, queue_size=5)\n while not rospy.is_shutdown():\n msg = JointState()\n cnt = 1\n for node in chain:\n joint_name = \"{}{}_Joint\".format(node['type'], cnt)\n msg.name.append(joint_name)\n msg.position.append(node['joint_angle'])\n if node['type'] in ['g','G']: # gripper module has two joints\n msg.name.append(joint_name + '1')\n msg.position.append(node['joint_angle'])\n cnt = cnt +1\n msg.header.stamp = rospy.Time.now()\n\n pub.publish(msg)\n rospy.sleep(0.1)\n \n\n launch.shutdown()\n create_launch_file(launch_file, xacro_file, chain, rviz_conf_file, True)\n rospy.loginfo( colorize(\"Files saved!\") )\n rospy.loginfo( colorize(\"You can try [ roslaunch mr_description {}_display.launch ]\".format(robot_name)) )\n\n \n" ]
[ [ "numpy.dot", "numpy.array", "numpy.linalg.norm", "numpy.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
russellcaughey/ml-agents
[ "493c75bf683d35d512ae6fb57d4a1a332116df15" ]
[ "gym-unity/gym_unity/envs/unity_env.py" ]
[ "import logging\nimport itertools\nimport gym\nimport numpy as np\nfrom mlagents.envs import UnityEnvironment\nfrom gym import error, spaces\n\n\nclass UnityGymException(error.Error):\n \"\"\"\n Any error related to the gym wrapper of ml-agents.\n \"\"\"\n\n pass\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"gym_unity\")\n\n\nclass UnityEnv(gym.Env):\n \"\"\"\n Provides Gym wrapper for Unity Learning Environments.\n Multi-agent environments use lists for object types, as done here:\n https://github.com/openai/multiagent-particle-envs\n \"\"\"\n\n def __init__(\n self,\n environment_filename: str,\n worker_id: int = 0,\n use_visual: bool = False,\n uint8_visual: bool = False,\n multiagent: bool = False,\n flatten_branched: bool = False,\n no_graphics: bool = False,\n allow_multiple_visual_obs: bool = False,\n ):\n \"\"\"\n Environment initialization\n :param environment_filename: The UnityEnvironment path or file to be wrapped in the gym.\n :param worker_id: Worker number for environment.\n :param use_visual: Whether to use visual observation or vector observation.\n :param uint8_visual: Return visual observations as uint8 (0-255) matrices instead of float (0.0-1.0).\n :param multiagent: Whether to run in multi-agent mode (lists of obs, reward, done).\n :param flatten_branched: If True, turn branched discrete action spaces into a Discrete space rather than\n MultiDiscrete.\n :param no_graphics: Whether to run the Unity simulator in no-graphics mode\n :param allow_multiple_visual_obs: If True, return a list of visual observations instead of only one.\n \"\"\"\n self._env = UnityEnvironment(\n environment_filename, worker_id, no_graphics=no_graphics\n )\n self.name = self._env.academy_name\n self.visual_obs = None\n self._current_state = None\n self._n_agents = None\n self._multiagent = multiagent\n self._flattener = None\n self.game_over = (\n False\n ) # Hidden flag used by Atari environments to determine if the game is over\n self._allow_multiple_visual_obs = allow_multiple_visual_obs\n\n # Check brain configuration\n if len(self._env.brains) != 1:\n raise UnityGymException(\n \"There can only be one brain in a UnityEnvironment \"\n \"if it is wrapped in a gym.\"\n )\n if len(self._env.external_brain_names) <= 0:\n raise UnityGymException(\n \"There are not any external brain in the UnityEnvironment\"\n )\n\n self.brain_name = self._env.external_brain_names[0]\n brain = self._env.brains[self.brain_name]\n\n if use_visual and brain.number_visual_observations == 0:\n raise UnityGymException(\n \"`use_visual` was set to True, however there are no\"\n \" visual observations as part of this environment.\"\n )\n self.use_visual = brain.number_visual_observations >= 1 and use_visual\n\n if not use_visual and uint8_visual:\n logger.warning(\n \"`uint8_visual was set to true, but visual observations are not in use. \"\n \"This setting will not have any effect.\"\n )\n else:\n self.uint8_visual = uint8_visual\n\n if brain.number_visual_observations > 1 and not self._allow_multiple_visual_obs:\n logger.warning(\n \"The environment contains more than one visual observation. \"\n \"You must define allow_multiple_visual_obs=True to received them all. 
\"\n \"Otherwise, please note that only the first will be provided in the observation.\"\n )\n\n if brain.num_stacked_vector_observations != 1:\n raise UnityGymException(\n \"There can only be one stacked vector observation in a UnityEnvironment \"\n \"if it is wrapped in a gym.\"\n )\n\n # Check for number of agents in scene.\n initial_info = self._env.reset()[self.brain_name]\n self._check_agents(len(initial_info.agents))\n\n # Set observation and action spaces\n if brain.vector_action_space_type == \"discrete\":\n if len(brain.vector_action_space_size) == 1:\n self._action_space = spaces.Discrete(brain.vector_action_space_size[0])\n else:\n if flatten_branched:\n self._flattener = ActionFlattener(brain.vector_action_space_size)\n self._action_space = self._flattener.action_space\n else:\n self._action_space = spaces.MultiDiscrete(\n brain.vector_action_space_size\n )\n\n else:\n if flatten_branched:\n logger.warning(\n \"The environment has a non-discrete action space. It will \"\n \"not be flattened.\"\n )\n high = np.array([1] * brain.vector_action_space_size[0])\n self._action_space = spaces.Box(-high, high, dtype=np.float32)\n high = np.array([np.inf] * brain.vector_observation_space_size)\n self.action_meanings = brain.vector_action_descriptions\n if self.use_visual:\n if brain.camera_resolutions[0][\"blackAndWhite\"]:\n depth = 1\n else:\n depth = 3\n self._observation_space = spaces.Box(\n 0,\n 1,\n dtype=np.float32,\n shape=(\n brain.camera_resolutions[0][\"height\"],\n brain.camera_resolutions[0][\"width\"],\n depth,\n ),\n )\n else:\n self._observation_space = spaces.Box(-high, high, dtype=np.float32)\n\n def reset(self):\n \"\"\"Resets the state of the environment and returns an initial observation.\n In the case of multi-agent environments, this is a list.\n Returns: observation (object/list): the initial observation of the\n space.\n \"\"\"\n info = self._env.reset()[self.brain_name]\n n_agents = len(info.agents)\n self._check_agents(n_agents)\n self.game_over = False\n\n if not self._multiagent:\n obs, reward, done, info = self._single_step(info)\n else:\n obs, reward, done, info = self._multi_step(info)\n return obs\n\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. 
When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n Accepts an action and returns a tuple (observation, reward, done, info).\n In the case of multi-agent environments, these are lists.\n Args:\n action (object/list): an action provided by the environment\n Returns:\n observation (object/list): agent's observation of the current environment\n reward (float/list) : amount of reward returned after previous action\n done (boolean/list): whether the episode has ended.\n info (dict): contains auxiliary diagnostic information, including BrainInfo.\n \"\"\"\n\n # Use random actions for all other agents in environment.\n if self._multiagent:\n if not isinstance(action, list):\n raise UnityGymException(\n \"The environment was expecting `action` to be a list.\"\n )\n if len(action) != self._n_agents:\n raise UnityGymException(\n \"The environment was expecting a list of {} actions.\".format(\n self._n_agents\n )\n )\n else:\n if self._flattener is not None:\n # Action space is discrete and flattened - we expect a list of scalars\n action = [self._flattener.lookup_action(_act) for _act in action]\n action = np.array(action)\n else:\n if self._flattener is not None:\n # Translate action into list\n action = self._flattener.lookup_action(action)\n\n info = self._env.step(action)[self.brain_name]\n n_agents = len(info.agents)\n self._check_agents(n_agents)\n self._current_state = info\n\n if not self._multiagent:\n obs, reward, done, info = self._single_step(info)\n self.game_over = done\n else:\n obs, reward, done, info = self._multi_step(info)\n self.game_over = all(done)\n return obs, reward, done, info\n\n def _single_step(self, info):\n if self.use_visual:\n visual_obs = info.visual_observations\n\n if self._allow_multiple_visual_obs:\n visual_obs_list = []\n for obs in visual_obs:\n visual_obs_list.append(self._preprocess_single(obs[0]))\n self.visual_obs = visual_obs_list\n else:\n self.visual_obs = self._preprocess_single(visual_obs[0][0])\n\n default_observation = self.visual_obs\n else:\n default_observation = info.vector_observations[0, :]\n\n return (\n default_observation,\n info.rewards[0],\n info.local_done[0],\n {\"text_observation\": info.text_observations[0], \"brain_info\": info},\n )\n\n def _preprocess_single(self, single_visual_obs):\n if self.uint8_visual:\n return (255.0 * single_visual_obs).astype(np.uint8)\n else:\n return single_visual_obs\n\n def _multi_step(self, info):\n if self.use_visual:\n self.visual_obs = self._preprocess_multi(info.visual_observations)\n default_observation = self.visual_obs\n else:\n default_observation = info.vector_observations\n return (\n list(default_observation),\n info.rewards,\n info.local_done,\n {\"text_observation\": info.text_observations, \"brain_info\": info},\n )\n\n def _preprocess_multi(self, multiple_visual_obs):\n if self.uint8_visual:\n return [\n (255.0 * _visual_obs).astype(np.uint8)\n for _visual_obs in multiple_visual_obs\n ]\n else:\n return multiple_visual_obs\n\n def render(self, mode=\"rgb_array\"):\n return self.visual_obs\n\n def close(self):\n \"\"\"Override _close in your subclass to perform any necessary cleanup.\n Environments will automatically close() themselves when\n garbage collected or when the program exits.\n \"\"\"\n self._env.close()\n\n def get_action_meanings(self):\n return self.action_meanings\n\n def seed(self, seed=None):\n \"\"\"Sets the seed for this env's random number generator(s).\n Currently not implemented.\n \"\"\"\n 
logger.warn(\"Could not seed environment %s\", self.name)\n return\n\n def _check_agents(self, n_agents):\n if not self._multiagent and n_agents > 1:\n raise UnityGymException(\n \"The environment was launched as a single-agent environment, however\"\n \"there is more than one agent in the scene.\"\n )\n elif self._multiagent and n_agents <= 1:\n raise UnityGymException(\n \"The environment was launched as a mutli-agent environment, however\"\n \"there is only one agent in the scene.\"\n )\n if self._n_agents is None:\n self._n_agents = n_agents\n logger.info(\"{} agents within environment.\".format(n_agents))\n elif self._n_agents != n_agents:\n raise UnityGymException(\n \"The number of agents in the environment has changed since \"\n \"initialization. This is not supported.\"\n )\n\n @property\n def metadata(self):\n return {\"render.modes\": [\"rgb_array\"]}\n\n @property\n def reward_range(self):\n return -float(\"inf\"), float(\"inf\")\n\n @property\n def spec(self):\n return None\n\n @property\n def action_space(self):\n return self._action_space\n\n @property\n def observation_space(self):\n return self._observation_space\n\n @property\n def number_agents(self):\n return self._n_agents\n\n\nclass ActionFlattener:\n \"\"\"\n Flattens branched discrete action spaces into single-branch discrete action spaces.\n \"\"\"\n\n def __init__(self, branched_action_space):\n \"\"\"\n Initialize the flattener.\n :param branched_action_space: A List containing the sizes of each branch of the action\n space, e.g. [2,3,3] for three branches with size 2, 3, and 3 respectively.\n \"\"\"\n self._action_shape = branched_action_space\n self.action_lookup = self._create_lookup(self._action_shape)\n self.action_space = spaces.Discrete(len(self.action_lookup))\n\n @classmethod\n def _create_lookup(self, branched_action_space):\n \"\"\"\n Creates a Dict that maps discrete actions (scalars) to branched actions (lists).\n Each key in the Dict maps to one unique set of branched actions, and each value\n contains the List of branched actions.\n \"\"\"\n possible_vals = [range(_num) for _num in branched_action_space]\n all_actions = [list(_action) for _action in itertools.product(*possible_vals)]\n # Dict should be faster than List for large action spaces\n action_lookup = {\n _scalar: _action for (_scalar, _action) in enumerate(all_actions)\n }\n return action_lookup\n\n def lookup_action(self, action):\n \"\"\"\n Convert a scalar discrete action into a unique set of branched actions.\n :param: action: A scalar value representing one of the discrete actions.\n :return: The List containing the branched actions.\n \"\"\"\n return self.action_lookup[action]\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WadeYin9712/GD-VCR
[ "001652810294a7de25ae96bcbbde515873871159" ]
[ "visualbert/dataloaders/coco_dataset.py" ]
[ "import os\nimport random\nimport json\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nimport numpy as np\nimport numpy\nimport torch\nfrom torch.utils.data import Dataset\nfrom allennlp.data.dataset import Batch\nfrom allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.token_indexers import ELMoTokenCharactersIndexer\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.nn.util import get_text_field_mask\nfrom torch.utils.data import Dataset\nfrom dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize\nfrom dataloaders.mask_utils import make_mask\nfrom dataloaders.bert_field import BertField\nimport h5py\nfrom copy import deepcopy\n\nfrom torch.utils.data.dataloader import default_collate\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.dataset import Batch\nfrom pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word\nfrom dataloaders.bert_field import IntArrayField\nfrom allennlp.data.fields import ListField\n\nfrom .bert_data_utils import *\nfrom visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer\n\nfrom pycocotools.coco import COCO\nclass COCODataset(Dataset):\n def __init__(self, args, visual_genome_chunk = False):\n super(COCODataset, self).__init__()\n self.args = args\n self.coco = COCO(args.annots_path)\n self.annots_path = args.annots_path\n self.split_name = args.split_name\n self.data_root = args.data_root\n self.visual_genome_chunk = visual_genome_chunk\n self.masks = args.masks\n\n self.image_feature_type = args.image_feature_type\n self.text_only = args.get(\"text_only\", False)\n self.add_spatial_features = args.get(\"add_spatial_features\", False)\n self.expanded = False\n ########## Loading Annotations\n self.items = self.coco.loadAnns(self.coco.getAnnIds())\n\n print(\"{} of captions in total.\".format(len(self.items)))\n\n self.image_feat_reader = faster_RCNN_feat_reader()\n\n if args.get(\"chunk_path\", None) is not None and self.image_feature_type == \"nlvr\":\n print(\"Loading images...\")\n self.chunk = torch.load(args.chunk_path)\n average = 0.0\n counter = 0\n new_chunk = {}\n for image_id in self.chunk.keys():\n image_feat_variable, image_boxes, confidence = self.chunk[image_id]\n if \".npz\" in image_id:\n new_chunk[image_id] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)\n average += new_chunk[image_id][2]\n else:\n new_chunk[image_id+\".npz\"] = screen_feature(image_feat_variable, image_boxes,confidence, args.image_screening_parameters)\n average += new_chunk[image_id+\".npz\"][2]\n print(\"{} features on average.\".format(average/len(self.chunk)))\n self.chunk = new_chunk\n\n\n self.do_lower_case = args.do_lower_case\n self.bert_model_name = args.bert_model_name\n self.max_seq_length = args.max_seq_length\n self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)\n self.pretraining = args.pretraining\n self.masked_lm_prob = args.get(\"masked_lm_prob\", 0.15)\n\n with open(os.path.join('./cocoontology.json'), 'r') as f:\n coco = json.load(f)\n self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]\n self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}\n\n\n if self.image_feature_type == \"r2c\":\n items = []\n counter = 0\n for i 
in self.items:\n if self.expanded and index >= self.train_size:\n image_file_name = \"COCO_val2014_{:0>12d}.jpg\".format(i['image_id'])\n else:\n image_file_name = \"COCO_{}2014_{:0>12d}.jpg\".format(self.split_name, i['image_id'])\n if isinstance(self.masks[image_file_name], dict):\n items.append(i)\n else:\n # For some images, the detector seems to have Null output. Thus we just skip them. This will not affect much.\n counter += 1\n print(\"Discarded {} instances in {}.\".format(counter, self.split_name))\n self.items = items\n\n def get_image_features_by_training_index(self, index):\n item = self.items[index]\n\n if self.args.image_feature_type == \"flickr\":\n v_item = self.visual_genome_chunk[item['image_id']]\n image_feat_variable = v_item[\"features\"]\n image_boxes = None\n image_dim_variable = image_feat_variable.shape[0]\n if self.add_spatial_features:\n image_w = float(v_item['image_w'])\n image_h = float(v_item['image_h'])\n\n bboxes = v_item[\"boxes\"]\n box_width = bboxes[:, 2] - bboxes[:, 0]\n box_height = bboxes[:, 3] - bboxes[:, 1]\n scaled_width = box_width / image_w\n scaled_height = box_height / image_h\n scaled_x = bboxes[:, 0] / image_w\n scaled_y = bboxes[:, 1] / image_h\n box_width = box_width[..., np.newaxis]\n box_height = box_height[..., np.newaxis]\n scaled_width = scaled_width[..., np.newaxis]\n scaled_height = scaled_height[..., np.newaxis]\n scaled_x = scaled_x[..., np.newaxis]\n scaled_y = scaled_y[..., np.newaxis]\n\n spatial_features = np.concatenate(\n (scaled_x,\n scaled_y,\n scaled_x + scaled_width,\n scaled_y + scaled_height,\n scaled_width,\n scaled_height),\n axis=1)\n\n image_feat_variable = np.concatenate((image_feat_variable, spatial_features), axis=1)\n return image_feat_variable, image_boxes, image_dim_variable\n\n if self.args.image_feature_type == \"vqa_fix_100\":\n if self.expanded and index >= self.train_size:\n image_file_name = \"COCO_val2014_{:0>12d}.npy\".format(item['image_id'])\n else:\n image_file_name = \"COCO_{}2014_{:0>12d}.npy\".format(self.split_name, item['image_id'])\n\n if \"train\" in image_file_name:\n folder = os.path.join(self.data_root, \"data/detectron_fix_100/fc6/vqa/train2014\")\n elif \"val\" in image_file_name:\n folder = os.path.join(self.data_root, \"data/detectron_fix_100/fc6/vqa/val2014\")\n image_feat_variable = np.load(os.path.join(folder, image_file_name))\n image_dim_variable = image_feat_variable.shape[0]\n return image_feat_variable, None, image_dim_variable\n\n if self.expanded and index >= self.train_size:\n image_file_name = \"COCO_val2014_{:0>12d}.jpg.npz\".format(item['image_id'])\n return self.chunk_val[image_file_name]\n else:\n image_file_name = \"COCO_{}2014_{:0>12d}.jpg.npz\".format(self.split_name, item['image_id'])\n\n if self.args.get(\"chunk_path\", None) is not None:\n return self.chunk[image_file_name]\n\n def __len__(self):\n return len(self.items)\n\n def __getitem__(self, index):\n if self.image_feature_type == \"r2c\":\n return self.__getitem_detector__(index)\n\n item = self.items[index]\n sample = {}\n if not self.text_only:\n image_feat_variable, image_boxes, image_dim_variable = self.get_image_features_by_training_index(index)\n image_feat_variable = ArrayField(image_feat_variable)\n image_dim_variable = IntArrayField(np.array(image_dim_variable))\n sample[\"image_feat_variable\"] = image_feat_variable\n sample[\"image_dim_variable\"] = image_dim_variable\n sample[\"label\"] = image_dim_variable\n else:\n sample[\"label\"] = IntArrayField(np.array([0]))\n\n caption_a = 
item[\"caption\"]\n imageID = item[\"image_id\"]\n\n if self.expanded and index >= self.train_size:\n coco = self.coco_val\n else:\n coco = self.coco\n\n rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])\n\n if self.args.get(\"two_sentence\", True):\n if random.random() > 0.5:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]\n flag = True\n\n caption_b = item_b[\"caption\"]\n subword_tokens_a = self.tokenizer.tokenize(caption_a)\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)\n elif not self.args.get(\"no_next_sentence\", False):\n if random.random() < self.args.false_caption_ratio:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = item\n flag = True\n\n caption_b = item_b[\"caption\"]\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)\n else:\n caption_b = item[\"caption\"]\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)\n\n bert_feature = InputFeatures.convert_one_example_to_features_pretraining(\n example = bert_example,\n tokenizer=self.tokenizer,\n probability = self.masked_lm_prob)\n bert_feature.insert_field_into_dict(sample)\n\n return Instance(sample)\n\n def __getitem_detector__(self, index):\n item = self.items[index]\n sample = {}\n if self.expanded and index >= self.train_size:\n image_file_name = \"COCO_val2014_{:0>12d}.jpg\".format(item['image_id'])\n else:\n image_file_name = \"COCO_{}2014_{:0>12d}.jpg\".format(self.split_name, item['image_id'])\n\n image_info = self.masks[image_file_name]\n if \"train\" in image_file_name:\n image_file_path = os.path.join(self.data_root, \"train2014\", image_file_name)\n elif \"val\" in image_file_name:\n image_file_path = os.path.join(self.data_root, \"val2014\", image_file_name)\n\n ###################################################################\n # Most of things adapted from VCR\n # Load image now and rescale it. 
Might have to subtract the mean and whatnot here too.\n image = load_image(image_file_path)\n image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)\n image = to_tensor_and_normalize(image)\n c, h, w = image.shape\n ###################################################################\n metadata = self.masks[image_file_name] # Get the metadata\n # Load boxes.\n # We will use all detections\n dets2use = np.arange(len(metadata['boxes']))\n # [nobj, 14, 14]\n segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])\n\n # Chop off the final dimension, that's the confidence\n boxes = np.array(metadata['boxes'])[dets2use, :-1]\n # Possibly rescale them if necessary\n boxes *= img_scale\n boxes[:, :2] += np.array(padding[:2])[None]\n boxes[:, 2:] += np.array(padding[:2])[None]\n \n try:\n metadata['names'] = [i.split(\" \")[1][1:-1] for i in metadata[\"names\"]]\n except:\n pass\n obj_labels = [self.coco_obj_to_ind[metadata['names'][i]] for i in dets2use.tolist()]\n boxes = np.row_stack((window, boxes))\n segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)\n obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels\n\n sample['segms'] = ArrayField(segms, padding_value=0)\n sample['objects'] = ListField([LabelField(x, skip_indexing=True) for x in obj_labels])\n\n if not np.all((boxes[:, 0] >= 0.) & (boxes[:, 0] < boxes[:, 2])):\n import ipdb\n ipdb.set_trace()\n assert np.all((boxes[:, 1] >= 0.) & (boxes[:, 1] < boxes[:, 3]))\n assert np.all((boxes[:, 2] <= w))\n assert np.all((boxes[:, 3] <= h))\n sample['boxes'] = ArrayField(boxes, padding_value=-1)\n\n caption_a = item[\"caption\"]\n imageID = item[\"image_id\"]\n \n sample[\"label\"] = sample['objects'] # This is an useless field. 
Just so that they know the batch size.\n\n if self.expanded and index >= self.train_size:\n coco = self.coco_val\n else:\n coco = self.coco\n\n rest_anns = coco.loadAnns([i for i in coco.getAnnIds(imgIds=imageID) if i != item['id']])\n\n if self.args.get(\"two_sentence\", True):\n if random.random() > 0.5:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = rest_anns[random.randint(0, len(rest_anns) - 1)]\n flag = True # is next sentence\n\n caption_b = item_b[\"caption\"]\n subword_tokens_a = self.tokenizer.tokenize(caption_a)\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = subword_tokens_b, is_correct=flag, max_seq_length = self.max_seq_length)\n elif not self.args.get(\"no_next_sentence\", False):\n if random.random() < self.args.false_caption_ratio:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n while item_b[\"image_id\"] == imageID:\n item_b = self.items[random.randint(0, len(self.items) - 1)]\n flag = False\n else:\n item_b = item\n flag = True # is next sentence\n\n caption_b = item_b[\"caption\"]\n subword_tokens_b = self.tokenizer.tokenize(caption_b)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_b, text_b = None, is_correct=flag, max_seq_length = self.max_seq_length)\n else:\n subword_tokens_a = self.tokenizer.tokenize(caption_a)\n bert_example = InputExample(unique_id = index, text_a = subword_tokens_a, text_b = None, is_correct=None, max_seq_length = self.max_seq_length)\n\n bert_feature = InputFeatures.convert_one_example_to_features_pretraining(\n example = bert_example,\n tokenizer=self.tokenizer,\n probability = self.masked_lm_prob)\n bert_feature.insert_field_into_dict(sample)\n\n return image, Instance(sample)\n\n @classmethod\n def splits(cls, args):\n data_root = args.data_root\n\n if args.image_feature_type == \"r2c\":\n # For r2c, the masks are pre-computed from a larger detector. 
Thus, when pre-training on COCO, we follow the same procedure.\n masks = torch.load(os.path.join(data_root, \"mask_train.th\"))\n mask_val = torch.load(os.path.join(data_root, \"mask_val.th\"))\n for i in mask_val:\n masks[i] = mask_val[i]\n else:\n masks = None\n\n if args.image_feature_type == \"flickr\":\n import base64\n import csv\n import sys\n import zlib\n import time\n import mmap\n csv.field_size_limit(sys.maxsize)\n FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']\n infiles = [\n os.path.join(data_root, \"trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv\"),\n os.path.join(data_root, \"trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0\"),\n os.path.join(data_root, \"trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1\"),\n os.path.join(data_root, \"trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv\")\n ]\n chunk = {}\n chunk_file = os.path.join(data_root, \"trainval/resnet101_genome.th\")\n if not os.path.exists(chunk_file):\n print(\"Loading COCO files for Flickr30K for the first time...\")\n for infile in infiles:\n with open(infile, \"r+\") as tsv_in_file:\n reader = csv.DictReader(tsv_in_file, delimiter='\\t', fieldnames = FIELDNAMES)\n for item in tqdm(reader):\n item['image_id'] = int(item['image_id'])\n item['image_h'] = float(item['image_h'])\n item['image_w'] = float(item['image_w']) \n item['num_boxes'] = int(item['num_boxes'])\n for field in ['boxes', 'features']:\n # Hope the python2/3 b64decode does not mess things up.\n item[field] = np.frombuffer(base64.b64decode(item[field]), \n dtype=np.float32).reshape((item['num_boxes'],-1))\n item[\"features\"] = torch.from_numpy(item[\"features\"])\n item[\"boxes\"] = torch.from_numpy(item[\"boxes\"])\n chunk[item['image_id']] = item\n torch.save(chunk, chunk_file)\n else:\n chunk = torch.load(chunk_file)\n else:\n chunk = None\n\n copy_args = deepcopy(args)\n copy_args.split_name = \"train\"\n copy_args.annots_path = os.path.join(data_root, \"annotations/captions_{}2014.json\".format(copy_args.split_name))\n\n if args.image_feature_type == \"nlvr\":\n copy_args.chunk_path = os.path.join(data_root, \"coco_features_{}_150.th\".format(copy_args.split_name))\n\n copy_args.data_root = data_root\n copy_args.masks = masks\n\n trainset = cls(copy_args, chunk)\n trainset.is_train = True\n\n\n copy_args = deepcopy(args)\n copy_args.split_name = \"val\"\n copy_args.annots_path = os.path.join(data_root, \"annotations/captions_{}2014.json\".format(copy_args.split_name))\n if args.image_feature_type == \"nlvr\":\n copy_args.chunk_path = os.path.join(data_root, \"coco_features_{}_150.th\".format(copy_args.split_name))\n copy_args.data_root = data_root\n copy_args.masks = masks\n\n validationset = cls(copy_args, chunk)\n validationset.is_train = False\n\n\n\n if args.get(\"expand_coco\", False):\n # This is to expand the COCO train \n trainset.expanded = True\n trainset.train_size = len(trainset.items)\n\n trainset.items.extend(validationset.items)\n\n trainset.coco_val = validationset.coco\n\n if args.image_feature_type != \"r2c\" and args.image_feature_type != \"vqa_fix_100\" and args.image_feature_type != \"flickr\": # For NLVR, we pre-load features so we need to expand the chunk as well\n trainset.chunk_val = validationset.chunk\n\n imdb = np.load(os.path.join(data_root, \"data/imdb/imdb_minival2014.npy\"), allow_pickle = True)[1:]\n image_names_mini_val = set([i[\"image_name\"] + \".jpg\" for i in imdb])\n\n if args.get(\"exclude_minival\", False):\n trainset.items = [i for i 
in trainset.items if \"COCO_val2014_{:0>12d}.jpg\".format(i['image_id']) not in image_names_mini_val]\n\n validationset.items = [i for i in validationset.items if \"COCO_val2014_{:0>12d}.jpg\".format(i['image_id']) in image_names_mini_val]\n print(\"After expanding, train has {} items, val has {} items\".format(len(trainset.items), len(validationset.items)))\n\n testset = validationset # Testset will not be used so this is just a placeholder\n return trainset, validationset, testset\n\n @staticmethod\n def collate_fn(data):\n if isinstance(data[0], Instance):\n batch = Batch(data)\n td = batch.as_tensor_dict()\n return td\n else:\n images, instances = zip(*data)\n images = torch.stack(images, 0)\n\n batch = Batch(instances)\n td = batch.as_tensor_dict()\n td['box_mask'] = torch.all(td['boxes'] >= 0, -1).long()\n td['images'] = images\n return td\n" ]
[ [ "torch.all", "torch.load", "torch.from_numpy", "numpy.ones", "numpy.all", "numpy.concatenate", "numpy.row_stack", "torch.stack", "numpy.array", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
StatBiomed/BBMix
[ "180a6d74a50e91a7a876d452c02459a6ebeb9b29" ]
[ "bbmix/models/model_base.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"binomial_mix.\n\nChen Qiao: [email protected]\n\"\"\"\n\nimport numpy as np\n\nfrom .statistics import bic_criterion, entropy_criterion\n\n\nclass ModelBase:\n \"\"\"Base class of all the models\n\n This class encapsulated some common functions shared by all the derived models\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialization\n \"\"\"\n self.params = None\n self.losses = None\n self.model_scores = None\n\n def score_model(self, k, n, nll, probs):\n \"\"\"score model using BIC and ICL criterions\n\n Args:\n k (int): the number of estimated parameters\n n (int): the number of datapoints, i.e., sample size\n nll (float): negative log likelihood of the dataset given the model\n probs (np.array/np.ndarray): batch probabilistic distributions of discrete random variables (latent)\n\n Returns:\n dict: BIC and ICL scores of the given model on the dataset\n \"\"\"\n bic_score = bic_criterion(k, n, nll)\n entropy_score = entropy_criterion(probs)\n icl_score = bic_score + entropy_score\n self.model_scores = {\"BIC\": bic_score, \"ICL\": icl_score}\n return self.model_scores\n\n def _preprocess(self, data, pseudocount=0.1, min_n=10):\n \"\"\"Preprocess inputs\n\n Args:\n data (tuple of arrays): y, n: number of positive events and total number of trials respectively\n pseudocount (float) : add pseudocount if data is zero\n min_n (int): minimum number of samples for filtering\n\n Returns\n tuple of np.array\n \"\"\"\n y, n = data\n # y, n = y[n > min_n], n[n > min_n] # filter extremes\n if np.any(y == 0):\n y = y.astype(float)\n y[y == 0] = pseudocount\n return y, n\n" ]
[ [ "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
valohai/yolov3-tf2
[ "e98bd6fad30b7494cad561fa09bb49a70af5b69b" ]
[ "weights.py" ]
[ "import numpy as np\nfrom yolov3_tf2.models import YoloV3\nfrom yolov3_tf2.utils import load_darknet_weights\nimport tensorflow as tf\nimport valohai\n\nparams = {\n \"weights_num_classes\": 80,\n}\n\ninputs = {\n \"weights\": \"https://pjreddie.com/media/files/yolov3.weights\",\n}\n\nvalohai.prepare(step=\"weights\", default_parameters=params, default_inputs=inputs)\n\n\ndef main():\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(physical_devices) > 0:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n yolo = YoloV3(classes=valohai.parameters('weights_num_classes').value)\n yolo.summary()\n print('model created')\n\n load_darknet_weights(yolo, valohai.inputs('weights').path(), False)\n print('weights loaded')\n\n img = np.random.random((1, 320, 320, 3)).astype(np.float32)\n output = yolo(img)\n print('sanity check passed')\n\n path = valohai.outputs('model').path('model.tf')\n yolo.save_weights(path)\n print(f'weights saved {path}')\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.config.experimental.list_physical_devices", "numpy.random.random", "tensorflow.config.experimental.set_memory_growth" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhengxiawu/pytorch-cifar
[ "53f83002a6da425e0ac3ba27e835db2ab42de38f" ]
[ "models/nas_models.py" ]
[ "import torch\nimport torch.nn as nn\nfrom collections import namedtuple\nGenotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')\n\nOPS = {\n 'none': lambda C, stride, affine: Zero(stride),\n 'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),\n 'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),\n 'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),\n 'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),\n 'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),\n 'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),\n 'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),\n 'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),\n 'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C, C, (1, 7), stride=(1, stride),\n padding=(0, 3), bias=False),\n nn.Conv2d(C, C, (7, 1), stride=(stride, 1),\n padding=(3, 0), bias=False),\n nn.BatchNorm2d(C, affine=affine)\n ),\n}\n\n\nclass ReLUConvBN(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(ReLUConvBN, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride,\n padding=padding, bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass DilConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):\n super(SepConv, self).__init__()\n self.op = nn.Sequential(\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,\n padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_in, affine=affine),\n nn.ReLU(inplace=False),\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1,\n padding=padding, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:, :, ::self.stride, ::self.stride].mul(0.)\n\n\nclass FactorizedReduce(nn.Module):\n\n def __init__(self, C_in, C_out, affine=True):\n super(FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1,\n stride=2, padding=0, bias=False)\n self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1,\n stride=2, padding=0, bias=False)\n self.bn = 
nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.bn(out)\n return out\n\n\n# To make a fair comparision with other architectures, we delete\n# the droppath and AuxiliaryHead\nclass Cell(nn.Module):\n\n def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):\n super(Cell, self).__init__()\n # print(C_prev_prev, C_prev, C)\n\n if reduction_prev:\n self.preprocess0 = FactorizedReduce(C_prev_prev, C)\n else:\n self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)\n self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)\n\n if reduction:\n op_names, indices = zip(*genotype.reduce)\n concat = genotype.reduce_concat\n else:\n op_names, indices = zip(*genotype.normal)\n concat = genotype.normal_concat\n self._compile(C, op_names, indices, concat, reduction)\n\n def _compile(self, C, op_names, indices, concat, reduction):\n assert len(op_names) == len(indices)\n self._steps = len(op_names) // 2\n self._concat = concat\n self.multiplier = len(concat)\n\n self._ops = nn.ModuleList()\n for name, index in zip(op_names, indices):\n stride = 2 if reduction and index < 2 else 1\n op = OPS[name](C, stride, True)\n self._ops += [op]\n self._indices = indices\n\n def forward(self, s0, s1):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n for i in range(self._steps):\n h1 = states[self._indices[2*i]]\n h2 = states[self._indices[2*i+1]]\n op1 = self._ops[2*i]\n op2 = self._ops[2*i+1]\n h1 = op1(h1)\n h2 = op2(h2)\n s = h1 + h2\n states += [s]\n return torch.cat([states[i] for i in self._concat], dim=1)\n\n\nclass NetworkCIFAR(nn.Module):\n\n def __init__(self, C, num_classes, layers, genotype):\n super(NetworkCIFAR, self).__init__()\n self._layers = layers\n\n stem_multiplier = 3\n C_curr = stem_multiplier * C\n self.stem = nn.Sequential(\n nn.Conv2d(3, C_curr, 3, padding=1, bias=False),\n nn.BatchNorm2d(C_curr)\n )\n\n C_prev_prev, C_prev, C_curr = C_curr, C_curr, C\n self.cells = nn.ModuleList()\n reduction_prev = False\n for i in range(layers):\n if i in [layers // 3, 2 * layers // 3]:\n C_curr *= 2\n reduction = True\n else:\n reduction = False\n cell = Cell(genotype, C_prev_prev, C_prev,\n C_curr, reduction, reduction_prev)\n reduction_prev = reduction\n self.cells += [cell]\n C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr\n\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(C_prev, num_classes)\n\n def forward(self, input):\n s0 = s1 = self.stem(input)\n for i, cell in enumerate(self.cells):\n s0, s1 = s1, cell(s0, s1)\n out = self.global_pooling(s1)\n logits = self.classifier(out.view(out.size(0), -1))\n return logits\n\n\ndef NASNet():\n NASNet = Genotype(\n normal=[\n ('sep_conv_5x5', 1),\n ('sep_conv_3x3', 0),\n ('sep_conv_5x5', 0),\n ('sep_conv_3x3', 0),\n ('avg_pool_3x3', 1),\n ('skip_connect', 0),\n ('avg_pool_3x3', 0),\n ('avg_pool_3x3', 0),\n ('sep_conv_3x3', 1),\n ('skip_connect', 1),\n ],\n normal_concat=[2, 3, 4, 5, 6],\n reduce=[\n ('sep_conv_5x5', 1),\n ('sep_conv_7x7', 0),\n ('max_pool_3x3', 1),\n ('sep_conv_7x7', 0),\n ('avg_pool_3x3', 1),\n ('sep_conv_5x5', 0),\n ('skip_connect', 3),\n ('avg_pool_3x3', 2),\n ('sep_conv_3x3', 2),\n ('max_pool_3x3', 1),\n ],\n reduce_concat=[4, 5, 6],\n )\n net = NetworkCIFAR(36, 10, 20, NASNet)\n return net\n\n\ndef AmoebaNet():\n AmoebaNet = Genotype(\n normal=[\n ('avg_pool_3x3', 0),\n ('max_pool_3x3', 1),\n ('sep_conv_3x3', 0),\n ('sep_conv_5x5', 2),\n 
('sep_conv_3x3', 0),\n ('avg_pool_3x3', 3),\n ('sep_conv_3x3', 1),\n ('skip_connect', 1),\n ('skip_connect', 0),\n ('avg_pool_3x3', 1),\n ],\n normal_concat=[4, 5, 6],\n reduce=[\n ('avg_pool_3x3', 0),\n ('sep_conv_3x3', 1),\n ('max_pool_3x3', 0),\n ('sep_conv_7x7', 2),\n ('sep_conv_7x7', 0),\n ('avg_pool_3x3', 1),\n ('max_pool_3x3', 0),\n ('max_pool_3x3', 1),\n ('conv_7x1_1x7', 0),\n ('sep_conv_3x3', 5),\n ],\n reduce_concat=[3, 4, 6]\n )\n net = NetworkCIFAR(36, 10, 20, AmoebaNet)\n return net\n\n\ndef DARTS_V1():\n DARTS_V1 = Genotype(normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[\n 2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])\n net = NetworkCIFAR(36, 10, 20, DARTS_V1)\n return net\n\n\ndef DARTS_V2():\n DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[\n 2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])\n net = NetworkCIFAR(36, 10, 20, DARTS_V2)\n return net\n\n\ndef test():\n net = DARTS_V2()\n print(net)\n x = torch.randn(2, 3, 32, 32)\n y = net(x)\n print(y.shape)\n" ]
[ [ "torch.cat", "torch.randn", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VakhrameevaLiza/pytorch_segmentation_framework
[ "7df02ba5c575ed0ed082090f80eca4b421f0c98e" ]
[ "models/SegRevnet_1.py" ]
[ "import torch\nfrom torch import nn\nfrom reversible_blocks.revop import ReversibleBlock\n\n\nclass ConvBnReLu(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1,\n has_bn=False, has_relu=True):\n super(ConvBnReLu, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, padding=padding, dilation=dilation)\n\n if has_bn:\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n else:\n self.bn = None\n\n if has_relu:\n self.relu = nn.ReLU(inplace=True)\n else:\n self.relu = None\n\n def forward(self, x):\n\n x = self.conv(x)\n if self.bn:\n x = self.bn(x)\n if self.relu:\n x = self.relu(x)\n return x\n\n\ndef make_block(channels, num_layers_in_block):\n layers = []\n\n for i in range(num_layers_in_block - 1):\n layers.append(ConvBnReLu(channels, channels, has_bn=True, has_relu=True))\n layers.append(ConvBnReLu(channels, channels, has_bn=True, has_relu=False))\n\n return nn.Sequential(*layers)\n\n\nclass SegRevnet(nn.Module):\n def __init__(self, num_classes, in_channels=3, channels=30, num_layers_in_block=2, num_blocks=5):\n super(SegRevnet, self).__init__()\n self.name = 'Revnet'\n self.initial_laeyr= ConvBnReLu(in_channels=in_channels, out_channels=channels,\n kernel_size=1, has_bn=True, has_relu=True, padding=0)\n # F = ConvBnReLu(channels // 2, channels // 2)\n # G = ConvBnReLu(channels // 2, channels // 2)\n # self.Y = ReversibleBlock(F, G)\n\n main = []\n\n for i in range(num_blocks):\n F = make_block(channels // 2, num_layers_in_block)\n G = make_block(channels // 2, num_layers_in_block)\n Y = ReversibleBlock(F, G)\n main.append(Y)\n main.append(nn.ReLU())\n\n self.main = nn.Sequential(*main)\n self.final_layer = ConvBnReLu(in_channels=channels, out_channels=num_classes,\n kernel_size=1, has_bn=False, has_relu=False, padding=0)\n\n def forward(self, x):\n x = self.initial_laeyr(x)\n x = self.main(x)\n x = self.final_layer(x)\n # x = self.Y(x)\n # x = self.final_layer(x)\n return x\n\n\nif __name__ == \"__main__\":\n net = SegRevnet(in_channels=3, channels=10,\n num_layers_in_block=2, num_blocks=1,\n num_classes=5)\n\n inp = torch.rand((1, 3, 512, 512))\n conv = nn.Conv2d(in_channels=3, out_channels=5,\n kernel_size=3, padding=1, dilation=1)\n out = net(inp)\n print(out.shape)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.rand", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kyuhyoung/tensortrade
[ "4dae999047f7d3c846bd4a7df9162fe91dfcac53" ]
[ "tensortrade/wallets/ledger.py" ]
[ "\nimport pandas as pd\n\nfrom typing import List\nfrom collections import namedtuple\n\n\nTransaction = namedtuple('Transaction', [\n 'poid',\n 'step',\n 'source',\n 'target',\n 'memo',\n 'amount',\n 'free',\n 'locked',\n 'locked_poid'\n])\n\n\nclass Ledger:\n\n def __init__(self):\n self._transactions = []\n\n @property\n def transactions(self) -> List['Transaction']:\n return self._transactions\n\n def commit(self, wallet: 'Wallet', quantity: 'Quantity', source: str, target: str, memo: str):\n\n poid = quantity.path_id\n locked_poid_balance = None if poid not in wallet.locked.keys() else wallet.locked[poid]\n\n transaction = Transaction(\n poid,\n wallet.exchange.clock.step,\n source,\n target,\n memo,\n quantity,\n wallet.balance,\n wallet.locked_balance,\n locked_poid_balance\n )\n\n self._transactions += [transaction]\n\n def as_frame(self, sort_by_order_seq=False) -> pd.DataFrame:\n\n if not sort_by_order_seq:\n return pd.DataFrame(self.transactions)\n\n df = pd.DataFrame(self.transactions)\n frames = []\n for poid in df.poid.unique():\n frames += [df.loc[df.poid == poid, :]]\n\n return pd.concat(frames, ignore_index=True, axis=0)\n\n def reset(self):\n self._transactions = []\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
bubble-07/AnimeReal
[ "b12193f10d231ee85a2a86ec2defeca0b5a4e240" ]
[ "OldAnimeReal/load_cmu.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n\nfrom camera import Camera\n\nimport json\nimport tensorflow as tf\n\n#Module responsible for loading file handles from CMU-like datasets\n\n#From a filename of the form \"body3DScene_{frame number}.json\" as in the CMU dataset,\n#this gets the frame number\ndef get_frame_number_from_json_filename(fname):\n return fname[12:-5]\n\ndef get_frame_number_from_jpg_filename(fname):\n return fname[6:-4]\n\n#Given a directory for a CMU-like dataset, this yields a list of tuples\n#in the file handle format\n#TODO: Print progress or something?\ndef load_handles(DATASET_DIR):\n #First, establish a list of paths to each sequence in the dataset directory\n sequenceNames = os.listdir(DATASET_DIR)\n\n #Parallel list of paths to each sequence\n sequencePaths = map(lambda y: os.path.join(DATASET_DIR, y), os.listdir(DATASET_DIR))\n\n #Then, for each sequence, \n #look into the \"vgaImgs\" directory to figure out which cameras we'll care about\n cameraNames = map(lambda y: os.listdir(os.path.join(y, \"vgaImgs\")), sequencePaths)\n\n cameras = []\n\n #Most of the data-reading code shamelessly taken from\n #https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox/blob/master/python/example.ipynb\n #Remove once you have your own dataset\n #With the cameras names in hand, load the calibration_{sequence}.json and find\n #camera parameters for each\n for sequenceName, cameraList in zip(sequenceNames, cameraNames):\n with open(os.path.join(DATASET_DIR, sequenceName + '/calibration_{0}.json'.format(sequenceName))) as calib_file:\n calib = json.load(calib_file)\n #Dictionary of all cameras for the sequence, mapping from their names to their other properties\n seq_cams = {cam['name']:cam for cam in calib['cameras']}\n #Filter out to only those cams for which we have VGA images, and for each one, find their parameters\n cam_props = [seq_cams[cam] for cam in cameraList]\n seq_cam_objs = map(lambda cam: Camera([np.array(cam['K']), np.array(cam['distCoef']), \n np.array(cam['R']), np.array(cam['t'])])\n , cam_props)\n cameras.append(seq_cam_objs)\n\n #This list of filepath tuples is in the format expected by load_cache.handle_to_cache\n filepath_tuples = []\n\n #Okay, great, now that we have the sequences and their cameras all listed out, we need to come up with\n #image_filepath, annotation_filepath, cam_parameter triples. 
To do this, we'll loop over the sequences,\n #cameras within each sequence, and timepoints within each sequence.\n for sequenceName, sequencePath, cameraNames, cameraList in zip(sequenceNames, sequencePaths, cameraNames, cameras):\n jsonDir = os.path.join(sequencePath, \"vgaPose3d_stage1/\")\n jsonFiles = os.listdir(jsonDir)\n\n #Find the collection of all frame numbers for which there is keypoint data\n keypoint_frames = map(get_frame_number_from_json_filename, jsonFiles)\n\n for cameraName, camera in zip(cameraNames, cameraList):\n imgDir = os.path.join(sequencePath, \"vgaImgs/\", cameraName)\n imgFiles = os.listdir(imgDir)\n\n #Now, find the collection of all frame numbers for which there are images\n image_frames = map(get_frame_number_from_jpg_filename, imgFiles)\n\n #Valid frames are those for which there's an image and keypoint data\n valid_frames = set(keypoint_frames).intersection(set(image_frames))\n\n for frameName in valid_frames:\n img_filepath = os.path.join(imgDir, cameraName + \"_\" + frameName + \".jpg\")\n anno_filepath = os.path.join(jsonDir, \"body3DScene_\" + frameName + \".json\")\n cam_parameters = camera.to_flat_rep()\n filepath_tuples.append((img_filepath, anno_filepath, cam_parameters))\n \n return filepath_tuples\n\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
i2cy/lewdity_CNN
[ "f616a040c90069b0bd36501794abf1f6ab6ff55b" ]
[ "lewdity_API.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: i2cy([email protected])\n# Filename: lewdity_API.py\n# Created on: 2020/9/13\n\nimport psutil\nimport os\nimport time\nimport random\n\n# *屏蔽tensorflow警告信息输出\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\n\n# *RTX硬件兼容性修改配置\nif len(tf.config.list_physical_devices('GPU')) > 0:\n tf.config.experimental.set_memory_growth(\n tf.config.list_physical_devices('GPU')[0], True)\n\nglobal typeClassificationCNN\nglobal PaintingClassificationCNN\nglobal PhotoClassificationCNN\n\n\nTYPE_MODEL = \"./Models/pic_classification_model.h5\"\nPAINTING_MODEL = './Models/NSFW_painting_model.h5'\nPHOTO_MODEL = './Models/NSFW_photo_model.h5'\n\nLEWD_THRESHOLD = 0.997\nSEXY_THRESHOLD = 0.99\nLEWDITY_THRESHOLD = 0.02\nCROP_TIMES = 40\nBATCH_SIZE = 1\n\n\nclass customNN:\n def __init__(self, model_name=\"MLP\"):\n self.name = model_name\n self.train_db = None\n self.test_db = None\n self.model = None\n self.train_size = 0\n self.test_size = 0\n self.data_shape = []\n self.batch_size = 8\n self.train_history = None\n self.tensorboard_enable = False\n self.log_root = \"./tensorflow_log\"\n self.callbacks = []\n self.callback_file_writer = None\n self.base_model = None\n self.epoch = 0\n self.model_file = \"{}.h5\".format(self.name)\n self.autosave = False\n self.output_counts = 0\n\n def _get_freeRAM(self):\n free_ram = psutil.virtual_memory().free\n return free_ram\n\n def _init_tensorboard(self):\n log_dir = os.path.join(self.log_root,\n time.strftime(\"%Y%m%d-%H:%M:%S_\") +\n self.name\n )\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir,\n histogram_freq=1)\n self.callbacks.append(tensorboard_callback)\n self.callback_file_writer = tf.summary.create_file_writer(os.path.join(\n log_dir, \"train\"))\n self.callback_file_writer.set_as_default()\n\n def load_dataset(self, trainset, testset=None,\n mapFunc=None, testRate=0.15, batchSize=8,\n shufflePercentage=0.3, mapFuncTest=None,\n mapFuncLabel=None, mapFuncLabelTest=None): # dataset has to be formated tensors: (data, labels)\n self.batch_size = batchSize\n if testset == None:\n # randomly split trainset and testset\n datasets = [ele for ele in trainset]\n train_size = len(datasets[0]) - int(len(datasets[0]) * testRate)\n all_indexs = list(range(len(datasets[0])))\n random.shuffle(all_indexs)\n features = []\n labels = []\n if (type(datasets[1][0]) in (type([0]), type((0,)))) and len(datasets[1][0]) == len(all_indexs):\n for i in enumerate(datasets[1]):\n labels.append([])\n self.output_counts += 1\n for index in all_indexs[:train_size]:\n data = datasets[0][index]\n features.append(data)\n for i, l in enumerate(datasets[1]):\n label = datasets[1][i][index]\n labels[i].append(label)\n if type(labels[0]) == type([0]):\n labels = tuple(labels)\n else:\n self.output_counts += 1\n for index in all_indexs[:train_size]:\n features.append(datasets[0][index])\n labels.append(datasets[1][index])\n trainset = (features, labels)\n features = []\n labels = []\n if (type(datasets[1][0]) in (type([0]), type((0,)))) and len(datasets[1][0]) == len(all_indexs):\n for i in enumerate(datasets[1]):\n labels.append([])\n for index in all_indexs[train_size:]:\n data = datasets[0][index]\n features.append(data)\n for i, l in enumerate(datasets[1]):\n label = datasets[1][i][index]\n labels[i].append(label)\n if type(labels[0]) == type([0]):\n labels = tuple(labels)\n else:\n for index in all_indexs[train_size:]:\n features.append(datasets[0][index])\n labels.append(datasets[1][index])\n testset = 
(features, labels)\n\n self.data_shape = tf.constant(trainset[0][0]).shape\n self.train_size = len(trainset[0])\n self.test_size = len(testset[0])\n\n print(\"trainset sample number: {}\".format(str(self.train_size)))\n print(\"testset sample number: {}\".format(str(self.test_size)))\n\n if mapFunc == None:\n if mapFuncLabel == None:\n train_db = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(trainset[0]),\n tf.data.Dataset.from_tensor_slices(trainset[1])))\n test_db = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(testset[0]),\n tf.data.Dataset.from_tensor_slices(testset[1])))\n else:\n if mapFuncLabelTest == None:\n mapFuncLabelTest = mapFuncLabel\n train_db = tf.data.Dataset.zip((\n tf.data.Dataset.from_tensor_slices(trainset[0]), tf.data.Dataset.from_tensor_slices(\n trainset[1]).map(mapFuncLabel, num_parallel_calls=tf.data.experimental.AUTOTUNE)))\n\n test_db = tf.data.Dataset.zip((\n tf.data.Dataset.from_tensor_slices(testset[0]), tf.data.Dataset.from_tensor_slices(\n testset[1]).map(mapFuncLabelTest, num_parallel_calls=tf.data.experimental.AUTOTUNE)))\n\n else:\n if mapFuncTest == None:\n mapFuncTest = mapFunc\n self.data_shape = mapFunc(trainset[0][0]).shape\n train_db = tf.data.Dataset.from_tensor_slices(trainset[0])\n train_db = train_db.map(mapFunc, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n test_db = tf.data.Dataset.from_tensor_slices(testset[0])\n test_db = test_db.map(mapFuncTest)\n\n if mapFuncLabel == None:\n train_db = tf.data.Dataset.zip((\n train_db, tf.data.Dataset.from_tensor_slices(trainset[1])))\n test_db = tf.data.Dataset.zip((\n test_db, tf.data.Dataset.from_tensor_slices(testset[1])))\n else:\n if mapFuncLabelTest == None:\n mapFuncLabelTest = mapFuncLabel\n train_db = tf.data.Dataset.zip((\n train_db, tf.data.Dataset.from_tensor_slices(\n trainset[1]).map(mapFuncLabel, num_parallel_calls=tf.data.experimental.AUTOTUNE)))\n\n test_db = tf.data.Dataset.zip((\n train_db, tf.data.Dataset.from_tensor_slices(\n testset[1]).map(mapFuncLabelTest, num_parallel_calls=tf.data.experimental.AUTOTUNE)))\n\n datasize = 1\n for size in self.data_shape:\n datasize *= size\n freeRAM = int(self._get_freeRAM() * shufflePercentage)\n print(\"free RAM size: {} MB\".format(str(freeRAM // 1048576)))\n\n shuffle_MaxbuffSize = int((freeRAM * 0.8) // datasize)\n prefetch_buffSize = int((freeRAM * 0.2) // (datasize * self.batch_size))\n\n print(\"automatically allocated data buffer size: {} MB\".format(str(shuffle_MaxbuffSize * datasize // 1048576)))\n\n shuffle_buffSize = shuffle_MaxbuffSize\n if shuffle_MaxbuffSize > self.train_size:\n shuffle_buffSize = self.train_size\n train_db = train_db.shuffle(shuffle_buffSize).repeat().batch(self.batch_size).prefetch(prefetch_buffSize)\n shuffle_buffSize = shuffle_MaxbuffSize\n if shuffle_MaxbuffSize > self.test_size:\n shuffle_buffSize = self.test_size\n test_db = test_db.shuffle(shuffle_buffSize).repeat().batch(self.batch_size).prefetch(prefetch_buffSize)\n\n self.train_db = train_db\n self.test_db = test_db\n\n def set_model_file(self, path):\n self.model_file = path\n\n def enable_tensorboard(self, log_dir_root=\"./tensorflow_log\"):\n self.log_root = log_dir_root\n self.tensorboard_enable = True\n\n def enable_checkpointAutosave(self, path=None):\n if path != None:\n self.model_file = path\n checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=self.model_file)\n self.add_callback(checkpoint)\n self.autosave = True\n\n def add_callback(self, callback_func): # all callbacks added will be reset after training\n 
self.callbacks.append(callback_func)\n\n def init_model(self): # 神经网络模型\n pass\n\n def postProc_model(self): # 模型后期处理(微调)\n model = self.model\n\n fine_tune_at = -33\n\n self.base_model.trainable = True\n\n for layer in self.base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\n model.compile(optimizer=\"adam\",\n loss=\"binary_crossentropy\", # 2分类问题\n metrics=[\"acc\"]\n )\n\n self.model = model\n print(model.summary())\n\n def compile_model(self, learningRate=0.0001):\n self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learningRate),\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"acc\"]\n )\n\n def save_model(self, path=None):\n if path != None:\n self.model_file = path\n self.model.save(self.model_file)\n\n def load_model(self, path=None):\n if path != None:\n self.model_file = path\n self.model = tf.keras.models.load_model(self.model_file, compile=True)\n self.compile_model()\n\n def train(self, epochs=100, verbose=1, validation=True):\n if self.tensorboard_enable and self.epoch == 0:\n self._init_tensorboard()\n try:\n if validation:\n self.train_history = self.model.fit(self.train_db,\n epochs=epochs,\n initial_epoch=self.epoch,\n steps_per_epoch=self.train_size // self.batch_size,\n validation_data=self.test_db,\n validation_steps=self.test_size // self.batch_size,\n callbacks=self.callbacks,\n verbose=verbose\n )\n else:\n self.train_history = self.model.fit(self.train_db,\n epochs=epochs,\n initial_epoch=self.epoch,\n steps_per_epoch=self.train_size // self.batch_size,\n callbacks=self.callbacks,\n verbose=verbose\n )\n self.epoch += epochs\n except KeyboardInterrupt:\n print(\"\\ntraining process stopped manually\")\n if self.autosave:\n self.load_model(self.model_file)\n\n def evaluate(self):\n print(\"evaluating model with test datasets...\")\n\n acc = self.model.evaluate(self.test_db, return_dict=True,\n steps=self.test_size // self.batch_size)\n\n return acc\n\n def predict(self, data):\n res = self.model.predict(data)\n return res\n\n\ndef read_preprocess_image(img_path):\n img = tf.io.read_file(img_path)\n if tf.image.is_jpeg(img):\n img = tf.image.decode_jpeg(img, channels=3)\n else:\n img = tf.image.decode_png(img, channels=3)\n if img.shape[0] == None:\n print(img.shape)\n print(img)\n img = tf.image.resize(img, [300, 300])\n else:\n if img.shape[0] <= 256 and img.shape[1] <= 256:\n img = tf.image.resize(img, [300, 300])\n else:\n if img.shape[0] > img.shape[1]:\n rate = 300 / img.shape[1]\n img = tf.image.resize(img, [int(img.shape[0] * rate), 300])\n else:\n rate = 300 / img.shape[0]\n img = tf.image.resize(img, [300, int(img.shape[1] * rate)])\n imgs = []\n for i in range(BATCH_SIZE):\n img = tf.image.random_crop(img, [256, 256, 3], seed=int(time.time()*1000))\n img = tf.cast(img, tf.float32)\n img = img / 127.5 - 1 # 图像归一化,使得输入数据在(-1,1)区间范围内\n imgs.append(img)\n imgs = tf.convert_to_tensor(imgs)\n return imgs\n\n\ndef classificateImage(filename):\n img = read_preprocess_image(filename)\n sample_number = img.shape[0]\n\n lewd = False # 是否为涩图\n sexy = False # 是否为擦边球\n picType = 0 # 图片类型 (0 漫画 1 照片)\n sexility = 0 # 性感检出率\n sexyMax = 0 # 神经网络性感度最大输出\n lewdity = 0 # 色情检出率\n lewdMax = 0 # 神经网络色情度最大输出\n\n\n # 判断图片是漫画还是照片 (0 漫画 1 照片)\n picType = TypeClassificationCNN.predict(img)\n picType = picType.argmax(axis=1).mean()\n\n if picType < 0.5:\n # 漫画特化神经网络鉴黄 (0 正常 1 色情)\n lewdityRes = PaintingClassificationCNN.predict(img)\n for i in lewdityRes:\n if i[1] > LEWD_THRESHOLD:\n lewd = True\n lewdity += 1\n if i[1] > lewdMax:\n lewdMax = 
i[1]\n\n else:\n # 照片特化神经网络鉴黄 (0 正常 1 色情 2 擦边球)\n lewdityRes = PhotoClassificationCNN.predict(img)\n for i in lewdityRes:\n if i[1] > LEWD_THRESHOLD:\n lewd = True\n lewdity += 1\n if i[1] > lewdMax:\n lewdMax = i[1]\n if i[2] > SEXY_THRESHOLD:\n sexy = True\n sexility += 1\n if i[2] > sexyMax:\n sexyMax = i[2]\n\n sexility /= sample_number\n lewdity /= sample_number\n\n return {\"lewd\": lewd,\n \"sexy\": sexy,\n \"lewdity\": lewdity,\n \"sexility\": sexility,\n \"lewdMax\": lewdMax,\n \"sexyMax\": sexyMax,\n \"picType\": picType}\n\n\ndef classificateAPI(filename): # API入口\n timetick = time.time()\n if len(filename) > 3 and filename[-3:] == \"gif\":\n return {\"lewd\": False}\n\n lewd = False\n sexy = False\n lewdity = 0\n sexility = 0\n lewdMax = 0\n sexyMax = 0\n picType = 0\n\n for i in range(CROP_TIMES):\n res = classificateImage(filename)\n if res[\"lewd\"] == True:\n lewd = True\n if res[\"sexy\"] == True:\n sexy = True\n lewdity += res[\"lewdity\"]\n sexility += res[\"sexility\"]\n if lewdMax < res[\"lewdMax\"]:\n lewdMax = res[\"lewdMax\"]\n if sexyMax < res[\"sexyMax\"]:\n sexyMax = res[\"sexyMax\"]\n picType += res[\"picType\"]\n\n lewdity /= CROP_TIMES\n sexility /= CROP_TIMES\n picType /= CROP_TIMES\n\n if picType < 0.5:\n sexy = False\n\n if lewdity < LEWDITY_THRESHOLD:\n lewd = False\n\n return {\"lewd\": lewd,\n \"sexy\": sexy,\n \"lewdity\": lewdity,\n \"sexility\": sexility,\n \"lewdMax\": lewdMax,\n \"sexyMax\": sexyMax,\n \"picType\": picType,\n \"timeSpend\": time.time()-timetick}\n\n\n\n\n\ndef init():\n global TypeClassificationCNN\n global PaintingClassificationCNN\n global PhotoClassificationCNN\n\n print(\"initializing lewdity API...\")\n\n # 初始化图片类型鉴定神经网络(底层)\n TypeClassificationCNN = customNN(\"Pic_Type_Classification\")\n try:\n TypeClassificationCNN.load_model(TYPE_MODEL)\n print(\"loaded model \\\"{}\\\"\".format(TYPE_MODEL))\n except Exception as err:\n print(\"failed to load model,\", err)\n\n # 初始化漫画鉴黄神经网络(顶层分支)\n PaintingClassificationCNN = customNN(\"Painting_Classification\")\n try:\n PaintingClassificationCNN.load_model(PAINTING_MODEL)\n print(\"loaded model \\\"{}\\\"\".format(PAINTING_MODEL))\n except Exception as err:\n print(\"failed to load model,\", err)\n\n\n # 初始化照片鉴黄神经网络(顶层分支)\n PhotoClassificationCNN = customNN(\"Photo_Classification\")\n try:\n PhotoClassificationCNN.load_model(PHOTO_MODEL)\n print(\"loaded model \\\"{}\\\"\".format(PHOTO_MODEL))\n except Exception as err:\n print(\"failed to load model,\", err)\n\n print(\"lewdity API initializied\")\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\nelse:\n init()" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.models.load_model", "tensorflow.constant", "tensorflow.image.decode_png", "tensorflow.cast", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.image.is_jpeg", "tensorflow.config.list_physical_devices", "tensorflow.image.resize", "tensorflow.keras.optimizers.Adam", "tensorflow.io.read_file", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
luuknieuwdorp/gpt2-discord-bot
[ "ff2d04741522280f28b14ccd59e9f8b8a4e5f65b" ]
[ "src/chatbot.py" ]
[ "import json\nimport os\nimport model, sample, encoder\nimport numpy as np\nimport tensorflow as tf\n\n# import gpt.model, gpt.sample, gpt.encoder\n\nmodel_name='dojo10k'\nbatch_size = 1\nseed = None\nnsamples=1\nlength=30\ntemperature=0.7\ntop_k=40\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\nenc = encoder.get_encoder(model_name)\nhparams = model.default_hparams()\nwith open(os.path.join('../models', model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\nif length is None:\n length = hparams.n_ctx // 2\nelif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\nwith tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [1, None])\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=context,\n batch_size=1,\n temperature=temperature, top_k=top_k\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join('../models', model_name))\n saver.restore(sess, ckpt)\n\n raw_text = \"hello sir\"\n context_tokens = enc.encode(raw_text)\n generated = 0\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(1)]\n })[:, len(context_tokens):]\n generated += 1\n text = enc.decode(out[0])\n print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n print(text)\n print(\"=\" * 80)\n" ]
[ [ "tensorflow.Graph", "numpy.random.seed", "tensorflow.placeholder", "tensorflow.set_random_seed", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
csaluigm/FinRL-Library
[ "396736443f1abb0088d678c85c63fbce0e9bbd77" ]
[ "finrl/autotrain/training.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\n\nmatplotlib.use(\"Agg\")\nimport datetime\n\nfrom finrl.config import config\nfrom finrl.marketdata.yahoodownloader import YahooDownloader\nfrom finrl.preprocessing.preprocessors import FeatureEngineer\nfrom finrl.preprocessing.data import data_split\nfrom finrl.env.env_stocktrading import StockTradingEnv\nfrom finrl.model.models import DRLAgent\nfrom finrl.trade.backtest import BackTestStats\n\n\ndef train_one():\n \"\"\"\n train an agent\n \"\"\"\n print(\"==============Start Fetching Data===========\")\n df = YahooDownloader(\n start_date=config.START_DATE,\n end_date=config.END_DATE,\n ticker_list=config.DOW_30_TICKER,\n ).fetch_data()\n print(\"==============Start Feature Engineering===========\")\n fe = FeatureEngineer(\n use_technical_indicator=True,\n tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,\n use_turbulence=True,\n user_defined_feature=False,\n )\n\n processed = fe.preprocess_data(df)\n\n # Training & Trade data split\n train = data_split(processed, config.START_DATE, config.START_TRADE_DATE)\n trade = data_split(processed, config.START_TRADE_DATE, config.END_DATE)\n\n # data normalization\n # feaures_list = list(train.columns)\n # feaures_list.remove('date')\n # feaures_list.remove('tic')\n # feaures_list.remove('close')\n # print(feaures_list)\n # data_normaliser = preprocessing.StandardScaler()\n # train[feaures_list] = data_normaliser.fit_transform(train[feaures_list])\n # trade[feaures_list] = data_normaliser.fit_transform(trade[feaures_list])\n\n # calculate state action space\n stock_dimension = len(train.tic.unique())\n state_space = (\n 1\n + 2 * stock_dimension\n + len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension\n )\n\n env_kwargs = {\n \"hmax\": 100, \n \"initial_amount\": 1000000, \n \"buy_cost_pct\": 0.001, \n \"sell_cost_pct\": 0.001, \n \"state_space\": state_space, \n \"stock_dim\": stock_dimension, \n \"tech_indicator_list\": config.TECHNICAL_INDICATORS_LIST, \n \"action_space\": stock_dimension, \n \"reward_scaling\": 1e-4\n }\n\n e_train_gym = StockTradingEnv(df=train, **env_kwargs)\n\n e_trade_gym = StockTradingEnv(df=trade, turbulence_threshold=250, **env_kwargs)\n env_train, _ = e_train_gym.get_sb_env()\n env_trade, obs_trade = e_trade_gym.get_sb_env()\n\n agent = DRLAgent(env=env_train)\n\n print(\"==============Model Training===========\")\n now = datetime.datetime.now().strftime(\"%Y%m%d-%Hh%M\")\n\n model_sac = agent.get_model(\"sac\")\n trained_sac = agent.train_model(\n model=model_sac, tb_log_name=\"sac\", total_timesteps=80000\n )\n\n print(\"==============Start Trading===========\")\n df_account_value, df_actions = DRLAgent.DRL_prediction(\n model=trained_sac, test_data=trade, test_env=env_trade, test_obs=obs_trade\n )\n df_account_value.to_csv(\n \"./\" + config.RESULTS_DIR + \"/df_account_value_\" + now + \".csv\"\n )\n df_actions.to_csv(\"./\" + config.RESULTS_DIR + \"/df_actions_\" + now + \".csv\")\n\n print(\"==============Get Backtest Results===========\")\n perf_stats_all = BackTestStats(df_account_value)\n perf_stats_all = pd.DataFrame(perf_stats_all)\n perf_stats_all.to_csv(\"./\" + config.RESULTS_DIR + \"/perf_stats_all_\" + now + \".csv\")\n" ]
[ [ "matplotlib.use", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Zozman/mindmeld
[ "d86cc823c9b36dbafc8fc8f8ea04085ca1ffdeb5" ]
[ "mindmeld/models/nn_utils/classification.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nBase for custom modules that are developed on top of nn layers that can do\nsequence or token classification\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport shutil\nimport uuid\nfrom abc import abstractmethod\nfrom itertools import chain\nfrom typing import Dict, Union, List, Tuple\n\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\n\nfrom .helpers import (\n BatchData,\n get_disk_space_of_model,\n get_num_weights_of_model,\n get_default_params,\n TokenizerType,\n EmbedderType,\n ValidationMetricType,\n TRAIN_DEV_SPLIT_SEED,\n LABEL_PAD_TOKEN_IDX,\n DEFAULT_EMB_DIM,\n DEFAULT_TOKENIZER\n)\nfrom .input_encoders import InputEncoderFactory\nfrom .._util import _get_module_or_attr\nfrom ..containers import GloVeEmbeddingsContainer\nfrom ...core import Bunch\nfrom ...path import USER_CONFIG_DIR\n\ntry:\n import torch\n import torch.nn as nn\n\n nn_module = _get_module_or_attr(\"torch.nn\", \"Module\")\nexcept ImportError:\n nn_module = object\n pass\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseClassification(nn_module):\n \"\"\"\n A base class for sequence & token classification using deep neural nets. Both the classification\n submodules have a common fit() method defined in this base class, which also drives the\n training of pytorch based deep nets. The net's computational graph is defined only when the\n fit() method is called. 
This base class also holds few common utility methods and\n further defines the skeleton of the children classes through abstract methods.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n self.name = self.__class__.__name__\n self.params = Bunch()\n self.params[\"name\"] = self.name\n self.encoder = None\n\n self.ready = False # True when .fit() is called or loaded from a checkpoint, else False\n self.dirty = False # True when the model weights aren't saved to disk yet, else False\n\n self.out_dim = float('-inf')\n\n def __repr__(self):\n return f\"<{self.name}> ready:{self.ready} dirty:{self.dirty}\"\n\n def get_default_params(self) -> Dict:\n return get_default_params(self.__class__.__name__)\n\n def log_and_return_model_info(self, verbose: bool = False) -> str:\n \"\"\"\n Logs and returns the details of the underlying torch.nn model, such as occupying disk space\n when dumped, number of parameters, the device on which the model is placed, etc.\n\n Args:\n verbose (bool): Determines the amount of information to be logged and returned.\n \"\"\"\n msg = f\"{self.name} \" \\\n f\"ready:{self.ready} dirty:{self.dirty} device:{self.params.device} \" \\\n f\"\\n\\tNumber of weights (trainable, all):{get_num_weights_of_model(self)} \"\n verbose_msg = msg + (\n f\"\\n\\tDisk Size (in MB): {get_disk_space_of_model(self):.4f} \" if verbose else \"\"\n )\n logger.info(verbose_msg)\n return msg\n\n def to_device(self, batch_data: BatchData) -> BatchData:\n \"\"\"\n Places pytorch tensors on the device configured through the params\n\n Args:\n batch_data (BatchData): A BatchData object consisting different tensor objects\n \"\"\"\n for k, v in batch_data.items():\n if v is not None and isinstance(v, torch.Tensor):\n batch_data[k] = v.to(self.params.device)\n elif isinstance(v, list):\n batch_data[k] = [\n vv.to(self.params.device) if isinstance(vv, torch.Tensor) else vv\n for vv in v\n ]\n elif isinstance(v, dict):\n batch_data[k] = self.to_device(batch_data[k])\n return batch_data\n\n # pylint: disable=too-many-locals\n def fit(self, examples: List[str], labels: Union[List[int], List[List[int]]], **params):\n \"\"\"\n Trains the underlying neural model on the inputted data and finally retains the best scored\n model among all iterations.\n\n Because of possibly large sized neural models, instead of retaining a copy of best set of\n model weights on RAM, it is advisable to dump them in a temporary folder and upon completing\n the training process, load the best checkpoint weights.\n\n Args:\n examples (List[str]): A list of text strings that will be used for model training and\n validation\n labels (Union[List[int], List[List[int]]]): A list of labels passed in as integers\n corresponding to the examples. The encoded labels must have values between 0 and\n n_classes-1 -- one label per example in case of sequence classification and a\n sequence of labels per example in case of token classification\n \"\"\"\n\n if self.ready:\n msg = \"The model is already fitted or is loaded from a file. 
Aborting re-fitting.\"\n logger.error(msg)\n\n # obtain and validate all parameters required to fit the model\n params = {\n **self.get_default_params(),\n **params # overwrite keys of default params that are passed-in\n }\n params = self._validate_and_update_params(**params)\n\n # update params upon preparing encoder and embedder\n params = self._prepare_input_encoder(examples, **params)\n params = self._prepare_embedder(**params)\n\n # update number of labels in params upon identifying the unique labels\n try:\n # labels for sequence classification are of type List[int] whereas for token\n # classification they are List[List[int]]; this try-except tries to obtain the number\n # of unique label strings for the purpose of classification\n num_labels = len(set(labels))\n except TypeError:\n # error will be raised in case of token classification as `set(labels)` attempts to\n # find a set of lists\n num_labels = len(set(chain.from_iterable(labels)))\n params.update({\"num_labels\": num_labels})\n\n # update self.params which will be used throughout the following modeling code\n self.params.update(params)\n\n # init the graph and move model to device, inputs are moved to device on-the-go\n self._init_graph()\n self.to(self.params.device)\n\n # dumping weights during training process into a temp folder instead of keeping in\n # memory to reduce memory usage\n temp_folder = os.path.join(USER_CONFIG_DIR, \"tmp\", \"pytorch_models\", str(uuid.uuid4()))\n temp_weights_save_path = os.path.join(temp_folder, \"pytorch_model.bin\")\n os.makedirs(temp_folder, exist_ok=True)\n\n # split input data into train & dev splits, and get data loaders\n train_examples, dev_examples, train_labels, dev_labels = train_test_split(\n examples, labels, test_size=self.params.dev_split_ratio,\n random_state=TRAIN_DEV_SPLIT_SEED\n )\n\n # create an optimizer and attach all model params to it\n num_training_steps = int(\n len(train_examples) / self.params.batch_size / self.params.gradient_accumulation_steps *\n self.params.number_of_epochs\n )\n optimizer, scheduler = self._create_optimizer_and_scheduler(num_training_steps)\n\n # set verbosity boolean\n _verbose = (\n logger.getEffectiveLevel() == logging.INFO or\n logger.getEffectiveLevel() == logging.DEBUG\n )\n self.log_and_return_model_info(_verbose)\n\n # training w/ validation\n best_dev_score, best_dev_epoch = -np.inf, -1\n msg = f\"Beginning to train for {self.params.number_of_epochs} number of epochs\"\n logger.info(msg)\n if self.params.number_of_epochs < 1:\n raise ValueError(\"Param 'number_of_epochs' must be a positive integer greater than 0\")\n patience_counter = 0\n for epoch in range(1, self.params.number_of_epochs + 1):\n # patience before terminating due to no dev score improvements\n if patience_counter >= self.params.patience:\n msg = f\"Set patience of {self.params.patience} epochs reached\"\n logger.info(msg)\n break\n # set modules to train phase, reset gradients, do forward-backward propagations\n self.train()\n optimizer.zero_grad()\n train_loss, train_batches = 0.0, 0.0\n t = tqdm(range(0, len(train_examples), self.params.batch_size), disable=not _verbose)\n for start_idx in t:\n batch_examples = train_examples[start_idx:start_idx + self.params.batch_size]\n batch_labels = train_labels[start_idx:start_idx + self.params.batch_size]\n batch_data = self.encoder.batch_encode(\n examples=batch_examples,\n padding_length=self.params.padding_length,\n add_terminals=self.params.add_terminals,\n )\n batch_data.update({\n \"_labels\": 
self._prepare_labels( # `_` 'cause this key is for intermediate use\n batch_labels,\n # pad to the max length amongst encoded examples\n max([len(_split_lengths) for _split_lengths in batch_data[\"split_lengths\"]])\n )\n })\n batch_data = self.forward(batch_data)\n loss = batch_data[\"loss\"]\n train_loss += loss.cpu().detach().numpy()\n train_batches += 1\n # find gradients\n loss = loss / self.params.gradient_accumulation_steps\n loss.backward()\n # optimizer and scheduler step\n batch_id = start_idx / self.params.batch_size\n if (\n start_idx + self.params.batch_size >= len(train_examples) or\n (batch_id + 1) % self.params.gradient_accumulation_steps == 0\n ):\n # update weights when it is the last batch in the epoch or\n # when specified step is reached or\n if self.params.max_grad_norm: # clip (accumulated) gradients if required\n nn.utils.clip_grad_norm_(self.parameters(), self.params.max_grad_norm)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n # log progress\n progress_bar_msg = f\"Epoch: {epoch} | Mean loss: {train_loss / train_batches:.4f}\"\n t.set_description(progress_bar_msg, refresh=True)\n train_loss = train_loss / train_batches\n # dev evaluation\n predictions, targets = [], []\n t = tqdm(range(0, len(dev_examples), self.params.batch_size), disable=not _verbose)\n for start_idx in t:\n batch_examples = dev_examples[start_idx:start_idx + self.params.batch_size]\n batch_labels_targetted = dev_labels[start_idx:start_idx + self.params.batch_size]\n batch_labels_predicted = self.predict(batch_examples)\n # validation\n if len(batch_labels_predicted) != len(batch_labels_targetted):\n msg = f\"Number of predictions ({len(batch_labels_predicted)}) \" \\\n f\"not equal to number of targets ({len(batch_labels_targetted)})\"\n logger.error(msg)\n raise AssertionError(msg)\n # flatten if required\n try:\n batch_labels_predicted = sum(batch_labels_predicted, [])\n batch_labels_targetted = sum(batch_labels_targetted, [])\n except TypeError:\n # raised in case of sequence classification; implies already flattened\n pass\n # discard unwanted predictions using _label_padding_idx\n batch_labels_predicted, batch_labels_targetted = zip(*[\n (x, y) for x, y in zip(batch_labels_predicted, batch_labels_targetted)\n if y != self.params._label_padding_idx\n ])\n predictions.extend(batch_labels_predicted)\n targets.extend(batch_labels_targetted)\n progress_bar_msg = f\"Epoch: {epoch} | \" \\\n f\"Validation Metric: {self.params.validation_metric} \"\n t.set_description(progress_bar_msg, refresh=True)\n # compute score\n if ValidationMetricType(self.params.validation_metric) == ValidationMetricType.ACCURACY:\n dev_score = accuracy_score(targets, predictions, normalize=True)\n elif ValidationMetricType(self.params.validation_metric) == ValidationMetricType.F1:\n dev_score = f1_score(targets, predictions, average='weighted')\n else:\n msg = f\"Invalid 'validation_metric' ({self.params.validation_metric}) provided \" \\\n f\"in params. 
Allowed values are only 'accuracy' and 'f1'\"\n raise ValueError(msg)\n # save model weights in a temp folder; later move it to folder passed through dump()\n if dev_score >= best_dev_score:\n torch.save(self.state_dict(), temp_weights_save_path)\n phrase = (\n f\"improved from '{best_dev_score:.4f}' to\" if dev_score > best_dev_score\n else \"remained at\"\n )\n msg = f\"Model weights saved after epoch: {epoch} when dev score {phrase} \" \\\n f\"'{dev_score:.4f}'\\n\"\n logger.info(msg)\n # update patience counter\n if dev_score == best_dev_score:\n patience_counter += 1\n else:\n patience_counter = 0\n best_dev_score, best_dev_epoch = dev_score, epoch\n else:\n patience_counter += 1\n msg = f\"No weights saved after epoch: {epoch}. \" \\\n f\"The dev score last improved after epoch: {best_dev_epoch}\"\n logger.info(msg)\n\n # load back the best model dumped in temporary path and delete the temp folder\n msg = f\"Setting the model weights to checkpoint whose dev \" \\\n f\"{self.params.validation_metric} score is {best_dev_score:.4f}\"\n logger.info(msg)\n # because we are loading to same device, no `map_location` specified\n self.load_state_dict(torch.load(temp_weights_save_path))\n shutil.rmtree(temp_folder)\n\n self.ready = True\n self.dirty = True\n\n @staticmethod\n def _validate_and_update_params(**params) -> Dict:\n \"\"\"Common validation and updation of the params dict before creating encoders and layers\"\"\"\n\n # populate few required key-values (ensures the key-values are populated if not inputted)\n params.update({\n \"add_terminals\": params.get(\"add_terminals\"), # some encoders need this to be True\n # (e.g. pretrained huggingface encoder) while some others don't; better not to set a\n # boolean value as default to avoid raising errors unexpectedly\n \"padding_length\": params.get(\"padding_length\"), # explicitly obtained for more\n # transparent param dictionary\n \"tokenizer_type\": params.get(\"tokenizer_type\", DEFAULT_TOKENIZER),\n \"_label_padding_idx\": LABEL_PAD_TOKEN_IDX, # used to discard unwanted i.e. 
label\n # padding indices in the batch predictions in the fit() method\n })\n\n # validate tokenizer_type param\n allowed_tokenizer_types = {\n EmbedderType.GLOVE: [\n TokenizerType.WHITESPACE_TOKENIZER,\n TokenizerType.WHITESPACE_AND_CHAR_DUAL_TOKENIZER,\n ],\n EmbedderType.BERT: [TokenizerType.HUGGINGFACE_PRETRAINED_TOKENIZER, ]\n }\n if params.get(\"embedder_type\") and params.get(\"tokenizer_type\"):\n embedder_type = EmbedderType(params.get(\"embedder_type\"))\n tokenizer_type = TokenizerType(params.get(\"tokenizer_type\"))\n if embedder_type in allowed_tokenizer_types:\n if tokenizer_type not in allowed_tokenizer_types[embedder_type]:\n msg = f\"For the selected choice of embedder ({embedder_type.value}), only \" \\\n f\"the following tokenizer_type are allowed: \" \\\n f\"{[v.value for v in allowed_tokenizer_types[embedder_type]]}.\"\n raise ValueError(msg)\n\n # validate validation metric\n validation_metric = params.get(\"validation_metric\")\n try:\n validation_metric = ValidationMetricType(validation_metric)\n except ValueError as e:\n msg = f\"Expected validation_metric amongst \" \\\n f\"{[v.value for v in ValidationMetricType.__members__.values()]} \" \\\n f\"but found '{validation_metric}'.\"\n raise ValueError(msg) from e\n\n return params\n\n def _prepare_input_encoder(self, examples: List[str], **params) -> Dict:\n \"\"\"Sets the input encoder and returns an updated param dict\"\"\"\n\n # create and fit encoder\n self.encoder = InputEncoderFactory.get_encoder_cls(params.get(\"tokenizer_type\"))(**params)\n self.encoder.prepare(examples=examples)\n params.update({\n \"_num_tokens\": len(self.encoder.get_vocab()),\n \"_padding_idx\": self.encoder.get_pad_token_idx(),\n })\n return params\n\n def _prepare_embedder(self, **params) -> Dict:\n \"\"\"Sets the embedder if required and returns an updated param dict\"\"\"\n\n # check: cannot specify any conflicting params as required by child class\n if self.encoder is None:\n raise ValueError(\"An encoder must be first fitted before calling _prepare_embedder()\")\n\n # check: cannot specify any conflicting params as required by child class\n embedder_type = params.get(\"embedder_type\")\n if EmbedderType(embedder_type) == EmbedderType.GLOVE:\n # load glove embs\n token_dimension = params.get(\"embedder_type\", 300)\n token_pretrained_embedding_filepath = params.get(\"token_pretrained_embedding_filepath\")\n glove_container = GloVeEmbeddingsContainer(\n token_dimension=token_dimension,\n token_pretrained_embedding_filepath=token_pretrained_embedding_filepath\n )\n token2emb = glove_container.get_pretrained_word_to_embeddings_dict()\n glove_emb_dim = glove_container.token_dimension\n # validate emb_dim\n emb_dim = params.get(\"emb_dim\", glove_emb_dim)\n if emb_dim != glove_emb_dim:\n msg = f\"Provided 'emb_dim':{emb_dim} cannot be used with the provided \" \\\n f\"'embedder_type':{embedder_type}. 
Consider not specifying any 'emb_dim' \" \\\n f\"with this embedder.\"\n raise ValueError(msg)\n params.update({\n \"embedder_type\": embedder_type,\n \"emb_dim\": emb_dim, # overwrite the default value\n \"_embedding_weights\": {\n i: token2emb[t] for t, i in self.encoder.get_vocab().items() if t in token2emb\n },\n })\n elif EmbedderType(embedder_type) == EmbedderType.BERT:\n # the bert model is directly loaded in _init_core() itself\n params.update({\n \"embedder_type\": embedder_type,\n \"emb_dim\": self.encoder.config.hidden_size, # overwrite the default value\n \"pretrained_model_name_or_path\": params.get(\"pretrained_model_name_or_path\"),\n })\n\n if not params.get(\"emb_dim\"):\n msg = f\"Need a valid 'emb_dim' to initialize embedding layers. To specify a \" \\\n f\"particular dimension, either pass-in the 'emb_dim' param or provide a valid \" \\\n f\"'embedder_type' param. Continuing with a default value:{DEFAULT_EMB_DIM}.\"\n logger.error(msg)\n params.update({\"emb_dim\": DEFAULT_EMB_DIM})\n\n return params\n\n def _create_optimizer_and_scheduler(self, num_training_steps: int) -> Tuple:\n \"\"\"Sets an optimizer and scheduler for training torch.nn net\"\"\"\n del num_training_steps\n\n # load a torch optimizer\n optimizer = getattr(torch.optim, self.params.optimizer)(\n self.parameters(), lr=self.params.learning_rate\n )\n # load a constant lr scheduler\n scheduler = getattr(torch.optim.lr_scheduler, \"LambdaLR\")(optimizer, lambda _: 1)\n return optimizer, scheduler\n\n def _get_dumpable_state_dict(self):\n \"\"\"\n Returns a state dict of the Pytorch module that can be dumped. Overwriting definitions can\n select a subset of full state dict to be dumped (e.g. like the BERT based ones)\n \"\"\"\n return self.state_dict()\n\n def dump(self, path: str):\n \"\"\"\n Dumps underlying torch.nn model, encoder state and params\n\n Args:\n path (str): The path header for where the files are dumped.\n\n The following states are dumped into different files:\n - Pytorch model weights\n - Encoder state\n - Params (including params such as tokenizer_type and emb_dim that are used during\n loading to create encoder and forward graph)\n \"\"\"\n # resolve path and create associated folder if required\n path = os.path.abspath(os.path.splitext(path)[0]) + \".pytorch_model\"\n os.makedirs(path, exist_ok=True)\n\n # save weights\n torch.save(self._get_dumpable_state_dict(), os.path.join(path, \"model.bin\"))\n\n # save encoder's state\n self.encoder.dump(path)\n\n # save all params\n with open(os.path.join(path, \"params.json\"), \"w\") as fp:\n json.dump(dict(self.params), fp, indent=4)\n fp.close()\n msg = f\"{self.name} model weights are dumped successfully\"\n logger.info(msg)\n\n self.dirty = False\n\n @classmethod\n def load(cls, path: str):\n \"\"\"\n Loads states from a dumped path\n\n Args:\n path (str): The path header wherein dumped files are present.\n \"\"\"\n # resolve path\n path = os.path.abspath(os.path.splitext(path)[0]) + \".pytorch_model\"\n\n # load all params\n with open(os.path.join(path, \"params.json\"), \"r\") as fp:\n all_params = json.load(fp)\n fp.close()\n\n # create new instance\n module = cls()\n if module.name != all_params[\"name\"]:\n msg = f\"The name of the loaded model ({all_params['name']}) from the path '{path}' \" \\\n f\"is different from the name of the module instantiated ({module.name})\"\n raise AssertionError(msg)\n\n # load encoder's state\n module.params.update(dict(all_params))\n module.encoder = InputEncoderFactory.get_encoder_cls(\n 
tokenizer_type=all_params[\"tokenizer_type\"])(**all_params)\n module.encoder.load(path)\n\n # load weights\n module._init_graph()\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if device != module.params.device:\n msg = f\"Model was dumped when on the device:{module.params.device} \" \\\n f\"but is not being loaded on device:{device}\"\n logger.warning(msg)\n module.params.device = device\n bin_path = os.path.join(path, \"model.bin\")\n trained_state_dict = torch.load(bin_path, map_location=torch.device(device))\n module_state_dict = module.state_dict()\n keys_diff = module_state_dict.keys() - trained_state_dict.keys()\n if keys_diff:\n msg = f\"While loading {module.__class__.__name__}, {len(keys_diff)} keys of the \" \\\n f\"total {len(module_state_dict.keys())} of the torch module are not found in \" \\\n f\"the file loaded from {bin_path} \"\n msg += \"\\n- This IS fine if loading a model for which only some parameters were \" \\\n \"trained and others frozen. \\n- This IS NOT fine if you expect all parameters \" \\\n \"were trained.\"\n logger.warning(msg)\n module.load_state_dict(trained_state_dict, strict=False)\n module.to(device)\n msg = f\"{module.name} model weights are loaded successfully on to the device:{device}\"\n logger.info(msg)\n\n module.ready = True\n module.dirty = False\n return module\n\n @abstractmethod\n def _prepare_labels(self, labels: Union[List[int], List[List[int]]], max_length: int):\n raise NotImplementedError\n\n @abstractmethod\n def _init_graph(self):\n raise NotImplementedError\n\n @abstractmethod\n def forward(self, batch_data: BatchData) -> BatchData:\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, examples: List[str]) -> Union[List[int], List[List[int]]]:\n \"\"\"\n Returns predicted class labels\n\n Args:\n examples (List[str]): The list of examples for which predictions are computed and\n returned.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def predict_proba(self, examples: List[str]) -> Union[List[List[int]], List[List[List[int]]]]:\n \"\"\"\n Returns predicted class probabilities\n\n Args:\n examples (List[str]): The list of examples for which class prediction probabilities\n are computed and returned.\n \"\"\"\n raise NotImplementedError\n" ]
[ [ "torch.load", "sklearn.model_selection.train_test_split", "torch.cuda.is_available", "torch.device", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
villarrealas/deltasigma
[ "b1c2e9f307d37064ed4163a2682be825b3a44bf2" ]
[ "chopper_ds/run_walker.py" ]
[ "import pair_counter as pc\nimport data_handler as dh\nimport numpy as np\nfrom thechopper import get_buffered_subvolumes\nimport yaml\nimport sys\nimport json\nfrom collections import OrderedDict\nfrom mpi4py import MPI\nimport psutil\nimport os\nimport gc\nfrom datetime import datetime\n\n# This script runs\nif __name__ == '__main__':\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n myhost = MPI.Get_processor_name()\n\n process = psutil.Process(os.getpid())\n # hardcoded here for ease of changing\n seednum = int(sys.argv[1])\n fp_worklist = sys.argv[2]\n\n # read in some important stuff from the yaml file\n yamlpath = '/global/cscratch1/sd/asv13/repos/deltasigma/chopper_ds/globalconfig.yaml'\n with open(yamlpath) as fp: config=yaml.safe_load(fp)\n logmass_lowcut = config['analysiscuts']['logmass_lowcut']\n logmass_highcut = config['analysiscuts']['logmass_highcut']\n NX = config['setupinfo']['nx']\n NY = config['setupinfo']['ny']\n NZ = config['setupinfo']['nz']\n RMAX = np.log(config['setupinfo']['rbin_loghigh'])\n LBOX = config['setupinfo']['lbox']\n\n # so what we need here is a loop. First we read the worklist on everything.\n # worklist of json format: contains both file locations + cosmology information\n with open(fp_worklist) as fp:\n worklist = json.load(fp)\n # Then, we loop over the worklist. Each element is a model AND snap to work on.\n for work in worklist:\n if rank == 0:\n now = datetime.now()\n print('read worklist + yam at {}'.format(now.strftime(\"%H:%M:%S\")), flush=True)\n # rank 0 reads in the data for halos and particles.\n # these outputs are ordered dicts\n halo_data, ptcl_data, params = dh.load_data(work[0], work[1], work[2], work[3], work[4], seednum)\n now = datetime.now()\n print('done with data handler at {}'.format(now.strftime(\"%H:%M:%S\")), flush=True)\n else:\n # every other rank just initializes a blank ordered dict.\n halo_data = OrderedDict()\n ptcl_data = OrderedDict()\n params = []\n # subsample the halo_data here\n if rank == 0:\n # read in mass config here and subsample by halo mass\n\n mask = (logmass_lowcut < np.log10(halo_data['mass'])) & (np.log10(halo_data['mass']) < logmass_highcut)\n for halo_key in halo_data.keys():\n halo_data[halo_key] = halo_data[halo_key][mask]\n print('read data', flush=True) \n print('rank 0 reporting used memory: {} GB'.format(process.memory_info().rss/1024./1024./1024.), flush=True) \n gc.collect() \n print('rank 0 reporting used memory: {} GB'.format(process.memory_info().rss/1024./1024./1024.), flush=True) \n # now we do the new chopper broadcasting\n halocats_for_rank, cell_ids_for_rank = get_buffered_subvolumes(\n comm, halo_data,\n NX, NY, NZ, LBOX, 0)\n particles_for_rank, __ = get_buffered_subvolumes(comm, ptcl_data,\n NX, NY, NZ, LBOX, RMAX)\n if rank==0:\n now = datetime.now()\n print('broadcast data at {}.'.format(now.strftime(\"%H:%M:%S\")))\n print('rank 0 reporting used memory: {} GB'.format(process.memory_info().rss/1024./1024./1024.), flush=True) \n params = comm.bcast(params, root=0)\n else:\n params = None\n params = comm.bcast(params, root=0)\n # and now we'll loop over these\n rank_iter = 0\n del ptcl_data\n del halo_data\n gc.collect()\n if rank==0:\n now = datetime.now()\n print('broadcast halocats at {}.'.format(now.strftime(\"%H:%M:%S\")))\n print('rank 0 reporting used memory: {} GB'.format(process.memory_info().rss/1024./1024./1024.), flush=True) \n for halocat, particles in zip(halocats_for_rank, particles_for_rank):\n mask = halocat['_inside_subvol'] == 
True\n # modify halo catalog to only include halos inside the subvolume\n for halo_key in halocat.keys():\n halocat[halo_key] = halocat[halo_key][mask]\n pc.calculate_delta_sigma(halocat, particles, params, rank, rank_iter)\n rank_iter = rank_iter + 1\n print('rank 0 reporting used memory: {} GB'.format(process.memory_info().rss/1024./1024./1024.), flush=True)\n now = datetime.now()\n print('rank {} completed all work at {}. '.format(rank, now.strftime(\"%H:%M:%S\")))\n" ]
[ [ "numpy.log", "numpy.log10" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shun60s/BPF2
[ "8e93d670e6e1315a4410bf7181cee577e986a72c" ]
[ "ema1.py" ]
[ "#coding:utf-8\r\n\r\n# A class of Exponential Moving Average with Half-wave rectification, and smoothing via lpf\r\n#\r\n# Half-wave rectification until a few KHz signal.\r\n# More than a few KHz signal is transformed to DC with ripple signal. And smooth ripple signal.\r\n\r\n\r\n# Check version\r\n# Python 3.6.4 on win32 (Windows 10)\r\n# numpy 1.16.3\r\n# matplotlib 2.1.1\r\n# scipy 1.4.1\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy.signal import lfilter\r\nfrom iir1 import *\r\n\r\nclass Class_EMA1(object):\r\n def __init__(self, N=80, Clip_bottom=0.0, PrintOut=False):\r\n # initalize\r\n self.N= N\r\n self.alfa= 2.0 / (self.N + 1.0)\r\n self.Clip_bottom= Clip_bottom\r\n \r\n if PrintOut:\r\n print ('alfa (EMA)', self.alfa)\r\n print ('half cycle (EMA)', self.N/2.8854)\r\n \r\n def __call__(self, x, sr=48000, smooth=True):\r\n # x dimension should be 1-zigenn\r\n # Half-wave rectification\r\n y= np.clip(x, self.Clip_bottom, None)\r\n \r\n \r\n '''\r\n # output numpy array\r\n y2= np.empty( (len(x)), dtype=np.float32)\r\n y2[0]= self.alfa * y[0]\r\n for i in range( len(x) -1 ):\r\n y2[i+1] = self.alfa * y[i] + (1.0- self.alfa) * y2[i]\r\n '''\r\n \r\n # use scipy's lfilter([b] [a]\r\n y2,_ = lfilter( [self.alfa, 0.0],[1.0,self.alfa - 1.0], y, zi=[ y[0] * (1.0- self.alfa)])\r\n \r\n if smooth:\r\n return self.smoothing(y2, sr)\r\n else:\r\n return y2\r\n \r\n def smoothing(self,x, sr=48000):\r\n \t# smoothing via lpf\r\n self.lpf=Class_IIR1(fc= sr / 30 , n_order=3, sampling_rate=sr)\r\n return self.lpf.filtering(x)\r\n \r\n def wav_show(self,y1,y2=None, y3=None, sr=48000):\r\n \t# draw wavform\r\n plt.figure()\r\n plt.subplot(311)\r\n plt.xlabel('time step')\r\n plt.ylabel('amplitude')\r\n tlist= np.arange( len(y1) ) * (1 / sr)\r\n plt.plot( tlist, y1)\r\n \r\n if y2 is not None:\r\n plt.subplot(312)\r\n plt.xlabel('time step')\r\n plt.ylabel('amplitude')\r\n tlist= np.arange( len(y2) ) * (1 /sr)\r\n plt.plot( tlist, y2)\r\n \r\n if y3 is not None:\r\n plt.subplot(313)\r\n plt.xlabel('time step')\r\n plt.ylabel('amplitude')\r\n tlist= np.arange( len(y3) ) * (1 / sr)\r\n plt.plot( tlist, y3)\r\n \r\n plt.grid()\r\n plt.axis('tight')\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n #\r\n from scipy import signal\r\n from scipy.io.wavfile import read as wavread\r\n # instance\r\n ema1= Class_EMA1(PrintOut=True)\r\n \r\n # load a sample wav\r\n #path0='wav/400Hz-10dB_44100Hz_400msec.wav'\r\n #path0='wav/1KHz-10dB_44100Hz_400msec.wav'\r\n path0='wav/3KHz-10dB_44100Hz_400msec.wav'\r\n #path0='wav/5KHz-10dB_44100Hz_400msec.wav'\r\n try:\r\n sr, y = wavread(path0)\r\n except:\r\n print ('error: wavread ')\r\n sys.exit()\r\n else:\r\n yg= y / (2 ** 15)\r\n print ('sampling rate ', sr)\r\n print ('y.shape', yg.shape)\r\n \r\n # process\r\n y2=ema1( yg, sr, smooth=False)\r\n y3=ema1.smoothing( y2, sr)\r\n \r\n # draw wav\r\n ema1.wav_show(yg, y2, y3, sr=sr)\r\n\r\n\r\n\r\n" ]
[ [ "numpy.clip", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.axis", "scipy.signal.lfilter", "matplotlib.pyplot.show", "scipy.io.wavfile.read", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
machow/tidytext-py
[ "5000191a48d9f371fe5c3a8d7e3642877e9edbd5" ]
[ "tests/test_unnest_tokens.py" ]
[ "import pytest\n\nfrom tidytext import unnest_tokens\nimport pandas as pd\n\nfrom siuba.siu import _\n\[email protected]\ndef test_unnest_tokens_character():\n df = pd.DataFrame({\"txt\": \"Emily Dickinson\"})\n res = unnest_tokens(df, _.char, _.txt, token = \"characters\")\n\n nrow, ncol = res.shape\n assert nrow == 14\n assert ncol == 1\n assert res[\"char\"].iloc[0] == \"e\"\n\n\[email protected]\ndef test_unnest_tokens_char_shingles():\n df = pd.DataFrame({\"txt\": \"tidytext is the best\"})\n res = unnest_tokens(df, _.char_ngram, _.txt, token = \"character_shingles\", n = 4)\n\n nrow, ncol = res.shape\n assert nrow == 14\n assert ncol == 1\n assert df[\"char_ngram\"].iloc[0] == \"tidy\"\n\n\[email protected]\ndef test_unnest_tokens_char_shingles_whitespace():\n df = pd.DataFrame({\"txt\": \"tidytext is the best!\"})\n res = unnest_tokens(df, _.char_ngram, _.txt, token = \"character_shingles\", strip_non_alphanum = False)\n\n nrow, ncol = res.shape\n assert nrow == 19\n assert ncol == 1\n assert res[\"char_ngram\"].iloc[0] == \"tid\"\n\n\ndef test_unnest_tokens_words():\n df = pd.DataFrame({\"txt\": [\n \"Because I could not stop for Death -\", \n \"He kindly stopped for me -\"]\n })\n res = unnest_tokens(df, _.word, _.txt)\n nrow, ncol = res.shape\n assert nrow == 12\n assert ncol == 1\n assert res[\"word\"].iloc[0] == \"because\"\n\n\ndef test_unnest_tokens_token_arg_wrong():\n df = pd.DataFrame({\"txt\": [\"tidytext is the best!\"]})\n\n with pytest.raises(ValueError):\n # TODO: test part of error message\n unnest_tokens(df, _.word, _.txt, token = \"word\")\n\n\n# To continue tests, see https://github.com/juliasilge/tidytext/blob/master/tests/testthat/test-unnest-tokens.R#L56\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
keithpij/mint
[ "23b3dd946f2118eda625d2b3ff8e5427cbb90bb5" ]
[ "charts.py" ]
[ "\"\"\"\nDemo of a basic pie chart plus a few additional features.\n\nIn addition to the basic pie chart, this demo shows a few optional features:\n\n * slice labels\n * auto-labeling the percentage\n * offsetting a slice with \"explode\"\n * drop-shadow\n * custom start angle\n\nNote about the custom start angle:\n\nThe default ``startangle`` is 0, which would start the \"Frogs\" slice on the\npositive x-axis. This example sets ``startangle = 90`` such that everything is\nrotated counter-clockwise by 90 degrees, and the frog slice starts on the\npositive y-axis.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport settings\n\n\ndef category_pie_chart(categories):\n ''' Pie chart, where the slices will be ordered and plotted counter-clockwise.'''\n\n # Create a list of totals for each category.\n labels = []\n sizes = []\n explode = []\n other_total = 0\n for category_name in categories.keys():\n total = 0\n for transaction in categories[category_name]:\n total += transaction.amount\n \n if total <= settings.OTHER_LIMIT:\n other_total += total\n else:\n labels.append(category_name)\n sizes.append(total)\n explode.append(0)\n\n labels.append('Other')\n sizes.append(other_total)\n explode.append(0)\n \n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
helenacuesta/voas-vocal-quartets
[ "9b4341f072effdcf0d214694bc24f96eea962427" ]
[ "predict_on_salience.py" ]
[ "import numpy as np\nimport pandas as pd\n\nimport os\nimport argparse\nimport sys\n\nfrom voas import utils as utils\nfrom voas import config as config\nfrom voas import models as models\n\nPATCH_LEN = 128\n\n\ndef grab_input_slices(input_mat):\n '''Input mat will be [num_features x patch_len]\n '''\n\n slice_start_times = np.arange(start=0, stop=input_mat.shape[-1], step=PATCH_LEN)\n\n batches = []\n\n for i in slice_start_times[:-1]:\n chunk = input_mat[:, i:i + PATCH_LEN]\n batches.append(chunk)\n\n last_chunk = np.zeros([config.num_features, PATCH_LEN])\n last_chunk[:, :input_mat[:, slice_start_times[-1]:].shape[-1]] = input_mat[:, slice_start_times[-1]:]\n batches.append(last_chunk)\n\n return batches\n\ndef grab_input_slices_lstm(input_mat):\n '''Input mat will be [num_features x patch_len]\n '''\n\n slice_start_times = np.arange(start=0, stop=input_mat.shape[-1], step=PATCH_LEN)\n\n batches = []\n\n for i in slice_start_times[:-1]:\n chunk = input_mat[:, i:i + PATCH_LEN].transpose()\n batches.append(chunk)\n\n last_chunk = np.zeros([config.num_features, PATCH_LEN])\n last_chunk[:, :input_mat[:, slice_start_times[-1]:].shape[-1]] = input_mat[:, slice_start_times[-1]:]\n batches.append(last_chunk.transpose())\n\n return batches\n\n\ndef eval_generator(data_batches):\n for batch in data_batches:\n yield batch[np.newaxis, :, :, np.newaxis]\n\n\ndef predict_one_example(input_mat, model, mode):\n\n if mode == \"time\":\n batches = grab_input_slices_lstm(input_mat)\n pred = model.predict(x=eval_generator(batches), verbose=1)\n T_orig = input_mat.shape[-1]\n T_pred = np.hstack(pred[0]).shape[0]\n diff = T_pred - T_orig\n\n # import pdb; pdb.set_trace()\n\n return np.vstack(pred[0]).transpose()[:, :-diff], \\\n np.vstack(pred[1]).transpose()[:, :-diff], \\\n np.vstack(pred[2]).transpose()[:, :-diff], \\\n np.vstack(pred[-1]).transpose()[:, :-diff], \\\n pred\n\n else:\n batches = grab_input_slices(input_mat)\n\n pred = model.predict(x=eval_generator(batches), verbose=1)\n\n T_orig = input_mat.shape[-1]\n T_pred = np.hstack(pred[0]).shape[-1]\n diff = T_pred - T_orig\n\n return np.hstack(pred[0])[:, :-diff], np.hstack(pred[1])[:, :-diff], np.hstack(\n pred[2])[:, :-diff], np.hstack(pred[-1])[:, :-diff], pred\n\n\ndef load_salience_function(path_to_salience):\n # assume npy format for the salience from Late/Deep CNN\n salience = np.load(path_to_salience)\n\n return salience\n\n\ndef predict_one_file(model, salience, thresholds):\n\n est_saliences = predict_one_example(salience, model, mode=\"freq\")\n\n timestamp, sop = utils.pitch_activations_to_mf0_argmax(est_saliences[0], thresh=thresholds[0])\n _, alt = utils.pitch_activations_to_mf0_argmax(est_saliences[1], thresh=thresholds[1])\n _, ten = utils.pitch_activations_to_mf0_argmax(est_saliences[2], thresh=thresholds[2])\n _, bas = utils.pitch_activations_to_mf0_argmax(est_saliences[3], thresh=thresholds[3])\n\n # construct the multi-pitch predictions\n predictions = np.zeros([len(timestamp), 5])\n\n predictions[:, 0] = timestamp\n predictions[:, 1] = sop\n predictions[:, 2] = alt\n predictions[:, 3] = ten\n predictions[:, 4] = bas\n\n return predictions, est_saliences\n\n\ndef main(args):\n\n if args.model == \"voas_cnn\":\n thresholds = [0.23, 0.17, 0.15, 0.17]\n model = models.voasCNN(PATCH_LEN)\n model.load_weights(\"./models/voas_cnn.h5\")\n\n elif args.model == \"voas_clstm\":\n thresholds = [0.29, 0.20, 0.17, 0.23]\n model = models.voasConvLSTM(PATCH_LEN)\n model.load_weights(\"./models/voas_clstm.h5\")\n\n else:\n sys.exit(\"Please 
provide a valid model. Expected `voas_cnn` or `voas_clstm`.\")\n\n\n if args.saliencefolder != 0:\n\n salience_folder = args.saliencefolder\n\n for salience_file in os.listdir(salience_folder):\n if not salience_file.endswith(\"npy\"): continue\n\n salience = load_salience_function(salience_file)\n predictions, _ = predict_one_file(model, salience, thresholds)\n\n if args.outputpath != \"0\":\n output_folder = args.outputpath\n pd.DataFrame(predictions).to_csv(\n os.path.join(output_folder, \"{}\".format(salience_file.replace(\"npy\", \"csv\"))), header=False, index=False, index_label=False\n )\n\n else:\n pd.DataFrame(predictions).to_csv(\n os.path.join(salience_folder, \"{}\".format(salience_file.replace(\"npy\", \"csv\"))), header=False,\n index=False, index_label=False\n )\n\n else:\n salience_file = args.saliencefile\n basename = os.path.basename(salience_file)\n salience = load_salience_function(salience_file)\n predictions, est_saliences = predict_one_file(model, salience, thresholds)\n\n if args.outputpath != \"0\":\n output_folder = args.outputpath\n\n pd.DataFrame(predictions).to_csv(\n os.path.join(output_folder, \"{}\".format(basename.replace(\"npy\", \"csv\"))), header=False, index=False,\n index_label=False\n )\n\n np.save(os.path.join(output_folder, \"{}\".format(basename.replace(\".npy\", \"_s.npy\"))), est_saliences[0])\n np.save(os.path.join(output_folder, \"{}\".format(basename.replace(\".npy\", \"_a.npy\"))), est_saliences[1])\n np.save(os.path.join(output_folder, \"{}\".format(basename.replace(\".npy\", \"_t.npy\"))), est_saliences[2])\n np.save(os.path.join(output_folder, \"{}\".format(basename.replace(\".npy\", \"_b.npy\"))), est_saliences[3])\n\n\n\n else:\n pd.DataFrame(predictions).to_csv(\n os.path.join(salience_file.replace(\"npy\", \"csv\")), header=False, index=False, index_label=False\n )\n np.save(salience_file.replace(\".npy\", \"_s.npy\"), est_saliences[0])\n np.save(salience_file.replace(\".npy\", \"_a.npy\"), est_saliences[0])\n np.save(salience_file.replace(\".npy\", \"_t.npy\"), est_saliences[2])\n np.save(salience_file.replace(\".npy\", \"_b.npy\"), est_saliences[3])\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Predict F0 contours given an input polyphonic pitch salience function.\")\n\n parser.add_argument(\"--model\",\n dest='model',\n type=str,\n help=\"Model to use for prediction: voas_clstm | voas_cnn\")\n\n parser.add_argument(\"--saliencefile\",\n type=str,\n default=0,\n help=\"Path to the input salience file. It expects a npy file.\")\n\n parser.add_argument(\"--saliencefolder\",\n type=str,\n default=0,\n help=\"Path to the folder with salience files.\")\n\n parser.add_argument(\"--outputpath\",\n type=str,\n default=\"0\",\n help=\"Path to the folder to store the results. If nothing is provided, results will be stored in the same folder of the input(s).\")\n main(parser.parse_args())" ]
[ [ "numpy.hstack", "numpy.arange", "pandas.DataFrame", "numpy.load", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
guojj33/VGNSL
[ "90a59c819032c95b74a859898e6eea3393a4d27d" ]
[ "src/model.py" ]
[ "from operator import length_hint\nimport numpy as np\nfrom collections import OrderedDict\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.init\nimport torchvision.models as models\nfrom torch.autograd import Variable\nfrom torch.distributions import Categorical\nfrom torch.nn import functional\nfrom torch.nn import functional as F\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom utils import make_embeddings, l2norm, cosine_sim, sequence_mask, \\\n index_mask, index_one_hot_ellipsis\nimport utils\n\n\nclass EncoderImagePrecomp(nn.Module):\n \"\"\" image encoder \"\"\"\n def __init__(self, img_dim, embed_size, no_imgnorm=False):\n super(EncoderImagePrecomp, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n\n self.fc = nn.Linear(img_dim, embed_size)\n\n self.init_weights()\n\n def init_weights(self):\n \"\"\" Xavier initialization for the fully connected layer \"\"\"\n r = np.sqrt(6.) / np.sqrt(self.fc.in_features +\n self.fc.out_features)\n self.fc.weight.data.uniform_(-r, r)\n self.fc.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\" extract image feature vectors \"\"\"\n # assuming that the precomputed features are already l2-normalized\n features = self.fc(images.float())\n\n # normalize in the joint embedding space\n if not self.no_imgnorm:\n features = l2norm(features)\n \n return features\n\n def load_state_dict(self, state_dict):\n \"\"\" copies parameters, overwritting the default one to\n accept state_dict from Full model \"\"\"\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)\n\n\nclass EncoderText(nn.Module):\n \"\"\" text encoder \"\"\"\n def __init__(self, opt, vocab_size, semantics_dim):\n super(EncoderText, self).__init__()\n opt.syntax_dim = semantics_dim # syntax is tied with semantics\n\n self.vocab_size = vocab_size\n self.semantics_dim = semantics_dim\n\n self.sem_embedding = make_embeddings(opt, self.vocab_size, self.semantics_dim)\n self.syn_score = nn.Sequential(\n nn.Linear(opt.syntax_dim * 2, opt.syntax_score_hidden),\n nn.ReLU(),\n nn.Linear(opt.syntax_score_hidden, 1, bias=False)\n )\n # self.reset_weights()\n\n def reset_weights(self):\n self.sem_embedding.weight.data.uniform_(-0.1, 0.1)\n\n def forward(self, x, lengths, volatile=False): \n \"\"\" sample a tree for each sentence \"\"\"\n max_select_cnt = int(lengths.max(dim=0)[0].item()) - 1 # 最大选择次数 = 最长caption长度 - 1\n\n tree_indices = list() # 每一次合并中第一个constituent的位置\n tree_probs = list() # 合并结点的概率(分数)\n span_bounds = list() # 每一次合并所囊括的在原caption中的范围\n features = list() # 存合并的semantic embedding\n left_span_features = list() # 每一次合并的第一个元素\n right_span_features = list() # 每一次合并的第二个元素\n\n # closed range: [left_bounds[i], right_bounds[i]]\n left_bounds = utils.add_dim(torch.arange(\n 0, max_select_cnt + 1, dtype=torch.long, device=x.device), 0, x.size(0))\n right_bounds = left_bounds\n # debug\n print('forward')\n # print(x.shape, lengths.shape) # [32,max] [32]\n # print(left_bounds.shape) # [32,max]\n\n # \"we have used a shared representation for both syntax and semantics\"\n sem_embeddings = self.sem_embedding(x)\n syn_embeddings = sem_embeddings\n # print(sem_embeddings.shape) # [32,max,512]\n\n output_word_embeddings = sem_embeddings * \\\n sequence_mask(lengths, 
max_length=lengths.max()).unsqueeze(-1).float()\n # print(output_word_embeddings.shape) # [32,max,512]\n \n valid_bs = lengths.size(0)\n # print('for')\n # print(lengths[0]) # <= max\n # print(x[0]==0, x[0].shape) # [false, false, ... , true, true] [max]\n for i in range(max_select_cnt):\n # 一次合并\n seq_length = sem_embeddings.size(1) # 逐渐减小\n print('-------------',seq_length,'------------------') # max,max-1,...,1\n # set invalid positions to 0 prob\n # [0, 0, ..., 1, 1, ...]\n length_mask = 1 - sequence_mask(\n (lengths - 1 - i).clamp(min=0), max_length=seq_length - 1).float()\n # print('length_mask:')\n # print(length_mask[0]) # [0, 0, ... , 1 , 1]\n # 0 = done\n undone_mask = 1 - length_mask[:, 0]\n # print(undone_mask[0]) # 1\n\n syn_feats = torch.cat(\n (l2norm(syn_embeddings[:, 1:]), l2norm(syn_embeddings[:, :-1])), \n dim=2\n )\n # print('syn_feats:')\n # print(syn_feats.shape) # [32,seq_length-1,1024]\n prob_logits = self.syn_score(syn_feats).squeeze(-1) # 用 syntax embedding 计算合并的分数\n # print('prob_logits')\n # print(prob_logits.shape) # [32, seq_length-1]\n\n prob_logits = prob_logits - 1e10 * length_mask\n probs = F.softmax(prob_logits, dim=1)\n # print('probs:')\n # print(probs[0])\n\n if not volatile:\n sampler = Categorical(probs)\n indices = sampler.sample() # 按概率随机抽\n else:\n indices = probs.max(1)[1] # 抽最大的\n print('indice:',indices[0].item())\n tree_indices.append(indices)\n tree_probs.append(index_one_hot_ellipsis(probs, 1, indices))\n # print('indice prob:',tree_probs[-1][0].item())\n\n # print('bounds:')\n # print(left_bounds[0])\n # print(right_bounds[0])\n this_spans = torch.stack([ # bounds在此处使用,记录每一次合并所囊括的在原caption中的范围,没有实际用到?\n index_one_hot_ellipsis(left_bounds, 1, indices),\n index_one_hot_ellipsis(right_bounds, 1, indices + 1)\n ], dim=1)\n print('this_spans:')\n print(this_spans[0])\n this_features = torch.add(\n index_one_hot_ellipsis(sem_embeddings, 1, indices),\n index_one_hot_ellipsis(sem_embeddings, 1, indices + 1)\n )\n print('this_features:')\n print(this_features[0].shape) # [512]\n this_left_features = index_one_hot_ellipsis(sem_embeddings, 1, indices)\n this_right_features = index_one_hot_ellipsis(sem_embeddings, 1, indices + 1)\n this_features = l2norm(this_features) # 相邻constituents的组合\n this_left_features = l2norm(this_left_features) # constituents的第一个元素(左)\n this_right_features = l2norm(this_right_features) # constituents的第二个元素(右)\n\n span_bounds.append(this_spans)\n features.append(l2norm(this_features) * undone_mask.unsqueeze(-1).float())\n left_span_features.append(this_left_features * undone_mask.unsqueeze(-1).float())\n right_span_features.append(this_right_features * undone_mask.unsqueeze(-1).float())\n\n # update word embeddings\n left_mask = sequence_mask(indices, max_length=seq_length).float()\n # print('left_mask:')\n # print(left_mask[0]) # 位置0到indice-1值为1,其他全为0\n right_mask = 1 - sequence_mask(indices + 2, max_length=seq_length).float()\n # print('right_mask:')\n # print(right_mask[0]) # 位置indice+1之后全为1,其他全为0\n center_mask = index_mask(indices, max_length=seq_length).float()\n # print('center mask:')\n # print(center_mask[0]) # 位置indice值为1,其他全为0\n update_masks = (left_mask, right_mask, center_mask)\n\n this_features_syn = torch.add(\n index_one_hot_ellipsis(syn_embeddings, 1, indices),\n index_one_hot_ellipsis(syn_embeddings, 1, indices + 1)\n )\n this_features_syn = l2norm(this_features_syn)\n syn_embeddings = self.update_with_mask(syn_embeddings, syn_embeddings, this_features_syn, *update_masks)\n sem_embeddings = 
self.update_with_mask(sem_embeddings, sem_embeddings, this_features, *update_masks)\n left_bounds = self.update_with_mask(left_bounds, left_bounds, this_spans[:, 0], *update_masks)\n right_bounds = self.update_with_mask(right_bounds, right_bounds, this_spans[:, 1], *update_masks)\n\n # print(len(features)) # max-1 合并次数\n # print(features[0].shape) # [32,512]\n # print(len(tree_indices)) # max-1\n # print(tree_indices[0].shape) # [32]\n return features, left_span_features, right_span_features, output_word_embeddings, tree_indices, \\\n tree_probs, span_bounds\n\n @staticmethod\n def update_with_mask(lv, rv, cv, lm, rm, cm):\n if lv.dim() > lm.dim():\n lm = lm.unsqueeze(2)\n rm = rm.unsqueeze(2)\n cm = cm.unsqueeze(2)\n\n return (lv * lm.to(lv))[:, :-1] + (rv * rm.to(rv))[:, 1:] + (cv.unsqueeze(1) * cm.to(cv))[:, :-1]\n\n\nclass ContrastiveReward(nn.Module):\n \"\"\" compute contrastive reward \"\"\"\n\n def __init__(self, margin=0):\n super(ContrastiveReward, self).__init__()\n self.margin = margin\n self.sim = cosine_sim\n\n def forward(self, im, s): \n \"\"\" return the reward \"\"\"\n # compute image-sentence score matrix\n scores = self.sim(im, s)\n diagonal = scores.diag().view(im.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n # compare every diagonal score to scores in its column\n # caption retrieval, given images and retrieve captions\n reward_s = (d1 - scores - self.margin).clamp(min=0)\n # compare every diagonal score to scores in its row\n # image retrieval, given caption and retrieve images\n reward_im = (d2 - scores - self.margin).clamp(min=0)\n # clear diagonals\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n reward_s = reward_s.masked_fill_(I, 0)\n reward_im = reward_im.masked_fill_(I, 0)\n\n # sum up the reward\n reward_s = reward_s.mean(1)\n reward_im = reward_im.mean(0)\n\n return reward_s + reward_im\n\n\nclass ContrastiveLoss(nn.Module):\n \"\"\" compute contrastive loss for VSE \"\"\"\n\n def __init__(self, margin=0):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n self.sim = cosine_sim\n\n def forward(self, im, s):\n scores = self.sim(im, s)\n diagonal = scores.diag().view(im.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n loss_s = (self.margin + scores - d1).clamp(min=0)\n loss_im = (self.margin + scores - d2).clamp(min=0)\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n loss_s = loss_s.masked_fill_(I, 0)\n loss_im = loss_im.masked_fill_(I, 0)\n\n loss_s = loss_s.mean(1)\n loss_im = loss_im.mean(0)\n\n return loss_s + loss_im\n\n\nclass VGNSL(object):\n \"\"\" the main VGNSL model \"\"\"\n\n def __init__(self, opt):\n self.grad_clip = opt.grad_clip\n self.img_enc = EncoderImagePrecomp(\n opt.img_dim, opt.embed_size, opt.no_imgnorm\n )\n self.txt_enc = EncoderText(opt, opt.vocab_size, opt.word_dim)\n\n if torch.cuda.is_available():\n self.img_enc.cuda()\n self.txt_enc.cuda()\n cudnn.benchmark = True\n\n # loss, reward and optimizer\n self.reward_criterion = ContrastiveReward(margin=opt.margin)\n self.loss_criterion = ContrastiveLoss(margin=opt.margin)\n self.vse_reward_alpha = opt.vse_reward_alpha\n self.vse_loss_alpha = opt.vse_loss_alpha\n self.lambda_hi = opt.lambda_hi\n\n params = list(self.txt_enc.parameters())\n params += list(self.img_enc.fc.parameters())\n self.params = params\n\n self.optimizer = getattr(torch.optim, opt.optimizer)(params, lr=opt.learning_rate)\n\n self.Eiters = 0\n\n def 
state_dict(self):\n state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict(), self.optimizer.state_dict()]\n return state_dict\n\n def load_state_dict(self, state_dict):\n self.img_enc.load_state_dict(state_dict[0])\n self.txt_enc.load_state_dict(state_dict[1])\n if len(state_dict) >= 3:\n self.optimizer.load_state_dict(state_dict[2])\n\n def train_start(self):\n \"\"\" switch to train mode \"\"\"\n self.img_enc.train()\n self.txt_enc.train()\n\n def val_start(self):\n \"\"\" switch to evaluate mode \"\"\"\n self.img_enc.eval()\n self.txt_enc.eval()\n\n def forward_emb(self, images, captions, lengths, volatile=False):\n \"\"\"Compute the image and caption embeddings\n \"\"\"\n # Set mini-batch dataset\n if torch.cuda.is_available():\n images = images.cuda()\n captions = captions.cuda()\n with torch.set_grad_enabled(not volatile):\n img_emb = self.img_enc(images)\n txt_outputs= self.txt_enc(captions, lengths, volatile)\n return (img_emb, ) + txt_outputs\n\n def forward_reward(self, base_img_emb, cap_span_features, left_span_features, right_span_features,\n word_embs, lengths, span_bounds, **kwargs):\n \"\"\"Compute the loss given pairs of image and caption embeddings\n \"\"\"\n print('forward_reward')\n reward_matrix = torch.zeros(base_img_emb.size(0), lengths.max(0)[0]-1).float()\n # print(reward_matrix.shape) # [32,max-1]\n left_reg_matrix = torch.zeros(base_img_emb.size(0), lengths.max(0)[0]-1).float()\n right_reg_matrix = torch.zeros(base_img_emb.size(0), lengths.max(0)[0]-1).float()\n if torch.cuda.is_available():\n reward_matrix = reward_matrix.cuda()\n right_reg_matrix = right_reg_matrix.cuda()\n left_reg_matrix = left_reg_matrix.cuda()\n\n matching_loss = 0\n for i in range(lengths.max(0)[0] - 1): # i遍历合并次数\n curr_imgs = list()\n curr_caps = list()\n curr_left_caps = list()\n curr_right_caps = list()\n indices = list()\n for j in range(base_img_emb.size(0)): # j遍历图像-文本数\n if i < lengths[j] - 1: # 有的文本不需要这么多次合并\n curr_imgs.append(base_img_emb[j].reshape(1, -1))\n curr_caps.append(cap_span_features[lengths[j] - 2 - i][j].reshape(1, -1))\n curr_left_caps.append(left_span_features[lengths[j] - 2 - i][j].reshape(1, -1))\n curr_right_caps.append(right_span_features[lengths[j] - 2 - i][j].reshape(1, -1))\n indices.append(j) # 参与此轮合并的图象-文本对\n\n img_emb = torch.cat(curr_imgs, dim=0)\n cap_emb = torch.cat(curr_caps, dim=0)\n print('emb:')\n print(img_emb.shape) # [参与此轮计算样本数, 512]\n print(cap_emb.shape) # [参与此轮计算样本数, 512]\n left_cap_emb = torch.cat(curr_left_caps, dim=0)\n right_cap_emb = torch.cat(curr_right_caps, dim=0)\n reward = self.reward_criterion(img_emb, cap_emb)\n left_reg = self.loss_criterion(img_emb, left_cap_emb)\n right_reg = self.loss_criterion(img_emb, right_cap_emb) # 每次合并的第二个词的abstractness,跟match loss计算公式相同\n for idx, j in enumerate(indices):\n reward_matrix[j][lengths[j] - 2 - i] = reward[idx]\n left_reg_matrix[j][lengths[j] - 2 - i] = left_reg[idx]\n right_reg_matrix[j][lengths[j] - 2 - i] = right_reg[idx]\n\n this_matching_loss = self.loss_criterion(img_emb, cap_emb)\n matching_loss += this_matching_loss.sum() + left_reg.sum() + right_reg.sum()\n reward_matrix = reward_matrix / (self.lambda_hi * right_reg_matrix + 1.0)\n reward_matrix = self.vse_reward_alpha * reward_matrix\n\n return reward_matrix, matching_loss\n\n # 一个batch的训练 入口\n def train_emb(self, images, captions, lengths, ids=None, epoch=None, *args):\n print(images.shape)\n print(captions.shape)\n print(len(lengths))\n print(lengths)\n # 一个batch的数据,lengths为列表,每个元素为1个caption的长度\n \"\"\" one training 
step given images and captions \"\"\"\n self.Eiters += 1 # iteration count\n self.logger.update('Eit', self.Eiters)\n self.logger.update('lr', self.optimizer.param_groups[0]['lr'])\n lengths = torch.Tensor(lengths).long()\n if torch.cuda.is_available():\n lengths = lengths.cuda()\n\n # compute the embeddings \n img_emb, cap_span_features, left_span_features, right_span_features, word_embs, tree_indices, probs, \\\n span_bounds = self.forward_emb(images, captions, lengths)\n\n # measure accuracy and record loss\n cum_reward, matching_loss = self.forward_reward(\n img_emb, cap_span_features, left_span_features, right_span_features, word_embs, lengths,\n span_bounds\n )\n probs = torch.cat(probs, dim=0).reshape(-1, lengths.size(0)).transpose(0, 1)\n masks = sequence_mask(lengths - 1, lengths.max(0)[0] - 1).float()\n print('r1_loss')\n # print(cum_reward.shape) # [32,max-1]\n # print(probs.shape) # [32,max-1]\n # print(masks.shape) # [32,max-1]\n rl_loss = torch.sum(-masks * torch.log(probs) * cum_reward.detach()) # reinforcement learning loss\n \n loss = rl_loss + matching_loss * self.vse_loss_alpha\n loss = loss / cum_reward.shape[0]\n self.logger.update('Loss', float(loss), img_emb.size(0))\n self.logger.update('MatchLoss', float(matching_loss / cum_reward.shape[0]), img_emb.size(0))\n self.logger.update('RL-Loss', float(rl_loss / cum_reward.shape[0]), img_emb.size(0))\n \n # compute gradient and do SGD step\n self.optimizer.zero_grad()\n loss.backward()\n if self.grad_clip > 0:\n clip_grad_norm_(self.params, self.grad_clip)\n self.optimizer.step()\n\n # clean up\n if epoch > 0:\n del cum_reward\n del tree_indices\n del probs\n del cap_span_features\n del span_bounds\n" ]
[ [ "torch.nn.functional.softmax", "numpy.sqrt", "torch.Tensor", "torch.cat", "torch.nn.utils.clip_grad.clip_grad_norm_", "torch.nn.Linear", "torch.set_grad_enabled", "torch.distributions.Categorical", "torch.log", "torch.cuda.is_available", "torch.arange", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sonaldangi12/DataScience
[ "3d7cd529a96f37c2ef179ee408e2c6d8744d746a" ]
[ "Groups/Group_ID_40/vcca.py" ]
[ "import os\nimport torch\nimport torch.utils.data\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom autoencoder import Autoencoder\nfrom utils import *\n\nSEED = 1\nBATCH_SIZE = 128\nLOG_INTERVAL = 10\nEPOCHS = 1\ntorch.manual_seed(SEED)\n\ndef loss_function(recon_x1, recon_x2, x1, x2, mu, logvar,input_dim) -> Variable:\n # how well do input x and output recon_x agree?\n BCE1 = F.binary_cross_entropy(recon_x1, x1.view(-1, input_dim ))\n BCE2 = F.binary_cross_entropy(recon_x2, x2.view(-1, input_dim ))\n\n # KLD is Kullback–Leibler divergence -- how much does one learned\n # distribution deviate from another, in this specific case the\n # learned distribution from the unit Gaussian\n # - D_{KL} = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n # note the negative D_{KL} in appendix B of the paper\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n # Normalise by same number of elements as in reconstruction\n KLD /= BATCH_SIZE * input_dim\n\n # BCE tries to make our reconstruction as accurate as possible\n # KLD tries to push the distributions as close as possible to unit Gaussian\n return BCE1 + KLD + BCE2\n\ndef train(model,epoch,train_loader,optimizer,input_dim):\n # toggle model to train mode\n model.train()\n train_loss = 0\n\n for batch_idx, (data1, data2) in enumerate(train_loader): \n data1 = Variable(data1).float()\n data2 = Variable(data2).float()\n\n optimizer.zero_grad()\n\n recon_batch1, recon_batch2, mu, log_var = model(data1, data2)\n # calculate scalar loss\n loss = loss_function(recon_batch1, recon_batch2, data1, data2, mu, log_var,input_dim)\n # calculate the gradient of the loss w.r.t. the graph leaves\n # i.e. input variables -- by the power of pytorch!\n loss.backward()\n train_loss += loss.data\n optimizer.step()\n if batch_idx % LOG_INTERVAL == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data1), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader),\n loss.data / len(data1)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n\n\ndef fit(x_view,y_view,ZDIMS,input_dim,epochs):\n EPOCHS=epochs\n data1 = x_view\n data2 = y_view\n\n train_loader = torch.utils.data.DataLoader(\n ConcatDataset(\n data1,\n data2\n ),\n batch_size=BATCH_SIZE, shuffle=True)\n\n model = Autoencoder(ZDIMS,input_dim)\n optimizer = optim.Adam(model.parameters(), lr=0.0001)\n\n for epoch in range(1, EPOCHS + 1):\n train(model,epoch,train_loader,optimizer,input_dim)\n #est(epoch)\n model.eval()\n # 64 sets of random ZDIMS-float vectors, i.e. 64 locations / MNIST\n # digits in latent space\n sample = Variable(torch.randn(64, ZDIMS))\n\n sample1 = model.decode_1(sample).cpu()\n # print(sample1)\n sample2 = model.decode_2(sample).cpu()\n\n\n" ]
[ [ "torch.autograd.Variable", "torch.manual_seed", "torch.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/online-centrality
[ "30901da7d558c417d2eea3f0e85224776df97b1e" ]
[ "python/centrality_utils/static_negative_beta_measure_computer.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nfrom collections import deque\nfrom .base_computer import *\n\nclass StaticNegativeBetaMeasureParams():\n def __init__(self,lookback_cnt=0):\n self.lookback_cnt = lookback_cnt\n if lookback_cnt > 0:\n self.graph_type = \"snapshot_%i\" % lookback_cnt\n else:\n self.graph_type = \"total\"\n \n def __str__(self):\n return \"nbm_%s\" % (self.graph_type)\n\nclass StaticNegativeBetaMeasureComputer(BaseComputer):\n def __init__(self,param_list):\n \"\"\"Input: list of StaticNegativeBetaMeasureParams objects\"\"\"\n self.param_list = param_list\n self.graph_snapshots = [deque([]) for i in range(len(self.param_list))]\n self.stat_nbmes = None\n\n def update(self,edge,graph,snapshot_graph,time=None):\n \"\"\"edge=(src,trg)\"\"\"\n # This is a static measure. It only needs to be updated at snapshot update\n pass\n\n def calculate_neg_beta_measures(self,graph,snapshot_graph,epsilon=0.001):\n nbmes_df = pd.DataFrame()\n for i in range(len(self.param_list)):\n param = self.param_list[i]\n G = nx.DiGraph(graph) if param.lookback_cnt == 0 else get_graph_from_snapshots(self, snapshot_graph, param, i)\n out_deg = dict(G.out_degree())\n # calculate weights for in edges\n rec_out_deg = dict([(n,1.0/out_deg[n] if out_deg[n] > 0 else 1.0) for n in out_deg])\n edges_with_weights = [(link[0],link[1],rec_out_deg[link[0]]) for link in G.edges()]\n # re-initialize the snapshot graph with 'in_weights'\n G.clear()\n G.add_weighted_edges_from(edges_with_weights,weight=\"in_weight\")\n nbmes = dict(G.in_degree(weight=\"in_weight\")) # due to the 'in_weight' values on the edges the result is the negative beta measure\n # we want to included zero neg. beta measure nodes in output files as well, that is why we add epsilon!\n nbmes_with_epsilon = pd.Series(nbmes) + epsilon\n new_col_df = pd.DataFrame({str(i):nbmes_with_epsilon})\n nbmes_df = nbmes_df.join(new_col_df, how='outer')\n nbmes_df.insert(0,\"node_id\",nbmes_df.index)\n return nbmes_df.fillna(0.0).as_matrix()\n\n\n def save_snapshot(self,experiment_folder,snapshot_index,graph,snapshot_graph,time=None):\n self.stat_nbmes = self.calculate_neg_beta_measures(graph,snapshot_graph)\n if not os.path.exists(experiment_folder):\n os.makedirs(experiment_folder)\n for j, param in enumerate(self.param_list):\n output_folder = \"%s/%s\" % (experiment_folder,param)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n pos_idx = self.stat_nbmes[:,j+1] > 0 \n active_arr = self.stat_nbmes[pos_idx][:,[0,j+1]]\n scores2file(active_arr,\"%s/nbm_%i.csv\" % (output_folder,snapshot_index))\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
mavelim/twitter_selenium_get_tweets
[ "e1bdec1aff68c14c03a4b37ee8b5b5b22108f0a8" ]
[ "resources/scraper_function.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 12 18:27:54 2020\n\n@author: theo goe\n\"\"\"\n\n# imports\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nimport random\nimport pandas as pd\nfrom progressbar import ProgressBar\npbar = ProgressBar()\nfrom datetime import timedelta, date\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--disable-gpu\")\n\n\n\ndef sleep_for(opt1, opt2):\n time_for = random.uniform(opt1, opt2)\n time_for_int = int(round(time_for))\n sleep(abs(time_for_int - time_for))\n for i in range(time_for_int, 0, -1):\n sleep(1)\n\n\ndef daterange(date1, date2):\n for n in range(int((date2 - date1).days) + 30):\n yield date1 + timedelta(n)\n\n\ndef list_of_dates(start_date, end_date, num_days):\n cur_date = start = datetime.strptime(start_date, '%Y-%m-%d').date()\n end = datetime.strptime(end_date, '%Y-%m-%d').date()\n\n dates_list = []\n dates_list.append(start_date)\n while cur_date < end:\n # print(cur_date)\n cur_date += relativedelta(days=num_days)\n dates_list.append(cur_date)\n\n # if last date is after the end date, remove\n if dates_list[-1] > end:\n dates_list.pop(-1)\n \n # add the last day\n dates_list.append(end)\n # list of tuples of each date pairing\n tup_list = []\n counter = 1\n for i in dates_list:\n # print(i)\n try:\n tup_list.append((i,dates_list[counter]))\n counter += 1\n except: # lazy way to skip last date pairing\n pass\n return tup_list\n\n\ndef twitter_scraper(browser_path, urls, scroll_down_num, post_element_xpath,\n start_date, end_date, days_between):\n\n # setting the chromedriver path and initializing driver\n driver = webdriver.Chrome(options=chrome_options)\n #driver = webdriver.Chrome(executable_path=browser_path)\n driver.set_page_load_timeout(100)\n\n # create master df to append to\n master_df = pd.DataFrame()\n\n dates_list = list_of_dates(start_date, end_date, num_days=days_between)\n\n # loop through the list of urls listed in config_and_run.py\n for orig_url in pbar(urls):\n print(str(orig_url))\n for day_tup in dates_list:\n print(str(day_tup[0]))\n print(str(day_tup[1]))\n url = orig_url + '%20until%3A' + str(day_tup[1]) + \\\n '%20since%3A' + str(day_tup[0]) + '&src=typed_query'\n\n driver.get(url)\n print(str(url))\n sleep_for(10, 15) # sleep a while to be safe\n\n # scroll x number of times\n for i in range(0, scroll_down_num):\n # scroll down\n driver.find_element_by_xpath('//body').send_keys(Keys.END)\n sleep_for(4, 7)\n\n # get a list of each post\n post_list = driver.find_elements_by_xpath(post_element_xpath)\n\n post_text = [x.text for x in post_list]\n\n print(post_text)\n\n # create temp dataset of each tweet\n temp_df = pd.DataFrame(post_text, columns={'all_text'})\n\n master_df = master_df.append(temp_df)\n print('master df len ' + str(len(master_df)))\n print()\n\n return master_df\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
imcallister/accountifie
[ "094834c9d632e0353e3baf8d924eeb10cba0add4" ]
[ "accountifie/toolkit/utils/everything.py" ]
[ "from . import gl_helpers\n\nimport datetime\nimport json\n\n\n\nfrom dateutil.parser import parse\nfrom dateutil.relativedelta import relativedelta\nfrom decimal import Decimal, getcontext, ROUND_HALF_UP, ROUND_HALF_DOWN\nfrom bisect import insort\nimport locale\nimport operator\nimport re\n\nimport csv, os\nfrom pprint import pprint\n\nfrom django.db import models\nfrom django.conf import settings\n\nfrom . import datefuncs\n\nlocale.setlocale(locale.LC_ALL, '')\n\n\n\nimport logging\nlogger = logging.getLogger('default')\n\n\n#use this frequently in rounding so give it a name\nDZERO = Decimal(\"0.00\")\nHUNDREDTH = Decimal(\"0.01\")\n\n\"\"\"\ndef periods(period_tags):\n periods = {}\n for tag in period_tags:\n period_tag = period_tags[tag]\n periods[tag] = {'start': start_of_period(period_tag), 'end': end_of_period(period_tag)}\n return periods\n\"\"\"\n\ndef day_before(d):\n return d - datetime.timedelta(days=1)\n\nfrom pandas.tseries.offsets import BDay\ndef prev_busday(d):\n return (d - BDay(1)).date()\n\n\ndef entry(x, link='', ccy_fmt=''):\n return {'text': fmt(x, values_fmt=ccy_fmt), 'link': link}\n\ndef acct_history_link(id):\n from_dt = datefuncs.start_of_prev_year(datetime.datetime.now().year).isoformat()\n return ('/reporting/history/account/%s/?from=%s' % (id, from_dt))\n\ndef path_history_link(path):\n from_dt = datefuncs.start_of_prev_year(datetime.datetime.now().year).isoformat()\n return ('/reporting/history/path/%s/?from=%s' % (path.replace('.', '_'), from_dt))\n\ndef path_balances_link(path, date='today'):\n return ('/gl/path/%s/balances/?date=date' % path.replace('.','_'))\n\ndef fmt(stuff, values_fmt=None):\n \"How do we want money formatted?\"\n if stuff is None:\n return '----'\n elif isinstance(stuff, str):\n return stuff\n else:\n #accounting format, hacky, must be a recipe for this.\n #just seeing if brackets line up.\n if abs(stuff) < 0.1:\n txt = '-'\n elif stuff < 0:\n txt = '(' + locale.format(\"%0.0f\", abs(stuff), grouping=True) + ')'\n else:\n txt = locale.format(\"%0.0f\", stuff, grouping=True) + ''\n if values_fmt:\n return values_fmt + txt\n else:\n return txt\n\ndef unfmt(x):\n x = x.replace('$', '')\n if x == '-':\n return 0.0\n else:\n return float(x.replace(',','').replace(')','').replace('(','-'))\n\n\n\n\n\ndef get_dates(dt):\n if datefuncs.is_period_id(dt):\n start = datefuncs.start_of_period(dt)\n end = datefuncs.end_of_period(dt)\n elif type(dt) in [str, str] and dt[-4:]=='_YTD':\n end = parse(dt[:-4])\n start = datetime.date(end.year, 1, 1)\n elif type(dt) in [str, str] and dt[0] == 'D':\n d = parse(dt[1:]).date()\n start = prev_busday(d)+datetime.timedelta(days=1)\n end = d\n elif dt=='today':\n start = settings.DATE_EARLY\n end = datetime.datetime.now().date()\n else:\n start = settings.DATE_EARLY\n if type(dt)==str:\n end = parse(dt).date()\n else:\n end = dt\n return start, end\n\ndef get_dates_dict(dt):\n if type(dt)==dict:\n if 'start' in dt and 'end' in dt:\n #already in format\n return dt\n\n start, end = get_dates(dt)\n return { 'start': start, 'end': end }\n\n\n\n\ndef to_dict(dataset):\n \"Make a dict with column 1 as key and cols 2,3,4 etc as value.\"\n d = {}\n for row in dataset:\n first, rest = row[0], row[1:]\n d[first] = rest\n return d\n\ndef denoneify(num):\n if num is None:\n return DZERO\n else:\n return num\n\ndef safe_sum(seq):\n \"Safe with None\"\n try:\n return sum(seq)\n except TypeError:\n seq2 = []\n for elem in seq:\n if elem is not None:\n seq2.append(elem)\n\n return sum(seq2)\n\n\n\n\n\n\n\ndef 
get_columns(request):\n if 'columns' in request.GET:\n return request.GET.get('columns').split('.')\n else:\n return None\n\n\n\ndef files_for_dir(datadir):\n return [os.path.join(datadir, name) for name in os.listdir(datadir) if os.path.isfile(os.path.join(datadir, name))]\n\ndef csv_to_modelattr(open_file, name_cleaner=None, company=gl_helpers.get_default_company()):\n '''takes the fields and values in the CSV and transforms them into a list of dicts where the keys\n will match model attributes. for example Full Description becomes full_description'''\n if name_cleaner == None:\n name_cleaner = lambda name: name\n f_csv = csv.DictReader(open_file)\n csv_to_modelattr = dict([(name, name_cleaner(name)) for name in f_csv.fieldnames])\n csv_to_modelattr['company_id'] = company\n\n return [dict([(csv_to_modelattr[name], value) for name, value in list(row.items()) if name in csv_to_modelattr]) for row in f_csv]\n\ndef get_foreignkeys(model):\n return dict(((f.name, f.rel.to) for f in model._meta.fields if f.__class__ == models.ForeignKey))\n\ndef get_fk_attr(model):\n return [f.name for f in model._meta.fields if f.__class__ == models.ForeignKey]\n\ndef get_pk_name(model):\n return model._meta.pk.name\n\ndef instance_nonrel_data(row, model, name_cleaner=None, value_cleaner=None):\n model_flds = model._meta.get_all_field_names()\n instance_data_no_fk = dict((name_cleaner(name), value_cleaner(name, value)) for name, value in list(row.items()) if name_cleaner(name)\n and name_cleaner(name) not in get_fk_attr(model) and name_cleaner(name) in model_flds)\n return model(**instance_data_no_fk)\n\ndef set_foreignkeys(instance, row, model, name_cleaner=None, value_cleaner=None):\n if get_foreignkeys(model):\n instance_fk = dict((name_cleaner(name), value_cleaner(name, value)) for name, value in list(row.items()) if name_cleaner(name)\n and name_cleaner(name) in get_fk_attr(model))\n for fk in list(get_foreignkeys(model).items()):\n if fk[0] in instance_fk:\n try:\n related = fk[1].objects.get(pk=instance_fk[fk[0]])\n setattr(instance, fk[0], related)\n except:\n logger.error(\"No ForeignKey %s %s. %s\" % (fk[0], str(fk[1]), instance_fk))\n return instance\n\ndef dirty_key(row, model=None, unique=None, name_cleaner=None, value_cleaner=None):\n dirty = [name_cleaner(k) for k in list(row.keys())\n if name_cleaner(k) not in [f.name for f in [field for field in model._meta.fields\n if field not in get_fk_attr(model)]]\n if name_cleaner(k)]\n\n return dirty\n\ndef create_instance(row, model, name_cleaner=None, value_cleaner=None, unique=None, exclude=[], company=gl_helpers.get_default_company()):\n row['company'] = company\n non_rel_instance = instance_nonrel_data(row, model, name_cleaner=name_cleaner, value_cleaner=value_cleaner)\n if non_rel_instance.id in exclude:\n return None\n\n full_instance = set_foreignkeys(non_rel_instance, row, model, name_cleaner=name_cleaner, value_cleaner=value_cleaner)\n\n return unique(full_instance)\n\n\ndef random_color(dark=True):\n import random\n r = lambda: random.randrange(10,130 if dark else 255, 10)\n return '#%02X%02X%02X' % (r(),r(),r())\n" ]
[ [ "pandas.tseries.offsets.BDay" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
Jie-Yuan/CTRZOO
[ "cf7810d2cb76fb0b2a48678cade01f64d1ed3fdb" ]
[ "ctrzoo/layers/DNN.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Project : Python.\n# @File : DNN\n# @Time : 2020-03-13 13:42\n# @Author : yuanjie\n# @Email : [email protected]\n# @Software : PyCharm\n# @Description :\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union\n\nimport tensorflow as tf\n\n\nclass DNN(tf.keras.layers.Layer):\n\n def __init__(self,\n hidden_units_list: List[int] = (64, 32, 16, 4),\n activation='relu',\n kernel_regularizer=tf.keras.regularizers.l2(),\n use_bn=False,\n dropout_rate=0,\n seed=666,\n name='DNN',\n **kwargs):\n super().__init__(name=name, **kwargs)\n\n self.hidden_units_list = hidden_units_list\n self.activation = activation\n self.dropout_rate = dropout_rate\n self.kernel_regularizer = kernel_regularizer\n self.use_bn = use_bn\n self.seed = seed\n self.num_layer = len(self.hidden_units_list)\n\n def build(self, input_shape):\n super().build(input_shape) # self.built = True\n\n self.dense_layers = []\n for index, units in enumerate(self.hidden_units_list):\n _ = tf.keras.layers.Dense(\n units,\n activation=self.activation,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=None,\n name=f\"dense{index}\"\n )\n self.dense_layers.append(_)\n\n # BN\n if self.use_bn:\n self.bn_layers = [tf.keras.layers.BatchNormalization() for _ in range(self.num_layer)]\n\n self.dropout_layers = []\n for i in range(self.num_layer):\n _ = tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)\n self.dropout_layers.append(_)\n\n def call(self, inputs, training=None, **kwargs):\n \"\"\"http://www.luyixian.cn/news_show_256709.aspx\n BN和Dropout共同使用时会出现的问题\n BN和Dropout单独使用都能减少过拟合并加速训练速度,但如果一起使用的话并不会产生1+1>2的效果,相反可能会得到比单独使用更差的效果。\n 相关的研究参考论文:Understanding the Disharmony between Dropout and Batch Normalization by Variance Shift\n 本论文作者发现理解 Dropout 与 BN 之间冲突的关键是网络状态切换过程中存在神经方差的(neural variance)不一致行为。\n 试想若有图一中的神经响应 X,当网络从训练转为测试时,Dropout 可以通过其随机失活保留率(即 p)来缩放响应,并在学习中改变神经元的方差,\n 而 BN 仍然维持 X 的统计滑动方差。这种方差不匹配可能导致数值不稳定(见下图中的红色曲线)。而随着网络越来越深,最终预测的数值偏差可能会累计,\n 从而降低系统的性能。简单起见,作者们将这一现象命名为「方差偏移」。\n 事实上,如果没有 Dropout,那么实际前馈中的神经元方差将与 BN 所累计的滑动方差非常接近(见下图中的蓝色曲线),这也保证了其较高的测试准确率。\n \"\"\"\n deep_input = inputs\n for i in range(self.num_layer):\n fc = self.dense_layers[i](deep_input) # 注意下次循环的输入\n fc = self.bn_layers[i](fc, training=training) if self.use_bn else fc\n fc = self.dropout_layers[i](fc, training=training)\n deep_input = fc\n\n return fc\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1] + (self.hidden_units_list[-1],)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n 'hidden_units': self.hidden_units_list,\n 'activation': self.activation,\n 'kernel_regularizer': self.kernel_regularizer,\n 'use_bn': self.use_bn,\n 'dropout_rate': self.dropout_rate,\n 'seed': self.seed\n }\n return {**base_config, **config}\n" ]
[ [ "tensorflow.keras.layers.Dense", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.2" ] } ]
alanmaehara/Sales-Prediction
[ "ccc12fb1664a733f64a3064e787d070207f37444" ]
[ "api/handler.py" ]
[ "import pandas as pd\nimport pickle\nfrom flask import Flask, request, Response\nfrom rossmann.Rossmann import Rossmann #(from folder rossmann, import rossmann class)\n\n# loading model\nmodel = pickle.load(open('/home/alan/Sales-Prediction/model/model_rossmann.pkl','rb'))\n\n\n# initialize API\napp = Flask(__name__) # __name__ = constructor\n\[email protected]('/rossmann/predict', methods = ['POST']) # creating endpoint with method POST\ndef rossmann_predict(): #function that is executed when an endpoint receives a POST request. This function works on the data received.\n test_json = request.get_json() # retrieve the json data \n \n if test_json: # test whether the data is there or not\n if isinstance(test_json, dict): # if data is a dict, then we have only one line of data\n test_raw = pd.DataFrame(test_json, index=[0])\n else:\n test_raw = pd.DataFrame(test_json, columns = test_json[0].keys()) # if data is not a dict, then it has multiple data. We need to name the columns.\n \n # Instantiate Rossmann Class (\"copy\"/call Rossmann class)\n pipeline = Rossmann()\n # run Data Cleaning on raw data\n df1 = pipeline.data_cleaning( test_raw )\n # run feature engineering on df1\n df2 = pipeline.feature_engineering(df1)\n # run data preprocessing on df2\n df3 = pipeline.data_preparation(df2)\n # prediction\n df_response = pipeline.get_prediction(model, test_raw, df3) #generate predictions with xgboost model, the data that the user sent, and the data to generate predictions on.\n \n return df_response\n \n else:\n return Response( '{}', status = 200, mimetype = 'application/json') # if there's no data, return a response answer 200 (request was correct but execution failed)\n #mimetype indicates the data type\n \nif __name__ == '__main__':\n app.run('0.0.0.0') #running flask on local host" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
vishalbelsare/chebpy
[ "55df1f84a08dcc5d63e687a7ab950d4a6e2666cc" ]
[ "chebpy/core/chebtech.py" ]
[ "from abc import ABC, abstractmethod\n\nimport numpy as np\n\nfrom .smoothfun import Smoothfun\nfrom .settings import _preferences as prefs\nfrom .decorators import self_empty\nfrom .algorithms import (bary, clenshaw, adaptive, coeffmult,\n vals2coeffs2, coeffs2vals2, chebpts2,\n barywts2, rootsunit, newtonroots,\n standard_chop)\nfrom .plotting import import_plt, plotfun, plotfuncoeffs\nfrom .utilities import Interval, coerce_list\n\n\nclass Chebtech(Smoothfun, ABC):\n '''Abstract base class serving as the template for Chebtech1 and\n Chebtech2 subclasses. \n\n Chebtech objects always work with first-kind coefficients, so much \n of the core operational functionality is defined this level.\n\n The user will rarely work with these classes directly so we make\n several assumptions regarding input data types.\n '''\n \n @classmethod\n def initconst(cls, c, *, interval=None):\n '''Initialise a Chebtech from a constant c'''\n if not np.isscalar(c):\n raise ValueError(c)\n if isinstance(c, int):\n c = float(c)\n return cls(np.array([c]), interval=interval)\n\n @classmethod\n def initempty(cls, *, interval=None):\n '''Initialise an empty Chebtech'''\n return cls(np.array([]), interval=interval)\n\n @classmethod\n def initidentity(cls, *, interval=None):\n '''Chebtech representation of f(x) = x on [-1,1]'''\n return cls(np.array([0,1]), interval=interval)\n\n @classmethod\n def initfun(cls, fun, n=None, *, interval=None):\n '''Convenience constructor to automatically select the adaptive or\n fixedlen constructor from the input arguments passed.'''\n if n is None:\n return cls.initfun_adaptive(fun, interval=interval)\n else:\n return cls.initfun_fixedlen(fun, n, interval=interval)\n\n @classmethod\n def initfun_fixedlen(cls, fun, n, *, interval=None):\n '''Initialise a Chebtech from the callable fun using n degrees of\n freedom.'''\n points = cls._chebpts(n)\n values = fun(points)\n coeffs = vals2coeffs2(values)\n return cls(coeffs, interval=interval)\n\n @classmethod\n def initfun_adaptive(cls, fun, *, interval=None):\n '''Initialise a Chebtech from the callable fun utilising the adaptive\n constructor to determine the number of degrees of freedom parameter.'''\n interval = interval if interval is not None else prefs.domain\n interval = Interval(*interval)\n coeffs = adaptive(cls, fun, hscale=interval.hscale)\n return cls(coeffs, interval=interval)\n\n @classmethod\n def initvalues(cls, values, *, interval=None):\n '''Initialise a Chebtech from an array of values at Chebyshev points'''\n return cls(cls._vals2coeffs(values), interval=interval)\n\n def __init__(self, coeffs, interval=None):\n interval = interval if interval is not None else prefs.domain\n self._coeffs = np.array(coeffs)\n self._interval = Interval(*interval)\n\n def __call__(self, x, how='clenshaw'):\n method = {\n 'clenshaw': self.__call__clenshaw,\n 'bary': self.__call__bary,\n }\n try:\n return method[how](x)\n except KeyError:\n raise ValueError(how)\n\n def __call__clenshaw(self, x):\n return clenshaw(x, self.coeffs)\n \n def __call__bary(self, x):\n fk = self.values()\n xk = self._chebpts(fk.size)\n vk = self._barywts(fk.size)\n return bary(x, fk, xk, vk)\n\n def __repr__(self):\n out = '<{0}{{{1}}}>'.format(self.__class__.__name__, self.size)\n return out\n\n # ------------\n # properties\n # ------------\n @property\n def coeffs(self):\n '''Chebyshev expansion coefficients in the T_k basis'''\n return self._coeffs\n\n @property\n def interval(self):\n '''Interval that Chebtech is mapped to'''\n return self._interval\n\n 
@property\n def size(self):\n '''Return the size of the object'''\n return self.coeffs.size\n\n @property\n def isempty(self):\n '''Return True if the Chebtech is empty'''\n return self.size == 0\n\n @property\n def iscomplex(self):\n '''Determine whether the underlying onefun is complex or real valued'''\n return self._coeffs.dtype == complex\n\n @property\n def isconst(self):\n '''Return True if the Chebtech represents a constant'''\n return self.size == 1\n\n @property\n @self_empty(0.)\n def vscale(self):\n '''Estimate the vertical scale of a Chebtech'''\n return np.abs(coerce_list((self.values()))).max()\n\n # -----------\n # utilities\n # -----------\n def copy(self):\n '''Return a deep copy of the Chebtech'''\n return self.__class__(self.coeffs.copy(), interval=self.interval.copy())\n\n def imag(self):\n if self.iscomplex:\n return self.__class__(np.imag(self.coeffs), self.interval)\n else:\n return self.initconst(0, interval=self.interval)\n\n def prolong(self, n):\n '''Return a Chebtech of length n, obtained either by truncating\n if n < self.size or zero-padding if n > self.size. In all cases a\n deep copy is returned.\n '''\n m = self.size\n ak = self.coeffs\n cls = self.__class__\n if n - m < 0:\n out = cls(ak[:n].copy(), interval=self.interval)\n elif n - m > 0:\n out = cls(np.append(ak, np.zeros(n-m)), interval=self.interval)\n else:\n out = self.copy()\n return out\n\n def real(self):\n if self.iscomplex:\n return self.__class__(np.real(self.coeffs), self.interval)\n else:\n return self\n\n def simplify(self):\n '''Call standard_chop on the coefficients of self, returning a\n Chebtech comprised of a copy of the truncated coefficients.'''\n # coefficients\n oldlen = len(self.coeffs)\n longself = self.prolong(max(17, oldlen))\n cfs = longself.coeffs\n # scale (decrease) tolerance by hscale\n tol = prefs.eps*max(self.interval.hscale, 1)\n # chop\n npts = standard_chop(cfs, tol=tol)\n npts = min(oldlen, npts)\n # construct\n return self.__class__(cfs[:npts].copy(), interval=self.interval)\n\n def values(self):\n '''Function values at Chebyshev points'''\n return coeffs2vals2(self.coeffs)\n\n # ---------\n # algebra\n # ---------\n @self_empty()\n def __add__(self, f):\n cls = self.__class__\n if np.isscalar(f):\n if np.iscomplexobj(f):\n dtype = complex\n else:\n dtype = self.coeffs.dtype\n cfs = np.array(self.coeffs, dtype=dtype)\n cfs[0] += f\n return cls(cfs, interval=self.interval)\n else:\n # TODO: is a more general decorator approach better here?\n # TODO: for constant Chebtech, convert to constant and call __add__ again \n if f.isempty:\n return f.copy()\n g = self\n n, m = g.size, f.size\n if n < m:\n g = g.prolong(m)\n elif m < n:\n f = f.prolong(n)\n cfs = f.coeffs + g.coeffs\n\n # check for zero output\n eps = prefs.eps\n tol = .5 * eps * max([f.vscale, g.vscale])\n if all(abs(cfs)<tol):\n return cls.initconst(0., interval=self.interval)\n else:\n return cls(cfs, interval=self.interval)\n\n @self_empty()\n def __div__(self, f):\n cls = self.__class__\n if np.isscalar(f):\n cfs = 1./f * self.coeffs\n return cls(cfs, interval=self.interval)\n else:\n # TODO: review with reference to __add__\n if f.isempty:\n return f.copy()\n divfun = lambda x: self(x) / f(x)\n return cls.initfun_adaptive(divfun, interval=self.interval)\n\n __truediv__ = __div__\n\n @self_empty()\n def __mul__(self, g):\n cls = self.__class__\n if np.isscalar(g):\n cfs = g * self.coeffs\n return cls(cfs, interval=self.interval)\n else:\n # TODO: review with reference to __add__\n if g.isempty:\n return 
g.copy()\n f = self\n n = f.size + g.size - 1\n f = f.prolong(n)\n g = g.prolong(n)\n cfs = coeffmult(f.coeffs, g.coeffs)\n out = cls(cfs, interval=self.interval)\n return out\n\n def __neg__(self):\n coeffs = -self.coeffs\n return self.__class__(coeffs, interval=self.interval)\n\n def __pos__(self):\n return self\n\n @self_empty()\n def __pow__(self, f):\n if np.isscalar(f):\n powfun = lambda x: np.power(self(x), f)\n else:\n powfun = lambda x: np.power(self(x), f(x))\n return self.__class__.initfun_adaptive(powfun, interval=self.interval)\n\n def __rdiv__(self, f):\n # Executed when __div__(f, self) fails, which is to say whenever f\n # is not a Chebtech. We proceeed on the assumption f is a scalar.\n constfun = lambda x: .0*x + f\n quotient = lambda x: constfun(x) / self(x)\n return self.__class__.initfun_adaptive(quotient, interval=self.interval)\n\n __radd__ = __add__\n\n def __rsub__(self, f):\n return -(self-f)\n\n @self_empty()\n def __rpow__(self, f):\n powfun = lambda x: np.power(f, self(x))\n return self.__class__.initfun_adaptive(powfun, interval=self.interval)\n\n __rtruediv__ = __rdiv__\n __rmul__ = __mul__\n\n def __sub__(self, f):\n return self + (-f)\n\n # -------\n # roots\n # -------\n def roots(self, sort=None):\n '''Compute the roots of the Chebtech on [-1,1] using the\n coefficients in the associated Chebyshev series approximation'''\n sort = sort if sort is not None else prefs.sortroots\n rts = rootsunit(self.coeffs)\n rts = newtonroots(self, rts)\n # fix problems with newton for roots that are numerically very close\n rts = np.clip(rts, -1, 1) # if newton roots are just outside [-1,1]\n rts = rts if not sort else np.sort(rts)\n return rts\n\n # ----------\n # calculus\n # ----------\n # Note that function returns 0 for an empty Chebtech object; this is\n # consistent with numpy, which returns zero for the sum of an empty array\n @self_empty(resultif=0.)\n def sum(self):\n '''Definite integral of a Chebtech on the interval [-1,1]'''\n if self.isconst:\n out = 2.*self(0.)\n else:\n ak = self.coeffs.copy()\n ak[1::2] = 0\n kk = np.arange(2, ak.size)\n ii = np.append([2,0], 2/(1-kk**2))\n out = (ak*ii).sum()\n return out\n\n @self_empty()\n def cumsum(self):\n '''Return a Chebtech object representing the indefinite integral\n of a Chebtech on the interval [-1,1]. 
The constant term is chosen\n such that F(-1) = 0.'''\n n = self.size\n ak = np.append(self.coeffs, [0, 0])\n bk = np.zeros(n+1, dtype=self.coeffs.dtype)\n rk = np.arange(2,n+1)\n bk[2:] = .5*(ak[1:n] - ak[3:]) / rk\n bk[1] = ak[0] - .5*ak[2]\n vk = np.ones(n)\n vk[1::2] = -1\n bk[0] = (vk*bk[1:]).sum()\n out = self.__class__(bk, interval=self.interval)\n return out\n\n @self_empty()\n def diff(self):\n '''Return a Chebtech object representing the derivative of a\n Chebtech on the interval [-1,1].'''\n if self.isconst:\n out = self.__class__(np.array([0.]), interval=self.interval)\n else:\n n = self.size\n ak = self.coeffs\n zk = np.zeros(n-1, dtype=self.coeffs.dtype)\n wk = 2*np.arange(1, n)\n vk = wk * ak[1:]\n zk[-1::-2] = vk[-1::-2].cumsum()\n zk[-2::-2] = vk[-2::-2].cumsum()\n zk[0] = .5 * zk[0]\n out = self.__class__(zk, interval=self.interval)\n return out\n\n # ---------------------------------\n # subclasses must implement these\n # ---------------------------------\n @abstractmethod\n def _chebpts():\n raise NotImplementedError\n\n @abstractmethod\n def _barywts():\n raise NotImplementedError\n\n @abstractmethod\n def _vals2coeffs():\n raise NotImplementedError\n\n @abstractmethod\n def _coeffs2vals():\n raise NotImplementedError\n\n# ----------\n# plotting\n# ----------\n\nplt = import_plt()\nif plt:\n def plot(self, ax=None, **kwargs):\n return plotfun(self, (-1, 1), ax=ax, **kwargs)\n setattr(Chebtech, 'plot', plot)\n\n def plotcoeffs(self, ax=None, **kwargs):\n ax = ax or plt.gca()\n return plotfuncoeffs(abs(self.coeffs), ax=ax, **kwargs)\n setattr(Chebtech, 'plotcoeffs', plotcoeffs)\n\n\nclass Chebtech2(Chebtech):\n '''Second-Kind Chebyshev technology'''\n \n @staticmethod\n def _chebpts(n):\n '''Return n Chebyshev points of the second-kind'''\n return chebpts2(n)\n\n @staticmethod\n def _barywts(n):\n '''Barycentric weights for Chebyshev points of 2nd kind'''\n return barywts2(n)\n \n @staticmethod\n def _vals2coeffs(vals):\n '''Map function values at Chebyshev points of 2nd kind to\n first-kind Chebyshev polynomial coefficients'''\n return vals2coeffs2(vals)\n\n @staticmethod\n def _coeffs2vals(coeffs):\n '''Map first-kind Chebyshev polynomial coefficients to\n function values at Chebyshev points of 2nd kind'''\n return coeffs2vals2(coeffs)\n" ]
[ [ "numpy.imag", "numpy.clip", "numpy.arange", "numpy.sort", "numpy.ones", "numpy.append", "numpy.real", "numpy.isscalar", "numpy.iscomplexobj", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kandidatarbete-Chalmers-MCCX02-19-06/RaspberryPiRadarProgram
[ "f5d69d9084d37246aaf0e0061b3353b86e8d59e3" ]
[ "Archive for old stuff/Unused_files/Class_Thread.py" ]
[ "import time\nimport threading\nimport numpy as np\nimport queue\nimport copy\n\nfrom acconeer_utils.clients.reg.client import RegClient\nfrom acconeer_utils.clients.json.client import JSONClient\nfrom acconeer_utils.clients import configs\nfrom acconeer_utils import example_utils\nfrom acconeer_utils.mpl_process import PlotProcess, PlotProccessDiedException, FigureUpdater\n\n\nclass Radar():\n def __init__(self, radar_queue, interrupt_queue):\n # Setup for collecting data from radar\n self.args = example_utils.ExampleArgumentParser().parse_args()\n example_utils.config_logging(self.args)\n if self.args.socket_addr:\n self.client = JSONClient(self.args.socket_addr)\n else:\n port = self.args.serial_port or example_utils.autodetect_serial_port()\n self.client = RegClient(port)\n\n self.client.squeeze = False\n self.config = configs.IQServiceConfig()\n self.config.sensor = self.args.sensors\n\n self.config.range_interval = [0.2, 0.6] # Measurement interval\n self.config.sweep_rate = 1 # Frequency for collecting data\n self.config.gain = 1 # Gain between 0 and 1.\n self.time = 10 # Duration for a set amount of sequences\n self.seq = self.config.sweep_rate * self.time\n\n self.info = self.client.setup_session(self.config) # Setup acconeer radar session\n self.num_points = self.info[\"data_length\"] # Amount of data points per sampel\n\n #### Det här kanske inte ska vara i den här klassen #####\n # Vector for radar values from tracked data\n self.peak_vector = np.zeros((1, self.seq), dtype=np.csingle)\n self.data_idx = 0 # Inedex for peak vector used for filtering\n\n self.radar_queue = radar_queue\n self.interrupt_queue = interrupt_queue\n self.timeout = time.time() + self.time\n b = 0\n\n # Loop which collects data from the radar, tracks the maximum peak and filters it for further signal processing. The final filtered data is put into a queue.\n def get_data(self):\n self.client.start_streaming() # Starts Acconeers streaming server\n while True:\n self.info, self.data = self.client.get_next()\n print(\"Getting data\")\n if self.interrupt_queue.empty() == False or time.time() >= self.timeout: # Interrupt from bluetooth\n # self.interrupt_queue.get()\n print('Breaking loop')\n break\n self.client.disconnect()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FrederikWR/course-02443-stochastic-virus-outbreak
[ "4f1d7f1fa4aa197b31ed86c4daf420d5a637974e" ]
[ "code/report-code/rio_olympics_cities.py" ]
[ "import _setup\n\nimport os.path as path\nimport numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport scipy.stats\nimport math\nimport csv\n\nfrom simulator import State, Simulator\nfrom world import regions, routes\nimport sir\n\nthis_dir = path.dirname(path.realpath(__file__))\n\ndef plot_sir(sols, names, fig_name):\n n = len(sols)\n plt.subplots(figsize=(10,12))\n for i in range(1, n+1):\n # share x-axis\n if i == 1:\n ax = plt.subplot(n * 100 + 10 + i)\n else:\n ax = plt.subplot(n * 100 + 10 + i, sharex=ax)\n\n # hide x ticks except for last\n if i != n:\n plt.setp(ax.get_xticklabels(), visible=False)\n\n ax.set_title(names[i-1])\n\n\n for simulation in sols[i-1]:\n sol = np.asarray(simulation)\n\n p1, = plt.plot(sol[:, 0], color='SteelBlue', alpha=0.5, label='Susceptible')\n p2, = plt.plot(sol[:, 1], color='IndianRed', alpha=0.5, label='Infected')\n p3, = plt.plot(sol[:, 2], color='Olive', alpha=0.5, label='Removed')\n p4, = plt.plot(sol[:, 3], color='Gray', alpha=0.5, label='Total')\n\n plt.legend([p1, p2, p3, p4], ['S', 'I', 'R', 'T'])\n\n fig_save = path.join(this_dir, '../../report/plots/' + fig_name)\n print(\"saving figure {0}\".format(fig_save))\n plt.savefig(fig_save,\n format='pdf', bbox_inches='tight')\n\n\ndef control_variate_conf(y, x, verbose=False):\n\n if verbose:\n print(np.corrcoef(x, y))\n\n c = -np.cov(x, y)[1, 0] / np.var(x, ddof=1)\n z = y + c * (x - np.mean(x))\n\n std_z = np.std(z, ddof=2)\n\n n = len(z)\n confidence = scipy.stats.t.ppf(0.975, n - 1) * std_z / math.sqrt(n)\n\n return confidence\n\ndef execute_simulation(add_rio=False, ol_start=0, rio_length=18,\n rio_visitors=380e3, n_simulations=5):\n\n sol_global = []\n sol_rio = []\n sol_moscow = []\n sol_berlin = []\n sol_beijing = []\n sol_sydney = []\n sol_new_york = []\n params = {}\n params['global'] = []\n params['rio'] = []\n params['moscow'] = []\n params['berlin'] = []\n params['beijing'] = []\n params['sydney'] = []\n params['new'] = []\n for j in range(n_simulations):\n print(\"running simulation {0} / {1}\".format(j + 1, n_simulations))\n state = State(regions, routes, verbose=True)\n state.set_outbreak('Rio De Janeiro', 1e3)#'Rio De Janeiro', 1000)\n sim = Simulator(state, transfer_prob=0.005, beta=2, gamma=0.5,\n verbose=True)\n\n sol_global.append([])\n sol_rio.append([])\n sol_moscow.append([])\n sol_berlin.append([])\n sol_beijing.append([])\n sol_sydney.append([])\n sol_new_york.append([])\n state_list = []\n for i, state in enumerate(sim.run(iterations=120)):\n state_list.append(state)\n if i == ol_start and add_rio: # start outbreak x days before olympics\n sim.add_event(2560, days=rio_length, total_transfer=rio_visitors)\n\n sol_global[j].append(state.total_sir().as_tuple(total=True))\n sol_rio[j].append(state.region_sir[2560].as_tuple(total=True))\n sol_moscow[j].append(state.region_sir[4029].as_tuple(total=True))\n sol_berlin[j].append(state.region_sir[351].as_tuple(total=True))\n sol_beijing[j].append(state.region_sir[3364].as_tuple(total=True))\n sol_sydney[j].append(state.region_sir[3361].as_tuple(total=True))\n sol_new_york[j].append(state.region_sir[3797].as_tuple(total=True))\n\n params['global'].append(sir.ParameterEstimator(\n iter([x.total_sir() for x in state_list]), method='max').beta)\n params['rio'].append(sir.ParameterEstimator(\n iter([x.region_sir[2560] for x in state_list]), method='max').beta)\n params['moscow'].append(sir.ParameterEstimator(\n iter([x.region_sir[4029] for x in state_list]), method='max').beta)\n 
params['berlin'].append(sir.ParameterEstimator(\n iter([x.region_sir[351] for x in state_list]), method='max').beta)\n params['beijing'].append(sir.ParameterEstimator(\n iter([x.region_sir[3364] for x in state_list]), method='max').beta)\n params['sydney'].append(sir.ParameterEstimator(\n iter([x.region_sir[3361] for x in state_list]), method='max').beta)\n params['new'].append(sir.ParameterEstimator(\n iter([x.region_sir[2560] for x in state_list]), method='max').beta)\n\n if add_rio:\n fig_name = \"rio-{0}-{1}-{2:d}.pdf\".format(ol_start, rio_length,\n int(rio_visitors))\n else:\n fig_name = \"no_rio.pdf\"\n\n plot_sir([sol_global, sol_rio, sol_new_york, sol_berlin,\n sol_moscow, sol_beijing, sol_sydney],\n ['Global', 'Rio De Janeiro', 'New York', 'Berlin',\n 'Moscow', 'Beijing', 'Sydney'], fig_name)\n\n # estimate means and variance\n global_values = sol_global\n peak_times_global = [np.argmax([x[1] for x in y])\n for y in global_values]\n peak_amount_global = [y[peak][1]\n for peak, y in zip(peak_times_global, global_values)]\n\n\n peak_times_rio = [np.argmax([x[1] for x in y])\n for y in sol_rio]\n peak_times_new_york = [np.argmax([x[1] for x in y])\n for y in sol_new_york]\n peak_times_berlin = [np.argmax([x[1] for x in y])\n for y in sol_berlin]\n peak_times_moscow = [np.argmax([x[1] for x in y])\n for y in sol_moscow]\n peak_times_beijing = [np.argmax([x[1] for x in y])\n for y in sol_beijing]\n peak_times_sydney = [np.argmax([x[1] for x in y])\n for y in sol_sydney]\n\n t_deviations = scipy.stats.t.ppf(0.975, len(peak_times_rio)-1)\n\n # estimate variance with control variates\n with open('control-{0}.csv'.format(add_rio), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow(['global_amount', 'global_amount_control',\n 'global_peak_time', 'global_peak_time_control',\n 'rio_time', 'rio_time_control',\n 'new_york_time', 'new_york_time_control',\n 'berlin_time', 'berlin_time_control',\n 'moscow_time', 'moscow_time_control',\n 'beijing_time', 'beijing_time_control',\n 'sydney_time', 'sydney_time_control'])\n for i in range(n_simulations):\n writer.writerow([peak_amount_global[i], params['global'][i],\n peak_times_global[i], params['global'][i],\n peak_times_rio[i], params['rio'][i],\n peak_times_rio[i], params['new'][i],\n peak_times_rio[i], params['berlin'][i],\n peak_times_rio[i], params['moscow'][i],\n peak_times_rio[i], params['beijing'][i],\n peak_times_rio[i], params['sydney'][i]\n ])\n amount_global_control_conf = control_variate_conf(peak_amount_global, params['global'])\n time_global_control_conf = control_variate_conf(peak_times_global, params['global'])\n time_rio_control_conf = control_variate_conf(peak_times_rio, params['rio'])\n time_new_york_control_conf = control_variate_conf(peak_times_new_york, params['new'])\n time_berlin_control_conf = control_variate_conf(peak_times_berlin, params['berlin'])\n time_moscow_control_conf = control_variate_conf(peak_times_moscow, params['moscow'])\n time_beijing_control_conf = control_variate_conf(peak_times_beijing, params['beijing'])\n time_sydney_control_conf = control_variate_conf(peak_times_sydney, params['sydney'])\n\n return [(np.mean(peak_amount_global),\n t_deviations * np.std(peak_amount_global, ddof=1) / math.sqrt(n_simulations),\n amount_global_control_conf),\n (np.mean(peak_times_global),\n t_deviations * np.std(peak_times_global, ddof=1) / math.sqrt(n_simulations),\n time_global_control_conf),\n (np.mean(peak_times_rio),\n t_deviations * np.std(peak_times_rio, ddof=1) / 
math.sqrt(n_simulations),\n time_rio_control_conf),\n (np.mean(peak_times_new_york),\n t_deviations * np.std(peak_times_new_york, ddof=1) / math.sqrt(n_simulations),\n time_new_york_control_conf),\n (np.mean(peak_times_berlin),\n t_deviations * np.std(peak_times_berlin, ddof=1) / math.sqrt(n_simulations),\n time_berlin_control_conf),\n (np.mean(peak_times_moscow),\n t_deviations * np.std(peak_times_moscow, ddof=1) / math.sqrt(n_simulations),\n time_moscow_control_conf),\n (np.mean(peak_times_beijing),\n t_deviations * np.std(peak_times_beijing, ddof=1) / math.sqrt(n_simulations),\n time_beijing_control_conf),\n (np.mean(peak_times_sydney),\n t_deviations * np.std(peak_times_sydney, ddof=1) / math.sqrt(n_simulations),\n time_sydney_control_conf)\n ]\n\nif __name__ == \"__main__\":\n names = [\"Peak amount Global\", \"Peak time Global\",\n \"Peak time Rio\", \"Peak time New York\", \"Peak time Berlin\",\n \"Peak time Moscow\", \"Peak time Beijing\", \"Peak time Sydney\"]\n n_sim = 10\n res_with_ol = execute_simulation(True, n_simulations=n_sim)\n res_without_ol = execute_simulation(False, n_simulations=n_sim)\n\n latex_table = \"\\\\begin{tabular}[H]{c | c | c}\"\n latex_table += \"\\nObservation & With OL & Without OL \\\\\\\\ \\\\hline \"\n latex_table += \"\\n {0} [million]& ${1:.1f}\\\\pm {2:.2f} ({3:.2f})$ & ${4:.1f} \\\\pm {5:.2f} ({6:.2f})$\".format(\n names[0], res_with_ol[0][0] / 1e6, res_with_ol[0][1] / 1e6, res_with_ol[0][2] / 1e6,\n res_without_ol[0][0] / 1e6, res_without_ol[0][1] / 1e6, res_without_ol[0][2] / 1e6\n )\n for i in range(1, len(names)):\n latex_table += \"\\\\\\\\ \\n {0} & ${1:.1f}\\\\pm {2:.2f}( {3:.2f})$ & ${4:.1f} \\\\pm {5:.2f} ({6:.2f})$\".format(\n names[i], res_with_ol[i][0], res_with_ol[i][1], res_with_ol[i][2],\n res_without_ol[i][0], res_without_ol[i][1], res_without_ol[i][2],\n )\n latex_table += \"\\n\\\\end{tabular}\"\n\n table_save = path.join(this_dir, '../../report/tables/result.tex')\n tex_file = open(table_save, 'w')\n tex_file.write(latex_table)\n tex_file.close()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.std", "numpy.argmax", "matplotlib.pyplot.subplot", "numpy.mean", "numpy.cov", "numpy.var", "numpy.corrcoef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elbuco1/INF8215_Workshop_Machine_Learning
[ "ce4ffde44a25c7f7c15699d1b8fe0be19c32a2f7" ]
[ "solution/SoftmaxClassifier.py" ]
[ "from sklearn.base import BaseEstimator, ClassifierMixin\nimport numpy as np\n\n\nclass SoftmaxClassifier(BaseEstimator, ClassifierMixin): \n \"\"\"A softmax classifier\"\"\"\n\n def __init__(self, lr = 0.1, alpha = 100, n_epochs = 1000, eps = 1.0e-5,threshold = 1.0e-10 , regularization = True, early_stopping = True):\n \n\n self.lr = lr\n self.alpha = alpha\n self.n_epochs = n_epochs\n self.eps = eps\n self.regularization = regularization\n self.threshold = threshold\n self.early_stopping = early_stopping\n \n\n\n def fit(self, X, y=None):\n \n prev_loss = np.inf\n self.losses_ = []\n \n# self.nb_example = X.shape[0]\n self.nb_feature = X.shape[1]\n self.nb_classes = len(np.unique(y))\n\n \n\n X_bias = np.c_[np.ones((X.shape[0])), X ] \n \n self.theta_ = np.random.normal(scale = 0.3,size=(self.nb_feature+1,self.nb_classes))\n \n\n for epoch in range( self.n_epochs):\n\n z = X_bias.dot(self.theta_)\n probas = self._softmax(z)\n \n \n loss = self._cost_function(probas, y ) \n \n self.theta_ = self.theta_ - self._get_gradient(X_bias,y,probas)\n \n self.losses_.append(loss)\n\n if np.abs(loss - prev_loss) < self.threshold:\n print(\"stopped at epoch n\" + str(epoch))\n break\n else:\n prev_loss = loss\n\n\n return self\n\n def fit_predict(self, X, y=None):\n self.fit(X, y)\n return self.predict(X,y)\n \n \n \n def predict(self, X, y=None):\n try:\n getattr(self, \"theta_\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n \n # X_bias = np.c_[np.ones((X.shape[0])), X ] \n # z = np.matmul( X_bias, self.theta_) # m * k\n probabilities = self.predict_proba(X)\n prediction = np.argmax(probabilities, axis = 1)\n \n return prediction\n \n def predict_proba(self, X, y=None):\n try:\n getattr(self, \"theta_\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n X_bias = np.c_[np.ones((X.shape[0])), X ] \n z = np.matmul( X_bias, self.theta_) # m * k\n prediction = self._softmax(z)\n \n return prediction\n\n def score(self, X, y=None):\n b = self.regularization \n self.regularization = False\n prediction = self.predict_proba(X)\n score = self._cost_function(prediction, y )\n self.regularization = b\n \n return score\n \n \n def _cost_function(self,probas, y ): \n y_ohe = self._one_hot(y)\n probas = np.maximum(self.eps, np.minimum(np.ones(probas.shape) - self.eps, probas))\n r = 0.\n \n l = -np.mean(np.sum(y_ohe * np.log(probas), axis=1))\n \n if self.regularization:\n r = np.sum(np.square(self.theta_[1:]))/2.0\n\n return l + self.alpha/float(probas.shape[0]) * r\n \n\n \n \n \n def _one_hot(self,y):\n \n # if not type(value) is numpy.ndarray:\n try: \n y = y.reshape(-1)\n y_ohe = np.zeros((y.shape[0],self.nb_classes))\n\n for i,l in enumerate(y):\n # print(i,l)\n y_ohe[i,l] = 1.\n except :\n raise TypeError(\"You must give a numpy array!\")\n \n\n return y_ohe\n \n def _softmax(self,z):\n z = np.subtract(z.T, np.max(z, axis = 1)).T\n return np.exp(z) / np.sum(np.exp(z), axis = 1,keepdims = True)\n \n\n \n def _get_gradient(self,X,y, probas):\n \n y_ohe = self._one_hot(y)\n \n regularization_term = np.zeros(self.theta_.shape)\n error = (probas - y_ohe)\n gradient = np.matmul( X.T, error ) / float(X.shape[0])\n \n if self.regularization:\n regularization_term = np.r_[np.zeros([1, self.nb_classes]),self.theta_[1:]] / float(X.shape[0])\n \n return self.lr * gradient + self.alpha * regularization_term\n \n " ]
[ [ "numpy.square", "numpy.log", "numpy.abs", "numpy.unique", "numpy.matmul", "numpy.ones", "numpy.max", "numpy.random.normal", "numpy.argmax", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ga546/Bill_project_8p
[ "89c496b04c2b6da4558d0b2b972eeb47d84bb238" ]
[ "detectron2/structures/boxes.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport math\nimport numpy as np\nfrom enum import IntEnum, unique\nfrom typing import List, Tuple, Union\nimport torch\nfrom torch import device\n\n_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]\n\n\n@unique\nclass BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n XY_POLY_4 = 5\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n 
return arr\n\n\nclass Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor\n\n\ndef pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M,\n compute the intersection area between __all__ N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax)\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: intersection, sized [N,M].\n \"\"\"\n boxes1, boxes2 = boxes1.tensor, boxes2.tensor\n width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(\n boxes1[:, None, :2], boxes2[:, :2]\n ) # [N,M,2]\n\n width_height.clamp_(min=0) # [N,M,2]\n intersection = width_height.prod(dim=2) # [N,M]\n return intersection\n\n\n# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py\n# with slight modifications\ndef pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou\n\n\ndef pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoA, sized [N,M].\n \"\"\"\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n ioa = torch.where(\n inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)\n )\n return ioa\n\n\ndef pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):\n \"\"\"\n Pairwise distance between N points and M boxes. The distance between a\n point and a box is represented by the distance from the point to 4 edges\n of the box. Distances are all positive when the point is inside the box.\n\n Args:\n points: Nx2 coordinates. Each row is (x, y)\n boxes: M boxes\n\n Returns:\n Tensor: distances of size (N, M, 4). The 4 values are distances from\n the point to the left, top, right, bottom of the box.\n \"\"\"\n x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1)\n x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M)\n return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)\n\n\ndef matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Compute pairwise intersection over union (IOU) of two sets of matched\n boxes that have the same number of boxes.\n Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.\n\n Args:\n boxes1 (Boxes): bounding boxes, sized [N,4].\n boxes2 (Boxes): same length as boxes1\n Returns:\n Tensor: iou, sized [N].\n \"\"\"\n assert len(boxes1) == len(\n boxes2\n ), \"boxlists should have the same\" \"number of entries, got {}, {}\".format(\n len(boxes1), len(boxes2)\n )\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [N]\n box1, box2 = boxes1.tensor, boxes2.tensor\n lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]\n rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]\n wh = (rb - lt).clamp(min=0) # [N,2]\n inter = wh[:, 0] * wh[:, 1] # [N]\n iou = inter / (area1 + area2 - inter) # [N]\n return iou\n" ]
[ [ "torch.max", "torch.empty", "torch.zeros", "torch.cat", "torch.sin", "torch.min", "numpy.asarray", "torch.tensor", "torch.isfinite", "torch.stack", "torch.device", "torch.cos", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nestauk/industrial_taxonomy
[ "eecfa393bc9c887d5cf9b752142cd18ed4ae4d01" ]
[ "industrial_taxonomy/pipeline/glass_clusters/utils.py" ]
[ "\"\"\"Utils to create a sector corpus\"\"\"\nfrom toolz import pipe\nfrom functools import partial\nfrom typing import Dict, List\nimport pandas as pd\n\nNE_CODES = {\n \"CARDINAL\",\n \"DATE\",\n \"GPE\",\n \"LOC\",\n \"MONEY\",\n \"NORP\",\n \"ORDINAL\",\n \"ORG\",\n \"PERSON\",\n \"QUANTITY\",\n \"TIME\",\n}\n\ntoken_descr = List[str]\norg_id = str\nsic_4 = str\n\n\ndef strip_nes(tokenised_dict: Dict[int, List[str]]) -> Dict[int, List[str]]:\n \"\"\"Removes named entities from company tokenised descriptions\"\"\"\n\n return {\n k: [t for t in v if all(ne not in t for ne in NE_CODES)]\n for k, v in tokenised_dict.items()\n }\n\n\ndef filter_non_matched_comps(\n tokenised: Dict[org_id, token_descr], matched_ids: set\n) -> Dict[org_id, token_descr]:\n \"\"\"Removes tokenised descriptions of glass companies that were not matched with CH\n\n Args:\n tokenised: lookup between company ids and tokenised descriptions\n matched_ids: ids from glass companies matched with CH\n\n Returns:\n filtered tokenised descriptions dict\n \"\"\"\n\n return {id_: tok for id_, tok in tokenised.items() if id_ in matched_ids}\n\n\ndef big_sector_tokens_lookup(\n tokenised: Dict[org_id, token_descr], gl_sic4: Dict[org_id, sic_4], big_sectors: set\n) -> Dict[sic_4, Dict[org_id, token_descr]]:\n \"\"\"Creates a dict where keys are (big) sectors and\n values the tokenised descriptions of their companies.\n\n Args:\n tokenised: lookup between company ids and tokenised descriptions\n gl_sic4: lookup between company ids and SIC4s\n big_sectors: sectors above a certain size threshold\n\n Returns:\n dict with sectors and tokenised descriptions\n\n \"\"\"\n\n return {\n sector: {id_: tok for id_, tok in tokenised.items() if gl_sic4[id_] == sector}\n for sector in big_sectors\n }\n\n\ndef make_sector_corpora(\n glass_sic4: Dict[int, str],\n token_descr: Dict[org_id, token_descr],\n min_sector_size: int = 1000,\n) -> Dict[str, Dict[int, List[str]]]:\n \"\"\"Creates a dict of sectors and the tokenised descriptions of their companies\n\n Args:\n glass_sic4: lookup between glass ids and sic4s\n token_descr: lookup between glass ids and tokenised descriptions\n min_sector_size: minimum sector size\n\n Returns:\n dict with sectors and tokenised descriptions for their companies\n \"\"\"\n selected_sectors = set(\n sector\n for sector, sector_n in pd.Series(glass_sic4).value_counts().items()\n if sector_n > min_sector_size\n )\n\n return pipe(\n token_descr,\n strip_nes,\n partial(filter_non_matched_comps, matched_ids=set(glass_sic4.keys())),\n partial(\n big_sector_tokens_lookup, gl_sic4=glass_sic4, big_sectors=selected_sectors\n ),\n )\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Danglich/flowers102_retrieval_streamlit
[ "a16bf87be1e3c2da04f067d53a2fcf8172c6dd90" ]
[ "flower_st.py" ]
[ "import itertools\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom PIL import Image\nimport streamlit as st\n\nfrom utils import FlowerArc, load_prec_embs\n\n\ndef main(top_k):\n\n flower_arc = FlowerArc()\n\n st.title(\"Flower retrieval\")\n train_img_fps, train_embs, train_labels = load_prec_embs()\n uploaded_file = st.file_uploader(\"Choose an image...\", type=\"jpg\")\n\n if uploaded_file is not None:\n st.image(\n uploaded_file,\n caption='Uploaded Image.',\n use_column_width=True\n )\n image = Image.open(uploaded_file)\n img_arr = np.array(image)\n\n # query emb\n test_emb = flower_arc.predict(img_arr)\n\n dists = cdist(test_emb, train_embs, metric='euclidean')[0]\n min_dist_indexes = dists.argsort()[:top_k]\n label_indexes = [train_labels[index] + 1 for index in min_dist_indexes]\n img_fps = [train_img_fps[index] for index in min_dist_indexes]\n\n indices_on_page, images_on_page = \\\n map(list, zip(*itertools.islice(zip(label_indexes, img_fps), 0, top_k))) # noqa\n st.image(images_on_page, width=200, caption=indices_on_page)\n\n\nif __name__ == '__main__':\n main(top_k=18)\n" ]
[ [ "numpy.array", "scipy.spatial.distance.cdist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
HReynaud/Morpho-MNIST
[ "84bffa49a2ad38b2b426af744b07a9069cc83fcf" ]
[ "experiments/infogan_util.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\nfrom models import infogan\nfrom morphomnist.util import plot_grid\n\n_TICK_LABEL_SIZE = 'x-large'\n_VAR_LABEL_SIZE = 'xx-large'\n\n\ndef _prep_ax(ax):\n ax.axis('on')\n ax.xaxis.set_visible(True)\n ax.yaxis.set_visible(True)\n ax.xaxis.set_label_position('top')\n ax.set_xticks([])\n ax.set_yticks([])\n for s in ax.spines:\n ax.spines[s].set_visible(False)\n\n\ndef plot_cat_traversal(model: infogan.InfoGAN, nrow, cat_mapping=None):\n cat_dim = model.cat_dim\n idx = np.argsort(cat_mapping) if cat_mapping is not None else np.arange(cat_dim)\n latent = model.sample_latent(nrow).repeat(cat_dim, 1)\n latent[:, model.cat_idx] = 0\n for d in range(cat_dim):\n latent[d * nrow: (d + 1) * nrow, model.cat_idx[idx[d]]] = 1\n samples = model.gen(latent).detach()\n fig, axs = plot_grid(samples, nrow=nrow, figsize=(cat_dim, nrow),\n gridspec_kw=dict(wspace=0, hspace=0))\n # plt.suptitle(f\"$c_1$: Categorical ({cat_dim})\")\n for i in [0, -1]:\n _prep_ax(axs[i, 0])\n axs[0, 0].set_xlabel('$(1)$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n axs[-1, 0].set_xlabel(f'$({model.cat_dim})$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n\n ypos = axs[0, 0].get_position().y1\n\n fig.text(.5, ypos, '$c_1$', ha='center', va='bottom', size=_VAR_LABEL_SIZE)\n\n\ndef plot_cont_traversal(model: infogan.InfoGAN, c, nrow, nstep=9):\n values = torch.linspace(-2, 2, nstep).to(model.device)\n latent = model.sample_latent(nrow).repeat(nstep, 1)\n for r in range(nrow):\n latent[r::nrow, model.cont_idx[c]] = values\n samples = model.gen(latent).detach()\n fig, axs = plot_grid(samples, nrow=nrow, figsize=(nstep, nrow),\n gridspec_kw=dict(wspace=0, hspace=0))\n # plt.suptitle(f\"$c_{{{c + 2}}}$: Continuous (-2 to 2)\")\n\n for i in [0, -1]:\n _prep_ax(axs[i, 0])\n axs[0, 0].set_xlabel(f'${values[ 0]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n axs[-1, 0].set_xlabel(f'${values[-1]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n\n ypos = axs[0, 0].get_position().y1\n\n fig.text(.5, ypos, f'$c_{{{c + 2}}}$', ha='center', va='bottom', size=_VAR_LABEL_SIZE)\n\n\ndef plot_cont_cont_traversal(model: infogan.InfoGAN, c1, c2, nstep=9):\n values = torch.linspace(-1.5, 1.5, nstep).to(model.device)\n latent = model.sample_latent(1).repeat(nstep ** 2, 1)\n for s in range(nstep):\n latent[s::nstep, model.cont_idx[c2]] = values\n latent[s * nstep:(s + 1) * nstep, model.cont_idx[c1]] = values\n samples = model.gen(latent).detach()\n fig, axs = plot_grid(samples, nrow=nstep, figsize=(nstep, nstep),\n gridspec_kw=dict(wspace=0, hspace=0))\n # plt.suptitle(rf\"$c_{{{c1 + 2}}} \\times c_{{{c2 + 2}}}$: Continuous (-2 to 2)\")\n\n for i in [(0, 0), (0, -1), (-1, 0)]:\n _prep_ax(axs[i])\n axs[ 0, 0].set_xlabel(f'${values[ 0]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n axs[-1, 0].set_xlabel(f'${values[-1]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)\n axs[ 0, 0].set_ylabel(f'${values[ 0]:+g}$', ha='right', va='center', rotation=0, size=_TICK_LABEL_SIZE)\n axs[ 0,-1].set_ylabel(f'${values[-1]:+g}$', ha='right', va='center', rotation=0, size=_TICK_LABEL_SIZE)\n\n xpos = axs[ 0, 0].get_position().x0\n ypos = axs[ 0, 0].get_position().y1\n\n fig.text(.5, ypos, f'$c_{{{c1 + 2}}}$', ha='center', va='bottom', size=_VAR_LABEL_SIZE)\n fig.text(xpos, .5, f'$c_{{{c2 + 2}}}$', ha='right', va='center', size=_VAR_LABEL_SIZE)\n\n\ndef plot_bin_traversal(model: infogan.InfoGAN, nrow, ncol=5):\n latent = model.sample_latent(nrow * 
ncol).view(ncol, 1, nrow, -1).repeat(1, 2, 1, 1)\n bin_code = latent[..., model.bin_idx].clone()\n for b in range(model.bin_dim):\n latent[..., model.bin_idx] = bin_code\n latent[:, 0, :, model.bin_idx[b]] = 0\n latent[:, 1, :, model.bin_idx[b]] = 1\n samples = model.gen(latent.view(int(np.prod(latent.shape[:-1])), -1)).detach()\n plot_grid(samples, nrow=nrow, figsize=(2 * ncol, nrow),\n gridspec_kw=dict(wspace=0, hspace=0))\n plt.suptitle(f\"$c_{{{model.cont_dim + b + 2}}}$: Binary (columns: 0, 1)\")\n" ]
[ [ "torch.linspace", "numpy.arange", "numpy.prod", "numpy.argsort", "matplotlib.pyplot.suptitle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vision-CAIR/HalentNet
[ "dedef73c57c63aa580fc497fa42d512f4241a64b" ]
[ "trajectron/model/components/discrete_latent.py" ]
[ "import torch\nimport torch.distributions as td\nimport numpy as np\nfrom model.model_utils import ModeKeys\n\n\nclass DiscreteLatent(object):\n def __init__(self, hyperparams, device):\n self.hyperparams = hyperparams\n self.z_dim = hyperparams['N'] * hyperparams['K']\n self.N = hyperparams['N']\n self.K = hyperparams['K']\n self.kl_min = hyperparams['kl_min']\n self.device = device\n self.temp = None # filled in by MultimodalGenerativeCVAE.set_annealing_params\n self.z_logit_clip = None # filled in by MultimodalGenerativeCVAE.set_annealing_params\n self.p_dist = None # filled in by MultimodalGenerativeCVAE.encoder\n self.q_dist = None # filled in by MultimodalGenerativeCVAE.encoder\n\n def dist_from_h(self, h, mode):\n logits_separated = torch.reshape(h, (-1, self.N, self.K))\n logits_separated_mean_zero = logits_separated - torch.mean(logits_separated, dim=-1, keepdim=True)\n if self.z_logit_clip is not None and mode == ModeKeys.TRAIN:\n c = self.z_logit_clip\n logits = torch.clamp(logits_separated_mean_zero, min=-c, max=c)\n else:\n logits = logits_separated_mean_zero\n\n return td.OneHotCategorical(logits=logits)\n\n def sample_q(self, num_samples, mode):\n bs = self.p_dist.probs.size()[0]\n num_components = self.N * self.K\n z_NK = torch.from_numpy(self.all_one_hot_combinations(self.N, self.K)).float().to(self.device).repeat(num_samples, bs)\n return torch.reshape(z_NK, (num_samples * num_components, -1, self.z_dim))\n\n def sample_p(self, num_samples, mode, most_likely_z=False, full_dist=True, all_z_sep=False):\n num_components = 1\n if full_dist:\n bs = self.p_dist.probs.size()[0]\n z_NK = torch.from_numpy(self.all_one_hot_combinations(self.N, self.K)).float().to(self.device).repeat(num_samples, bs)\n num_components = self.K ** self.N\n k = num_samples * num_components\n elif all_z_sep:\n bs = self.p_dist.probs.size()[0]\n z_NK = torch.from_numpy(self.all_one_hot_combinations(self.N, self.K)).float().to(self.device).repeat(1, bs)\n k = self.K ** self.N\n num_samples = k\n elif most_likely_z:\n # Sampling the most likely z from p(z|x).\n eye_mat = torch.eye(self.p_dist.event_shape[-1], device=self.device)\n argmax_idxs = torch.argmax(self.p_dist.probs, dim=2)\n z_NK = torch.unsqueeze(eye_mat[argmax_idxs], dim=0).expand(num_samples, -1, -1, -1)\n k = num_samples\n else:\n z_NK = self.p_dist.sample((num_samples,))\n k = num_samples\n\n if mode in [ModeKeys.PREDICT, ModeKeys.GAN]:\n return torch.reshape(z_NK, (k, -1, self.N * self.K)), num_samples, num_components\n else:\n return torch.reshape(z_NK, (k, -1, self.N * self.K))\n\n def kl_q_p(self, log_writer=None, prefix=None, curr_iter=None):\n kl_separated = td.kl_divergence(self.q_dist, self.p_dist)\n if len(kl_separated.size()) < 2:\n kl_separated = torch.unsqueeze(kl_separated, dim=0)\n\n kl_minibatch = torch.mean(kl_separated, dim=0, keepdim=True)\n\n if log_writer is not None:\n log_writer.add_scalar(prefix + '/true_kl', torch.sum(kl_minibatch), curr_iter)\n\n if self.kl_min > 0:\n kl_lower_bounded = torch.clamp(kl_minibatch, min=self.kl_min)\n kl = torch.sum(kl_lower_bounded)\n else:\n kl = torch.sum(kl_minibatch)\n\n return kl\n\n def q_log_prob(self, z):\n k = z.size()[0]\n z_NK = torch.reshape(z, [k, -1, self.N, self.K])\n return torch.sum(self.q_dist.log_prob(z_NK), dim=2)\n\n def p_log_prob(self, z):\n k = z.size()[0]\n z_NK = torch.reshape(z, [k, -1, self.N, self.K])\n return torch.sum(self.p_dist.log_prob(z_NK), dim=2)\n\n def get_p_dist_probs(self):\n return self.p_dist.probs\n\n @staticmethod\n def 
all_one_hot_combinations(N, K):\n return np.eye(K).take(np.reshape(np.indices([K] * N), [N, -1]).T, axis=0).reshape(-1, N * K) # [K**N, N*K]\n\n def summarize_for_tensorboard(self, log_writer, prefix, curr_iter):\n log_writer.add_histogram(prefix + \"/latent/p_z_x\", self.p_dist.probs, curr_iter)\n log_writer.add_histogram(prefix + \"/latent/q_z_xy\", self.q_dist.probs, curr_iter)\n log_writer.add_histogram(prefix + \"/latent/p_z_x_logits\", self.p_dist.logits, curr_iter)\n log_writer.add_histogram(prefix + \"/latent/q_z_xy_logits\", self.q_dist.logits, curr_iter)\n if self.z_dim <= 9:\n for i in range(self.N):\n for j in range(self.K):\n log_writer.add_histogram(prefix + \"/latent/q_z_xy_logit{0}{1}\".format(i, j),\n self.q_dist.logits[:, i, j],\n curr_iter)\n" ]
[ [ "torch.distributions.OneHotCategorical", "torch.mean", "torch.reshape", "numpy.eye", "torch.sum", "torch.eye", "torch.unsqueeze", "numpy.indices", "torch.distributions.kl_divergence", "torch.clamp", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ngxingyu/Domain-Transfer-for-Punctuation-Retrieval
[ "f5aa0ea0946c68aaf7fcf49a5085e6c823766a2f" ]
[ "experiment/data/punctuation_dataset_multi.py" ]
[ "from torch.utils.data import IterableDataset, Dataset, get_worker_info\nimport gc\nimport numpy as np\nfrom typing import List, Optional, Dict\nfrom core.utils import chunk_examples_with_degree, chunk_to_len_batch\nimport pandas as pd\nimport os\nimport torch\nimport subprocess\nfrom time import time\nfrom itertools import cycle, chain, islice, repeat\nfrom math import ceil\nfrom collections import Counter\n\nclass PunctuationDomainDataset(IterableDataset):\n\n def __init__(self, \n csv_file:str, \n tokenizer,\n num_samples:int=256,\n max_seq_length:int=256,\n degree=0,\n punct_label_ids: Dict[str, int] = None,\n label_map:Dict[str,str] = None,\n domain=0,\n labelled=True,\n randomize=True,\n target_file='',\n tmp_path='~/data/tmp',\n start=0,\n end=-1,\n attach_label_to_end=None,\n no_space_label=None,\n manual_len=0,\n pad_start=0,\n alpha_sub=0.4, \n alpha_del=0.4,\n alpha_ins=0.4,\n alpha_swp=0,\n alpha_spl=0.4,\n stride=0,\n ):\n if not (os.path.exists(csv_file)):\n raise FileNotFoundError(\n f'{csv_file} not found. The 2nd column of the file contains the transcripts.'\n )\n\n data_dir = os.path.dirname(csv_file)\n filename = os.path.basename(csv_file)\n\n if not filename.endswith('.csv'):\n raise ValueError(\"{text_file} should have extension .csv\")\n \n self.csv_file = csv_file\n self.max_seq_length = max_seq_length\n self.manual_len=manual_len\n self.domain= domain\n self.punct_label_ids=punct_label_ids\n self.label_map=label_map\n self.labelled= labelled\n self.tokenizer= tokenizer\n self.degree=degree\n self.randomize=randomize\n self.target_file=target_file\n self.tmp_path=tmp_path\n self.attach_label_to_end=attach_label_to_end\n self.no_space_label=no_space_label\n self.pad_start=pad_start\n self.alpha_sub=alpha_sub\n self.alpha_del=alpha_del\n self.alpha_ins=alpha_ins\n self.alpha_swp=alpha_swp\n self.alpha_spl=alpha_spl\n self.stride=stride\n if not (os.path.exists(self.target_file)):\n os.system(f\"sed '1d' {self.csv_file} > {self.target_file}\")\n self.set_num_samples(self.target_file, num_samples, manual_len)\n def __iter__(self):\n self.dataset=iter(pd.read_csv(\n self.target_file,\n skiprows=(0 % self.len)*self.num_samples,\n header=None,\n dtype=str,\n chunksize=self.num_samples,\n ))\n return self\n \n\n def __next__(self):\n batch = next(self.dataset)[1]\n complete=batch\n if self.stride>0:\n for i in range(1,self.max_seq_length//self.stride):\n l=batch.str.split().map(len).values\n a=self.stride*i*np.ones_like(l)\n b=l\n complete=complete.append(pd.DataFrame({'t':batch,'a':a,'b':b}).apply(lambda row: ' '.join(row.t.split()[row.a:row.b]),axis=1))\n # pp(batch.shape,complete.shape)\n batch=complete\n chunked=chunk_examples_with_degree(self.degree, self.punct_label_ids, self.label_map, self.tokenizer,self.alpha_sub, self.alpha_del,self.alpha_ins,self.alpha_swp,self.alpha_spl)(batch)\n batched=chunk_to_len_batch(self.max_seq_length,self.tokenizer,chunked['texts'],chunked['tags'],self.labelled,attach_label_to_end=self.attach_label_to_end,no_space_label=self.no_space_label, pad_start=self.pad_start)\n num_samples=batched['labels'].shape[0]\n batched['domain']=self.domain*torch.ones(num_samples,1,dtype=torch.long)\n gc.collect()\n if self.randomize:\n rand=torch.randperm(num_samples)\n return {k:v[rand] for k,v in batched.items()}\n else:\n return batched\n\n def set_num_samples(self,csv_file,num_samples, manual_len):\n self.num_samples = num_samples\n self.total_samples=int(subprocess.Popen(['wc', '-l', csv_file], stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT).communicate()[0].split()[0])\n if manual_len>0:\n self.total_samples=min(manual_len,self.total_samples)\n self.num_samples=min(self.num_samples,self.total_samples)\n self.len = max(1,int(self.total_samples / self.num_samples))\n\n \n\n def __len__(self):\n return pp(self.len)\n \n def shuffle(self, randomize=True, seed=42):\n int(subprocess.Popen(['wc', '-l', self.target_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])\n os.system('bash data/shuffle.sh -i {} -o {} -a {} -s {} -m {} -t {}'.format(self.target_file, self.target_file, ['true','false'][randomize], seed, '100M',self.tmp_path))\n int(subprocess.Popen(['wc', '-l', self.target_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])\n self.dataset=iter(pd.read_csv(\n self.target_file,\n skiprows=(0 % self.len)*self.num_samples,\n header=None,\n dtype=str,\n chunksize=self.num_samples,\n ))\n \n def determine_class_weights(self):\n it=iter(self)\n ct=torch.zeros(len(self.punct_label_ids))\n for _ in range(min(20,self.len)):\n print('.',end='')\n ni=next(it)\n ct+=torch.bincount(ni['labels'].view(-1),minlength=len(self.punct_label_ids))\n return ct/sum(ct)\n\n\n\n\nclass PunctuationDomainDatasets(IterableDataset):\n\n def __init__(self, \n split:str,\n num_samples:int,\n max_seq_length:int,\n punct_label_ids: Dict[str, int],\n label_map:Dict[str,str],\n labelled: List[str],\n unlabelled: List[str],\n tokenizer,\n randomize:bool=True,\n data_id='',\n tmp_path='~/data/tmp',\n attach_label_to_end=None,\n manual_len:int=0,\n no_space_label:int=None,\n pad_start:int=0,\n low_resource_labelled_count:int = 0,\n alpha_sub=0,\n alpha_del=0,\n alpha_ins=0,\n alpha_swp=0,\n alpha_spl=0,\n stride=0,\n ):\n worker_info = get_worker_info()\n self.num_workers=1 if worker_info is None else worker_info.num_workers\n self.num_labelled=len(labelled)\n self.datasets = []\n self.iterables=[]\n self.randomize=randomize\n self.punct_label_ids=punct_label_ids\n self.label_map=label_map\n self.ds_lengths=[]\n self.labelled=labelled\n self.stride=stride\n for path in labelled:\n if manual_len>0:\n self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))\n else:\n self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))\n for path in unlabelled:\n if split=='train' and low_resource_labelled_count>0:\n if manual_len>0:\n self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))\n self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.unlabelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))\n else:\n self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.labelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))\n self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.unlabelled.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))\n else:\n if manual_len>0:\n self.ds_lengths.append(min(manual_len,int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0])))\n else:\n 
self.ds_lengths.append(int(subprocess.Popen(['wc', '-l', f'{path}.{split}.csv'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].split()[0]))\n self.max_length=max(self.ds_lengths) \n self.per_worker=int(self.max_length/self.num_workers)\n self.len=max(1,ceil(self.per_worker/num_samples))\n self.class_weights=None\n\n self.alpha_sub=alpha_sub\n self.alpha_del=alpha_del\n self.alpha_ins=alpha_ins\n self.alpha_swp=alpha_swp\n self.alpha_spl=alpha_spl\n self.stride=stride\n\n for i,path in enumerate(labelled):\n target=os.path.join(tmp_path,os.path.split(path)[1])\n dataset=PunctuationDomainDataset(\n csv_file=f'{path}.{split}.csv', tokenizer=tokenizer,\n num_samples=num_samples,max_seq_length=max_seq_length,\n punct_label_ids=punct_label_ids,\n label_map=label_map,\n domain=i,labelled=True,\n randomize=randomize,\n target_file=f'{target}.{split}.{data_id}.csv',\n tmp_path=tmp_path,\n attach_label_to_end=attach_label_to_end,\n no_space_label=no_space_label,\n manual_len=manual_len,\n pad_start=pad_start,\n alpha_sub=self.alpha_sub,\n alpha_del=self.alpha_del,\n alpha_ins=self.alpha_ins,\n alpha_swp=self.alpha_swp,\n alpha_spl=self.alpha_spl,\n stride=self.stride,)\n self.datasets.append(dataset)\n self.iterables.append(cycle(dataset))\n \n for i,path in enumerate(unlabelled):\n target=os.path.join(tmp_path,os.path.split(path)[1])\n if split=='train' and low_resource_labelled_count>0:\n dataset=PunctuationDomainDataset(\n csv_file=f'{path}.unlabelled.{split}.csv', tokenizer=tokenizer,\n num_samples=num_samples,max_seq_length=max_seq_length,\n punct_label_ids=punct_label_ids,\n label_map=label_map,domain=len(labelled)+i,labelled=False,\n randomize=randomize,\n target_file=f'{target}.unlabelled.{split}.{data_id}.csv',\n tmp_path=tmp_path,\n attach_label_to_end=attach_label_to_end,\n no_space_label=no_space_label,\n manual_len=manual_len,\n pad_start=pad_start,\n alpha_sub=self.alpha_sub,\n alpha_del=self.alpha_del,\n alpha_ins=self.alpha_ins,\n alpha_swp=self.alpha_swp,\n alpha_spl=self.alpha_spl,\n stride=self.stride,)\n self.datasets.append(dataset)\n self.iterables.append(cycle(dataset))\n dataset=PunctuationDomainDataset(\n csv_file=f'{path}.labelled.{split}.csv', tokenizer=tokenizer,\n num_samples=num_samples,max_seq_length=max_seq_length,\n punct_label_ids=punct_label_ids,\n label_map=label_map,domain=len(labelled)+i,labelled=True,\n randomize=randomize,\n target_file=f'{target}.labelled.{split}.{data_id}.csv',\n tmp_path=tmp_path,\n attach_label_to_end=attach_label_to_end,\n no_space_label=no_space_label,\n manual_len=manual_len,\n pad_start=pad_start,\n alpha_sub=self.alpha_sub,\n alpha_del=self.alpha_del,\n alpha_ins=self.alpha_ins,\n alpha_swp=self.alpha_swp,\n alpha_spl=self.alpha_spl,\n stride=self.stride,)\n self.datasets.append(dataset)\n self.iterables.append(cycle(dataset))\n else:\n dataset=PunctuationDomainDataset(\n csv_file=f'{path}.{split}.csv', tokenizer=tokenizer,\n num_samples=num_samples,max_seq_length=max_seq_length,\n punct_label_ids=punct_label_ids,\n label_map=label_map,domain=len(labelled)+i,labelled=False,\n randomize=randomize,\n target_file=f'{target}.{split}.{data_id}.csv',\n tmp_path=tmp_path,\n attach_label_to_end=attach_label_to_end,\n no_space_label=no_space_label,\n manual_len=manual_len,\n pad_start=pad_start,\n alpha_sub=self.alpha_sub,\n alpha_del=self.alpha_del,\n alpha_ins=self.alpha_ins,\n alpha_swp=self.alpha_swp,\n alpha_spl=self.alpha_spl,\n stride=self.stride,\n )\n self.datasets.append(dataset)\n 
self.iterables.append(cycle(dataset))\n\n def __iter__(self):\n worker_info = get_worker_info()\n worker_id = 0 if worker_info is None else worker_info.id\n self.iterables=[]\n for ds_length, dataset in zip(self.ds_lengths,self.datasets):\n start = (worker_id*self.per_worker)%ds_length\n self.iterables.append(cycle(chain(islice(iter(dataset),start,None),islice(iter(dataset),start))))\n return self\n\n def __next__(self):\n ds=[next(d) for d in self.iterables]\n if self.randomize:\n min_batch=1000000\n for d in ds:\n size=d['domain'].shape[0]\n if size<min_batch:\n min_batch=size\n #Ensure all domains are evenly represented\n b={k:torch.cat([torch.repeat_interleave(d[k],max(1,min_batch/d[k].shape[0]),dim=0)[:min_batch] for d in ds], dim=0) for k in ['input_ids','attention_mask','subtoken_mask','labels','domain']}\n rand=torch.randperm(b['labels'].shape[0])\n return {k:v[rand] for k,v in b.items()}\n else:\n return {k:torch.cat([d[k] for d in ds], dim=0) for k in ['input_ids','attention_mask','subtoken_mask','labels','domain']}\n\n def __len__(self):\n return self.len\n\n def shuffle(self, randomize=True, seed=42):\n worker_info = get_worker_info()\n worker_id = 0 if worker_info is None else worker_info.id\n if worker_id==0:\n for _ in self.datasets:\n print(f\"shuffling {_}\")\n _.shuffle(randomize,seed)\n \n def determine_class_weights(self):\n if self.class_weights is None:\n ct=torch.zeros(len(self.punct_label_ids))\n for _ in range(self.num_labelled):\n ct+=self.datasets[_].determine_class_weights()\n self.class_weights=self.num_labelled/ct\n return self.class_weights\n\n\nclass PunctuationInferenceDataset(Dataset):\n \"\"\"\n Creates dataset to use during inference for punctuation and capitalization tasks with a pretrained model.\n For dataset to use during training with labels, see BertPunctuationCapitalizationDataset.\n Args:\n queries file to sequences, each line should a sentence, no header.\n max_seq_length: max sequence length minus 2 for [CLS] and [SEP]\n tokenizer: such as AutoTokenizer\n \"\"\"\n\n def __init__(self, \n tokenizer, \n queries: List[str], \n max_seq_length: int, \n punct_label_ids:Dict[str,int], \n label_map:Dict[str,str], \n num_samples:int=256, \n degree:int = 0, \n attach_label_to_end:bool=None,\n no_space_label=None,\n pad_start:int=0,\n ):\n \"\"\" Initializes BertPunctuationInferDataset. \"\"\"\n self.degree=degree\n self.punct_label_ids=punct_label_ids\n self.label_map = label_map\n chunked=chunk_examples_with_degree(self.degree, self.punct_label_ids, self.label_map,)(queries)\n self.features = chunk_to_len_batch(max_seq_length, tokenizer,chunked['texts'],chunked['tags'],attach_label_to_end=attach_label_to_end,no_space_label=no_space_label,pad_start=pad_start)\n self.attach_label_to_end=attach_label_to_end\n self.num_samples=num_samples\n\n def __len__(self):\n return math.ceil(len(self.all_input_ids)/self.num_samples)\n\n def __getitem__(self, idx):\n return {k:v for k,v in self.features.items()}\n" ]
[ [ "pandas.read_csv", "torch.ones", "numpy.ones_like", "torch.cat", "torch.randperm", "torch.utils.data.get_worker_info", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
andillio/CHiMES
[ "546788d207bb878d702df93f7f9ab07318f80851" ]
[ "husimi_sp.py" ]
[ "import numpy as np\nimport scipy.integrate as sp\nimport cupy as cp\n\n\n# x has to be defined from -L/2, L/2 or this won't work\ndef K_H(x, x_, u, hbar, mpart, sig_x, L, full = False):\n\tX, X_, U = np.meshgrid(x, x_, u) # x is on axis 1, x_ is on axis 0, u is on axis 2\n\n\tX_[X_ - X > L/2] -= L # shift x_ so it is centered at a given x value\n\tX_[X_ - X < -L/2] += L\n\n\targ = (-(X - X_)**2)/(4.*sig_x**2) + 0j\n\targ -= U*X_*mpart*(1.j/hbar)\n\tdenom = np.sqrt(2.*np.pi*hbar/mpart)\n\tdenom *= (2.*np.pi*sig_x**2)**(.25)\n\tif full:\n\t\treturn np.exp(arg)/denom, X, X_, U\n\treturn np.exp(arg)/denom\n\n# x has to be defined from -L/2, L/2 or this won't work\ndef K_H_(x, x_, u, hbar_, sig_x, L, full = False):\n\tX, X_, U = np.meshgrid(x, x_, u) # x is on axis 1, x_ is on axis 0, u is on axis 2\n\n\tX_[X_ - X > L/2] -= L # shift x_ so it is centered at a given x value\n\tX_[X_ - X < -L/2] += L\n\n\targ = (-(X - X_)**2)/(4.*sig_x**2) + 0j\n\targ -= U*X_*(1.j/hbar_)\n\tdenom = np.sqrt(2.*np.pi*hbar_)\n\tdenom *= (2.*np.pi*sig_x**2)**(.25)\n\tif full:\n\t\treturn np.exp(arg)/denom, X, X_, U\n\treturn np.exp(arg)/denom\n\n\ndef K_H_cp(x,x_,u,hbar_, sig_x,L, full = False):\n\tX, X_, U = cp.meshgrid(x, x_, u) # x is on axis 1, x_ is on axis 0, u is on axis 2\n\n\tX_[X_ - X > L/2] -= L # shift x_ so it is centered at a given x value\n\tX_[X_ - X < -L/2] += L\n\n\targ = (-(X - X_)**2)/(4.*sig_x**2) + 0j\n\targ -= U*X_*(1.j/hbar_)\n\tdenom = cp.sqrt(2.*np.pi*hbar_)\n\tdenom *= (2.*cp.pi*sig_x**2)**(.25)\n\tif full:\n\t\treturn cp.exp(arg)/denom, X, X_, U\n\treturn cp.exp(arg)/denom\n\n\ndef f_H(psi, K, dx, shift = False):\n\tf = (np.abs(psi_H(psi, K, dx))**2).transpose()\n\tif shift:\n\t\treturn np.fft.fftshift(f,1)\n\treturn f\n\n\ndef f_H_cp(psi, K, dx, shift = False):\n\tf = (cp.abs(psi_H_cp(psi, K, dx))**2).transpose()\n\tif shift:\n\t\treturn cp.fft.fftshift(f,1)\n\treturn f\n\n\ndef psi_H(psi, K, dx):\n\tintegrand = psi[:, None, None]*K\n\treturn dx*integrand.sum(axis = 0)\n#\treturn dx*sp.simps(integrand, axis = 0)\n\n\ndef psi_H_cp(psi, K, dx):\n\tintegrand = psi[:, None, None]*K\n\treturn dx*integrand.sum(axis = 0)\n\n" ]
[ [ "numpy.exp", "numpy.meshgrid", "numpy.sqrt", "numpy.fft.fftshift" ] ]
[ { "matplotlib": [], "numpy": [ "1.6", "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.21", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
shivupa/pyqmc
[ "f2e3b7443f69a2d48a74a8346e622fa64c87211f" ]
[ "pyqmc/obdm.py" ]
[ "\"\"\" Evaluate the OBDM for a wave function object. \"\"\"\nimport numpy as np\nfrom copy import deepcopy\nfrom pyqmc.mc import initial_guess\n\n\nclass OBDMAccumulator:\n \"\"\" Return the obdm as an array with indices rho[spin][i][k] = <c_{spin,i}c^+_{spin,j}>\n Args:\n\n mol (Mole): PySCF Mole object.\n\n configs (array): electron positions.\n\n wf (pyqmc wave function object): wave function to evaluate on.\n\n orb_coeff (array): coefficients with size (nbasis,norb) relating mol basis to basis \n of 1-RDM desired.\n \n tstep (float): width of the Gaussian to update a walker position for the \n extra coordinate.\n\n spin: 0 or 1 for up or down. Defaults to all electrons.\n \"\"\"\n\n def __init__(\n self,\n mol,\n orb_coeff,\n nstep=10,\n tstep=0.50,\n warmup=100,\n naux=500,\n spin=None,\n electrons=None,\n ):\n assert (\n len(orb_coeff.shape) == 2\n ), \"orb_coeff should be a list of orbital coefficients.\"\n\n if not (spin is None):\n if spin == 0:\n self._electrons = np.arange(0, mol.nelec[0])\n elif spin == 1:\n self._electrons = np.arange(mol.nelec[0], np.sum(mol.nelec))\n else:\n raise ValueError(\"Spin not equal to 0 or 1\")\n elif not (electrons is None):\n self._electrons = electrons\n else:\n self._electrons = np.arange(0, np.sum(mol.nelec))\n\n self._orb_coeff = orb_coeff\n self._tstep = tstep\n self._mol = mol\n # self._extra_config = np.random.normal(scale=tstep,size=3) # not zero to avoid sitting on top of atom.\n nelec = sum(self._mol.nelec)\n self._extra_config = initial_guess(mol, int(naux / nelec) + 1).reshape(-1, 3)\n\n self._nstep = nstep\n\n for i in range(warmup):\n accept, self._extra_config = sample_onebody(\n mol, orb_coeff, self._extra_config, tstep\n )\n\n def __call__(self, configs, wf):\n \"\"\" Quantities from equation (9) of DOI:10.1063/1.4793531\"\"\"\n\n results = {\n \"value\": np.zeros(\n (configs.shape[0], self._orb_coeff.shape[1], self._orb_coeff.shape[1])\n ),\n \"norm\": np.zeros((configs.shape[0], self._orb_coeff.shape[1])),\n \"acceptance\": np.zeros(configs.shape[0]),\n }\n acceptance = 0\n naux = self._extra_config.shape[0]\n nelec = len(self._electrons)\n\n for step in range(self._nstep):\n e = np.random.choice(self._electrons)\n\n points = np.concatenate([self._extra_config, configs[:, e, :]])\n ao = self._mol.eval_gto(\"GTOval_sph\", points)\n borb = ao.dot(self._orb_coeff)\n\n # Orbital evaluations at extra coordinate.\n borb_aux = borb[0:naux, :]\n fsum = np.sum(borb_aux * borb_aux, axis=1)\n norm = borb_aux * borb_aux / fsum[:, np.newaxis]\n borb_configs = borb[naux:, :]\n\n auxassignments = np.random.randint(0, naux, size=configs.shape[0])\n wfratio = wf.testvalue(e, self._extra_config[auxassignments, :])\n\n orbratio = np.einsum(\n \"ij,ik->ijk\",\n borb_aux[auxassignments, :] / fsum[auxassignments, np.newaxis],\n borb_configs,\n )\n\n results[\"value\"] += nelec * np.einsum(\"i,ijk->ijk\", wfratio, orbratio)\n results[\"norm\"] += norm[auxassignments]\n\n accept, self._extra_config = sample_onebody(\n self._mol, self._orb_coeff, self._extra_config, tstep=self._tstep\n )\n\n results[\"acceptance\"] += np.mean(accept)\n\n results[\"value\"] /= self._nstep\n results[\"norm\"] = results[\"norm\"] / self._nstep\n results[\"acceptance\"] /= self._nstep\n\n return results\n\n def avg(self, configs, wf):\n d = self(configs, wf)\n davg = {}\n for k, v in d.items():\n # print(k, v.shape)\n davg[k] = np.mean(v, axis=0)\n return davg\n\n\ndef sample_onebody(mol, orb_coeff, configs, tstep=2.0):\n \"\"\" For a set of orbitals defined by 
orb_coeff, return samples from f(r) = \\sum_i phi_i(r)^2. \"\"\"\n config_pack = np.concatenate(\n [configs, configs + np.sqrt(tstep) * np.random.randn(*configs.shape)], axis=0\n )\n\n ao = mol.eval_gto(\"GTOval_sph\", config_pack)\n borb = ao.dot(orb_coeff)\n fsum = (borb ** 2).sum(axis=1)\n\n n = configs.shape[0]\n accept = fsum[n:] / fsum[0:n] > np.random.rand(n)\n newconf = config_pack[n:, :]\n configs[accept, :] = newconf[accept, :]\n return accept, configs\n\n\ndef normalize_obdm(obdm, norm):\n return obdm / (norm[np.newaxis, :] * norm[:, np.newaxis]) ** 0.5\n" ]
[ [ "numpy.sqrt", "numpy.einsum", "numpy.random.choice", "numpy.arange", "numpy.concatenate", "numpy.mean", "numpy.random.rand", "numpy.random.randn", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spaicer/example-container
[ "65a93b63dad6c5ccbdf80c6541cdf2c776e2f5af" ]
[ "python/example/src/main.py" ]
[ "\"\"\"Example Container computing mean value and standard deviation.\"\"\"\n\n__version__ = '0.0.1'\n\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nimport numpy\n\napp = FastAPI(\n title='Example Container',\n docs_url='/documentation',\n redoc_url='/redoc',\n description='Example container computing mean value and standard deviation.',\n version=__version__\n)\n\n\[email protected](\"/\")\nasync def root():\n \"\"\"Simply hello world.\"\"\"\n return {\"message\": \"Hello World\"}\n\n\nclass RawData(BaseModel):\n \"\"\"Example structure for raw data\"\"\"\n data: List[float] = [3.14, 2.72, 42, -1]\n\n\nclass Features(BaseModel):\n \"\"\"Example structure for computed features\"\"\"\n mean: float\n std: float\n\n\[email protected]('/example-feature-extration', response_model=Features)\nasync def feature_extraction(raw_data: RawData):\n \"\"\"Compute the mean value and the standard deviation.\"\"\"\n\n # compute features\n mean = numpy.mean(raw_data.data)\n std = numpy.std(raw_data.data)\n\n return Features(mean=mean, std=std)\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
donkirkby/zero-play
[ "15e3afa950037cfd1f373ee4943cd8b42d4c82c9" ]
[ "zero_play/othello/game.py" ]
[ "import math\nimport typing\n\nimport numpy as np\n\nfrom zero_play.game_state import GridGameState\n\n\nclass OthelloState(GridGameState):\n game_name = 'Othello'\n\n def __init__(self,\n text: str = None,\n board_height: int = 6,\n board_width: int = 6,\n spaces: np.ndarray = None):\n if spaces is not None:\n size = spaces.size\n board_width = board_height = int(math.sqrt(size-1))\n assert text is None\n if text is None:\n lines = None\n next_player_line = None\n else:\n lines = text.splitlines()\n next_player_line = lines.pop()\n super().__init__(board_height,\n board_width,\n lines=lines,\n extra_count=1,\n spaces=spaces)\n if spaces is not None:\n return\n spaces = self.get_spaces()\n if text:\n assert next_player_line and next_player_line.startswith('>')\n self.board[-1] = (self.X_PLAYER\n if next_player_line.endswith('X')\n else self.O_PLAYER)\n else:\n self.board[-1] = self.X_PLAYER\n for i in range(self.board_height//2-1, self.board_height//2+1):\n for j in range(self.board_width//2-1, self.board_width//2+1):\n player = self.X_PLAYER if (i+j) % 2 else self.O_PLAYER\n spaces[i, j] = player\n\n def get_valid_moves(self) -> np.ndarray:\n spaces = self.get_spaces()\n moves = np.zeros(self.board_height * self.board_width + 1, bool)\n move_spaces = moves[:-1].reshape(self.board_width, self.board_height)\n player = self.get_active_player()\n for i, j in self.find_moves(spaces, player):\n move_spaces[i, j] = True\n\n if moves.sum() == 0:\n # No moves for this player, check opponent.\n for _ in self.find_moves(spaces, -player):\n # Opponent has a move, pass is allowed.\n moves[-1] = True\n break\n\n return moves\n\n def find_moves(self, spaces: np.ndarray, player: int):\n for i in range(self.board_height):\n for j in range(self.board_width):\n piece = spaces[i, j]\n if piece == player:\n yield from self.find_moves_from_space(spaces, i, j, player)\n\n def find_moves_from_space(self, spaces, start_row, start_column, player):\n for di in range(-1, 2):\n for dj in range(-1, 2):\n if not (di or dj):\n continue\n has_flipped = False\n i = start_row + di\n j = start_column + dj\n while 0 <= i < self.board_height and 0 <= j < self.board_width:\n piece = spaces[i, j]\n if piece == player:\n break\n if piece == self.NO_PLAYER:\n if has_flipped:\n yield i, j\n break\n else:\n has_flipped = True\n i += di\n j += dj\n\n def display(self, show_coordinates: bool = False) -> str:\n result = super().display(show_coordinates)\n next_player = self.board[-1]\n return result + f'>{self.DISPLAY_CHARS[next_player+1]}\\n'\n\n def display_move(self, move: int) -> str:\n if move == self.board_width * self.board_height:\n return 'PASS'\n return super().display_move(move)\n\n def parse_move(self, text: str) -> int:\n trimmed = text.strip().replace(' ', '')\n if not trimmed:\n return self.board_height*self.board_width # It's a pass.\n return super().parse_move(trimmed)\n\n def make_move(self, move: int) -> 'OthelloState':\n new_board: np.ndarray = self.board.copy()\n player = new_board[-1]\n new_board[-1] = -player\n\n new_state = OthelloState(spaces=new_board)\n if move == self.board_width * self.board_height:\n return new_state # It's a pass.\n\n spaces = new_state.get_spaces()\n start_row = move // self.board_width\n start_column = move % self.board_width\n for di in range(-1, 2):\n for dj in range(-1, 2):\n if not (di or dj):\n continue\n to_flip: typing.List[typing.Tuple[int, int]] = [] # [(i, j)]\n i = start_row + di\n j = start_column + dj\n while 0 <= i < self.board_height and 0 <= j < self.board_width:\n 
piece = spaces[i, j]\n if piece == player:\n for i, j in to_flip:\n spaces[i, j] *= -1\n break\n if piece == self.NO_PLAYER:\n break\n else:\n to_flip.append((i, j))\n i += di\n j += dj\n spaces[start_row, start_column] = player\n return new_state\n\n def get_active_player(self):\n return self.board[-1]\n\n def is_ended(self):\n spaces = self.get_spaces()\n player = self.board[-1]\n for _ in self.find_moves(spaces, player):\n return False\n for _ in self.find_moves(spaces, -player):\n return False\n return True\n\n def get_winner(self):\n if not self.is_ended():\n return self.NO_PLAYER\n total = self.board[:-1].sum()\n if total > 0:\n return self.X_PLAYER\n if total < 0:\n return self.O_PLAYER\n return self.NO_PLAYER\n\n def get_piece_count(self, player: int):\n return (self.board[:-1] == player).sum()\n\n def is_win(self, player: int) -> bool:\n return self.get_winner() == player\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jbonaiuto/infant_eeg
[ "6368e253f9990dbfd9717f2d862c0b0c46d64f3b" ]
[ "src/python/infant_eeg/experiment.py" ]
[ "import sys\n# Import AVbin first\nif sys.platform == 'win32':\n import ctypes\n avbin_lib = ctypes.cdll.LoadLibrary('avbin')\n import psychopy.visual\nimport copy\nimport datetime\nfrom psychopy.visual import Window\nfrom psychopy import visual, core, event, monitors\nimport numpy as np\nimport egi.threaded as egi\n# Try to import tobii sdk\ntry:\n from infant_eeg.tobii_controller import TobiiController\nexcept:\n pass\nfrom infant_eeg.distractors import DistractorSet\nfrom infant_eeg.config import *\n\n\nclass Experiment:\n \"\"\"\n Base experiment class\n \"\"\"\n\n def __init__(self, exp_info, file_name):\n \"\"\"\n Initialize experiment - read XML file, setup window, connect to netstation and tobii\n exp_info - experiment information\n file_name - name of XML file containing experiment definition\n \"\"\"\n self.exp_info = exp_info\n self.name = None\n self.type = None\n self.num_blocks = 0\n self.blocks = {}\n self.block_order = []\n\n # Window to use\n wintype = 'pyglet' # use pyglet if possible, it's faster at event handling\n # Add 14cm to distance - this is distance from eyetracker to monitor\n mon = monitors.Monitor(exp_info['monitor'], distance=float(exp_info['monitor distance']))\n self.win = Window(\n [1280, 1024],\n monitor=mon,\n screen=SCREEN,\n units=\"deg\",\n fullscr=True,\n #fullscr=False,\n color=[-1, -1, -1],\n winType=wintype)\n self.win.setMouseVisible(False)\n event.clearEvents()\n\n # Measure frame rate\n self.mean_ms_per_frame, std_ms_per_frame, median_ms_per_frame = visual.getMsPerFrame(self.win, nFrames=60,\n showVisual=True)\n\n self.debug_sq=None\n if exp_info['monitor']=='tobii':\n self.debug_sq=psychopy.visual.Rect(self.win, width=30, height=30, units='pix')\n self.debug_sq.setFillColor((1,1,1))\n self.debug_sq.setPos((630,-500))\n\n # Compute distractor duration in frames based on frame rate\n distractor_duration_frames = int(2000.0/self.mean_ms_per_frame)\n\n # Initialize set of distractors\n self.distractor_set = DistractorSet(os.path.join(DATA_DIR, 'images', 'distractors', 'space'),\n os.path.join(DATA_DIR, 'sounds', 'distractors'),\n os.path.join(DATA_DIR, 'movies', 'distractors'),\n os.path.join(DATA_DIR, 'images', 'distractors', 'star-cartoon.jpg'),\n distractor_duration_frames, self.win)\n\n # Connect to nestation\n self.ns = None\n if exp_info['eeg']:\n # connect to netstation\n self.ns = egi.Netstation()\n ms_localtime = egi.ms_localtime\n\n self.eye_tracker = None\n mouse_visible = False\n if exp_info['eyetracking source'] == 'tobii':\n # Initialize eyetracker\n self.eye_tracker = TobiiController(self.win)\n self.eye_tracker.waitForFindEyeTracker()\n self.eye_tracker.activate(EYETRACKER_NAME)\n elif exp_info['eyetracking source'] == 'mouse':\n mouse_visible = True\n\n # Initialize mouse\n self.mouse = event.Mouse(visible=mouse_visible, win=self.win)\n\n self.gaze_debug=None\n if self.exp_info['debug mode']:\n self.gaze_debug=psychopy.visual.Circle(self.win, radius=1, fillColor=(1.0,-1.0,-1.0))\n\n self.read_xml(file_name)\n\n # Initialize netstation and eyetracker\n self.initialize()\n\n def calibrate_eyetracker(self):\n \"\"\"\n Run eyetracker calibration routine\n \"\"\"\n retval = 'retry'\n while retval == 'retry':\n waitkey = True\n retval = None\n can_accept = self.eye_tracker.doCalibration(EYETRACKER_CALIBRATION_POINTS)\n while waitkey:\n for key in psychopy.event.getKeys():\n if can_accept:\n num_entered=True\n try:\n calib_idx=int(key)\n can_accept = self.eye_tracker.doCalibration([EYETRACKER_CALIBRATION_POINTS[calib_idx-1]],\n 
calib=self.eye_tracker.calib)\n except:\n num_entered=False\n if not num_entered and key == 'a':\n retval = 'accept'\n waitkey = False\n elif key == 'r':\n retval = 'retry'\n waitkey = False\n elif key == 'escape':\n retval = 'abort'\n waitkey = False\n self.eye_tracker.calresult.draw()\n self.eye_tracker.calresultmsg.draw()\n for point_label in self.eye_tracker.point_labels:\n point_label.draw()\n self.win.flip()\n\n if retval == 'abort':\n self.eye_tracker.closeDataFile()\n self.eye_tracker.destroy()\n self.win.close()\n core.quit()\n\n def initialize(self):\n \"\"\"\n Start netstation recording, calibrate eyetracker\n \"\"\"\n if self.ns is not None:\n try:\n self.ns.initialize(NETSTATION_IP, 55513)\n self.ns.BeginSession()\n self.ns.StartRecording()\n except:\n print('Could not connect with NetStation!')\n\n # Initialize logging\n logfile = os.path.join(DATA_DIR, 'logs', self.exp_info['experiment'], '%s_%s_%s.log' % (self.exp_info['child_id'],\n self.exp_info['date'],\n self.exp_info['session']))\n\n if self.eye_tracker is not None:\n self.eye_tracker.setDataFile(logfile, self.exp_info)\n else:\n datafile = open(logfile, 'w')\n datafile.write('Recording date:\\t' + datetime.datetime.now().strftime('%Y/%m/%d') + '\\n')\n datafile.write('Recording time:\\t' + datetime.datetime.now().strftime('%H:%M:%S') + '\\n')\n datafile.write('Recording resolution\\t%d x %d\\n' % tuple(self.win.size))\n for key, data in self.exp_info.iteritems():\n datafile.write('%s:\\t%s\\n' % (key, data))\n datafile.close()\n\n # Create random block order\n n_repeats = int(self.num_blocks/len(self.blocks.keys()))\n for i in range(n_repeats):\n subblock_order = copy.copy(self.blocks.keys())\n np.random.shuffle(subblock_order)\n self.block_order.extend(subblock_order)\n\n # Synch with netstation in between trials\n if self.ns is not None:\n self.ns.sync()\n\n if self.eye_tracker is not None:\n self.eye_tracker.startTracking()\n\n def close(self):\n \"\"\"\n Disconnect from eyetracker and netstation\n \"\"\"\n if self.eye_tracker is not None:\n self.eye_tracker.stopTracking()\n self.eye_tracker.closeDataFile()\n\n # close netstation connection\n if self.ns:\n self.ns.StopRecording()\n self.ns.EndSession()\n self.ns.finalize()\n\n self.win.close()\n core.quit()\n if self.eye_tracker is not None:\n self.eye_tracker.destroy()\n\n def run(self):\n \"\"\"\n Run task\n ns - netstation connection\n \"\"\"\n pass\n\n def read_xml(self, file_name):\n \"\"\"\n Read experiment definition file\n :param file_name: file to read definition from\n \"\"\"\n pass\n\nclass Event:\n def __init__(self, code, label, table):\n self.code=code\n self.label=label\n self.timestamp=egi.ms_localtime()\n self.table=table" ]
[ [ "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kramersi/cctv-flood-extraction
[ "73ff7949d544cfbdae26f683b974daa6ebbfe893" ]
[ "img_segmentation/model.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau\nfrom keras.layers import Conv2D, Concatenate, MaxPooling2D, Conv2DTranspose, UpSampling2D, Dropout, BatchNormalization\nfrom keras.models import Input, Model\nfrom keras.optimizers import Adam\n\nfrom img_segmentation.image_gen import ImageGenerator\nfrom img_segmentation.utils import f1_loss, f1_np, iou_np, precision_np, recall_np, error_np, load_images, channel_mean_stdev, \\\n store_prediction, load_img_msk_paths\n\n\ndef conv_block(m, dim, acti, bn, res, do=0):\n \"\"\" creates convolutional block for creating u-net\n\n \"\"\"\n n = Conv2D(dim, 3, activation=acti, padding='same')(m)\n n = BatchNormalization()(n) if bn else n\n n = Dropout(do)(n) if do else n\n n = Conv2D(dim, 3, activation=acti, padding='same')(n)\n n = BatchNormalization()(n) if bn else n\n return Concatenate()([m, n]) if res else n\n\n\ndef level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):\n if depth > 0:\n n = conv_block(m, dim, acti, bn, res)\n m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)\n m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)\n if up:\n m = UpSampling2D()(m)\n m = Conv2D(dim, 2, activation=acti, padding='same')(m)\n else:\n m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)\n n = Concatenate()([n, m])\n m = conv_block(n, dim, acti, bn, res)\n else:\n m = conv_block(m, dim, acti, bn, res, do)\n return m\n\n\nclass UNet(object):\n \"\"\" Class which create UNet model and trains it and test it\n\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (https://arxiv.org/abs/1505.04597)\n\n Arguments:\n img_shape: (height, width, channels)\n n_class: number of output channels, classes to predict in one-hot coding\n root_features: number of channels of the first conv\n layers: zero indexed depth of the U-structure, number of layers\n inc_rate: rate at which the conv channels will increase\n activation: activation function after convolutions\n dropout: amount of dropout in the contracting part\n batch_norm: adds Batch Normalization if true\n max_pool: use strided conv instead of maxpooling if false\n up_conv: use transposed conv instead of upsamping + conv if false\n residual: add residual connections around each conv block if true\n \"\"\"\n def __init__(self, img_shape, n_class=2, root_features=64, layers=4, inc_rate=1., activation='relu', dropout=0.5,\n batch_norm=False, max_pool=True, up_conv=True, residual=False):\n self.img_shape = img_shape\n self.n_class = n_class\n self.root_features = root_features\n self.layers = layers\n self.inc_rate = inc_rate\n self.activation = activation\n self.dropout = dropout\n self.batch_norm = batch_norm\n self.max_pool = max_pool\n self.up_conv = up_conv\n self.residual = residual\n\n self.tr_mean = None\n self.tr_std = None\n\n # define model\n i = Input(shape=img_shape)\n o = level_block(i, root_features, layers, inc_rate, activation, dropout, batch_norm, max_pool, up_conv, residual)\n o = Conv2D(n_class, 1, activation='sigmoid')(o)\n self.model = Model(inputs=i, outputs=o)\n\n def normalize(self, x):\n #self.tr_mean = np.array([69.7399, 69.8885, 65.1602])\n #self.tr_std = np.array([72.9841, 72.3374, 71.6508])\n\n if self.tr_mean is None:\n print('mean and standard deviation of training pictures not calculated yet, calculating...')\n self.tr_mean, self.tr_std = channel_mean_stdev(x)\n print('mean: ', self.tr_mean, 'std: 
', self.tr_std)\n\n x_norm = (x - self.tr_mean.astype('float32')) / self.tr_std.astype('float32')\n # x_norm = (x - np.amin(x)) / np.amax(x)\n # img_eq = exposure.equalize_hist(x_norm)\n return x_norm\n\n def train(self, model_dir, train_dir, valid_dir, epochs=20, batch_size=3, augmentation=True, normalisation=True, base_dir=None, trainable_index=14, save_aug=False, learning_rate=0.01):\n \"\"\" trains a unet instance on keras. With on-line data augmentation to diversify training samples in each batch.\n\n example of defining paths\n train_dir = \"E:\\\\watson_for_trend\\\\3_select_for_labelling\\\\train_cityscape\\\\\"\n model_dir = \"E:\\\\watson_for_trend\\\\5_train\\\\cityscape_l5f64c3n8e20\\\\\"\n\n \"\"\"\n # define callbacks\n mc = ModelCheckpoint(os.path.join(model_dir, 'model.h5'), save_best_only=True, save_weights_only=False)\n es = EarlyStopping(monitor='val_loss', patience=30)\n tb = TensorBoard(log_dir=model_dir, write_graph=True) # write_images=True, write_grads=True, histogram_freq=5\n lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, verbose=1, min_lr=0.0000001)\n\n # define weights (not used now, keras does not support it with segmentation)\n class_weights = {0: 0.5, 1: 0.5}\n\n if base_dir is not None:\n self.model.load_weights(os.path.join(base_dir, 'model.h5'))\n\n for layer in self.model.layers[:-trainable_index]:\n layer.trainable = False\n\n # Check the trainable status of the individual layers\n for layer in self.model.layers:\n print(layer.name, layer.trainable)\n\n # compile model with optimizer and loss function\n self.model.compile(optimizer=Adam(lr=learning_rate), loss=f1_loss,\n metrics=['acc', 'categorical_crossentropy'])\n\n # summary of parameters in each layer\n self.model.summary()\n\n path_tr = load_img_msk_paths(train_dir)\n path_va = load_img_msk_paths(valid_dir)\n\n if save_aug is True:\n aug_path = os.path.join(model_dir, 'augmentations')\n if not os.path.exists(aug_path):\n print('created augmentation dir', aug_path)\n os.makedirs(aug_path)\n else:\n aug_path = None\n\n # augmentation are defined here and can be changed\n aug_dict = dict(horizontal_flip=0.5, vertical_flip=0.0, rotation_range=(0.0, 0.0),\n width_shift_range=(-0.2, 0.2), height_shift_range=(-0.2, 0.2), contrast_range=(0.5, 1.5),\n zoom_range=(1.0, 1.33), grayscale_range=(0.0, 0.8), brightness_range=(-80, 20),\n crop_range=(0, 0), blur_range=(0.0, 1.0), shear_range=(0.0, 0.0), prob=0.2)\n\n train_generator = ImageGenerator(list(path_tr.keys()), masks=path_tr, batch_size=batch_size, dim=(512, 512), shuffle=True,\n normalize='std_norm', save_to_dir=aug_path, augmentation=augmentation, aug_dict=aug_dict)\n\n valid_generator = ImageGenerator(list(path_va.keys()), masks=path_va, batch_size=batch_size, dim=(512, 512), shuffle=True,\n normalize='std_norm', augmentation=augmentation, aug_dict=aug_dict)\n\n # train unet with image_generator\n self.model.fit_generator(train_generator,\n validation_data=valid_generator,\n epochs=epochs,\n verbose=1,\n callbacks=[mc, tb, es, lr],\n use_multiprocessing=False,\n workers=4)\n\n print('Training completed')\n\n def test(self, model_dir, test_img_dirs, output_dir, csv_path=None, roi=None):\n path_test = load_img_msk_paths(test_img_dirs)\n\n img_gen_norm = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize='std_norm', augmentation=False)\n img_gen = ImageGenerator(list(path_test.keys()), masks=path_test, batch_size=1, shuffle=False, normalize=None, augmentation=False)\n\n n = len(img_gen)\n 
x_va = np.empty((n, 512, 512, 3))\n y_va = np.empty((n, 512, 512, 2))\n for i in range(n):\n x_va[i, ], y_va[i,] = img_gen[i]\n\n self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])\n self.model.load_weights(os.path.join(model_dir, 'model.h5'))\n\n p_va = self.model.predict_generator(generator=img_gen_norm, verbose=1)\n scores = self.model.evaluate_generator(img_gen_norm, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=1)\n\n store_prediction(p_va, x_va, output_dir)\n if roi is not None:\n y_va = y_va[:,roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]),:]\n p_va = p_va[:,roi[1]:(roi[1] + roi[3]), roi[0]:(roi[0] + roi[2]),:]\n\n res = {'DICE': [f1_np(y_va, p_va)], 'IoU': [iou_np(y_va, p_va)], 'Precision': [precision_np(y_va, p_va)],\n 'Recall': [recall_np(y_va, p_va)], 'Error': [error_np(y_va, p_va)]}\n\n if csv_path is None:\n pd.DataFrame(res).to_csv(os.path.join(model_dir, 'result.csv'))\n else:\n pd.DataFrame(res).to_csv(os.path.join(csv_path))\n\n print('DICE: ' + str(f1_np(y_va, p_va)))\n print('IoU: ' + str(iou_np(y_va, p_va)))\n print('Precision: ' + str(precision_np(y_va, p_va)))\n print('Recall: ' + str(recall_np(y_va, p_va)))\n print('Error: ' + str(error_np(y_va, p_va)))\n print('Scores: ', scores)\n\n\n def predict(self, model_dir, img_dir, output_dir, batch_size=4, train_dir=None):\n\n x_va = load_images(os.path.join(img_dir), sort=True, target_size=(512, 512))\n self.tr_mean = np.array([69.739934, 69.88847943, 65.16021837])\n self.tr_std = np.array([72.98415532, 72.33742881, 71.6508131])\n\n if train_dir is not None and self.tr_mean is None:\n x_tr = load_images(os.path.join(train_dir), sort=True, target=(512, 512))\n self.normalize(x_tr)\n\n # pre-process\n if self.tr_mean is not None:\n x_va_norm = self.normalize(x_va)\n\n self.model.compile(optimizer=Adam(lr=0.001), loss=f1_loss, metrics=['acc', 'categorical_crossentropy'])\n self.model.load_weights(os.path.join(model_dir, 'model.h5'))\n\n p_va = self.model.predict(x_va_norm, batch_size=batch_size, verbose=1)\n store_prediction(p_va, x_va, output_dir)\n" ]
[ [ "numpy.array", "numpy.empty", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
heartexlabs/NeMo
[ "eb0da4b312090ba694a3dd7e41e513d1fce789cc" ]
[ "nemo/collections/tts/models/squeezewave.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\n\nimport torch\nfrom hydra.utils import instantiate\nfrom omegaconf import MISSING, DictConfig, OmegaConf, open_dict\nfrom pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger\n\nfrom nemo.collections.tts.helpers.helpers import OperationMode, waveglow_log_to_tb_func\nfrom nemo.collections.tts.losses.waveglowloss import WaveGlowLoss\nfrom nemo.collections.tts.models.base import GlowVocoder\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.neural_types.elements import (\n AudioSignal,\n LengthsType,\n LogDeterminantType,\n MelSpectrogramType,\n NormalDistributionSamplesType,\n VoidType,\n)\nfrom nemo.core.neural_types.neural_type import NeuralType\nfrom nemo.utils import logging\n\n\n@dataclass\nclass SqueezeWaveConfig:\n squeezewave: Dict[Any, Any] = MISSING\n preprocessor: Dict[Any, Any] = MISSING\n sigma: float = MISSING\n train_ds: Optional[Dict[Any, Any]] = None\n validation_ds: Optional[Dict[Any, Any]] = None\n\n\nclass SqueezeWaveModel(GlowVocoder):\n \"\"\" SqueezeWave model that generates audio conditioned on mel-spectrogram\n \"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):\n if isinstance(cfg, dict):\n cfg = OmegaConf.create(cfg)\n super().__init__(cfg=cfg, trainer=trainer)\n\n schema = OmegaConf.structured(SqueezeWaveConfig)\n # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes\n if isinstance(cfg, dict):\n cfg = OmegaConf.create(cfg)\n elif not isinstance(cfg, DictConfig):\n raise ValueError(f\"cfg was type: {type(cfg)}. 
Expected either a dict or a DictConfig\")\n # Ensure passed cfg is compliant with schema\n OmegaConf.merge(cfg, schema)\n\n self.sigma = self._cfg.sigma\n self.audio_to_melspec_precessor = instantiate(self._cfg.preprocessor)\n self.squeezewave = instantiate(self._cfg.squeezewave)\n self.loss = WaveGlowLoss() # Same loss as WaveGlow\n\n @GlowVocoder.mode.setter\n def mode(self, new_mode):\n if new_mode == OperationMode.training:\n self.train()\n else:\n self.eval()\n self._mode = new_mode\n self.squeezewave.mode = new_mode\n\n @property\n def input_types(self):\n return {\n \"audio\": NeuralType(('B', 'T'), AudioSignal()),\n \"audio_len\": NeuralType(('B'), LengthsType()),\n \"run_inverse\": NeuralType(optional=True),\n }\n\n @property\n def output_types(self):\n if self.mode == OperationMode.training or self.mode == OperationMode.validation:\n output_dict = {\n \"pred_normal_dist\": NeuralType(('B', 'flowgroup', 'T'), NormalDistributionSamplesType()),\n \"log_s_list\": [NeuralType(('B', 'flowgroup', 'T'), VoidType())], # TODO: Figure out a good typing\n \"log_det_W_list\": [NeuralType(elements_type=LogDeterminantType())],\n }\n if self.mode == OperationMode.validation:\n output_dict[\"audio_pred\"] = NeuralType(('B', 'T'), AudioSignal())\n output_dict[\"spec\"] = NeuralType(('B', 'T', 'D'), MelSpectrogramType())\n output_dict[\"spec_len\"] = NeuralType(('B'), LengthsType())\n return output_dict\n return {\n \"audio_pred\": NeuralType(('B', 'T'), AudioSignal()),\n }\n\n @typecheck()\n def forward(self, *, audio, audio_len, run_inverse=True):\n if self.mode != self.squeezewave.mode:\n raise ValueError(\n f\"SqueezeWaveModel's mode {self.mode} does not match SqueezeWaveModule's mode {self.squeezewave.mode}\"\n )\n spec, spec_len = self.audio_to_melspec_precessor(audio, audio_len)\n tensors = self.squeezewave(spec=spec, audio=audio, run_inverse=run_inverse)\n if self.mode == OperationMode.training:\n return tensors[:-1] # z, log_s_list, log_det_W_list\n elif self.mode == OperationMode.validation:\n z, log_s_list, log_det_W_list, audio_pred = tensors\n return z, log_s_list, log_det_W_list, audio_pred, spec, spec_len\n return tensors # audio_pred\n\n @typecheck(\n input_types={\n \"spec\": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),\n \"sigma\": NeuralType(optional=True),\n \"denoise\": NeuralType(optional=True),\n \"denoiser_strength\": NeuralType(optional=True),\n },\n output_types={\"audio\": NeuralType(('B', 'T'), AudioSignal())},\n )\n def convert_spectrogram_to_audio(\n self, spec: torch.Tensor, sigma: bool = 1.0, denoise: bool = True, denoiser_strength: float = 0.01\n ) -> torch.Tensor:\n with self.nemo_infer():\n audio = self.squeezewave(spec=spec, run_inverse=True, audio=None, sigma=sigma)\n if denoise:\n audio = self.denoise(audio, denoiser_strength)\n\n return audio\n\n def training_step(self, batch, batch_idx):\n self.mode = OperationMode.training\n\n audio, audio_len = batch\n z, log_s_list, log_det_W_list = self.forward(audio=audio, audio_len=audio_len, run_inverse=False)\n\n loss = self.loss(z=z, log_s_list=log_s_list, log_det_W_list=log_det_W_list, sigma=self.sigma)\n return {\n 'loss': loss,\n 'progress_bar': {'training_loss': loss},\n 'log': {'loss': loss},\n }\n\n def validation_step(self, batch, batch_idx):\n self.mode = OperationMode.validation\n\n audio, audio_len = batch\n z, log_s_list, log_det_W_list, audio_pred, spec, spec_len = self.forward(\n audio=audio, audio_len=audio_len, run_inverse=(batch_idx == 0)\n )\n loss = self.loss(z=z, log_s_list=log_s_list, 
log_det_W_list=log_det_W_list, sigma=self.sigma)\n return {\n \"val_loss\": loss,\n \"audio_pred\": audio_pred,\n \"mel_target\": spec,\n \"mel_len\": spec_len,\n }\n\n def validation_epoch_end(self, outputs):\n if self.logger is not None and self.logger.experiment is not None:\n tb_logger = self.logger.experiment\n if isinstance(self.logger, LoggerCollection):\n for logger in self.logger:\n if isinstance(logger, TensorBoardLogger):\n tb_logger = logger.experiment\n break\n waveglow_log_to_tb_func(\n tb_logger,\n outputs[0].values(),\n self.global_step,\n tag=\"eval\",\n mel_fb=self.audio_to_melspec_precessor.fb,\n )\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n self.log('val_loss', avg_loss)\n\n def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = \"train\"):\n if \"dataset\" not in cfg or not isinstance(cfg.dataset, DictConfig):\n raise ValueError(f\"No dataset for {name}\")\n if \"dataloader_params\" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):\n raise ValueError(f\"No dataloder_params for {name}\")\n if shuffle_should_be:\n if 'shuffle' not in cfg.dataloader_params:\n logging.warning(\n f\"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its \"\n \"config. Manually setting to True\"\n )\n with open_dict(cfg[\"dataloader_params\"]):\n cfg.dataloader_params.shuffle = True\n elif not cfg.dataloader_params.shuffle:\n logging.error(f\"The {name} dataloader for {self} has shuffle set to False!!!\")\n elif not shuffle_should_be and cfg.dataloader_params.shuffle:\n logging.error(f\"The {name} dataloader for {self} has shuffle set to True!!!\")\n\n dataset = instantiate(cfg.dataset)\n return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)\n\n def setup_training_data(self, cfg):\n self._train_dl = self.__setup_dataloader_from_config(cfg)\n\n def setup_validation_data(self, cfg):\n self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name=\"validation\")\n\n @classmethod\n def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n list_of_models = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"SqueezeWave-22050Hz\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemottsmodels/versions/1.0.0a5/files/SqueezeWave-22050Hz.nemo\",\n description=\"This model is trained on LJSpeech sampled at 22050Hz, and can be used as an universal vocoder.\",\n class_=cls,\n )\n list_of_models.append(model)\n return list_of_models\n" ]
[ [ "torch.stack", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PeriscopeData/analytics-toolbox
[ "83effdee380c33e5eecea29528acf5375fd496fb" ]
[ "Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py" ]
[ "# SQL output is imported as a pandas dataframe variable called \"df\"\n\n# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import tmean, scoreatpercentile\nimport numpy as np\n\ndef trimmean(arr, percent):\n lower_limit = scoreatpercentile(arr, percent)\n upper_limit = scoreatpercentile(arr, 100-percent)\n return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))\n\nmy_result = trimmean(df[\"amt_paid\"].values,10)" ]
[ [ "scipy.stats.tmean", "scipy.stats.scoreatpercentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
curtislisle/miqa
[ "d7220e1ab9bdacce9e4344ba6549f16fe710862a" ]
[ "miqa/learning/correlator.py" ]
[ "#!/usr/bin/env python3\n\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\ndf = pd.read_csv('M:/MIQA/data.csv') # manually converted TRUE/FALSE into 1/0\nprint(f'count NaN: {df.isnull().sum().sum()}')\ncorrelation_df = df.corr()\ncorrelation_df.to_csv('M:/MIQA/correlations2.csv')\nprint(correlation_df)\n\ncm = pd.DataFrame(confusion_matrix(df['overall_qa_assessment'], df['cnr']))\ncm.to_csv('M:/MIQA/oQA_CNR.csv')\nprint(cm)\n\ncm = pd.DataFrame(confusion_matrix(df['overall_qa_assessment'], df['snr']))\ncm.to_csv('M:/MIQA/oQA_SNR.csv')\nprint(cm)\n\n# df = pd.read_csv('M:/MIQA/PredictHD_small/phenotype/bids_image_qc_information.tsv', sep='\\t')\n# df = df.drop(columns=['participant_id', 'session_id', 'series_number'])\n# df.to_csv('M:/MIQA/dataQA.csv')\n# print(f\"count NaN: {df.isnull().sum().sum()}\")\n# correlation_df = df.corr()\n# correlation_df.to_csv('M:/MIQA/correlations.csv')\n# print(correlation_df)\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.confusion_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
edfagan/ManuCostModel
[ "c3d97815f84d7cdff1365a472887da0fc712ae71" ]
[ "src/ManuCostModel/Tools.py" ]
[ "\"\"\"\nCost Model Data Processing Tools\n\nAuthor: Edward Fagan\n\"\"\"\nimport pandas as pd\n\ndef BOM(manuf_obj):\n \"\"\"\n Determine the bill of materials for manufacturing.\n\n Parameters\n ----------\n manuf_obj : obj\n A manufacturing object.\n\n Returns\n -------\n bom : DataFrame\n A Pandas dataframe containing the bill of materials.\n\n \"\"\"\n \n matCosts = {}\n matName = {}\n \n for val in manuf_obj.breakdown_material_mass_struct.keys():\n \n for matType in manuf_obj.materialVars.keys():\n \n if val in manuf_obj.materialVars[matType].keys():\n \n matCosts[val] = manuf_obj.materialVars[matType][val]['cost']\n matName[val] = manuf_obj.materialVars[matType][val]['material']\n \n bom = pd.DataFrame.from_dict(matName, orient='index', columns=['Material'])\n bom['Unit Cost (€/kg)'] = matCosts.values()\n \n bom['Structural Mass (kg)'] = manuf_obj.breakdown_material_mass_struct.values()\n bom['Structural Cost(€)'] = manuf_obj.breakdown_material_cost_struct.values()\n bom['Scrap Mass (kg)'] = manuf_obj.breakdown_material_mass_scrap.values()\n bom['Scrap Cost(€)'] = manuf_obj.breakdown_material_cost_scrap.values()\n \n bom['Total Mass (kg)'] = bom['Structural Mass (kg)'] + bom['Scrap Mass (kg)']\n bom['Total Cost(€)'] = bom['Structural Cost(€)'] + bom['Scrap Cost(€)']\n\n return bom\n\ndef CostCentres(manuf, totals=False, stacked=False, legendOn=True):\n #\n \n if totals is False:\n plotInfo = [['Material Cost (€)'],\n ['Equipment Cost (€)'],\n ['Labour Cost (€)']]\n \n data = [[[val for val in manuf.materialCategoryCosts.values()]],\n [[val for val in manuf.equipmentItemCosts.values()]],\n [[val for val in manuf.labourCostBreakdown.values()]]]\n \n dataLabels = [[[val for val in manuf.materialCategoryCosts.keys()]],\n [[val for val in manuf.equipmentItemCosts.keys()]],\n [[val for val in manuf.labourCostBreakdown.keys()]]]\n \n colourSet = [[['red']],\n [['green']],\n [['blue']]]\n \n if legendOn is True:\n legendDisplay = [[[1 for val in range(len(manuf.materialCategoryCosts.keys()))]],\n [[1 for val in range(len(manuf.materialCategoryCosts.keys()))]],\n [[1 for val in range(len(manuf.materialCategoryCosts.keys()))]]]\n else:\n legendDisplay = [[[0 for val in range(len(manuf.materialCategoryCosts.keys()))]],\n [[0 for val in range(len(manuf.equipmentItemCosts.keys()))]],\n [[0 for val in range(len(manuf.labourCostBreakdown.keys()))]]]\n \n else:\n if stacked is False:\n plotInfo = [['Material Cost (€)'],\n ['Equipment Cost (€)'],\n ['Labour Cost (€)']]\n \n data = [[[sum(manuf.materialCategoryCosts.values())]],\n [[sum(manuf.equipmentItemCosts.values())]],\n [[sum(manuf.labourCostBreakdown.values())]]]\n \n dataLabels = [[[manuf.prodName + \" Materials\"]],\n [[manuf.prodName + \" Equipment\"]],\n [[manuf.prodName + \" Labour\"]]]\n \n colourSet = [[['red']],\n [['green']],\n [['blue']]]\n \n if legendOn is True:\n legendDisplay = [[[1]],\n [[1]],\n [[1]]]\n else:\n legendDisplay = [[[0]],\n [[0]],\n [[0]]]\n else:\n plotInfo = [[manuf.prodName]]\n \n data = [[[sum(manuf.materialCategoryCosts.values()), sum(manuf.equipmentItemCosts.values()), sum(manuf.labourCostBreakdown.values())]]]\n \n dataLabels = [[[\"Materials\", \"Equipment\", \"Labour\"]]]\n \n colourSet = [[['red', 'green', 'blue']]]\n \n if legendOn is True:\n legendDisplay = [[[1, 1, 1]]]\n else:\n legendDisplay = [[[0, 0, 0]]]\n \n plotData = plotInfo, data, dataLabels, colourSet, legendDisplay\n \n return plotData\n\ndef Compare(productionList, totals=True, stacked=True, oneLegend=True, centreIndex=None):\n \"\"\" \n 
centreIndex is a integer value referring to a particular Cost Centre \n Cost Centres are ordered as:\n 0 = Materials\n 1 = Equipment\n 2 = Labour\n \"\"\"\n plotList = []\n legendOn = True\n \n # Create plot formatted data for each manufacturing analysis\n for prod in productionList:\n \n plotList.append(CostCentres(prod, totals=totals, stacked=stacked, legendOn=legendOn))\n \n if oneLegend is True:\n legendOn = False\n \n # Combine data from each plot\n if stacked is True:\n plotInfo = [entry[0][0] for entry in plotList]\n \n data = [entry[1][0] for entry in plotList]\n \n dataLabels = [entry[2][0] for entry in plotList]\n \n colourSet = [entry[3][0] for entry in plotList]\n \n legendDisplay = [entry[4][0] for entry in plotList]\n \n else:\n plotInfo = plotList[0][0]\n \n data = [i for entry in plotList for val in entry[1] for i in val]\n \n data = [[data[j*3+i] for j in range(int(len(data)/3))] for i in range(3)]\n \n dataLabels = [i for entry in plotList for val in entry[2] for i in val]\n \n dataLabels = [[dataLabels[j*3+i] for j in range(int(len(dataLabels)/3))] for i in range(3)]\n \n colourSet = [i for entry in plotList for val in entry[3] for i in val]\n \n colourSet = [[colourSet[j*3+i] for j in range(int(len(colourSet)/3))] for i in range(3)]\n \n legendDisplay = [i for entry in plotList for val in entry[4] for i in val]\n \n legendDisplay = [[legendDisplay[j*3+i] for j in range(int(len(legendDisplay)/3))] for i in range(3)]\n \n if centreIndex is not None:\n plotInfo = [plotInfo[centreIndex]]\n data = [data[centreIndex]]\n dataLabels = [dataLabels[centreIndex]]\n colourSet = [colourSet[centreIndex]]\n legendDisplay = [legendDisplay[centreIndex]]\n \n plotData = plotInfo, data, dataLabels, colourSet, legendDisplay\n \n return plotData\n \n# if __name__ == '__main__':\n \n # from DataVis import barPlot\n \n # wingProduction.prodName = 'VI'\n \n # plotData = costCentres(wingProduction, totals=True, stacked=False)\n \n # barPlot(plotData, percentDisplay=False, barLabelDisplay=True)\n \n # plotData = compare([wingProduction, wingProduction, wingProduction], totals=True, stacked=False, centreIndex=2)\n \n # barPlot(plotData, percentDisplay=False, barLabelDisplay=False, secondAxis=True, secondAxisVars=['1','2','3','4','5'])\n \n \n " ]
[ [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ginesam/semtagger
[ "ce5b0342ddf3bf1b30479ba7f1478fc2d66a6059" ]
[ "models/semtagger_predict.py" ]
[ "#!/usr/bin/python3\n# this script predicts semantic tags using a trained neural model\n\nimport sys\nsys.path.append(sys.argv[1])\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\nimport pickle\nimport numpy as np\n\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nimport keras\nkeras.backend.tensorflow_backend.set_session(tf.Session(config=config))\n\nfrom models.argparser import get_args\nfrom models.loader import load_conll_notags, make_char_seqs\nfrom models.nn import get_model\n\nfrom utils.input2feats import wordsents2sym, charsents2sym\n\n\n# parse input arguments\nargs = get_args()\n\n# load trained model parameters\nminfo = pickle.load(open(args.output_model_info, 'rb'))\nparams = minfo['params']\n\n# read and featurize unlabelled data\nword_inputs, word_sents = load_conll_notags(args.input_pred_file,\n minfo['max_slen'],\n vocab = minfo['word2idx'].keys(),\n oovs = minfo['oov_sym'],\n pads = minfo['pad_word'],\n lower = False,\n mwe = True,\n unk_case = True)\n\n# transform inputs to a symbolic representation\nif params.use_words:\n X_word, _ = wordsents2sym(word_sents,\n minfo['max_slen'],\n minfo['word2idx'],\n minfo['tag2idx'],\n minfo['oov_sym']['unknown'],\n minfo['DEFAULT_TAG'],\n minfo['pad_word']['pad'],\n minfo['PADDING_TAG'])\n\n# compute character-based inputs\nif params.use_chars:\n char_sents, _ = make_char_seqs(word_sents,\n vocab = set(minfo['char2idx'].keys()),\n oovs = minfo['oov_sym'],\n pads = minfo['pad_char'],\n len_perc = params.word_len_perc,\n lower = False)\n\n # map character sentences and their tags to a symbolic representation\n X_char = charsents2sym(char_sents,\n minfo['max_slen'],\n minfo['max_wlen'],\n minfo['char2idx'],\n minfo['oov_sym']['unknown'],\n minfo['pad_char']['begin'],\n minfo['pad_char']['end'],\n minfo['pad_char']['pad'])\n\n# build input for the model\nif params.use_words and params.use_chars:\n X = [X_word, X_char]\nelif params.use_words:\n X = X_word\nelif params.use_chars:\n X = X_char\n\n# use a trained model to predict the corresponding tags\nif params.use_words and params.use_chars:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'], num_words = minfo['num_words'],\n wemb_dim = minfo['wemb_dim'], wemb_matrix = minfo['wemb_matrix'],\n max_wlen = minfo['max_wlen'], num_chars = minfo['num_chars'],\n cemb_dim = minfo['cemb_dim'], cemb_matrix = minfo['cemb_matrix'])\nelif params.use_words:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'], num_words = minfo['num_words'],\n wemb_dim = minfo['wemb_dim'], wemb_matrix = minfo['wemb_matrix'])\n\nelif params.use_chars:\n model = get_model(minfo['params'],\n num_tags = minfo['num_tags'],\n max_slen = minfo['max_slen'],\n max_wlen = minfo['max_wlen'], num_chars = minfo['num_chars'],\n cemb_dim = minfo['cemb_dim'], cemb_matrix = minfo['cemb_matrix'])\n\nmodel.load_weights(args.output_model)\n#model.summary()\n\n# predict tags using the model\np = model.predict(X, verbose = min(1, params.verbose))\np = np.argmax(p, axis=-1) + 1\n\n# reconstruct the original file with tags\n# an input sentence can be split over multiple processed sentences\nidx_offset = 0\nwith open(args.output_pred_file, 'w') as ofile:\n for sidx in range(len(word_inputs)):\n # find the range of processed sentences that match the current input sentence\n old_offset = idx_offset\n while list(filter(lambda y: y[1] != -1, 
word_sents[sidx + idx_offset]))[-1][1] < len(word_inputs[sidx]) - 1:\n idx_offset += 1\n\n # generate the predicted mapping for each word in the input sentence\n wpos2tag = {}\n for off in range(old_offset, idx_offset + 1):\n for wpos, tag in zip([x[1] for x in word_sents[sidx + off]], p[sidx + off]):\n if wpos not in wpos2tag:\n wpos2tag[wpos] = []\n wpos2tag[wpos].append(tag)\n\n for widx in range(len(word_inputs[sidx])):\n tgt_word = word_inputs[sidx][widx]\n tgt_tag = minfo['tag2idx'][minfo['DEFAULT_TAG']]\n # multi-word expressions take the most common prediction for their individual components\n if widx in wpos2tag:\n tgt_tag = max(set(wpos2tag[widx]), key=wpos2tag[widx].count)\n # write out\n ofile.write(str(minfo['idx2tag'][tgt_tag]) + '\\t' + tgt_word + '\\n')\n ofile.write('\\n')\n\n" ]
[ [ "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ethanm88/lingvo
[ "46314590ca80a557b6b95c8acdf5956f9e045eb7" ]
[ "lingvo/core/base_model_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for base_model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nfrom six.moves import range\n\nimport tensorflow as tf\n\nfrom lingvo.core import base_decoder\nfrom lingvo.core import base_encoder\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import base_model\nfrom lingvo.core import base_model_params\nfrom lingvo.core import hyperparams\nfrom lingvo.core import layers\nfrom lingvo.core import py_utils\nfrom lingvo.core import task_scheduler\n\nFLAGS = tf.flags.FLAGS\n\n_NUMPY_RANDOM_SEED = 9885784\n\n\nclass BaseTaskTest(tf.test.TestCase):\n\n def testStatsCounter(self):\n with self.session() as sess:\n foo = base_model.StatsCounter('foo')\n val = foo.Value()\n params = base_layer.BaseLayer.Params()\n inc = foo.IncBy(params, 100)\n\n tf.global_variables_initializer().run()\n self.assertAllEqual(0, val.eval())\n self.assertAllEqual(100, sess.run(inc))\n self.assertAllEqual(100, val.eval())\n self.assertAllEqual([100, 200], sess.run([val, inc]))\n self.assertAllEqual([200, 300], sess.run([val, inc]))\n\n @classmethod\n def TestParams(cls):\n p = base_model.BaseTask.Params()\n p.name = 'base_mdl'\n p.encoder = base_encoder.BaseEncoder.Params()\n p.encoder.name = 'encoder'\n p.decoder = base_decoder.BaseDecoder.Params()\n p.decoder.name = 'decoder'\n return p\n\n def testInit(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n _ = p.cls(p)\n\n def testScaleGradients(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.cls(p)\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))\n has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)\n\n FLAGS.enable_check_numerics = False\n with self.session():\n tf.global_variables_initializer().run()\n self.assertFalse(has_nan_or_inf.eval())\n self.assertEqual(1.0, grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())\n self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())\n\n def testScaleGradientsInf(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.cls(p)\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Infinite gradient.\n var_grads = py_utils.NestedMap(a=(var_a, tf.log(0.)))\n has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)\n\n with self.session():\n 
tf.global_variables_initializer().run()\n self.assertTrue(has_nan_or_inf.eval())\n self.assertEqual(0., grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())\n self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())\n\n def testScaleGradientsNaN(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.cls(p)\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))\n has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n self.assertTrue(has_nan_or_inf.eval())\n self.assertEqual(0., grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())\n self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())\n\n def testScaleGradientsCheckNumerics(self):\n \"\"\"ScaleGradients when enable_check_numerics=True.\"\"\"\n FLAGS.enable_check_numerics = True\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.cls(p)\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))\n has_nan_or_inf, grad_scale, final_var_grads = task.ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n 'is not finite'):\n self.assertTrue(has_nan_or_inf.eval())\n self.assertEqual(0., grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(final_var_grads.a[1]).eval())\n self.assertTrue(tf.is_finite(final_var_grads.a[1]).eval())\n\n\nclass TeacherTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(TeacherTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Constant(0)))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass StudentTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(StudentTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Uniform()))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):\n\n def __init__(self, params):\n super(TestInputGenerator, self).__init__(params)\n self._input_batch_size = tf.constant(1)\n\n def InputBatch(self):\n return 0\n\n\nclass DistillationTestTask(base_model.DistillationTask):\n\n @classmethod\n def Params(cls):\n p = super(DistillationTestTask, cls).Params()\n p.name = 'distillation_test'\n p.teacher = TeacherTask.Params()\n p.student = StudentTask.Params()\n p.input = TestInputGenerator.Params()\n p.train.learning_rate = 1e3\n p.teacher.train = None\n p.teacher.eval = None\n p.student.train = None\n p.student.eval = None\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n super(DistillationTestTask, self).__init__(params)\n\n def 
ComputeLoss(self, theta, input_batch, predictions):\n return {'loss': (predictions.teacher - predictions.student, 1)}\n\n\nclass DistillationTaskTest(tf.test.TestCase):\n\n def testFProp(self):\n p = DistillationTestTask.Params()\n task = p.cls(p)\n self.assertFalse(task.params.is_eval)\n self.assertFalse(task.teacher.params.is_eval)\n self.assertIsNotNone(task.teacher.params.input)\n self.assertFalse(task.student.params.is_eval)\n self.assertIsNotNone(task.student.params.input)\n metrics = task.FPropDefaultTheta()\n self.assertItemsEqual(['loss', 'num_samples_in_batch'],\n list(metrics.keys()))\n task.BProp()\n # Expected side effects of BProp().\n self.assertIsNotNone(task.train_op)\n self.assertIsNotNone(task.total_examples)\n\n with self.session() as sess:\n tf.global_variables_initializer().run()\n\n variables = {}\n values_before_training = {}\n values_after_training = {}\n for child in ('teacher', 'student'):\n variables[child] = {\n k: v\n for k, v in getattr(task, child).vars.FlattenItems()\n }\n values_before_training[child] = sess.run(variables[child])\n\n # Train for a few steps.\n for _ in range(10):\n sess.run(task.train_op)\n\n for child in ('teacher', 'student'):\n values_after_training[child] = sess.run(variables[child])\n for k, v in six.iteritems(values_after_training[child]):\n print('Comparing variable %s' % k)\n if child == 'teacher':\n # Teacher vars should not change after training.\n self.assertAllEqual(values_before_training[child][k], v)\n else:\n # Student vars should change after training.\n self.assertNotAlmostEqual(values_before_training[child][k], v)\n\n\nclass SingleTaskModelTest(tf.test.TestCase):\n\n def testInit(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n model = p.cls(p)\n self.assertEqual(model.params.name, model.GetTask().params.name)\n self.assertEqual(model.params.task, model.GetTask().params)\n self.assertEqual(len(model.tasks), 1)\n self.assertEqual(model.tasks[0], model.GetTask())\n self.assertEqual(model.tasks[0], model.SampleTask(None))\n\n def testExponentialMovingAverage(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.ema_decay = 0.9\n model = p.cls(p)\n model._task.CreateChild('a',\n layers.BatchNormLayer.Params().Set(name='a', dim=1))\n model._task._train_op = tf.no_op()\n model._task.ApplyExponentialMovingAverage(model.ema)\n with tf.variable_scope('', reuse=True):\n beta = tf.get_variable('a/beta/var')\n mean = tf.get_variable('a/moving_mean/var')\n self.assertIsNotNone(model.ema.average(beta))\n self.assertIsNone(model.ema.average(mean))\n\n\nclass MultiTaskModelTest(tf.test.TestCase):\n\n def testInit(self):\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p1 = BaseTaskTest.TestParams()\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.5, '')\n p.task_probs.Define('b', 0.5, '')\n\n model = p.cls(p)\n self.assertEqual(len(model.tasks), 2)\n self.assertEqual(set(model.task_names), {'a', 
'b'})\n self.assertEqual(set(model.tasks), {model.GetTask('a'), model.GetTask('b')})\n self.assertEqual(model.params.task_params.a, model.GetTask('a').params)\n self.assertEqual(model.params.task_params.b, model.GetTask('b').params)\n\n def _setUpTestSampleTask(self):\n np.random.seed(_NUMPY_RANDOM_SEED)\n\n # define and initalize tasks, model and params\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p1 = BaseTaskTest.TestParams()\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n return p\n\n def _testSampleTaskHelper(self, p):\n model = p.cls(p)\n\n task_to_id = {model.children['a']: 'a', model.children['b']: 'b'}\n task_counts = {'a': 0, 'b': 0}\n\n # initialize tensorflow graph and global step\n with self.session() as sess:\n tf.global_variables_initializer().run()\n global_step = sess.run(model.global_step)\n for _ in range(100):\n task = model.SampleTask(global_step)\n task_counts[task_to_id[task]] += 1\n\n self.assertEqual(task_counts['a'], 83)\n self.assertEqual(task_counts['b'], 17)\n\n def testSampleTaskSpecifiedWithoutScheduler(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.8, '')\n p.task_probs.Define('b', 0.2, '')\n\n self._testSampleTaskHelper(p)\n\n def testSampleTask(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_schedule = task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)]\n\n self._testSampleTaskHelper(p)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.is_nan", "tensorflow.get_variable", "tensorflow.constant", "numpy.random.seed", "tensorflow.is_finite", "tensorflow.ones_like", "tensorflow.test.main", "tensorflow.global_variables_initializer", "tensorflow.no_op", "tensorflow.log", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]