Columns:
  repo_name          string (length 6 to 130)
  hexsha             list
  file_path          list
  code               list
  apis               list
  possible_versions  list
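Each record below pairs a repository name with parallel lists describing one or more files: commit hash, file path, source code, detected API calls, and candidate library versions, where entry i of each list refers to the same file. A minimal sketch of iterating over records of this shape follows; it assumes a hypothetical JSON Lines export named api_usage.jsonl (the filename and storage format are assumptions, not part of the dump).

import json

# Hypothetical file: one JSON object per line, matching the columns above.
with open("api_usage.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "JTT94/filterflow"
        # hexsha, file_path, code, and apis are parallel, per-file lists.
        for sha, path, src, apis in zip(record["hexsha"],
                                        record["file_path"],
                                        record["code"],
                                        record["apis"]):
            print(repo, sha[:8], path, len(src), "chars,", len(apis), "APIs")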
JTT94/filterflow
[ "536f71105dc263472f54e80f2ef4ad5e4ee6621f" ]
[ "tests/resampling/differentiable/test_optimized.py" ]
[ "import tensorflow as tf\n\nfrom filterflow.base import State\nfrom filterflow.resampling.differentiable.loss.regularized import SinkhornLoss\nfrom filterflow.resampling.differentiable.optimized import OptimizedPointCloud\nfrom filterflow.resampling.differentiable.optimizer.sgd import SGD\nfrom filterflow.resampling.standard.systematic import SystematicResampler\n\n\nclass TestOptimizedPointCloud(tf.test.TestCase):\n\n def setUp(self):\n N = 10\n n_particles = tf.constant(N)\n dimension = tf.constant(2)\n batch_size = tf.constant(3)\n\n self.flags = tf.constant([True, False, True])\n\n weights = tf.random.uniform((batch_size, n_particles), dtype=float)\n weights = weights / tf.reduce_sum(weights, 1, keepdims=True)\n particles = tf.random.uniform((batch_size, n_particles, dimension), -1, 1)\n log_likelihoods = tf.zeros(batch_size, dtype=float)\n self.state = State(particles=particles, log_weights=tf.math.log(weights),\n weights=weights, log_likelihoods=log_likelihoods,\n ancestor_indices=None, resampling_correction=None)\n\n self.loss = SinkhornLoss(tf.constant(0.05))\n loss_optimizer = SGD(self.loss, lr=0.5, n_iter=10)\n intermediate_resampling = SystematicResampler(on_log=tf.constant(True))\n self.cloud_optimizer = OptimizedPointCloud(loss_optimizer, intermediate_resampling)\n\n def test_apply(self):\n optimized_states = self.cloud_optimizer.apply(self.state, self.flags)\n\n self.assertNotAllClose(self.state.particles[0], optimized_states.particles[0])\n self.assertAllClose(self.state.particles[1], optimized_states.particles[1])\n\n self.assertAllClose(optimized_states.log_weights[0], -tf.math.log([10.] * 10))\n\n optimized_loss = self.loss(optimized_states.log_weights, optimized_states.weights, optimized_states.particles,\n self.state.log_weights, self.state.weights, self.state.particles)\n\n non_optimized_loss = self.loss(optimized_states.log_weights, optimized_states.weights, self.state.particles,\n self.state.log_weights, self.state.weights, self.state.particles)\n\n self.assertAllLess(optimized_loss[self.flags] - non_optimized_loss[self.flags], 0.)\n self.assertAllEqual(optimized_loss[tf.logical_not(self.flags)] - non_optimized_loss[tf.logical_not(self.flags)],\n [0.])\n" ]
[ [ "tensorflow.constant", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.random.uniform", "tensorflow.math.log", "tensorflow.logical_not" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mikeryu/pover-t
[ "1c6b67f3eeae386f8a627b282f31b5d4483977dc" ]
[ "DAG/trim-nn.py" ]
[ "# %matplotlib inline\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Input\nfrom keras import optimizers\nimport keras.backend as K\n\n# data directory\nDATA_DIR = os.path.join('../..', 'pover-t', 'data')\nA_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'A_hhold_train.csv')\nB_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'B_hhold_train.csv')\nC_TRAIN_HHOLD = os.path.join(DATA_DIR, 'train', 'C_hhold_train.csv')\nA_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'A_indiv_train.csv')\nB_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'B_indiv_train.csv')\nC_TRAIN_IND = os.path.join(DATA_DIR, 'train', 'C_indiv_train.csv')\nA_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'A_hhold_test.csv')\nB_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'B_hhold_test.csv')\nC_TEST_HHOLD = os.path.join(DATA_DIR, 'test', 'C_hhold_test.csv')\nA_TEST_IND = os.path.join(DATA_DIR, 'test', 'A_indiv_test.csv')\nB_TEST_IND = os.path.join(DATA_DIR, 'test', 'B_indiv_test.csv')\nC_TEST_IND = os.path.join(DATA_DIR, 'test', 'C_indiv_test.csv')\n\ndata_paths = {'A': {'train': A_TRAIN_HHOLD, 'test': A_TEST_HHOLD},\n 'B': {'train': B_TRAIN_HHOLD, 'test': B_TEST_HHOLD},\n 'C': {'train': C_TRAIN_HHOLD, 'test': C_TEST_HHOLD}}\n\nind_data_paths = {'A': {'train': A_TRAIN_IND, 'test': A_TEST_IND},\n 'B': {'train': B_TRAIN_IND, 'test': B_TEST_IND},\n 'C': {'train': C_TRAIN_IND, 'test': C_TEST_IND}}\n\ndef main():\n a_train_hhold, b_train_hhold, c_train_hhold, a_train_ind, b_train_ind,\\\n c_train_ind = read_train_data()\n print(a_train_ind.shape)\n\n # Group individual data by household\n #a_ind_group = a_train_ind.groupby('id')\n #print(\"\\n ==== Group By ====\")\n #print(len(a_ind_group.groups.keys()))\n\n # Trim data that gives same answers for poor and non-poor\n a_ind_trim = trim_non_unique(a_train_ind)\n a_hhold_trim = trim_non_unique(a_train_hhold)\n b_ind_trim = trim_non_unique(b_train_ind)\n b_hhold_trim = trim_non_unique(b_train_hhold)\n c_ind_trim = trim_non_unique(c_train_ind)\n c_hhold_trim = trim_non_unique(c_train_hhold)\n\n # Potentially useful - number of individuals in household?\n\n # Merge trimmed individual and original household training data\n a_data = pd.concat([a_ind_trim, a_train_hhold])\n b_data = pd.concat([b_ind_trim, b_train_hhold])\n c_data = pd.concat([c_ind_trim, c_train_hhold])\n\n # Merge trimmed individual and trimmed household training data\n #a_data = pd.concat([a_ind_trim, a_hhold_trim])\n #b_data = pd.concat([b_ind_trim, b_hhold_trim])\n #c_data = pd.concat([c_ind_trim, c_hhold_trim])\n\n # Process the training data\n print(\"Country A\")\n a_train = preprocess_data(a_data.drop(columns=['poor']))\n a_labels = np.ravel(a_data.poor)\n\n print(\"\\nCountry B\")\n b_train = preprocess_data(b_data.drop(columns=['poor']))\n b_labels = np.ravel(b_data.poor)\n\n print(\"\\nCountry C\")\n c_train = preprocess_data(c_data.drop(columns=['poor']))\n c_labels = np.ravel(c_data.poor)\n\n print(\"\\nTest Data\")\n a_test_hhold, b_test_hhold, c_test_hhold, a_test_ind, b_test_ind,\\\n c_test_ind = read_test_data()\n\n # Merge individual and household test data\n #a_test_data = pd.concat([a_test_ind, a_test_hhold])\n #b_test_data = pd.concat([b_test_ind, b_test_hhold])\n #c_test_data = pd.concat([c_test_ind, c_test_hhold])\n\n #Process the test data\n a_test = preprocess_data(a_test_hhold, 
enforce_cols=a_train.columns)\n b_test = preprocess_data(b_test_hhold, enforce_cols=b_train.columns)\n c_test = preprocess_data(c_test_hhold, enforce_cols=c_train.columns)\n\n # Train and predict over the data sets\n a_preds = train_and_predict(a_train, a_labels, a_test)\n a_sub = make_country_sub(a_preds, a_test, 'A')\n\n b_preds = train_and_predict(b_train, b_labels, b_test)\n b_sub = make_country_sub(b_preds, b_test, 'B')\n\n c_preds = train_and_predict(c_train, c_labels, c_test)\n c_sub = make_country_sub(c_preds, c_test, 'C')\n print(c_sub)\n\n return 0\n # combine predictions and save for submission\n submission = pd.concat([a_sub, b_sub, c_sub])\n\n print(\"Submission head:\")\n print(submission.head())\n print(\"\\nSubmission tail:\")\n print(submission.tail())\n\n print(\"Converting to csv for submission...\")\n submission.to_csv('merge_nn_submission_v2.csv')\n print(\"All done\")\n\n# Drop columns in data that have little to no variation (same answers for poor and non-poor)\ndef trim_non_unique(df):\n print(\"\\n ======== TRIM DATA =========\\n\")\n nonuniques = df.nunique()\n cols_to_drop = [col for col in nonuniques.index if nonuniques[col] <= 5]\n # Need columans for poor and country\n cols_to_drop.remove('poor')\n cols_to_drop.remove('country')\n #print(cols_to_drop)\n\n df_trim = df.drop(cols_to_drop, axis=1)\n print(df_trim.shape)\n return df_trim\n\n\ndef train_and_predict(train, ids, test):\n model = Sequential()\n\n # Add an input layer\n model.add(Dense(72, activation='relu', input_shape=(train.shape[1],)))\n # Add some hidden layers\n model.add(Dense(36, activation='relu'))\n model.add(Dense(36, activation='relu'))\n model.add(Dense(36, activation='sigmoid'))\n model.add(Dense(36, activation='sigmoid'))\n # Add an output layer\n model.add(Dense(1, activation='sigmoid'))\n print(\"Model output shape:\")\n model.output_shape\n print(\"Model summary:\")\n model.summary()\n print(\"Model config:\")\n model.get_config()\n print(\"Model weights:\")\n model.get_weights()\n\n # Compile the model and fit the model to the data\n model.compile(loss='mean_squared_error',\n optimizer='sgd',\n metrics=['accuracy', precision, recall, fmeasure])\n\n model.fit(train, ids, epochs=20, batch_size=36, verbose=1)\n score = model.evaluate(train, ids, verbose=1)\n print(score)\n\n preds = model.predict(test)\n\n return preds\n\ndef read_train_data():\n # load training data\n a_train = pd.read_csv(data_paths['A']['train'], index_col='id')\n b_train = pd.read_csv(data_paths['B']['train'], index_col='id')\n c_train = pd.read_csv(data_paths['C']['train'], index_col='id')\n a_indiv_train = pd.read_csv(ind_data_paths['A']['train'], index_col='id')\n b_indiv_train = pd.read_csv(ind_data_paths['B']['train'], index_col='id')\n c_indiv_train = pd.read_csv(ind_data_paths['C']['train'], index_col='id')\n\n print(\"\\n\\n=============================================\\n\")\n print(\"A Training\")\n #print(a_train.head())\n print(a_train.info())\n print(\"\\n\\n=============================================\\n\")\n print(\"B Training\")\n #print(b_train.head())\n print(b_train.info())\n print(\"\\n\\n=============================================\\n\")\n print(\"C Training\")\n #print(c_train.head())\n print(c_train.info())\n\n return a_train, b_train, c_train, a_indiv_train, b_indiv_train,\\\n c_indiv_train\n\ndef read_test_data():\n # load test data\n a_test = pd.read_csv(data_paths['A']['test'], index_col='id')\n b_test = pd.read_csv(data_paths['B']['test'], index_col='id')\n c_test = 
pd.read_csv(data_paths['C']['test'], index_col='id')\n a_indiv_test = pd.read_csv(ind_data_paths['A']['test'], index_col='id')\n b_indiv_test = pd.read_csv(ind_data_paths['B']['test'], index_col='id')\n c_indiv_test = pd.read_csv(ind_data_paths['C']['test'], index_col='id')\n\n return a_test, b_test, c_test, a_indiv_test, b_indiv_test, c_indiv_test\n\n# Standardize features\ndef standardize(df, numeric_only=True):\n numeric = df.select_dtypes(include=['int64', 'float64'])\n\n # subtract mean and divide by std\n df[numeric.columns] = (numeric - numeric.mean()) / numeric.std()\n\n return df\n\ndef preprocess_data(df, enforce_cols=None):\n print(\"Input shape:\\t{}\".format(df.shape))\n\n df = standardize(df)\n print(\"After standardization {}\".format(df.shape))\n\n # create dummy variables for categoricals\n df = pd.get_dummies(df)\n print(\"After converting categoricals:\\t{}\".format(df.shape))\n\n\n # match test set and training set columns\n if enforce_cols is not None:\n to_drop = np.setdiff1d(df.columns, enforce_cols)\n to_add = np.setdiff1d(enforce_cols, df.columns)\n\n df.drop(to_drop, axis=1, inplace=True)\n df = df.assign(**{c: 0 for c in to_add})\n\n df.fillna(0, inplace=True)\n\n return df\n\n# save submission\ndef make_country_sub(preds, test_feat, country):\n # make sure we code the country correctly\n country_codes = ['A', 'B', 'C']\n\n # get just the poor probabilities\n country_sub = pd.DataFrame(data=preds[:, 0], # proba p=1\n columns=['poor'],\n index=test_feat.index)\n\n\n # add the country code for joining later\n country_sub[\"country\"] = country\n return country_sub[[\"country\", \"poor\"]]\n\n\n# From previous keras version\n# https://github.com/keras-team/keras/commit/a56b1a55182acf061b1eb2e2c86b48193a0e88f7 \ndef precision(y_true, y_pred):\n \"\"\"Precision metric.\n \n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n# From previous keras version\n# https://github.com/keras-team/keras/commit/a56b1a55182acf061b1eb2e2c86b48193a0e88f7 \ndef recall(y_true, y_pred):\n \"\"\"Recall metric.\n \n Only computes a batch-wise average of recall.\n \n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n# From previous keras version\n# https://github.com/keras-team/keras/commit/a56b1a55182acf061b1eb2e2c86b48193a0e88f7 \ndef fbeta_score(y_true, y_pred, beta=1):\n \"\"\"Computes the F score.\n \n The F score is the weighted harmonic mean of precision and recall.\n Here it is only computed as a batch-wise average, not globally.\n\n This is useful for multi-label classification, where input samples can be\n classified as sets of labels. By only using accuracy (precision) a model\n would achieve a perfect score by simply assigning every class to every\n input. In order to avoid this, a metric should penalize incorrect class\n assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)\n computes this, as a weighted mean of the proportion of correct class\n assignments vs. 
the proportion of incorrect class assignments.\n\n With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning\n correct classes becomes more important, and with beta > 1 the metric is\n instead weighted towards penalizing incorrect class assignments.\n \"\"\"\n if beta < 0:\n raise ValueError('The lowest choosable beta is zero (only precision).')\n\n # If there are no true positives, fix the F score at 0 like sklearn.\n if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:\n return 0\n\n p = precision(y_true, y_pred)\n r = recall(y_true, y_pred)\n bb = beta ** 2\n fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())\n return fbeta_score\n\n\n# From previous keras version\n# https://github.com/keras-team/keras/commit/a56b1a55182acf061b1eb2e2c86b48193a0e88f7 \ndef fmeasure(y_true, y_pred):\n \"\"\"Computes the f-measure, the harmonic mean of precision and recall.\n \n Here it is only computed as a batch-wise average, not globally.\n \"\"\"\n return fbeta_score(y_true, y_pred, beta=1) \n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.concat", "pandas.read_csv", "matplotlib.use", "pandas.DataFrame", "numpy.setdiff1d", "numpy.ravel", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
18150167970/YOLOV3_pytorch
[ "0a0c872c6fe086e534224568082d7754790f0c07" ]
[ "dataset/vocdataset.py" ]
[ "import os\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport torch\nfrom torch.utils.data import Dataset\nimport cv2\nfrom pycocotools.coco import COCO\n\nfrom utils.utils import *\n\n\nclass VOCDataset(Dataset):\n \"\"\"\n COCO dataset class.\n \"\"\"\n def __init__(self, model_type, data_dir,\n name='trainval', img_size=416,\n augmentation=None, min_size=1, debug=False):\n \"\"\"\n COCO dataset initialization. Annotation data are read into memory by COCO API.\n Args:\n model_type (str): model name specified in config file\n data_dir (str): dataset root directory\n json_file (str): COCO json file name\n name (str): COCO data name (e.g. 'train2017' or 'val2017')\n img_size (int): target image size after pre-processing\n min_size (int): bounding boxes smaller than this are ignored\n debug (bool): if True, only one data id is selected from the dataset\n \"\"\"\n self.data_dir = data_dir\n self.model_type = model_type\n id_list_file = os.path.join(\n data_dir, 'ImageSets/Main/{0}.txt'.format(name))\n self.name=name\n self.ids = [id_.strip() for id_ in open(id_list_file)]\n if debug:\n self.ids = self.ids[1:2]\n print(\"debug mode...\", self.ids)\n self.max_labels = 50\n self.img_size = img_size\n self.min_size = min_size\n self.lrflip = augmentation['LRFLIP']\n self.jitter = augmentation['JITTER']\n self.random_placing = augmentation['RANDOM_PLACING']\n self.hue = augmentation['HUE']\n self.saturation = augmentation['SATURATION']\n self.exposure = augmentation['EXPOSURE']\n self.random_distort = augmentation['RANDOM_DISTORT']\n\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, index):\n \"\"\"\n One image / label pair for the given index is picked up \\\n and pre-processed.\n Args:\n index (int): data index\n Returns:\n img (numpy.ndarray): pre-processed image\n padded_labels (torch.Tensor): pre-processed label data. \\\n The shape is :math:`[self.max_labels, 5]`. \\\n each label consists of [class, xc, yc, w, h]:\n class (float): class index.\n xc, yc (float) : center of bbox whose values range from 0 to 1.\n w, h (float) : size of bbox whose values range from 0 to 1.\n info_img : tuple of h, w, nh, nw, dx, dy.\n h, w (int): original shape of the image\n nh, nw (int): shape of the resized image without padding\n dx, dy (int): pad size\n id_ (int): same as the input index. 
Used for evaluation.\n \"\"\"\n id_ = self.ids[index]\n\n lrflip = False\n if np.random.rand() > 0.5 and self.lrflip == True:\n lrflip = True\n\n # load image and preprocess\n img_file = os.path.join(self.data_dir, 'JPEGImages', id_ + '.jpg')\n img = cv2.imread(img_file)\n\n #图像大小变换以及填充\n img, info_img = preprocess(img, self.img_size, jitter=self.jitter,\n random_placing=self.random_placing)\n\n if self.random_distort:\n #HSV变换\n img = random_distort(img, self.hue, self.saturation, self.exposure)\n\n img = np.transpose(img / 255., (2, 0, 1))\n\n #水平翻转\n if lrflip:\n img = np.flip(img, axis=2).copy()\n\n # load labels\n labels = []\n anno = ET.parse(\n os.path.join(self.data_dir, 'Annotations', id_ + '.xml'))\n\n for obj in anno.findall('object'):\n # when in not using difficult split, and the object is\n # difficult, skipt it.\n #读取标签\n name = obj.find('name').text.lower().strip()\n bndbox_anno = obj.find('bndbox')\n\n # subtract 1 to make pixel indexes 0-based\n bbox_=[int(bndbox_anno.find(tag).text) - 1\n for tag in ('xmin', 'ymin', 'xmax', 'ymax')]\n x1 = float(bndbox_anno.find('xmin').text) - 1\n y1 = float(bndbox_anno.find('ymin').text) - 1\n x2 = float(bndbox_anno.find('xmax').text) - 1\n y2 = float(bndbox_anno.find('ymax').text) - 1\n\n bbox_[0] = x1\n bbox_[1] = y1\n bbox_[2] = x2-x1\n bbox_[3] = y2-y1\n\n #label为【x,y,w,h】 这里x,y是左上角坐标\n if bbox_[2] > self.min_size and bbox_[3] > self.min_size:\n labels.append([])\n labels[-1].append(VOC_BBOX_LABEL_NAMES.index(name))\n labels[-1].extend(bbox_)\n\n # yolo标签是50个bounding box,不够用零来凑\n padded_labels = np.zeros((self.max_labels, 5))\n if len(labels) > 0:\n labels = np.stack(labels)\n if 'YOLO' in self.model_type:\n labels = label2yolobox(labels, info_img, self.img_size, lrflip)\n padded_labels[range(len(labels))[:self.max_labels]\n ] = labels[:self.max_labels]\n padded_labels = torch.from_numpy(padded_labels)\n return img, padded_labels, info_img, id_\n\n\n\n\nVOC_BBOX_LABEL_NAMES = (\n 'aeroplane',\n 'bicycle',\n 'bird',\n 'boat',\n 'bottle',\n 'bus',\n 'car',\n 'cat',\n 'chair',\n 'cow',\n 'diningtable',\n 'dog',\n 'horse',\n 'motorbike',\n 'person',\n 'pottedplant',\n 'sheep',\n 'sofa',\n 'train',\n 'tvmonitor')\n" ]
[ [ "torch.from_numpy", "numpy.stack", "numpy.random.rand", "numpy.transpose", "numpy.flip", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qcri/tasrif
[ "327bc1eccb8f8e11d8869ba65a7c72ad038aa094", "327bc1eccb8f8e11d8869ba65a7c72ad038aa094", "327bc1eccb8f8e11d8869ba65a7c72ad038aa094", "327bc1eccb8f8e11d8869ba65a7c72ad038aa094", "327bc1eccb8f8e11d8869ba65a7c72ad038aa094", "327bc1eccb8f8e11d8869ba65a7c72ad038aa094" ]
[ "tasrif/test_scripts/test_pipeline_RenameOperator.py", "tasrif/test_scripts/test_pipeline_AggregateActivityDatesOperator.py", "tasrif/processing_pipeline/tests/test_scoped_processing_operator.py", "tasrif/processing_pipeline/pandas/qcut_operator.py", "tasrif/test_scripts/test_pipeline_SetIndexOperator.py", "tasrif/processing_pipeline/pandas/convert_to_datetime.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\nimport pandas as pd\n\nfrom tasrif.processing_pipeline.pandas import CorrOperator\n\ndf = pd.DataFrame(\n [[1, 1, 3], [1, 1, 5], [1, 2, 3], [2, 1, 10], [2, 1, 0]],\n columns=[\"logId\", \"sleep_level\", \"awake_count\"],\n)\n\ndf = df.set_index(\"logId\")\nop = CorrOperator()\ndf1 = op.process(df)\ndf1[0]\n", "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport numpy as np\nimport pandas as pd\n\nfrom tasrif.processing_pipeline.custom import AggregateActivityDatesOperator\n\ndf = pd.DataFrame(\n [\n [122, 1, \"2016-03-13 02:39:00\", 1],\n [122, 1, \"2016-03-13 02:40:00\", 1],\n [122, 1, \"2016-03-13 02:41:00\", 1],\n [122, 1, \"2016-03-13 02:42:00\", 1],\n [122, 1, \"2016-03-13 02:43:00\", 1],\n [122, 1, \"2016-03-13 02:44:00\", 1],\n [122, 1, \"2016-03-13 02:45:00\", 2],\n [122, 1, \"2016-03-13 02:46:00\", 2],\n [122, 1, \"2016-03-13 02:47:00\", 1],\n [122, 1, \"2016-03-13 02:48:00\", 1],\n [122, 2, \"2016-03-13 06:06:00\", 1],\n [122, 2, \"2016-03-13 06:07:00\", 1],\n [122, 2, \"2016-03-13 06:08:00\", 1],\n [122, 2, \"2016-03-13 06:09:00\", 1],\n [122, 2, \"2016-03-13 06:10:00\", 1],\n [122, 2, \"2016-03-13 06:11:00\", 1],\n [122, 2, \"2016-03-13 06:12:00\", 1],\n [122, 2, \"2016-03-13 06:13:00\", 1],\n [122, 2, \"2016-03-13 06:14:00\", 1],\n [122, 2, \"2016-03-13 06:15:00\", 1],\n [144, 1, \"2016-03-13 06:36:00\", 1],\n [144, 1, \"2016-03-13 06:37:00\", 1],\n [144, 1, \"2016-03-13 06:38:00\", 1],\n [144, 1, \"2016-03-13 06:39:00\", 1],\n [144, 1, \"2016-03-13 06:40:00\", 1],\n [144, 1, \"2016-03-13 06:41:00\", 1],\n [144, 1, \"2016-03-13 06:42:00\", 1],\n [144, 1, \"2016-03-13 06:43:00\", 1],\n [144, 1, \"2016-03-13 06:44:00\", 2],\n [144, 1, \"2016-03-13 06:45:00\", 1],\n [167, 1, \"2016-03-14 01:32:00\", 2],\n [167, 1, \"2016-03-14 01:33:00\", 2],\n [167, 1, \"2016-03-14 01:34:00\", 1],\n [167, 1, \"2016-03-14 01:35:00\", 1],\n [167, 1, \"2016-03-14 01:36:00\", 1],\n [167, 1, \"2016-03-14 01:37:00\", 1],\n [167, 1, \"2016-03-14 01:38:00\", 1],\n [167, 1, \"2016-03-14 01:39:00\", 1],\n [167, 1, \"2016-03-14 01:40:00\", 1],\n [167, 1, \"2016-03-14 01:41:00\", 1],\n [167, 2, \"2016-03-15 02:36:00\", 2],\n [167, 2, \"2016-03-15 02:37:00\", 2],\n [167, 2, \"2016-03-15 02:38:00\", 2],\n [167, 2, \"2016-03-15 02:39:00\", 3],\n [167, 2, \"2016-03-15 02:40:00\", 3],\n [167, 2, \"2016-03-15 02:41:00\", 3],\n [167, 2, \"2016-03-15 02:42:00\", 3],\n [167, 2, \"2016-03-15 02:43:00\", 3],\n [167, 2, \"2016-03-15 02:44:00\", 2],\n [167, 2, \"2016-03-15 02:45:00\", 1],\n [167, 3, \"2016-03-15 03:03:00\", 1],\n [167, 3, \"2016-03-15 03:04:00\", 1],\n [167, 3, \"2016-03-15 03:05:00\", 1],\n [167, 3, \"2016-03-15 03:06:00\", 1],\n [167, 3, \"2016-03-15 03:07:00\", 1],\n [167, 3, \"2016-03-15 03:08:00\", 1],\n [167, 3, \"2016-03-15 03:09:00\", 1],\n [167, 3, \"2016-03-15 03:10:00\", 1],\n [167, 3, \"2016-03-15 03:11:00\", 1],\n [167, 3, \"2016-03-15 03:12:00\", 1],\n [167, 4, \"2016-03-15 03:58:00\", 1],\n [167, 4, \"2016-03-15 03:59:00\", 1],\n [167, 4, \"2016-03-15 04:00:00\", 1],\n [167, 4, \"2016-03-15 04:01:00\", 1],\n [167, 4, \"2016-03-15 
04:02:00\", 1],\n [167, 4, \"2016-03-15 04:03:00\", 1],\n [167, 4, \"2016-03-15 04:04:00\", 1],\n [167, 4, \"2016-03-15 04:05:00\", 1],\n [167, 4, \"2016-03-15 04:06:00\", 1],\n [167, 4, \"2016-03-15 04:07:00\", 1],\n ],\n columns=[\"Id\", \"logId\", \"date\", \"value\"],\n)\n\noperator = AggregateActivityDatesOperator(\n date_feature_name=\"date\",\n participant_identifier=[\"Id\", \"logId\"],\n aggregation_definition={\"value\": [np.sum, lambda x: x[x == 1].sum()]},\n)\noperator.process(df)[0]\n", "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\n# %load_ext autoreload\n# %autoreload 2\nimport pandas as pd\n\nfrom tasrif.processing_pipeline import (\n ProcessingOperator,\n ScopedProcessingOperator,\n SequenceOperator,\n Variable,\n)\n\n# %%\ndf1 = pd.DataFrame({\"id\": [1, 2, 3], \"cities\": [\"Rome\", \"Barcelona\", \"Stockholm\"]})\ndf2 = pd.DataFrame({\"id\": [4, 5, 6], \"cities\": [\"Doha\", \"Vienna\", \"Belo Horizonte\"]})\n\n\nclass TrainingOperator(ProcessingOperator):\n def __init__(self, model, x=1, y=2):\n super().__init__()\n self.model = model\n self.x = x\n self.y = y\n\n def _process(self, *args):\n self.model.value = {\"x\": self.x, \"y\": self.y}\n return args\n\n\nclass PredictionOperator(ProcessingOperator):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def _process(self, *args):\n print(self.model.value)\n return args\n\n\nmodela = Variable(None)\ns = SequenceOperator(\n processing_operators=[\n TrainingOperator(model=modela),\n PredictionOperator(model=modela),\n ScopedProcessingOperator(\n lambda modelb=Variable(): SequenceOperator(\n processing_operators=[\n TrainingOperator(model=modelb),\n PredictionOperator(model=modelb),\n ScopedProcessingOperator(\n lambda: SequenceOperator(\n processing_operators=[\n TrainingOperator(model=modelb),\n PredictionOperator(model=modelb),\n ]\n )\n ),\n ]\n )\n ),\n ]\n)\n\n# %%\ns.process(df1, df2)\n\n# %%\n", "\"\"\"\nOperator to convert a continuous variable to a categorical variable, useful for binning data\n\"\"\"\nimport pandas as pd\n\nfrom tasrif.processing_pipeline import PandasOperator\nfrom tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin\n\n\nclass QCutOperator(InputsAreDataFramesValidatorMixin, PandasOperator):\n \"\"\"\n\n Quantile-based discretization function using Pandas ``qcut``\n\n Examples\n --------\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import datetime\n >>>\n >>> from tasrif.processing_pipeline.pandas import CutOperator\n >>>\n >>>\n >>> df = pd.DataFrame({\n ... 'Time': pd.date_range('2018-01-01', '2018-01-10', freq='1H', closed='left'),\n ... 'Steps': np.random.randint(100,5000, size=9*24),\n ... }\n ... )\n >>>\n >>> ids = []\n >>> for i in range(1, 217):\n ... ids.append(i%10 + 1)\n >>>\n >>> df[\"Id\"] = ids\n ### Input ###\n Time Steps Id\n 0 2018-01-01 00:00:00 4974 2\n 1 2018-01-01 01:00:00 3377 3\n 2 2018-01-01 02:00:00 293 4\n 3 2018-01-01 03:00:00 3389 5\n 4 2018-01-01 04:00:00 1906 6\n ... ... ... ...\n 211 2018-01-09 19:00:00 4715 3\n 212 2018-01-09 20:00:00 1947 4\n 213 2018-01-09 21:00:00 2181 5\n 214 2018-01-09 22:00:00 2701 6\n 215 2018-01-09 23:00:00 3444 7\n\n >>> # 4 Equally distributed bins\n >>> df1 = df.copy()\n >>> operator = QCutOperator(cut_column_name='Steps',\n ... bin_column_name='Bin',\n ... quantile=4,\n ... 
retbins=True)\n >>> df1, bins = operator.process(df1)[0]\n >>> print('Bins:', bins)\n >>> df1\n ### Output 1 ###\n Bins: [ 100. 1341.5 2437.5 3502.25 4987. ]\n (99.999, 1341.5] 54\n (1341.5, 2437.5] 54\n (2437.5, 3502.25] 54\n (3502.25, 4987.0] 54\n Name: Bin, dtype: int64\n Time Steps Id Bin\n 0 2018-01-01 00:00:00 1414 2 (1341.5, 2437.5]\n 1 2018-01-01 01:00:00 1513 3 (1341.5, 2437.5]\n 2 2018-01-01 02:00:00 937 4 (99.999, 1341.5]\n 3 2018-01-01 03:00:00 3551 5 (3502.25, 4987.0]\n 4 2018-01-01 04:00:00 2573 6 (2437.5, 3502.25]\n ... ... ... ... ...\n 211 2018-01-09 19:00:00 2835 3 (2437.5, 3502.25]\n 212 2018-01-09 20:00:00 409 4 (99.999, 1341.5]\n 213 2018-01-09 21:00:00 691 5 (99.999, 1341.5]\n 214 2018-01-09 22:00:00 1533 6 (1341.5, 2437.5]\n 215 2018-01-09 23:00:00 3018 7 (2437.5, 3502.25]\n\n >>> # Custom bins\n >>> cut_labels = ['Sedentary', \"Light\", 'Moderate', 'Vigorous']\n >>> quantiles = [0, 0.2, 0.5, 0.80, 1]\n >>>\n >>> df2 = df.copy()\n >>> operator = QCutOperator(cut_column_name='Steps',\n ... bin_column_name='Bin',\n ... quantile=quantiles,\n ... labels=cut_labels)\n >>> df2 = operator.process(df1)[0]\n >>> print(df2['Bin'].value_counts())\n >>> df2\n ### Output 2 ###\n Moderate 65\n Light 64\n Sedentary 44\n Vigorous 43\n Name: Bin, dtype: int64\n ...\n Time Steps Id Bin\n 0 2018-01-01 00:00:00 1414 2 Light\n 1 2018-01-01 01:00:00 1513 3 Light\n 2 2018-01-01 02:00:00 937 4 Sedentary\n 3 2018-01-01 03:00:00 3551 5 Moderate\n 4 2018-01-01 04:00:00 2573 6 Moderate\n ... ... ... ... ...\n 211 2018-01-09 19:00:00 2835 3 Moderate\n 212 2018-01-09 20:00:00 409 4 Sedentary\n 213 2018-01-09 21:00:00 691 5 Sedentary\n 214 2018-01-09 22:00:00 1533 6 Light\n 215 2018-01-09 23:00:00 3018 7 Moderate\n \"\"\"\n\n def __init__(self, cut_column_name, bin_column_name, quantile, **kwargs):\n \"\"\"Initializes the operator\n\n Args:\n cut_column_name (str):\n Name of the column to perform the cut operation on\n bin_column_name (str):\n Name of the column representing the bins\n quantile (int or list-like of float):\n Number of quantiles. 10 for deciles, 4 for quartiles, etc.\n Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] 
for quartiles.\n **kwargs:\n key word arguments passed to pandas ``cut`` method\n\n \"\"\"\n self.cut_column_name = cut_column_name\n self.bin_column_name = bin_column_name\n self.quantile = quantile\n super().__init__(kwargs)\n self.kwargs = kwargs\n\n def _process(self, *data_frames):\n \"\"\"Processes the passed data frame as per the configuration define in the constructor.\n\n Args:\n *data_frames (list of pd.DataFrame):\n Variable number of pandas dataframes to be processed\n\n Returns:\n pd.DataFrame -or- list[pd.DataFrame]\n Processed dataframe(s) resulting from applying the operator\n \"\"\"\n\n processed = []\n for data_frame in data_frames:\n if \"retbins\" in self.kwargs:\n result, bins = pd.qcut(\n data_frame[self.cut_column_name], self.quantile, **self.kwargs\n )\n\n data_frame[self.bin_column_name] = result\n processed.append((data_frame, bins))\n else:\n data_frame[self.bin_column_name] = pd.qcut(\n data_frame[self.cut_column_name], self.quantile, **self.kwargs\n )\n processed.append(data_frame)\n return processed\n", "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\nimport pandas as pd\n\nfrom tasrif.processing_pipeline.pandas import SetIndexOperator\n\ndf = pd.DataFrame(\n [\n [1, \"2020-05-01 00:00:00\", 1],\n [1, \"2020-05-01 01:00:00\", 1],\n [1, \"2020-05-01 03:00:00\", 2],\n [2, \"2020-05-02 00:00:00\", 1],\n [2, \"2020-05-02 01:00:00\", 1],\n ],\n columns=[\"logId\", \"timestamp\", \"sleep_level\"],\n)\n\ndf\n\n# %%\nop = SetIndexOperator(\"timestamp\")\n\nop.process(df)\n\n# %%\n", "\"\"\"\nOperator to convert a column feature from string to datetime\n\"\"\"\nimport pandas as pd\n\nfrom tasrif.processing_pipeline import PandasOperator\nfrom tasrif.processing_pipeline.validators import InputsAreDataFramesValidatorMixin\n\n\nclass ConvertToDatetimeOperator(InputsAreDataFramesValidatorMixin, PandasOperator):\n \"\"\"\n\n Converts a set of (string) features to datetime using Pandas ``to_datetime``\n\n Examples\n --------\n\n >>> import pandas as pd\n >>> from tasrif.processing_pipeline.pandas import ConvertToDatetimeOperator\n >>>\n >>> df0 = pd.DataFrame([[1, \"2020-05-01 00:00:00\", 1], [1, \"2020-05-01 01:00:00\", 1],\n >>> [1, \"2020-05-01 03:00:00\", 2], [2, \"2020-05-02 00:00:00\", 1],\n >>> [2, \"2020-05-02 01:00:00\", 1]],\n >>> columns=['logId', 'timestamp', 'sleep_level'])\n >>>\n >>> operator = ConvertToDatetime(feature_names=[\"timestamp\"], utc=True)\n >>> df0 = operator.process(df0)\n >>>\n >>> print(df0)\n . 
logId timestamp sleep_level\n 0 1 2020-05-01 00:00:00+00:00 1\n 1 1 2020-05-01 01:00:00+00:00 1\n 2 1 2020-05-01 03:00:00+00:00 2\n 3 2 2020-05-02 00:00:00+00:00 1\n 4 2 2020-05-02 01:00:00+00:00 1\n\n \"\"\"\n\n def __init__(self, feature_names, **kwargs):\n \"\"\"Convert a set of columns features from string to datetime\n\n Args:\n feature_names (str):\n Name of the string columns that represent datetime objects\n **kwargs:\n key word arguments passed to pandas ``to_datetime`` method\n\n \"\"\"\n self.feature_names = feature_names\n super().__init__(kwargs)\n self.kwargs = kwargs\n\n def _process(self, *data_frames):\n \"\"\"Processes the passed data frame as per the configuration define in the constructor.\n\n Args:\n *data_frames (list of pd.DataFrame):\n Variable number of pandas dataframes to be processed\n\n Returns:\n pd.DataFrame -or- list[pd.DataFrame]\n Processed dataframe(s) resulting from applying the operator\n \"\"\"\n columns = (\n self.feature_names.copy()\n if isinstance(self.feature_names, list)\n else [self.feature_names]\n )\n\n processed = []\n for data_frame in data_frames:\n for col in columns:\n data_frame[col] = pd.to_datetime(\n data_frame[col], errors=\"coerce\", **self.kwargs\n )\n processed.append(data_frame)\n return processed\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.DataFrame" ], [ "pandas.qcut" ], [ "pandas.DataFrame" ], [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
saadatshakeel/SoftwareLabPointNet
[ "1c197ae3ec76a0c2983e184f17f114fd9efe8c81" ]
[ "evaluate.py" ]
[ "import tensorflow.compat.v1 as tf\nimport numpy as np\nimport argparse\nimport socket\nimport importlib\nimport time\nimport os\nimport scipy.misc\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, 'models'))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\nimport provider\nimport pc_util\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')\nparser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')\nparser.add_argument('--num_point', type=int, default=1, help='Point Number [256/512/1024/2048] [default: 1024]')\nparser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')\nparser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]')\nparser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]')\nFLAGS = parser.parse_args()\n\n\nBATCH_SIZE = FLAGS.batch_size\nNUM_POINT = FLAGS.num_point\nMODEL_PATH = FLAGS.model_path\nGPU_INDEX = FLAGS.gpu\nMODEL = importlib.import_module(FLAGS.model) # import network module\nDUMP_DIR = FLAGS.dump_dir\nif not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)\nLOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')\nLOG_FOUT.write(str(FLAGS)+'\\n')\n\nNUM_CLASSES = 40\nSHAPE_NAMES = [line.rstrip() for line in \\\n open(os.path.join(BASE_DIR, 'data/hdf5_files/shape_names.txt'))] \n\nHOSTNAME = socket.gethostname()\n\n# ModelNet40 official train/test split\nTRAIN_FILES = provider.getDataFiles( \\\n os.path.join(BASE_DIR, 'data/hdf5_files/train_files.txt'))\nTEST_FILES = provider.getDataFiles(\\\n os.path.join(BASE_DIR, 'data/hdf5_files/test_files.txt'))\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef evaluate(num_votes):\n is_training = False\n \n with tf.device('/gpu:'+str(GPU_INDEX)):\n pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)\n is_training_pl = tf.placeholder(tf.bool, shape=())\n\n # simple model\n pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)\n loss = MODEL.get_loss(pred, labels_pl, end_points)\n \n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n \n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = True\n sess = tf.Session(config=config)\n\n # Restore variables from disk.\n saver.restore(sess, MODEL_PATH)\n log_string(\"Model restored.\")\n\n ops = {'pointclouds_pl': pointclouds_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss}\n\n eval_one_epoch(sess, ops, num_votes)\n\n \ndef eval_one_epoch(sess, ops, num_votes=1, topk=1):\n error_cnt = 0\n is_training = False\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')\n for fn in range(len(TEST_FILES)):\n log_string('----'+str(fn)+'----')\n current_data, current_label = provider.loadDataFile(TEST_FILES[fn])\n current_data = current_data[:,0:NUM_POINT,:]\n current_label = 
np.squeeze(current_label)\n print(current_data.shape)\n \n file_size = current_data.shape[0]\n num_batches = file_size // BATCH_SIZE\n print(file_size)\n \n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n cur_batch_size = end_idx - start_idx\n \n # Aggregating BEG\n batch_loss_sum = 0 # sum of losses for the batch\n batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes\n batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes\n for vote_idx in range(num_votes):\n rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],\n vote_idx/float(num_votes) * np.pi * 2)\n feed_dict = {ops['pointclouds_pl']: rotated_data,\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training}\n loss_val, pred_val = sess.run([ops['loss'], ops['pred']],\n feed_dict=feed_dict)\n batch_pred_sum += pred_val\n batch_pred_val = np.argmax(pred_val, 1)\n for el_idx in range(cur_batch_size):\n batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1\n batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))\n # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]\n # pred_val = np.argmax(batch_pred_classes, 1)\n pred_val = np.argmax(batch_pred_sum, 1)\n # Aggregating END\n \n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n # correct = np.sum(pred_val_topk[:,0:topk] == label_val)\n total_correct += correct\n total_seen += cur_batch_size\n loss_sum += batch_loss_sum\n\n for i in range(start_idx, end_idx):\n l = current_label[i]\n total_seen_class[l] += 1\n total_correct_class[l] += (pred_val[i-start_idx] == l)\n fout.write('%d, %d\\n' % (pred_val[i-start_idx], l))\n \n if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!\n img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],\n SHAPE_NAMES[pred_val[i-start_idx]])\n img_filename = os.path.join(DUMP_DIR, img_filename)\n output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))\n scipy.misc.imsave(img_filename, output_img)\n error_cnt += 1\n \n log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))\n log_string('eval accuracy: %f' % (total_correct / float(total_seen)))\n log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))\n \n class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)\n for i, name in enumerate(SHAPE_NAMES):\n log_string('%10s:\\t%0.3f' % (name, class_accuracies[i]))\n \n\n\nif __name__=='__main__':\n with tf.Graph().as_default():\n evaluate(num_votes=1)\n LOG_FOUT.close()\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "numpy.squeeze", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "numpy.argmax", "tensorflow.compat.v1.Graph", "numpy.array", "tensorflow.compat.v1.train.Saver", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nordmtr/quantpy
[ "b23b4fea8c5028e2598790e6e418b02932ca59ca", "b23b4fea8c5028e2598790e6e418b02932ca59ca" ]
[ "quantpy/operator.py", "quantpy/polytope.py" ]
[ "from copy import deepcopy\n\nimport numpy as np\n\nfrom .base_quantum import BaseQuantum\nfrom .qobj import Qobj\nfrom .routines import _SIGMA_I, _SIGMA_X, _SIGMA_Y, _SIGMA_Z, _vec2mat\n\n\nclass Operator(BaseQuantum):\n \"\"\"Class for representing quantum operators\n\n Parameters\n ----------\n data : array-like\n Matrix representation of a quantum operator\n\n Attributes\n ----------\n H : Operator (property)\n Adjoint matrix of the quantum operator\n matrix : numpy 2-D array (property)\n Matrix representation of the quantum operator\n n_qubits : int\n Number of qubits\n T : Operator (property)\n Transpose of the quantum operator\n\n Methods\n -------\n as_channel()\n Convert Operator to Channel\n conj()\n Conjugate of the quantum operator\n copy()\n Create a copy of this Operator instance\n kron()\n Kronecker product of 2 Operator instances\n trace()\n Trace of the quantum operator\n transform()\n Apply this operator to a quantum state\n \"\"\"\n\n def __init__(self, data):\n if isinstance(data, self.__class__):\n self.__dict__ = deepcopy(data.__dict__)\n self._matrix = np.array(data, dtype=np.complex128)\n self.n_qubits = int(np.log2(self._matrix.shape[0]))\n\n @property\n def matrix(self):\n \"\"\"Quantum Operator in a matrix form\"\"\"\n return self._matrix\n\n @matrix.setter\n def matrix(self, data):\n self._matrix = np.array(data, dtype=np.complex128)\n self.n_qubits = int(np.log2(self._matrix.shape[0]))\n\n def transform(self, state):\n \"\"\"Apply this Operator to the state: U @ rho @ U.H\"\"\"\n return Qobj((self @ state @ self.H).matrix)\n\n def as_channel(self):\n \"\"\"Return a channel representation of this Operator\"\"\"\n from .channel import Channel\n\n return Channel(self.transform, self.n_qubits)\n\n def trace(self):\n \"\"\"Trace of the quantum object\"\"\"\n return np.trace(self.matrix)\n\n def __repr__(self):\n return \"Quantum Operator\\n\" + repr(self.matrix)\n\n\n# One-qubit gates\n\n# noinspection PyPep8Naming\ndef PHASE(theta):\n return Operator(\n [\n [1, 0],\n [0, np.exp(1j * theta)],\n ]\n )\n\n\n# noinspection PyPep8Naming\ndef RX(theta):\n return Operator(\n [\n [np.cos(theta / 2), -1j * np.sin(theta / 2)],\n [-1j * np.sin(theta / 2), np.cos(theta / 2)],\n ]\n )\n\n\n# noinspection PyPep8Naming\ndef RY(theta):\n return Operator(\n [\n [np.cos(theta / 2), -np.sin(theta / 2)],\n [np.sin(theta / 2), np.cos(theta / 2)],\n ]\n )\n\n\n# noinspection PyPep8Naming\ndef RZ(theta):\n return Operator(\n [\n [np.exp(-0.5j * theta), 0],\n [0, np.exp(0.5j * theta)],\n ]\n )\n\n\nId = Operator(_SIGMA_I)\nX = Operator(_SIGMA_X)\nY = Operator(_SIGMA_Y)\nZ = Operator(_SIGMA_Z)\nH = (\n Operator(\n [\n [1, 1],\n [1, -1],\n ]\n )\n / np.sqrt(2)\n)\nT = PHASE(np.pi / 4)\nS = PHASE(np.pi / 2)\n\n# Two-qubit gates\n\nCNOT = Operator(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n ]\n)\n\nCY = Operator(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, -1j],\n [0, 0, 1j, 0],\n ]\n)\n\nCZ = Operator(\n [\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, -1],\n ]\n)\n\nSWAP = Operator(\n [\n [1, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n ]\n)\n\nISWAP = Operator(\n [\n [1, 0, 0, 0],\n [0, 0, 1j, 0],\n [0, 1j, 0, 0],\n [0, 0, 0, 1],\n ]\n)\n\nMS = (\n Operator(\n [\n [1, 0, 0, 1j],\n [0, 1, -1j, 0],\n [0, -1j, 1, 0],\n [1j, 0, 0, 1],\n ]\n )\n / np.sqrt(2)\n)\n\n\nToffoli = Operator(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 
0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0],\n ]\n)\n\n\nFredkin = Operator(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n ]\n)\n\n\ndef _choi_to_kraus(choi):\n EPS = 1e-15\n eigvals, eigvecs = choi.eig()\n eigvecs = list(eigvecs.T)\n return [\n Operator(_vec2mat(vec) * np.sqrt(val))\n for val, vec in zip(eigvals, eigvecs)\n if abs(val) > EPS\n ]\n", "import random\n\nimport numpy as np\nimport numpy.linalg as la\n\n\ndef compute_polytope_volume(polytope):\n \"\"\"Compute the volume of the polytope approximately\n\n Parameters\n ----------\n polytope : polytope.Polytope\n \"\"\"\n dim = polytope.A.shape[1]\n n_points = int(5000 * 1.5 ** dim)\n l_b, u_b = polytope.bounding_box\n x = np.tile(l_b, (1, n_points)) + np.random.rand(dim, n_points) * np.tile(\n u_b - l_b, (1, n_points)\n )\n aux = np.dot(polytope.A, x) - np.tile(np.array([polytope.b]).T, (1, n_points))\n aux = np.nonzero(np.all(aux < 0, 0))[0].shape[0]\n vol = np.prod(u_b - l_b) * aux / n_points\n return vol\n\n\ndef find_max_distance_to_polytope(\n A,\n b,\n target_point_bloch,\n start_point_bloch,\n n_points=500,\n discard_closer=False,\n hit_and_run=True,\n):\n \"\"\"Compute the distance between the target point and the farthest point in the polytope\n using hit and run algorithm. Polytope is defined by H-representation: Ax <= b.\n\n Parameters\n ----------\n A : np.array\n b : np.array\n target_point_bloch\n start_point_bloch : np.array\n Reduced bloch vector of the starting point\n n_points : int\n Number of points to sample in polytope\n discard_closer : bool\n Determines whether to discard directions, which have negative scalar product\n with (target - start) vector\n hit_and_run : bool\n If True use the hit and run algorithm, otherwise simply check directions\n from the starting point\n\n Returns\n -------\n float\n The distance between the target point and the farthest point in the polytope\n \"\"\"\n EPS = 1e-13\n dim = A.shape[1]\n max_dist = la.norm(start_point_bloch - target_point_bloch)\n\n if np.min(b - A @ start_point_bloch) < -EPS:\n return 0\n\n for _ in range(n_points):\n direction = np.random.rand(dim) * 2 - 1\n\n # discard directions pointing towards the target point\n while discard_closer and np.dot(direction, start_point_bloch - target_point_bloch) <= 0:\n direction = np.random.rand(dim) * 2 - 1\n direction /= la.norm(direction)\n farthest_point_bloch = find_farthest_polytope_point(A, b, start_point_bloch, direction, EPS)\n if hit_and_run:\n theta = random.random()\n start_point_bloch = theta * start_point_bloch + (1 - theta) * farthest_point_bloch\n max_dist = max(max_dist, la.norm(start_point_bloch - target_point_bloch))\n else:\n max_dist = max(max_dist, la.norm(farthest_point_bloch - target_point_bloch))\n return max_dist * np.sqrt(np.sqrt(dim) / 2)\n\n\ndef find_farthest_polytope_point(A, b, start_point, direction, tol=1e-15, init_alpha=1):\n \"\"\"Find the farthest point in the polytope in the selected direction.\n Polytope is defined by H-representation: Ax <= b.\n\n Parameters\n ----------\n A : np.array\n b : np.array\n start_point : np.array\n Reduced bloch vector of the starting point\n direction: np.array\n Direction in reduced bloch space\n tol : float\n Determines the precision of finding the point\n init_alpha : float\n Initial coefficient before the dimension vector\n Returns\n 
-------\n\n \"\"\"\n step = alpha = init_alpha\n diff_start = np.min(b - A @ start_point)\n while True:\n cur_point = start_point + alpha * direction\n diff = np.min(b - A @ cur_point)\n if diff_start < -tol:\n print(diff, step, alpha)\n if -tol <= diff < tol:\n break\n elif diff < -tol:\n step /= 2\n alpha -= step\n else:\n step *= 2\n alpha += step\n return cur_point\n" ]
[ [ "numpy.log2", "numpy.sqrt", "numpy.cos", "numpy.sin", "numpy.array", "numpy.exp", "numpy.trace" ], [ "numpy.dot", "numpy.sqrt", "numpy.min", "numpy.tile", "numpy.linalg.norm", "numpy.all", "numpy.random.rand", "numpy.prod", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeremy9959/cnvkit
[ "b839a2b323113a7d318d216f61a0ed6657c70ed4", "b839a2b323113a7d318d216f61a0ed6657c70ed4", "b839a2b323113a7d318d216f61a0ed6657c70ed4" ]
[ "cnvlib/autobin.py", "scripts/guess_baits.py", "cnvlib/call.py" ]
[ "\"\"\"Estimate reasonable bin sizes from BAM read counts or depths.\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport logging\nimport os\nimport tempfile\n\nimport numpy as np\nimport pandas as pd\nfrom skgenome import tabio, GenomicArray as GA\n\nfrom . import coverage, samutil\nfrom .antitarget import compare_chrom_names\nfrom .descriptives import weighted_median\n\n\ndef midsize_file(fnames):\n \"\"\"Select the median-size file from several given filenames.\n\n If an even number of files is given, selects the file just below the median.\n \"\"\"\n return sorted(fnames, key=lambda f: os.stat(f).st_size\n )[len(fnames) // 2 - 1]\n\n\ndef do_autobin(bam_fname, method, targets=None, access=None,\n bp_per_bin=100000., target_min_size=20, target_max_size=50000,\n antitarget_min_size=500, antitarget_max_size=1000000):\n \"\"\"Quickly calculate reasonable bin sizes from BAM read counts.\n\n Parameters\n ----------\n bam_fname : string\n BAM filename.\n method : string\n One of: 'wgs' (whole-genome sequencing), 'amplicon' (targeted amplicon\n capture), 'hybrid' (hybridization capture).\n targets : GenomicArray\n Targeted genomic regions (for 'hybrid' and 'amplicon').\n access : GenomicArray\n Sequencing-accessible regions of the reference genome (for 'hybrid' and\n 'wgs').\n bp_per_bin : int\n Desired number of sequencing read nucleotide bases mapped to each bin.\n\n Returns\n -------\n 2-tuple of 2-tuples:\n ((target depth, target avg. bin size),\n (antitarget depth, antitarget avg. bin size))\n \"\"\"\n if method in ('amplicon', 'hybrid'):\n if targets is None:\n raise ValueError(\"Target regions are required for method %r \"\n \"but were not provided.\" % method)\n if not len(targets):\n raise ValueError(\"Target regions are required for method %r \"\n \"but were not provided.\" % method)\n\n # Closes over bp_per_bin\n def depth2binsize(depth, min_size, max_size):\n if depth:\n bin_size = int(round(bp_per_bin / depth))\n if bin_size < min_size:\n logging.info(\"Limiting est. bin size %d to given min. %d\",\n bin_size, min_size)\n bin_size = min_size\n elif bin_size > max_size:\n logging.info(\"Limiting est. bin size %d to given max. 
%d\",\n bin_size, max_size)\n bin_size = max_size\n return bin_size\n\n samutil.ensure_bam_index(bam_fname)\n rc_table = samutil.idxstats(bam_fname, drop_unmapped=True)\n read_len = samutil.get_read_length(bam_fname)\n logging.info(\"Estimated read length %s\", read_len)\n\n # Dispatch\n if method == 'amplicon':\n # From BAM index\n # rc_table = update_chrom_length(rc_table, targets)\n # tgt_depth = average_depth(rc_table, read_len)\n # By sampling\n tgt_depth = sample_region_cov(bam_fname, targets)\n anti_depth = None\n elif method == 'hybrid':\n tgt_depth, anti_depth = hybrid(rc_table, read_len, bam_fname, targets,\n access)\n elif method == 'wgs':\n if access is not None and len(access):\n rc_table = update_chrom_length(rc_table, access)\n tgt_depth = average_depth(rc_table, read_len)\n anti_depth = None\n\n # Clip bin sizes to specified ranges\n tgt_bin_size = depth2binsize(tgt_depth, target_min_size, target_max_size)\n anti_bin_size = depth2binsize(anti_depth, antitarget_min_size,\n antitarget_max_size)\n return ((tgt_depth, tgt_bin_size),\n (anti_depth, anti_bin_size))\n\n\ndef hybrid(rc_table, read_len, bam_fname, targets, access=None):\n \"\"\"Hybrid capture sequencing.\"\"\"\n # Identify off-target regions\n if access is None:\n access = idxstats2ga(rc_table, bam_fname)\n # Verify BAM chromosome names match those in target BED\n compare_chrom_names(access, targets)\n antitargets = access.subtract(targets)\n # Only examine chromosomes present in all 2-3 input datasets\n rc_table, targets, antitargets = shared_chroms(rc_table, targets,\n antitargets)\n # Deal with targets\n target_depth = sample_region_cov(bam_fname, targets)\n # Antitargets: subtract captured reads from total\n target_length = region_size_by_chrom(targets)['length']\n target_reads = (target_length * target_depth / read_len).values\n anti_table = update_chrom_length(rc_table, antitargets)\n anti_table = anti_table.assign(mapped=anti_table.mapped - target_reads)\n anti_depth = average_depth(anti_table, read_len)\n return target_depth, anti_depth\n\n\n# ---\n\ndef average_depth(rc_table, read_length):\n \"\"\"Estimate the average read depth across the genome.\n\n Returns\n -------\n float\n Median of the per-chromosome mean read depths, weighted by chromosome\n size.\n \"\"\"\n mean_depths = read_length * rc_table.mapped / rc_table.length\n return weighted_median(mean_depths, rc_table.length)\n\n\ndef idxstats2ga(table, bam_fname):\n return GA(table.assign(start=0, end=table.length)\n .loc[:, ('chromosome', 'start', 'end')],\n meta_dict={'filename': bam_fname})\n\n\ndef sample_region_cov(bam_fname, regions, max_num=100):\n \"\"\"Calculate read depth in a randomly sampled subset of regions.\"\"\"\n midsize_regions = sample_midsize_regions(regions, max_num)\n with tempfile.NamedTemporaryFile(suffix='.bed', mode='w+t') as f:\n tabio.write(regions.as_dataframe(midsize_regions), f, 'bed4')\n f.flush()\n table = coverage.bedcov(f.name, bam_fname, 0)\n # Mean read depth across all sampled regions\n return table.basecount.sum() / (table.end - table.start).sum()\n\n\ndef sample_midsize_regions(regions, max_num):\n \"\"\"Randomly select a subset of up to `max_num` regions.\"\"\"\n sizes = regions.end - regions.start\n lo_size, hi_size = np.percentile(sizes[sizes > 0], [25, 75])\n midsize_regions = regions.data[(sizes >= lo_size) & (sizes <= hi_size)]\n if len(midsize_regions) > max_num:\n midsize_regions = midsize_regions.sample(max_num, random_state=0xA5EED)\n return midsize_regions\n\n\ndef shared_chroms(*tables):\n 
\"\"\"Intersection of DataFrame .chromosome values.\"\"\"\n chroms = tables[0].chromosome.drop_duplicates()\n for tab in tables[1:]:\n if tab is not None:\n new_chroms = tab.chromosome.drop_duplicates()\n chroms = chroms[chroms.isin(new_chroms)]\n return [None if tab is None else tab[tab.chromosome.isin(chroms)]\n for tab in tables]\n\n\ndef update_chrom_length(rc_table, regions):\n if regions is not None and len(regions):\n chrom_sizes = region_size_by_chrom(regions)\n rc_table = rc_table.merge(chrom_sizes, on='chromosome', how='inner')\n rc_table['length'] = rc_table['length_y'] # ?\n rc_table = rc_table.drop(['length_x', 'length_y'], axis=1)\n return rc_table\n\n\ndef region_size_by_chrom(regions):\n chromgroups = regions.data.groupby('chromosome', sort=False)\n # sizes = chromgroups.apply(total_region_size) # XXX\n sizes = [total_region_size(g) for _key, g in chromgroups]\n return pd.DataFrame({'chromosome': regions.chromosome.drop_duplicates(),\n 'length': sizes})\n\n\ndef total_region_size(regions):\n \"\"\"Aggregate area of all genomic ranges in `regions`.\"\"\"\n return (regions.end - regions.start).sum()\n", "#!/usr/bin/env python\n\"\"\"Guess the coordinates of captured regions from sample read depths.\n\nTwo approaches available:\n\n- (Faster) Scan a given list of exons and/or other potentially targeted regions.\n The script checks each region and drops those with very low coverage\n indicating they were not captured.\n- (Slower) Scan the entire genome, or the given sequencing-accessible regions,\n for regions with elevated coverage. Choose reasonable boundaries for each\n apparently captured region.\n\nUse multiple BAMs for greater robustness in detecting targeted regions, as a\nsingle sample may have poor coverage are some targets by chance.\nStill, this script does not guarantee correctly detecting all targets.\n\nSee also: https://github.com/brentp/goleft\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport collections\nimport logging\nimport subprocess\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\nimport cnvlib\nfrom cnvlib import parallel\nfrom cnvlib.descriptives import modal_location\nfrom skgenome import tabio, GenomicArray as GA\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n\n# ___________________________________________\n# Guided method: guess from potential targets\n\ndef filter_targets(target_bed, sample_bams, procs):\n \"\"\"Check if each potential target has significant coverage.\"\"\"\n try:\n baits = tabio.read(target_bed, 'bed4')\n except:\n raise RuntimeError(\"Targets must be in BED format; try skg_convert.py\")\n logging.info(\"Loaded %d candidate regions from %s\", len(baits), target_bed)\n # Loop over BAMs to calculate weighted averages of bin coverage depths\n total_depths = np.zeros(len(baits), dtype=np.float_)\n for bam_fname in sample_bams:\n logging.info(\"Evaluating targets in %s\", bam_fname)\n sample = cnvlib.do_coverage(target_bed, bam_fname, processes=procs)\n assert len(sample) == len(baits), \\\n \"%d != %d\" % (len(sample), len(baits))\n total_depths += sample['depth'].values\n baits['depth'] = total_depths / len(sample_bams)\n logging.info(\"Average candidate-target depth:\\n%s\",\n baits['depth'].describe())\n return baits\n\n\n# _________________________________________\n# Unguided method: guess from raw depths\n\ndef scan_targets(access_bed, sample_bams, min_depth, min_gap, min_length,\n procs):\n \"\"\"Estimate baited regions from a genome-wide, per-base 
depth profile.\"\"\"\n bait_chunks = []\n # ENH: context manager to call rm on bed chunks? with to_chunks as pool, ck?\n logging.info(\"Scanning for enriched regions in:\\n %s\",\n '\\n '.join(sample_bams))\n # with futures.ProcessPoolExecutor(procs) as pool:\n with parallel.pick_pool(procs) as pool:\n args_iter = ((bed_chunk, sample_bams,\n min_depth, min_gap, min_length)\n for bed_chunk in parallel.to_chunks(access_bed))\n for bed_chunk_fname, bait_chunk in pool.map(_scan_depth, args_iter):\n bait_chunks.append(bait_chunk)\n parallel.rm(bed_chunk_fname)\n baits = GA(pd.concat(bait_chunks))\n baits['depth'] /= len(sample_bams)\n return baits\n\n\ndef _scan_depth(args):\n \"\"\"Wrapper for parallel map\"\"\"\n bed_fname, bam_fnames, min_depth, min_gap, min_length = args\n regions = list(drop_small(merge_gaps(scan_depth(bed_fname, bam_fnames,\n min_depth),\n min_gap),\n min_length))\n result = pd.DataFrame.from_records(list(regions),\n columns=regions[0]._fields)\n return bed_fname, result\n\n\ndef scan_depth(bed_fname, bam_fnames, min_depth):\n \"\"\"Locate sub-regions with enriched read depth in the given regions.\n\n Yields\n ------\n tuple\n Region coordinates (0-indexed, half-open): chromosome name, start, end\n \"\"\"\n Region = collections.namedtuple('Region', 'chromosome start end depth')\n\n nsamples = len(bam_fnames)\n if nsamples == 1:\n def get_depth(depths):\n return int(depths[0])\n else:\n min_depth *= nsamples\n # NB: samtools emits additional BAMs' depths as trailing columns\n def get_depth(depths):\n return sum(map(int, depths))\n\n proc = subprocess.Popen(['samtools', 'depth',\n '-Q', '1', # Skip pseudogenes\n '-b', bed_fname,\n ] + bam_fnames,\n stdout=subprocess.PIPE,\n shell=False)\n\n # Detect runs of >= min_depth; emit their coordinates\n chrom = start = depths = None\n for line in proc.stdout:\n fields = line.split('\\t')\n depth = get_depth(fields[2:])\n is_enriched = (depth >= min_depth)\n if start is None:\n if is_enriched:\n # Entering a new captured region\n chrom = fields[0]\n start = int(fields[1]) - 1\n depths = [depth]\n else:\n # Still not in a captured region\n continue\n elif is_enriched and fields[0] == chrom:\n # Still in a captured region -- extend it\n depths.append(depth)\n else:\n # Exiting a captured region\n # Update target region boundaries\n darr = np.array(depths)\n half_depth = 0.5 * darr.max()\n ok_dp_idx = np.nonzero(darr >= half_depth)[0]\n start_idx = ok_dp_idx[0]\n end_idx = ok_dp_idx[-1] + 1\n yield Region(chrom,\n start + start_idx,\n start + end_idx,\n darr[start_idx:end_idx].mean())\n chrom = start = depths = None\n\n\ndef merge_gaps(regions, min_gap):\n \"\"\"Merge regions across small gaps.\"\"\"\n prev = next(regions)\n for reg in regions:\n if reg.start - prev.end < min_gap:\n # Merge\n prev = prev._replace(end=reg.end)\n else:\n # Done merging; move on\n yield prev\n prev = reg\n # Residual\n yield prev\n\n\ndef drop_small(regions, min_length):\n \"\"\"Merge small gaps and filter by minimum length.\"\"\"\n return (reg for reg in regions\n if reg.end - reg.start >= min_length)\n\n\n# ___________________________________________\n# Shared\n\ndef normalize_depth_log2_filter(baits, min_depth, enrich_ratio=0.1):\n \"\"\"Calculate normalized depth, add log2 column, filter by enrich_ratio.\"\"\"\n # Normalize depths to a neutral value of 1.0\n dp_mode = modal_location(baits.data.loc[baits['depth'] > min_depth,\n 'depth'].values)\n norm_depth = baits['depth'] / dp_mode\n # Drop low-coverage targets\n keep_idx = (norm_depth >= 
enrich_ratio)\n logging.info(\"Keeping %d/%d bins with coverage depth >= %f, modal depth %f\",\n keep_idx.sum(), len(keep_idx), dp_mode * enrich_ratio, dp_mode)\n return baits[keep_idx]\n\n\n\nif __name__ == '__main__':\n AP = argparse.ArgumentParser(description=__doc__)\n AP.add_argument('sample_bams', nargs='+',\n help=\"\"\"Sample BAM file(s) to test for target coverage.\"\"\")\n AP.add_argument('-o', '--output', metavar='FILENAME',\n help=\"\"\"The inferred targets, in BED format.\"\"\")\n AP.add_argument('-c', '--coverage', metavar='FILENAME',\n help=\"\"\"Filename to output average coverage depths in .cnn\n format.\"\"\")\n AP.add_argument('-p', '--processes', metavar='CPU',\n nargs='?', type=int, const=0, default=1,\n help=\"\"\"Number of subprocesses to segment in parallel.\n If given without an argument, use the maximum number\n of available CPUs. [Default: use 1 process]\"\"\")\n\n AP_x = AP.add_mutually_exclusive_group(required=True)\n AP_x.add_argument('-t', '--targets', metavar='TARGET_BED',\n help=\"\"\"Potentially targeted genomic regions, e.g. all known\n exons in the reference genome, in BED format. Each of these\n regions will be tested as a whole for enrichment. (Faster\n method)\"\"\")\n AP_x.add_argument('-a', '--access', metavar='ACCESS_BED',\n # default=\"../data/access-5k-mappable.grch37.bed\",\n help=\"\"\"Sequencing-accessible genomic regions (e.g. from\n 'cnvkit.py access'), or known genic regions in the reference\n genome, in BED format. All bases will be tested for\n enrichment. (Slower method)\"\"\")\n\n AP_target = AP.add_argument_group(\"With --targets only\")\n AP_target.add_argument('-d', '--min-depth', metavar='DEPTH',\n type=int, default=5,\n help=\"\"\"Minimum sequencing read depth to accept as captured.\n [Default: %(default)s]\"\"\")\n\n AP_access = AP.add_argument_group(\"With --access only\")\n AP_access.add_argument('-g', '--min-gap', metavar='GAP_SIZE',\n type=int, default=25,\n help=\"\"\"Merge regions separated by gaps smaller than this.\n [Default: %(default)s]\"\"\")\n AP_access.add_argument('-l', '--min-length', metavar='TARGET_SIZE',\n type=int, default=50,\n help=\"\"\"Minimum region length to accept as captured.\n [Default: %(default)s]\"\"\")\n\n args = AP.parse_args()\n\n # ENH: can we reserve multiple cores for htslib?\n if args.processes < 1:\n args.processes = None\n\n if args.targets:\n baits = filter_targets(args.targets, args.sample_bams, args.processes)\n else:\n baits = scan_targets(args.access, args.sample_bams,\n 0.5 * args.min_depth, # More sensitive 1st pass\n args.min_gap, args.min_length, args.processes)\n baits = normalize_depth_log2_filter(baits, args.min_depth)\n tabio.write(baits, args.output or sys.stdout, 'bed')\n if args.coverage:\n baits['log2'] = np.log2(baits['depth'] / baits['depth'].median())\n tabio.write(baits, args.coverage, 'tab')\n", "\"\"\"Call copy number variants from segmented log2 ratios.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import segfilters\n\n\ndef do_call(cnarr, variants=None, method=\"threshold\", ploidy=2, purity=None,\n is_reference_male=False, is_sample_female=False, filters=None,\n thresholds=(-1.1, -0.25, 0.2, 0.7)):\n if method not in (\"threshold\", \"clonal\", \"none\"):\n raise ValueError(\"Argument `method` must be one of: clonal, threshold\")\n\n outarr = cnarr.copy()\n if filters:\n # Apply any filters that use segmetrics but not cn fields\n for filt in ('ci', 'sem'):\n if filt in filters:\n logging.info(\"Applying filter '%s'\", filt)\n outarr = getattr(segfilters, filt)(outarr)\n filters.remove(filt)\n\n if variants:\n outarr[\"baf\"] = variants.baf_by_ranges(outarr)\n\n if purity and purity < 1.0:\n logging.info(\"Rescaling sample with purity %g, ploidy %d\",\n purity, ploidy)\n absolutes = absolute_clonal(outarr, ploidy, purity,\n is_reference_male, is_sample_female)\n # Recalculate sample log2 ratios after rescaling for purity\n outarr[\"log2\"] = log2_ratios(outarr, absolutes, ploidy,\n is_reference_male)\n if variants:\n # Rescale b-allele frequencies for purity\n outarr[\"baf\"] = rescale_baf(purity, outarr[\"baf\"])\n elif method == \"clonal\":\n # Estimate absolute copy numbers from the original log2 values\n logging.info(\"Calling copy number with clonal ploidy %d\", ploidy)\n absolutes = absolute_pure(outarr, ploidy, is_reference_male)\n\n if method == \"threshold\":\n # Apply cutoffs to either original or rescaled log2 values\n tokens = [\"%g => %d\" % (thr, i) for i, thr in enumerate(thresholds)]\n logging.info(\"Calling copy number with thresholds: %s\",\n \", \".join(tokens))\n absolutes = absolute_threshold(outarr, ploidy, thresholds,\n is_reference_male)\n\n if method != 'none':\n outarr['cn'] = absolutes.round().astype('int')\n if 'baf' in outarr:\n # Calculate major and minor allelic copy numbers (s.t. 
cn1 >= cn2)\n upper_baf = ((outarr['baf'] - .5).abs() + .5).fillna(1.0).values\n outarr['cn1'] = ((absolutes * upper_baf).round()\n .clip(0, outarr['cn'])\n .astype('int'))\n outarr['cn2'] = outarr['cn'] - outarr['cn1']\n is_null = (outarr['baf'].isnull() & (outarr['cn'] > 0))\n outarr[is_null, 'cn1'] = np.nan\n outarr[is_null, 'cn2'] = np.nan\n\n if filters:\n # Apply the remaining cn-based filters\n for filt in filters:\n logging.info(\"Applying filter '%s'\", filt)\n outarr = getattr(segfilters, filt)(outarr)\n\n outarr.sort_columns()\n return outarr\n\n\ndef log2_ratios(cnarr, absolutes, ploidy, is_reference_male,\n min_abs_val=1e-3, round_to_int=False):\n \"\"\"Convert absolute copy numbers to log2 ratios.\n\n Optionally round copy numbers to integers.\n\n Account for reference sex & ploidy of sex chromosomes.\n \"\"\"\n # Round absolute copy numbers to integer values\n if round_to_int:\n absolutes = absolutes.round()\n # Avoid a logarithm domain error\n ratios = np.log2(np.maximum(absolutes / ploidy, min_abs_val))\n # Adjust sex chromosomes to be relative to the reference\n if is_reference_male:\n ratios[(cnarr.chromosome == cnarr._chr_x_label).values] += 1.0\n ratios[(cnarr.chromosome == cnarr._chr_y_label).values] += 1.0\n return ratios\n\n\ndef absolute_threshold(cnarr, ploidy, thresholds, is_reference_male):\n \"\"\"Call integer copy number using hard thresholds for each level.\n\n Integer values are assigned for log2 ratio values less than each given\n threshold value in sequence, counting up from zero.\n Above the last threshold value, integer copy numbers are called assuming\n full purity, diploidy, and rounding up.\n\n Default thresholds follow this heuristic for calling CNAs in a tumor sample:\n For single-copy gains and losses, assume 50% tumor cell clonality (including\n normal cell contamination). 
Then::\n\n R> log2(2:6 / 4)\n -1.0 -0.4150375 0.0 0.3219281 0.5849625\n\n Allowing for random noise of +/- 0.1, the cutoffs are::\n\n DEL(0) < -1.1\n LOSS(1) < -0.25\n GAIN(3) >= +0.2\n AMP(4) >= +0.7\n\n For germline samples, better precision could be achieved with::\n\n LOSS(1) < -0.4\n GAIN(3) >= +0.3\n\n \"\"\"\n absolutes = np.zeros(len(cnarr), dtype=np.float_)\n for idx, row in enumerate(cnarr):\n cnum = 0\n ref_copies = _reference_copies_pure(row.chromosome, ploidy,\n is_reference_male)\n for cnum, thresh in enumerate(thresholds):\n if row.log2 <= thresh:\n if ref_copies != ploidy:\n cnum = int(cnum * ref_copies / ploidy)\n break\n else:\n cnum = int(np.ceil(_log2_ratio_to_absolute_pure(row.log2,\n ref_copies)))\n absolutes[idx] = cnum\n return absolutes\n\n\ndef absolute_clonal(cnarr, ploidy, purity, is_reference_male, is_sample_female):\n \"\"\"Calculate absolute copy number values from segment or bin log2 ratios.\"\"\"\n absolutes = np.zeros(len(cnarr), dtype=np.float_)\n for i, row in enumerate(cnarr):\n # TODO by_chromosome to reduce number of calls to this\n ref_copies, expect_copies = _reference_expect_copies(\n row.chromosome, ploidy, is_sample_female, is_reference_male)\n absolutes[i] = _log2_ratio_to_absolute(\n row.log2, ref_copies, expect_copies, purity)\n return absolutes\n\n\ndef absolute_pure(cnarr, ploidy, is_reference_male):\n \"\"\"Calculate absolute copy number values from segment or bin log2 ratios.\"\"\"\n absolutes = np.zeros(len(cnarr), dtype=np.float_)\n for i, row in enumerate(cnarr):\n ref_copies = _reference_copies_pure(row.chromosome, ploidy,\n is_reference_male)\n absolutes[i] = _log2_ratio_to_absolute_pure(row.log2, ref_copies)\n return absolutes\n\n\ndef absolute_dataframe(cnarr, ploidy, purity, is_reference_male, is_sample_female):\n \"\"\"Absolute, expected and reference copy number in a DataFrame.\"\"\"\n absolutes = np.zeros(len(cnarr), dtype=np.float_)\n reference_copies = expect_copies = np.zeros(len(cnarr), dtype=np.int_)\n for i, row in enumerate(cnarr):\n ref_copies, exp_copies = _reference_expect_copies(\n row.chromosome, ploidy, is_sample_female, is_reference_male)\n reference_copies[i] = ref_copies\n expect_copies[i] = exp_copies\n absolutes[i] = _log2_ratio_to_absolute(\n row.log2, ref_copies, exp_copies, purity)\n return pd.DataFrame({'absolute': absolutes,\n 'reference': reference_copies,\n 'expect': expect_copies})\n\n\ndef absolute_expect(cnarr, ploidy, is_sample_female):\n \"\"\"Absolute integer number of expected copies in each bin.\n\n I.e. the given ploidy for autosomes, and XY or XX sex chromosome counts\n according to the sample's specified chromosomal sex.\n \"\"\"\n exp_copies = np.repeat(ploidy, len(cnarr))\n is_y = (cnarr.chromosome == cnarr._chr_y_label).values\n if is_sample_female:\n exp_copies[is_y] = 0\n else:\n is_x = (cnarr.chromosome == cnarr._chr_x_label).values\n exp_copies[is_x | is_y] = ploidy // 2\n return exp_copies\n\n\ndef absolute_reference(cnarr, ploidy, is_reference_male):\n \"\"\"Absolute integer number of reference copies in each bin.\n\n I.e. 
the given ploidy for autosomes, 1 or 2 X according to the reference\n sex, and always 1 copy of Y.\n \"\"\"\n ref_copies = np.repeat(ploidy, len(cnarr))\n is_x = (cnarr.chromosome == cnarr._chr_x_label).values\n is_y = (cnarr.chromosome == cnarr._chr_y_label).values\n if is_reference_male:\n ref_copies[is_x] = ploidy // 2\n ref_copies[is_y] = ploidy // 2\n return ref_copies\n\n\ndef _reference_expect_copies(chrom, ploidy, is_sample_female, is_reference_male):\n \"\"\"Determine the number copies of a chromosome expected and in reference.\n\n For sex chromosomes, these values may not be the same ploidy as the\n autosomes. The \"reference\" number is the chromosome's ploidy in the\n CNVkit reference, while \"expect\" is the chromosome's neutral ploidy in the\n given sample, based on the specified sex of each. E.g., given a female\n sample and a male reference, on chromosome X the \"reference\" value is 1 but\n \"expect\" is 2.\n\n Returns\n -------\n tuple\n A pair of integers: number of copies in the reference, and expected in\n the sample.\n \"\"\"\n chrom = chrom.lower()\n if chrom in [\"chrx\", \"x\"]:\n ref_copies = (ploidy // 2 if is_reference_male else ploidy)\n exp_copies = (ploidy if is_sample_female else ploidy // 2)\n elif chrom in [\"chry\", \"y\"]:\n ref_copies = ploidy // 2\n exp_copies = (0 if is_sample_female else ploidy // 2)\n else:\n ref_copies = exp_copies = ploidy\n return ref_copies, exp_copies\n\n\ndef _reference_copies_pure(chrom, ploidy, is_reference_male):\n \"\"\"Determine the reference number of chromosome copies (pure sample).\n\n Returns\n -------\n int\n Number of copies in the reference.\n \"\"\"\n chrom = chrom.lower()\n if chrom in [\"chry\", \"y\"] or (is_reference_male and chrom in [\"chrx\", \"x\"]):\n ref_copies = ploidy // 2\n else:\n ref_copies = ploidy\n return ref_copies\n\n\ndef _log2_ratio_to_absolute(log2_ratio, ref_copies, expect_copies, purity=None):\n \"\"\"Transform a log2 ratio to absolute linear scale (for an impure sample).\n\n Does not round to an integer absolute value here.\n\n Math::\n\n log2_ratio = log2(ncopies / ploidy)\n 2^log2_ratio = ncopies / ploidy\n ncopies = ploidy * 2^log2_ratio\n\n With rescaling for purity::\n\n let v = log2 ratio value, p = tumor purity,\n r = reference ploidy, x = expected ploidy,\n n = tumor ploidy (\"ncopies\" above);\n\n v = log_2(p*n/r + (1-p)*x/r)\n 2^v = p*n/r + (1-p)*x/r\n n*p/r = 2^v - (1-p)*x/r\n n = (r*2^v - x*(1-p)) / p\n\n If purity adjustment is skipped (p=1; e.g. if germline or if scaling for\n heterogeneity was done beforehand)::\n\n n = r*2^v\n \"\"\"\n if purity and purity < 1.0:\n ncopies = (ref_copies * 2**log2_ratio - expect_copies * (1 - purity)\n ) / purity\n else:\n ncopies = _log2_ratio_to_absolute_pure(log2_ratio, ref_copies)\n return ncopies\n\n\ndef _log2_ratio_to_absolute_pure(log2_ratio, ref_copies):\n \"\"\"Transform a log2 ratio to absolute linear scale (for a pure sample).\n\n Purity adjustment is skipped. This is appropriate if the sample is germline\n or if scaling for tumor heterogeneity was done beforehand.\n\n .. 
math :: n = r*2^v\n \"\"\"\n ncopies = ref_copies * 2 ** log2_ratio\n return ncopies\n\n\ndef rescale_baf(purity, observed_baf, normal_baf=0.5):\n \"\"\"Adjust B-allele frequencies for sample purity.\n\n Math::\n\n t_baf*purity + n_baf*(1-purity) = obs_baf\n obs_baf - n_baf * (1-purity) = t_baf * purity\n t_baf = (obs_baf - n_baf * (1-purity))/purity\n \"\"\"\n # ENH: use normal_baf array if available\n tumor_baf = (observed_baf - normal_baf * (1-purity)) / purity\n # ENH: warn if tumor_baf < 0 -- purity estimate may be too low\n return tumor_baf\n" ]
[ [ "numpy.percentile" ], [ "pandas.concat", "numpy.array", "numpy.nonzero" ], [ "numpy.maximum", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
pohlt/streamlit
[ "852764f4f7d2bc06ddf932632df06c9104bf0a35", "852764f4f7d2bc06ddf932632df06c9104bf0a35" ]
[ "lib/tests/streamlit/write_test.py", "lib/tests/streamlit/keras_test.py" ]
[ "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit Unit test.\"\"\"\n\nfrom mock import call, patch, Mock\nfrom collections import namedtuple\n\nimport time\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import type_util\n\n\nclass StreamlitWriteTest(unittest.TestCase):\n \"\"\"Test st.write.\n\n Unit tests for https://docs.streamlit.io/api/text.html#streamlit.write\n\n Because we're going to test st.markdown, st.pyplot, st.altair_chart\n later on, we don't have to test it in st.write In st.write, all we're\n trying to check is that the right st.* method gets called\n \"\"\"\n\n def test_string(self):\n \"\"\"Test st.write with a string.\"\"\"\n with patch(\"streamlit.markdown\") as p:\n st.write(\"some string\")\n\n p.assert_called_once()\n\n with patch(\"streamlit.markdown\") as p:\n st.write(\"more\", \"strings\", \"to\", \"pass\")\n\n p.assert_called_once_with(\"more strings to pass\", unsafe_allow_html=False)\n\n def test_dataframe(self):\n \"\"\"Test st.write with dataframe.\"\"\"\n data = {\n type_util._PANDAS_DF_TYPE_STR: pd.DataFrame(\n [[20, 30, 50]], columns=[\"a\", \"b\", \"c\"]\n ),\n type_util._PANDAS_SERIES_TYPE_STR: pd.Series(np.array([\"a\", \"b\", \"c\"])),\n type_util._PANDAS_INDEX_TYPE_STR: pd.Index(list(\"abc\")),\n type_util._PANDAS_STYLER_TYPE_STR: pd.DataFrame(\n {\"a\": [1], \"b\": [2]}\n ).style.format(\"{:.2%}\"),\n type_util._NUMPY_ARRAY_TYPE_STR: np.array([\"a\", \"b\", \"c\"]),\n }\n\n # Make sure we have test cases for all _DATAFRAME_LIKE_TYPES\n self.assertEqual(sorted(data.keys()), sorted(type_util._DATAFRAME_LIKE_TYPES))\n\n for df in data.values():\n with patch(\"streamlit.dataframe\") as p:\n st.write(df)\n\n p.assert_called_once()\n\n def test_exception_type(self):\n \"\"\"Test st.write with exception.\"\"\"\n with patch(\"streamlit.exception\") as p:\n st.write(Exception(\"some exception\"))\n\n p.assert_called_once()\n\n def test_help(self):\n \"\"\"Test st.write with help types.\"\"\"\n # Test module\n with patch(\"streamlit.help\") as p:\n st.write(np)\n\n p.assert_called_once()\n\n # Test function\n with patch(\"streamlit.help\") as p:\n st.write(st.set_option)\n\n p.assert_called_once()\n\n @patch(\"streamlit.type_util.is_type\")\n def test_altair_chart(self, is_type):\n \"\"\"Test st.write with altair_chart.\"\"\"\n is_type.side_effect = make_is_type_mock(type_util._ALTAIR_RE)\n\n class FakeChart(object):\n pass\n\n with patch(\"streamlit.altair_chart\") as p:\n st.write(FakeChart())\n\n p.assert_called_once()\n\n @patch(\"streamlit.type_util.is_type\")\n def test_pyplot(self, is_type):\n \"\"\"Test st.write with matplotlib.\"\"\"\n is_type.side_effect = make_is_type_mock(\"matplotlib.figure.Figure\")\n\n class FakePyplot(object):\n pass\n\n with patch(\"streamlit.pyplot\") as p:\n st.write(FakePyplot())\n\n p.assert_called_once()\n\n def test_plotly(self):\n import plotly.graph_objs as go\n\n \"\"\"Test st.write with plotly 
object.\"\"\"\n with patch(\"streamlit.plotly_chart\") as p:\n st.write([go.Scatter(x=[1, 2], y=[10, 20])])\n\n p.assert_called_once()\n\n def test_dict(self):\n \"\"\"Test st.write with dict.\"\"\"\n with patch(\"streamlit.json\") as p:\n st.write({\"a\": 1, \"b\": 2})\n\n p.assert_called_once()\n\n def test_list(self):\n \"\"\"Test st.write with list.\"\"\"\n with patch(\"streamlit.json\") as p:\n st.write([1, 2, 3])\n\n p.assert_called_once()\n\n def test_namedtuple(self):\n \"\"\"Test st.write with list.\"\"\"\n with patch(\"streamlit.json\") as p:\n Boy = namedtuple(\"Boy\", (\"name\", \"age\"))\n John = Boy(\"John\", 29)\n st.write(John)\n\n p.assert_called_once()\n\n @patch(\"streamlit.markdown\")\n @patch(\"streamlit.json\")\n def test_dict_and_string(self, mock_json, mock_markdown):\n \"\"\"Test st.write with dict.\"\"\"\n manager = Mock()\n manager.attach_mock(mock_json, \"json\")\n manager.attach_mock(mock_markdown, \"markdown\")\n\n st.write(\"here is a dict\", {\"a\": 1, \"b\": 2}, \" and that is all\")\n\n expected_calls = [\n call.markdown(\"here is a dict\", unsafe_allow_html=False),\n call.json({\"a\": 1, \"b\": 2}),\n call.markdown(\" and that is all\", unsafe_allow_html=False),\n ]\n self.assertEqual(manager.mock_calls, expected_calls)\n\n def test_default_object(self):\n \"\"\"Test st.write with default clause ie some object.\"\"\"\n\n class SomeObject(object):\n def __str__(self):\n return \"1 * 2 - 3 = 4 `ok` !\"\n\n with patch(\"streamlit.markdown\") as p:\n st.write(SomeObject())\n\n p.assert_called_once_with(\n \"`1 * 2 - 3 = 4 \\\\`ok\\\\` !`\", unsafe_allow_html=False\n )\n\n def test_exception(self):\n \"\"\"Test st.write that raises an exception.\"\"\"\n with patch(\"streamlit.markdown\") as m, patch(\"streamlit.exception\") as e:\n m.side_effect = Exception(\"some exception\")\n st.write(\"some text\")\n\n e.assert_called_once()\n\n def test_spinner(self):\n \"\"\"Test st.spinner.\"\"\"\n # TODO(armando): Test that the message is actually passed to\n # message.warning\n with patch(\"streamlit.empty\") as e:\n with st.spinner(\"some message\"):\n time.sleep(0.15)\n e.assert_called_once_with()\n\n\ndef make_is_type_mock(true_type_matchers):\n \"\"\"Return a function that mocks is_type.\n\n When you do this:\n mock_is_type.side_effect = make_is_type_mock(\"foo.bar.Baz\")\n\n ...then when you call mock_is_type(my_type, \"foo.bar.Baz\") it will return\n True (and False otherwise).\n\n You can also pass in a tuple.\n \"\"\"\n if type(true_type_matchers) is not tuple:\n true_type_matchers = (true_type_matchers,)\n\n def new_is_type(obj, type_matchers):\n if type(type_matchers) is not tuple:\n type_matchers = (type_matchers,)\n\n for type_matcher in type_matchers:\n if type_matcher in true_type_matchers:\n return True\n return False\n\n return new_is_type\n", "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras unit test.\"\"\"\n\nimport unittest\nfrom mock import patch\n\ntry:\n from tensorflow.python.keras.utils import 
vis_utils\n from tensorflow.python.keras.models import Sequential\n from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten\nexcept ImportError:\n pass\n\nimport streamlit as st\nfrom tests import testutil\n\n\[email protected]_tensorflow\nclass KerasTest(unittest.TestCase):\n \"\"\"Test ability to marshall keras models.\"\"\"\n\n def test_model(self):\n \"\"\"Test that it can be called with a model.\"\"\"\n model = Sequential()\n model.add(Conv2D(10, (5, 5), input_shape=(28, 28, 1), activation=\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n model.add(Dense(8, activation=\"relu\"))\n\n with patch(\"streamlit.graphviz_chart\") as graphviz_chart:\n st.write(model)\n\n dot = vis_utils.model_to_dot(model)\n graphviz_chart.assert_called_once_with(dot.to_string())\n" ]
[ [ "numpy.array", "pandas.DataFrame" ], [ "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.MaxPooling2D", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Sequential", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.utils.vis_utils.model_to_dot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gummz/cell
[ "a741ca4900a11f1080b7572ac969f765e5ac2ffd", "a741ca4900a11f1080b7572ac969f765e5ac2ffd" ]
[ "src/experiments/compare_filters/experiment_compare_filters.py", "src/data/make_dataset_val.py" ]
[ "import os\nfrom os import listdir, makedirs\nfrom os.path import join\nimport pickle\nimport sys\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nfrom time import time\nfrom skimage import filters # threshold_yen, frangi\nfrom skimage.exposure import rescale_intensity\n\nimport src.data.constants as c\nimport src.data.utils.utils as utils\n\nmode = 'train'\nimg_idx = 1500\n\ntic = time()\nutils.setcwd(__file__)\n\nDIR = c.RAW_DATA_DIR\next = c.IMG_EXT\nfiles = c.RAW_FILES\nKERNEL = c.MEDIAN_FILTER_KERNEL\nimgs_path = join('..', c.DATA_DIR, mode, c.IMG_DIR)\nfilename = os.path.basename(__file__)\nfilename = os.path.splitext(filename)[0]\n\nimages = sorted([image for image in listdir(imgs_path) if '.npy' in image])\n# Get full image paths from filename list `images`\nimage_paths = sorted([join(imgs_path, image) for image in images])\npath = image_paths[img_idx]\nimg_name = images[img_idx].split('.')[0]\nsave = join(c.FIG_DIR, mode, img_name)\n\n# Create image-specific directory\nutils.make_dir(save)\n\nimg = np.int16(np.load(path))\nimg = cv2.normalize(img, None, alpha=0, beta=255,\n dtype=cv2.CV_8UC1, norm_type=cv2.NORM_MINMAX)\n# hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n\ncv2.imwrite(join(save, f'img_cv.{ext}'), img)\nplt.imsave(join(save, f'img_plt.{ext}'), img)\n\n# Operation: mean blur\noperation = 'meanblur'\nutils.make_dir(join(save, operation))\nfor i in range(1, 21, 2):\n img_blur = cv2.blur(img, (i, i))\n # img_blur = np.array(img_blur)\n # img_blur = np.where(img_blur > 5, img_blur, 0)\n name = f'{operation}_{i}'\n utils.imsave(join(save, operation, name), img_blur)\n\n# Operation\n# Median Blur\noperation = 'medianblur'\nutils.make_dir(join(save, operation))\nfor i in range(1, 21, 2):\n name = f'{operation}_{i}'\n if os.path.exists(join(save, operation, name)):\n break\n img_blur = cv2.medianBlur(img, i)\n # img_blur = np.array(img_blur)\n # img_blur = np.where(img_blur > 5, img_blur, 0)\n utils.imsave(join(save, operation, name), img_blur)\n\n\n# Operation\n# Denoise\noperation = 'denoise'\nutils.make_dir(join(save, operation))\nfor i in range(1, 21, 2):\n for j in range(1, 10, 2):\n for k in range(1, 30, 4):\n name = f'{operation}_{i}_{j}_{k}'\n if os.path.exists(join(save, operation, name)):\n break\n img_denoise = cv2.fastNlMeansDenoising(img, None, i, j, k)\n utils.imsave(join(save, operation, name),\n img_denoise)\n\n\n# Operation: Gaussian blur\noperation = 'gaussianblur'\nutils.make_dir(join(save, operation))\nfor kernel_size in [1, 5, 9, 15]:\n for sigma_x in [1, 5, 9]:\n for sigma_y in [1, 5, 9]:\n name = f'{operation}_{kernel_size}_{sigma_x}_{sigma_y}'\n if os.path.exists(join(save, operation, name)):\n break\n\n img_gauss = cv2.GaussianBlur(\n img, (kernel_size, kernel_size),\n sigma_x, sigmaY=sigma_y)\n\n utils.imsave(join(save, operation, name), img_gauss)\n\n\n# Operation: Bilateral filter\noperation = 'bilateral'\nutils.make_dir(join(save, operation))\nfor filter_size in [50, 150]:\n for sigma_color in [50, 150]:\n for sigma_space in [5, 9]:\n name = f'{operation}_{filter_size}_{sigma_color}_{sigma_space}'\n if os.path.exists(join(save, operation, name)):\n break\n img_bilateral = cv2.bilateralFilter(\n img, filter_size, sigma_color, sigma_space)\n\n utils.imsave(join(save, operation, name), img_bilateral)\n\n\noperation = 'frangi'\nutils.make_dir(join(save, operation))\nfor alpha in np.linspace(0.1, 1, 10):\n for beta in np.linspace(0.1, 1, 10):\n for gamma in np.linspace(1, 30, 5):\n if 
os.path.exists(join(save, operation, name)):\n break\n img_frangi = frangi(img, alpha=alpha, beta=beta,\n gamma=gamma, black_ridges=False)\n name = f'{operation}_plt_{img_name}_{alpha:.2f}_{beta}_{gamma}'\n utils.imsave(join(save, operation, name), img_frangi)\n\n\noperation = 'canny'\nutils.make_dir(join(save, operation))\nfor thresh1 in [20, 50, 80, 100, 150, 200][-2:]:\n for thresh2 in [20, 50, 80, 100, 150, 200][-2:]:\n for aperture_size in [3, 5, 7]:\n for L2_gradient in [True, False]:\n if os.path.exists(join(save, operation, name)):\n break\n img = cv2.fastNlMeansDenoising(img, None, 11, 7, 21)\n # img = cv2.normalize(img, None, alpha=0,\n # beta=1, dtype=cv2.CV_32FC1,\n # norm_type=cv2.NORM_MINMAX)\n # img *= np.where((0.05 < img) & (img < 0.3), img * 3, img)\n # img = cv2.normalize(img, None, alpha=0,\n # beta=255, dtype=cv2.CV_8UC1,\n # norm_type=cv2.NORM_MINMAX)\n img_canny = cv2.Canny(\n img, thresh1, thresh2, None,\n apertureSize=aperture_size, L2gradient=L2_gradient)\n name = (f'canny_{thresh1}_{thresh2}'\n f'_{aperture_size}_{L2_gradient}')\n utils.imsave(join(save, operation, name), img_canny, 512)\n\n# Operation\n# Simple Threshold\n# operation = 'simple_threshold'\n# _, thresh = cv2.threshold(img_blur, SIMPLE_THRESHOLD, 255, cv2.THRESH_BINARY)\n# cv2.imwrite(f'{save}/{operation}_{img_name}.png', thresh)\n\n# Operation\n# Rescale intensity\noperation = 'rescale_intensity'\nyen_threshold = filters.threshold_yen(img_blur)\nfor thresh in range(80, 220, 20):\n bright = filters.rescale_intensity(\n img_blur, (0, yen_threshold), (220, 255))\n utils.imsave(join(save, operation, thresh), bright)\n\n# bright = Image.fromarray(bright)\n\n# # Operation\n# # Generate and save histogram of intensified image\n# operation = 'histogram_intense'\n# plt.hist(bright.ravel(), 256, [0, 256])\n# plt.show()\n# plt.savefig(f'{save}/{img_name}_{operation}.jpg')\n\nelapsed = utils.time_report(tic, time())\nprint(f'{filename} complete after {elapsed}.')\n", "import os\nfrom os import listdir\nfrom os.path import join, splitext\nimport random\nfrom time import time\nfrom PIL import Image\nimport numpy as np\nimport src.data.utils.utils as utils\nimport src.data.constants as c\nfrom aicsimageio import AICSImage\nimport pandas as pd\n\nmode = 'val'\nrandom.seed(41)\nutils.setcwd(__file__)\n\nraw_data_dir = c.RAW_DATA_DIR\n\nfiles = c.RAW_FILES[mode].keys()\nfiles = utils.add_ext(files)\n\nfile_paths = [join(raw_data_dir, file) for file in files]\ncell_ch = c.CELL_CHANNEL\n\n# Create `IMG_DIR` folder, in case it doesn't exist\n# f'../data/interim/{folder_name}'\nfolder = join(c.DATA_DIR, mode, c.IMG_DIR)\nutils.make_dir(folder)\n\n# Get files in imgs folder - need to know what's in there so\n# we don't start the index at 0\nimages = [image for image in listdir(folder) if '.npy' in image]\n\n# This is the index we will start on, in case there are already\n# data files in there\n# So, we are only adding to the existing list of files in /imgs/\n# -1 for zero-indexing, +1 because we want to start at the next free index\nimg_idx = len(images) - 1 + 1\nidx = img_idx if img_idx > 0 else 0 # numbering for images\n# How often to print out with matplotlib\ndebug_every = c.DBG_EVERY\n\ntic = time()\nn_timepoints = 5\nn_slices = 10\nidx = 0\n# To store which timepoints and slices\n# were randomly chosen for each file\nfile_indices = {}\nslice_record = []\n\nfor j, (file, file_path) in enumerate(zip(files, file_paths)):\n data = AICSImage(file_path)\n\n if '.czi' not in file_path:\n T = data.dims['T'][0]\n Z = 
data.dims['Z'][0]\n else:\n dims = utils.get_czi_dims(data.metadata)\n T = dims['T']\n Z = dims['Z']\n\n # What timepoints it's ok to use, according to excel sheet\n time_ok = c.RAW_FILES[mode][splitext(file)[0]]\n time_ok = T if time_ok is None else time_ok[1]\n\n time_idx = sorted(random.sample(range(time_ok), n_timepoints))\n\n indices = []\n for t in time_idx:\n timepoint = data.get_image_dask_data(\n 'ZXY', T=t, C=cell_ch).compute()\n\n slice_idx = sorted(random.sample(range(Z), n_slices))\n\n # Gets slices with most cells on them\n # slice_idx = utils.active_slices(timepoint)\n # Make record of the indices of T and Z\n record = utils.record_dict(t, slice_idx)\n indices.append(record)\n\n timepoint_sliced = timepoint[slice_idx]\n for i, z_slice in enumerate(timepoint_sliced):\n name = f'{idx:05d}'\n save = os.path.join(folder, name)\n\n np.save(save, z_slice)\n\n if idx % debug_every == 0:\n dirs = os.path.dirname(save)\n filename = os.path.basename(save)\n utils.imsave(f'{dirs}/_{filename}.{c.IMG_EXT}', z_slice, 512)\n\n slice_record.append((name, file, t, slice_idx[i]))\n idx = idx + 1\n\n # ';'.join([f'{key}:{\",\".join(value)}' for idx in indices for key, value in idx.items()])\n file_indices[file] = indices\n\n# Record keeping over which indices were randomly selected\nsave = join(c.DATA_DIR, mode, c.IMG_DIR)\n\n# np.savetxt(join(save, 'index_record_np.csv'), file_indices)\n\nindex_record = pd.DataFrame(file_indices, columns=list(file_indices.keys()))\nindex_record.to_csv(join(save, 'index_record.csv'), sep=';')\n\nslices_record = pd.DataFrame(slice_record, columns=[\n 'name', 'file', 'timepoint', 'slice'])\nslices_record.to_csv(join(save, 'slice_record.csv'), sep='\\t', index=False)\n\ntoc = time()\nprint(f'make_dataset_val.py complete after {(toc-tic)/60: .1f} minutes.')\n" ]
[ [ "numpy.load", "numpy.linspace" ], [ "numpy.save", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tilmanbeck/contextualized-topic-models
[ "53d3dc262dc2370a9d9052f798565833f2472320" ]
[ "CTM.py" ]
[ "from contextualized_topic_models.models.ctm import CTM\nfrom contextualized_topic_models.utils.data_preparation import TextHandler\nfrom contextualized_topic_models.utils.data_preparation import bert_embeddings_from_list\nfrom contextualized_topic_models.datasets.dataset import CTMDataset\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input\")\nparser.add_argument(\"--dataset\")\nparser.add_argument(\"--epochs\", default=10, type=int)\nargs = parser.parse_args()\n\nif args.dataset == \"trec\":\n df = pd.read_csv(args.input, sep=\"\\t\")\n texts = list(df['full_text'].values)\n ids = list(df['tweetId'].values)\n nr_topics = len(df['topicId'].unique())\nelif args.dataset == \"reuters\":\n df = pd.read_json(args.input, orient='records', lines=True)\n texts = list(df['title'].values)\n ids = list(df['identifier'].values)\n nr_topics = len(df['topicId'].unique())\nelif args.dataset == \"ibm\":\n df = pd.read_csv(args.input, sep=\"\\t\")\n ids_all = []\n predictions_all = []\n for i, topic in enumerate(df[\"topic\"].unique()):\n ddf = df[df[\"topic\"] == topic]\n texts = list(ddf['argument'].values)\n ids = list(ddf['argumentId'].values)\n nr_topics = len(ddf[\"keypoint_id\"].unique())\n\n handler = TextHandler(texts)\n handler.prepare() # create vocabulary and training data\n\n # load BERT data\n training_bert = bert_embeddings_from_list(texts, 'bert-base-nli-mean-tokens')\n\n training_dataset = CTMDataset(handler.bow, training_bert, handler.idx2token)\n\n ctm = CTM(input_size=len(handler.vocab), bert_input_size=768, num_epochs=args.epochs, inference_type=\"combined\",\n n_components=nr_topics, num_data_loader_workers=5)\n\n ctm.fit(training_dataset) # run the model\n\n distribution = ctm.get_thetas(training_dataset)\n\n best_match_topics = np.argmax(distribution, axis=1)\n\n # collect ids and predictions\n ids_all += ids\n predictions_all += best_match_topics\n print(len(predictions_all), len(ids_all))\n with open('predictions_CTM_' + args.dataset + '.txt', 'w') as fp:\n for ID, topicId in zip(ids_all, predictions_all):\n fp.write(str(ID) + ' ' + str(topicId) + '\\n')\n exit()\nelif args.dataset == \"webis\":\n df = pd.read_csv(args.input, sep=\"\\t\")\n ids_all = []\n predictions_all = []\n for i, topic in enumerate(df[\"topic_id\"].unique()):\n ddf = df[df[\"topic_id\"] == topic]\n texts = list(ddf['conclusion'].values)\n ids = list(ddf['argument_id'].values)\n nr_topics = len(ddf[\"frame_id\"].unique())\n\n handler = TextHandler(texts)\n handler.prepare() # create vocabulary and training data\n\n # load BERT data\n training_bert = bert_embeddings_from_list(texts, 'bert-base-nli-mean-tokens')\n\n training_dataset = CTMDataset(handler.bow, training_bert, handler.idx2token)\n\n ctm = CTM(input_size=len(handler.vocab), bert_input_size=768, num_epochs=args.epochs, inference_type=\"combined\",\n n_components=nr_topics, num_data_loader_workers=5)\n\n ctm.fit(training_dataset) # run the model\n\n distribution = ctm.get_thetas(training_dataset)\n\n best_match_topics = np.argmax(distribution, axis=1)\n\n # collect ids and predictions\n ids_all += ids\n predictions_all += best_match_topics\n print(len(predictions_all), len(ids_all))\n with open('predictions_CTM_' + args.dataset + '.txt', 'w') as fp:\n for ID, topicId in zip(ids_all, predictions_all):\n fp.write(str(ID) + ' ' + str(topicId) + '\\n')\n exit()\nelse:\n print('not implemented yet')\n exit()\n\nprint('nr 
of data samples:', len(texts))\nprint('nr topics:', nr_topics)\n\nhandler = TextHandler(texts)\nhandler.prepare() # create vocabulary and training data\n\n# load BERT data\ntraining_bert = bert_embeddings_from_list(texts, 'bert-base-nli-mean-tokens')\n\ntraining_dataset = CTMDataset(handler.bow, training_bert, handler.idx2token)\n\nctm = CTM(input_size=len(handler.vocab), bert_input_size=768, num_epochs=args.epochs, inference_type=\"combined\",\n n_components=nr_topics, num_data_loader_workers=5)\n\nctm.fit(training_dataset) # run the model\n\ndistribution = ctm.get_thetas(training_dataset)\n\nbest_match_topics = np.argmax(distribution, axis=1)\n\nwith open('predictions_CTM_' + args.dataset + '.txt', 'w') as fp:\n for ID, topicId in zip(ids, best_match_topics):\n fp.write(str(ID) + ' ' + str(topicId) + '\\n')\n" ]
[ [ "numpy.argmax", "pandas.read_csv", "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
jvegreg/ESMValCore
[ "b46b948c47d8579d997b28501f8588f5531aa354", "03eb1c942bf1dc3be98cb30c3592b42e82a94f16" ]
[ "esmvalcore/cmor/_fixes/cmip5/gfdl_cm2p1.py", "esmvalcore/cmor/_fixes/native6/oras5.py" ]
[ "\"\"\"Fixes for GFDL CM2p1 model.\"\"\"\nfrom copy import deepcopy\n\nimport cftime\nimport numpy as np\n\nfrom esmvalcore.iris_helpers import date2num\n\nfrom ..cmip5.gfdl_esm2g import AllVars as BaseAllVars\nfrom ..fix import Fix\nfrom .cesm1_cam5 import Cl as BaseCl\n\nCl = BaseCl\n\n\nclass AllVars(BaseAllVars):\n \"\"\"Fixes for all variables.\"\"\"\n\n\nclass Areacello(Fix):\n \"\"\"Fixes for areacello.\"\"\"\n\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\n\n Fixes wrong units.\n\n Parameters\n ----------\n cube: iris.cube.Cube\n\n Returns\n -------\n iris.cube.Cube\n \"\"\"\n cube = self.get_cube_from_list(cubes)\n cube.units = 'm2'\n return cubes\n\n\nclass Sftof(Fix):\n \"\"\"Fixes for sftof.\"\"\"\n\n def fix_data(self, cube):\n \"\"\"Fix data.\n\n Fixes discrepancy between declared units and real units\n\n Parameters\n ----------\n cube: iris.cube.Cube\n\n Returns\n -------\n iris.cube.Cube\n \"\"\"\n metadata = cube.metadata\n cube *= 100\n cube.metadata = metadata\n return cube\n\n\nclass Sit(Fix):\n \"\"\"Fixes for sit.\"\"\"\n\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\n\n Fixes bad bounds\n\n Parameters\n ----------\n cube: iris.cube.Cube\n\n Returns\n -------\n iris.cube.Cube\n \"\"\"\n cube = self.get_cube_from_list(cubes)\n time = cube.coord('time')\n if self._fix_required(time):\n times = time.units.num2date(time.points)\n starts = [\n cftime.DatetimeJulian(c.year, c.month, 1)\n for c in times\n ]\n ends = [\n cftime.DatetimeJulian(c.year, c.month + 1, 1)\n if c.month < 12\n else cftime.DatetimeJulian(c.year + 1, 1, 1)\n for c in times\n ]\n time.bounds = date2num(np.stack([starts, ends], -1), time.units)\n return cubes\n\n def _fix_required(self, time):\n return (\n self.vardef.frequency == 'mon' and\n not (time.bounds[-1, 0] < time.points[-1] < time.bounds[-1, 1])\n )\n\n\nclass Tos(Fix):\n \"\"\"Fixes for tos.\"\"\"\n\n def fix_data(self, cube):\n \"\"\"Fix data.\n\n Fixes discrepancy between declared units and real units\n\n Parameters\n ----------\n cube: iris.cube.Cube\n\n Returns\n -------\n iris.cube.Cube\n \"\"\"\n metadata = deepcopy(cube.metadata)\n cube += 273.15\n cube.metadata = metadata\n return cube\n\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\n\n Fixes wrong standard_name.\n\n Parameters\n ----------\n cube: iris.cube.Cube\n\n Returns\n -------\n iris.cube.Cube\n \"\"\"\n cube = self.get_cube_from_list(cubes)\n cube.standard_name = 'sea_surface_temperature'\n return cubes\n", "\"\"\"Fixes for ERA5.\"\"\"\nimport datetime\nimport logging\n\nimport iris\nimport numpy as np\n\nfrom ..fix import Fix\nfrom ..shared import cube_to_aux_coord\n\nlogger = logging.getLogger(__name__)\n\nclass Thetao(Fix):\n \"\"\"Fixes for Geopotential.\"\"\"\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\"\"\"\n cube = cubes.extract('votemper')[0]\n cube.standard_name = self.vardef.standard_name\n cube.long_name = self.vardef.long_name\n cube.var_name = self.vardef.short_name\n cube.units = self.vardef.units\n levels = cube.coord(var_name='deptht')\n try:\n cube.coord(var_name='time')\n except iris.exceptions.CoordinateNotFoundError:\n cube.add_dim_coord(\n iris.coords.DimCoord(\n points=cubes.extract('time')[0].core_data(),\n var_name='time',\n standard_name='time',\n units=cubes.extract('time')[0].units,\n ),\n 0)\n\n for coord in cube.coords():\n if coord.var_name != 'time':\n cube.remove_coord(coord)\n cube.add_aux_coord(\n cube_to_aux_coord(\n cubes.extract('latitude')[0]), (2, 3))\n cube.add_aux_coord(\n cube_to_aux_coord(\n 
cubes.extract('longitude')[0]), (2, 3))\n cube.add_dim_coord(\n iris.coords.DimCoord(\n points=levels.points,\n var_name='lev',\n standard_name='depth',\n long_name='ocean depth coordinate',\n units='m',\n attributes={'positive': 'down'}\n ),\n 1)\n return cube\n\nclass So(Fix):\n \"\"\"Fixes for Geopotential.\"\"\"\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\"\"\"\n cube = cubes.extract('vosaline')[0]\n cube.standard_name = self.vardef.standard_name\n cube.long_name = self.vardef.long_name\n cube.var_name = self.vardef.short_name\n cube.units = self.vardef.units\n levels = cube.coord(var_name='deptht')\n try:\n cube.coord(var_name='time')\n except iris.exceptions.CoordinateNotFoundError:\n cube.add_dim_coord(\n iris.coords.DimCoord(\n points=cubes.extract('time')[0].core_data(),\n var_name='time',\n standard_name='time',\n units=cubes.extract('time')[0].units,\n ),\n 0)\n\n for coord in cube.coords():\n if coord.var_name != 'time':\n cube.remove_coord(coord)\n cube.add_aux_coord(\n cube_to_aux_coord(\n cubes.extract('latitude')[0]), (2, 3))\n cube.add_aux_coord(\n cube_to_aux_coord(\n cubes.extract('longitude')[0]), (2, 3))\n cube.add_dim_coord(\n iris.coords.DimCoord(\n points=levels.points,\n var_name='lev',\n standard_name='depth',\n long_name='ocean depth coordinate',\n units='m',\n ),\n 1)\n del cube.attributes['invalid_units']\n return cube\n\nclass AllVars(Fix):\n \"\"\"Fixes for all variables.\"\"\"\n def fix_metadata(self, cubes):\n \"\"\"Fix metadata.\"\"\"\n fixed_cubes = iris.cube.CubeList()\n i = iris.coords.DimCoord(\n np.arange(1, cubes.shape[3]+1).astype(np.int32),\n long_name='cell index along first dimension',\n units='1',\n var_name='i')\n j = iris.coords.DimCoord(\n np.arange(1, cubes.shape[2]+1).astype(np.int32),\n long_name='cell index along second dimension',\n units='1',\n var_name='j')\n cubes.add_dim_coord(j, 2)\n cubes.add_dim_coord(i, 3)\n f = np.vectorize(lambda x: x % 360)\n cubes.coord('longitude').points = f(cubes.coord('longitude').points)\n for name in ['latitude', 'longitude']:\n cubes.coord(name).units = self.vardef.coordinates[name].units\n cubes.coord(name).bounds = self.create_bounds(cubes.coord(name).points, name)\n fixed_cubes.append(cubes)\n return fixed_cubes\n \n def create_bounds(self, coord, name):\n if name == 'latitude':\n return self.create_vertex_lats(coord)\n else:\n return self.create_vertex_lons(coord)\n \n def create_vertex_lons(self, a):\n ny = a.shape[0]\n nx = a.shape[1]\n f = np.vectorize(lambda x: x % 360)\n if nx == 1: # Longitudes were integrated out\n if ny == 1:\n return f(np.array([a[0, 0]]))\n return np.zeros([ny, 2])\n b = np.zeros([ny, nx, 4])\n b[:, 1:nx, 0] = f(0.5 * (a[:, 0:nx - 1] + a[:, 1:nx]))\n b[:, 0, 0] = f(1.5 * a[:, 0] - 0.5 * a[:, 1])\n b[:, 0:nx - 1, 1] = b[:, 1:nx, 0]\n b[:, nx - 1, 1] = f(1.5 * a[:, nx - 1] - 0.5 * a[:, nx - 2])\n b[:, :, 2] = b[:, :, 1]\n b[:, :, 3] = b[:, :, 0]\n return b\n \n def create_vertex_lats(self, a):\n ny = a.shape[0]\n nx = a.shape[1]\n f = np.vectorize(lambda x: (x + 90) % 180 - 90)\n if nx == 1: # Longitudes were integrated out\n if ny == 1:\n return f(np.array([a[0, 0]]))\n b = np.zeros([ny, 2])\n b[1:ny, 0] = f(0.5 * (a[0:ny - 1, 0] + a[1:ny, 0]))\n b[0, 0] = f(2 * a[0, 0] - b[1, 0])\n b[0:ny - 1, 1] = b[1:ny, 0]\n b[ny - 1, 1] = f(1.5 * a[ny - 1, 0] - 0.5 * a[ny - 2, 0])\n return b\n b = np.zeros([ny, nx, 4])\n b[1:ny, :, 0] = f(0.5 * (a[0:ny - 1, :] + a[1:ny, :]))\n b[0, :, 0] = f(2 * a[0, :] - b[1, :, 0])\n b[:, :, 1] = b[:, :, 0]\n b[0:ny - 1, :, 2] = b[1:ny, 
:, 0]\n b[ny - 1, :, 2] = f(1.5 * a[ny - 1, :] - 0.5 * a[ny - 2, :])\n b[:, :, 3] = b[:, :, 2]\n return b\n\n" ]
[ [ "numpy.stack" ], [ "numpy.arange", "numpy.array", "numpy.vectorize", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jkpawlowski96/Web-scraper
[ "5b6db52198b10e6de619a4db7e5a1ba652e98c45" ]
[ "app/database.py" ]
[ "from pymongo import MongoClient\nimport pandas as pd\nfrom bson.json_util import dumps\nfrom flask import make_response\n\n\ndef db_init():\n \"\"\"\n Initialize database connection and clear data in collection\n :return: MongoDB reference object\n \"\"\"\n client = MongoClient(\"mongodb+srv://admin:[email protected]/test?retryWrites=true&w=majority\")\n # client['scrap'].dropDatabase()\n db = client['scrap']\n db['resources'].drop()\n db['tasks'].drop()\n return db\n\n# init database\ndb = db_init()\n\n\ndef data_add_task(address):\n \"\"\"\n Add new working task o database\n :param address: website address example: https://www.youtube.com/\n :return: True value as job done\n \"\"\"\n global db\n db.tasks.insert_one({'Address': address})\n return True\n\n\ndef data_add_resource(row):\n \"\"\"\n Add new working task o database\n :param row: Row to add\n :return: True value as job done\n \"\"\"\n global db\n db.resources.insert_one(row)\n return True\n\n\ndef address_working(address, value=None):\n \"\"\"\n Find, insert or delete from database task address\n :param address: website address example: https://www.youtube.com/\n :param value: True: add , False: remove, default: find\n :return:\n \"\"\"\n global db\n if value is True:\n db.tasks.insert_one({'Address': address})\n return True\n\n if value is False:\n db.tasks.delete_many({'Address': address})\n return False\n\n x = list(db.tasks.find({'Address': address}))\n if len(x) == 0:\n return False\n else:\n return True\n\n\ndef data_export(colums={'Address': 1, 'Text': 1, 'Images': 1, 'Images_links': 1, '_id': 0}, query={}, download=False):\n \"\"\"\n Export found data from database\n :param colums: Columns to export\n :param query: Filter of data\n :param download: True: return file, default: return view\n :return: data\n \"\"\"\n global db\n data = db.resources.find(query, colums)\n data = list(data)\n data = dumps(data)\n if download is False:\n return data\n else:\n resp = make_response(data)\n resp.headers[\"Content-Disposition\"] = \"attachment; filename=data.json\"\n resp.headers[\"Content-Type\"] = \"text/json\"\n return resp\n\n\ndef address_done(address):\n \"\"\"\n Check if address is already in done resources\n :param address: website address example: https://www.youtube.com/\n :return: result\n \"\"\"\n global db\n x = list(db.resources.find({'Address': address}))\n if len(x) == 0:\n # not found\n return False\n else:\n # found\n return True\n\n\ndef db_to_df(query={}, no_id=True):\n \"\"\"\n Read from Mongo and transform into DataFrame\n :param query: data filter\n :param no_id: don't include id field\n :return: DataFrame object\n \"\"\"\n global db\n\n # Make a query to the specific DB and Collection\n cursor = db.resources.find(query)\n\n # Expand the cursor and construct the DataFrame\n df = pd.DataFrame(list(cursor))\n\n # Delete the _id\n if no_id and '_id' in df:\n del df['_id']\n\n return df\n\n\ndef json_to_scv(data, download=False):\n \"\"\"\n Transform json to csv\n :param data: json with data\n :param download: True: return file, default: return view\n :return: csv\n \"\"\"\n data = pd.read_json(data)\n\n if download is False:\n # as a html view\n return data.to_html()\n # to file\n data = data.to_csv(index=False)\n resp = make_response(data)\n resp.headers[\"Content-Disposition\"] = \"attachment; filename=data.csv\"\n resp.headers[\"Content-Type\"] = \"text/csv\"\n return resp" ]
[ [ "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Sesamestrong/COCO_Utils
[ "998cfc9cdd93203c86011632cd8f45bcdc6eceb9" ]
[ "labelbox2coco.py" ]
[ "# Labelbox json file to COCO json files\n# Based on the LBExporters 0.1.1 file (https://pypi.org/project/LBExporters/), but heavily modified\n\n# Import libraries\nimport json\nimport datetime as dt\nimport logging\nimport requests\nfrom PIL import Image\nimport numpy as np\nimport os\n\nimport rasterio\nimport shapely\n\nimport time\n\nfrom tqdm import tqdm\n\ndef labelbox_to_json(labeled_data, coco_output, images_output_dir,cat_order=None,verbose=False):\n\n \"\"\"\n Converts from labelbox json export format to COCO json\n stores the png files locally\n tested only for instance segmentation datasets\n\n Args:\n labeled_data: json file exported from labelbox\n coco output: path of json file to store the coco labels\n images_output_dir: folder to put the\n\n \"\"\"\n\n\n # read labelbox JSON output\n with open(labeled_data, 'r') as f:\n label_data = json.loads(f.read())\n\n # setup COCO dataset container and info\n coco = {\n 'info': None,\n 'images': [],\n 'annotations': [],\n 'licenses': [],\n 'categories': []\n }\n\n # Include base information about the export\n coco['info'] = {\n 'year': dt.datetime.now(dt.timezone.utc).year,\n 'version': None,\n 'description': label_data[0]['Project Name'],\n 'contributor': label_data[0]['Created By'],\n 'url': 'labelbox.com',\n 'date_created': dt.datetime.now(dt.timezone.utc).isoformat()\n }\n\n count = 1\n\n # Go though each labelled image\n\n existing_files=os.listdir(images_output_dir)\n\n for data in tqdm(label_data):\n im=None\n\n # Create an id using consecutive numbers\n image_id = data['External ID']\n\n if image_id in existing_files:\n im=Image.open(open(images_output_dir+\"/\"+image_id))\n if im is None:\n for i in range(3):\n try:\n # Download and get image name\n try:\n response = requests.get(data['Labeled Data'], stream=True)\n except requests.exceptions.MissingSchema as e:\n logging.exception(('\"Labeled Data\" field must be a URL. '\n 'Support for local files coming soon'))\n continue\n except requests.exceptions.ConnectionError as e:\n logging.exception('Failed to fetch image from {}'\n .format(data['Labeled Data']))\n continue\n\n response.raw.decode_content = True\n\n # Open image and get image size\n im = Image.open(response.raw)\n break\n except:\n time.sleep(0.5)\n continue\n\n if im is None:\n print(\"No image for file\",image_id,data['Labeled Data'])\n continue\n width, height = im.size\n\n\n # Print status\n if verbose: print('###### Processing ' + data['ID'] + ' image, ' + 'Image ' + str(count) + ' of ' + str(len(label_data)))\n count = count + 1\n\n # Write image in png format, is will have the ID as name (e.g. 
23.png)\n image_path = images_output_dir + '/' + str(image_id) + '.png'\n im.save(image_path)\n image_name = image_id\n\n # Include only images with annotations\n if not ('objects' in data['Label']):\n if verbose: print(\"Image without annotations\")\n else:\n\n # build the file name name (path), ID, dimensions\n image = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": image_name,\n \"license\": None,\n \"flickr_url\": data['ID'],\n \"coco_url\": image_name,\n \"date_captured\": None,\n }\n\n # Write it inbto the images list\n coco['images'].append(image)\n\n # remove classification labels (Skip, etc...)\n labels = data['Label']\n if not callable(getattr(labels, 'keys', None)):\n continue\n\n # convert the masks into polygon format\n for category_name, binary_masks in labels.items():\n\n polygons = []\n\n count2 = 1\n\n for mask in binary_masks:\n\n if verbose: print('processing ' + mask['value'] + ' instance. Instance ' + str(count2) + ' of ' + str(len(binary_masks)))\n count2 = count2+ 1\n\n try:\n # check if label category exists in 'categories' field\n category_id = [c['id'] for c in coco['categories'] if c['supercategory'] == mask['value']][0]\n # If it doesnt, create it\n except IndexError:\n category_id = len(coco['categories']) + 1 if cat_order is None else ( cat_order.index(mask['value'])+1 if mask['value'] in cat_order else None )\n if category_id is None: raise ValueError(\"category order provided does not contain id '\"+mask['value']+\"'\")\n category = {\n 'supercategory': mask['value'],\n 'id': category_id,\n 'name': mask['value']\n }\n coco['categories'].append(category)\n\n\n def get_im():\n # Get the binary mask name\n try:\n response = requests.get(mask['instanceURI'], stream=True)\n except requests.exceptions.MissingSchema as e:\n logging.exception(('\"Labeled Data\" field must be a URL. 
'\n 'Support for local files coming soon'))\n return None\n except requests.exceptions.ConnectionError as e:\n logging.exception('Failed to fetch image from {}'\n .format(mask['instanceURI']))\n return None\n\n response.raw.decode_content = True\n\n # Open the binary mask (it is just a png image with 1 and 0)\n return Image.open(response.raw)\n\n try:\n im=get_im()\n except:\n im=get_im() # retry if it fails the first time--this solves a pretty annoying issue with finnicky image downloads\n\n if im is None:\n print(\"could not get binary mask\")\n continue\n\n # Transform to numpy array, as numpy reads columns and rows differently we need to reshape the array\n im_np = np.array(im)\n im.np = im_np.reshape(im.size[0], im.size[1],4)\n\n # Transform the masks to a listo of polygons\n all_polygons = mask_to_polygons_layer(im_np)\n if verbose: print('Instance consisting in ' + str(len(all_polygons)) + ' polygon')\n\n all_segmentation = []\n\n # Transform the list of polygons in multipolygons\n all_polygons_multi = shapely.geometry.MultiPolygon(all_polygons)\n\n # Get the coordinates of all the polygons and put them on a list\n for polygon in all_polygons:\n\n segmentation = []\n\n for x, y in polygon.exterior.coords:\n segmentation.extend([x, y])\n\n all_segmentation.append(segmentation)\n\n # Create the anotation dic, with all the segmentation data\n annotation = {\n \"id\": len(coco['annotations']) + 1,\n \"image_id\": image_id,\n \"category_id\": category_id,\n \"segmentation\": all_segmentation,\n \"area\": all_polygons_multi.area, # float\n \"bbox\": [all_polygons_multi.bounds[0], all_polygons_multi.bounds[1],\n all_polygons_multi.bounds[2] - all_polygons_multi.bounds[0],\n all_polygons_multi.bounds[3] - all_polygons_multi.bounds[1]],\n \"iscrowd\": 0\n }\n\n coco['annotations'].append(annotation)\n\n coco['categories']=sorted(coco['categories'],key=lambda b:b['id'])\n if verbose: print(coco['categories'])\n\n # Write the coco json file\n with open(coco_output, 'w+') as f:\n f.write(json.dumps(coco))\n if verbose: print(\"Image preprocessing ready\")\n\n#\ndef mask_to_polygons_layer(mask):\n\n \"\"\"\n Function to convert from binary mask to polygon\n\n Args:\n mask: numpy array containing the binary mask\n\n \"\"\"\n\n # Use rasterio to generate shapes of pixels greater than 1 (I am using the first band)\n generator = rasterio.features.shapes(mask[:,:,0].astype(np.int16),connectivity=8, mask=(mask[:,:,0] >0),transform=rasterio.Affine(1.0, 0, 0, 0, 1.0, 0))\n\n all_polygons = []\n\n # Put all the polygons in a list\n for poly,value in generator:\n all_polygons.append(shapely.geometry.shape(poly))\n\n return all_polygons\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hossein20s/CycleGAN-Tensorflow-2
[ "272a886939d8ef81cf3dcc53df6b945ab375cc36" ]
[ "tf2gan/loss.py" ]
[ "import logging\n\nimport tensorflow as tf\n\nlogging.config.fileConfig(fname='log.conf')\nlogger = logging.getLogger('dev')\n\n\ndef get_gan_losses_fn():\n bce = tf.losses.BinaryCrossentropy(from_logits=True)\n\n def d_loss_fn(r_logit, f_logit):\n r_loss = bce(tf.ones_like(r_logit), r_logit)\n f_loss = bce(tf.zeros_like(f_logit), f_logit)\n return r_loss, f_loss\n\n def g_loss_fn(f_logit):\n f_loss = bce(tf.ones_like(f_logit), f_logit)\n return f_loss\n\n return d_loss_fn, g_loss_fn\n\n\ndef get_hinge_v1_losses_fn():\n def d_loss_fn(r_logit, f_logit):\n r_loss = tf.reduce_mean(tf.maximum(1 - r_logit, 0))\n f_loss = tf.reduce_mean(tf.maximum(1 + f_logit, 0))\n return r_loss, f_loss\n\n def g_loss_fn(f_logit):\n f_loss = tf.reduce_mean(tf.maximum(1 - f_logit, 0))\n return f_loss\n\n return d_loss_fn, g_loss_fn\n\n\ndef get_hinge_v2_losses_fn():\n def d_loss_fn(r_logit, f_logit):\n r_loss = tf.reduce_mean(tf.maximum(1 - r_logit, 0))\n f_loss = tf.reduce_mean(tf.maximum(1 + f_logit, 0))\n return r_loss, f_loss\n\n def g_loss_fn(f_logit):\n f_loss = tf.reduce_mean(- f_logit)\n return f_loss\n\n return d_loss_fn, g_loss_fn\n\n\ndef get_lsgan_losses_fn():\n mse = tf.losses.MeanSquaredError()\n\n def d_loss_fn(r_logit, f_logit):\n r_loss = mse(tf.ones_like(r_logit), r_logit)\n f_loss = mse(tf.zeros_like(f_logit), f_logit)\n return r_loss, f_loss\n\n def g_loss_fn(f_logit):\n f_loss = mse(tf.ones_like(f_logit), f_logit)\n return f_loss\n\n return d_loss_fn, g_loss_fn\n\n\ndef get_wgan_losses_fn():\n def d_loss_fn(r_logit, f_logit):\n r_loss = - tf.reduce_mean(r_logit)\n f_loss = tf.reduce_mean(f_logit)\n return r_loss, f_loss\n\n def g_loss_fn(f_logit):\n f_loss = - tf.reduce_mean(f_logit)\n return f_loss\n\n return d_loss_fn, g_loss_fn\n\n\ndef get_adversarial_losses_fn(mode):\n if mode == 'gan':\n return get_gan_losses_fn()\n elif mode == 'hinge_v1':\n return get_hinge_v1_losses_fn()\n elif mode == 'hinge_v2':\n return get_hinge_v2_losses_fn()\n elif mode == 'lsgan':\n return get_lsgan_losses_fn()\n elif mode == 'wgan':\n return get_wgan_losses_fn()\n\n\ndef gradient_penalty(f, real, fake, mode):\n def _gradient_penalty(f, real, fake=None):\n def _interpolate(a, b=None):\n if b is None: # interpolation in DRAGAN\n beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)\n b = a + 0.5 * tf.math.reduce_std(a) * beta\n shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)\n alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)\n inter = a + alpha * (b - a)\n inter.set_shape(a.shape)\n return inter\n\n x = _interpolate(real, fake)\n with tf.GradientTape() as t:\n t.watch(x)\n pred = f(x)\n grad = t.gradient(pred, x)\n norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)\n gp = tf.reduce_mean((norm - 1.)**2)\n\n return gp\n\n if mode == 'none':\n gp = tf.constant(0, dtype=real.dtype)\n elif mode == 'dragan':\n gp = _gradient_penalty(f, real)\n elif mode == 'wgan-gp':\n gp = _gradient_penalty(f, real, fake)\n else:\n logger.error(\"mode {} is not defines\".format(mode))\n\n return gp\n" ]
[ [ "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.maximum", "tensorflow.random.uniform", "tensorflow.ones_like", "tensorflow.losses.MeanSquaredError", "tensorflow.math.reduce_std", "tensorflow.zeros_like", "tensorflow.losses.BinaryCrossentropy", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
PatriciaXiao/Structured-Self-Attention
[ "a13208565e2ba313b151420aca0c36fae2f3da9b" ]
[ "classification.py" ]
[ "#You can write your own classification file to use the module\nfrom attention.model import StructuredSelfAttention\nfrom attention.train import train,get_activation_wts,evaluate\nfrom utils.pretrained_glove_embeddings import load_glove_embeddings\nfrom utils.data_loader import load_data_set\nfrom visualization.attention_visualization import createHTML\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom keras.preprocessing.sequence import pad_sequences\nimport torch.nn.functional as F\nimport torch.utils.data as data_utils\nimport os,sys\nimport json\n\n\n# to avoid certain warnings on macOS\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n \nclassified = False\nclassification_type = sys.argv[1]\n \ndef json_to_dict(json_set):\n for k,v in json_set.items():\n if v == 'False':\n json_set[k] = False\n elif v == 'True':\n json_set[k] = True\n else:\n json_set[k] = v\n return json_set\n \n \nwith open('config.json', 'r') as f:\n params_set = json.load(f)\n \nwith open('model_params.json', 'r') as f:\n model_params = json.load(f)\n \nparams_set = json_to_dict(params_set)\nmodel_params = json_to_dict(model_params)\n \nprint(\"Using settings:\",params_set)\nprint(\"Using model settings\",model_params)\n \ndef visualize_attention(wts,x_test_pad,word_to_id,filename):\n wts_add = torch.sum(wts,1)\n wts_add_np = wts_add.data.numpy()\n wts_add_list = wts_add_np.tolist()\n id_to_word = {v:k for k,v in word_to_id.items()}\n text= []\n for test in x_test_pad:\n text.append(\" \".join([id_to_word.get(i) for i in test]))\n createHTML(text, wts_add_list, filename)\n print(\"Attention visualization created for {} samples\".format(len(x_test_pad)))\n return\n \ndef binary_classfication(attention_model,train_loader,epochs=5,use_regularization=True,C=1.0,clip=True):\n loss = torch.nn.BCELoss()\n optimizer = torch.optim.RMSprop(attention_model.parameters())\n train(attention_model,train_loader,loss,optimizer,epochs,use_regularization,C,clip)\n \ndef multiclass_classification(attention_model,train_loader,epochs=5,use_regularization=True,C=1.0,clip=True):\n loss = torch.nn.NLLLoss()\n optimizer = torch.optim.RMSprop(attention_model.parameters())\n train(attention_model,train_loader,loss,optimizer,epochs,use_regularization,C,clip)\n \n \n \nMAXLENGTH = model_params['timesteps']\nif classification_type =='binary':\n \n train_loader,x_test_pad,y_test,word_to_id = load_data_set(0,MAXLENGTH,model_params[\"vocab_size\"],model_params['batch_size']) #loading imdb dataset\n \n \n if params_set[\"use_embeddings\"]:\n embeddings = load_glove_embeddings(\"glove/glove.6B.50d.txt\",word_to_id,50)\n else:\n embeddings = None\n #Can use pretrained embeddings by passing in the embeddings and setting the use_pretrained_embeddings=True\n attention_model = StructuredSelfAttention(batch_size=train_loader.batch_size,lstm_hid_dim=model_params['lstm_hidden_dimension'],d_a = model_params[\"d_a\"],r=params_set[\"attention_hops\"],vocab_size=len(word_to_id),max_len=MAXLENGTH,_type=0,n_classes=1,use_pretrained_embeddings=params_set[\"use_embeddings\"],embeddings=embeddings)\n \n #Can set use_regularization=True for penalization and clip=True for gradient clipping\n binary_classfication(attention_model,train_loader=train_loader,epochs=params_set[\"epochs\"],use_regularization=params_set[\"use_regularization\"],C=params_set[\"C\"],clip=params_set[\"clip\"])\n classified = True\n #wts = 
get_activation_wts(binary_attention_model,Variable(torch.from_numpy(x_test_pad[:]).type(torch.LongTensor)))\n #print(\"Attention weights for the testing data in binary classification are:\",wts)\n \n \nif classification_type == 'multiclass':\n train_loader,train_set,test_set,x_test_pad,word_to_id = load_data_set(1,MAXLENGTH,model_params[\"vocab_size\"],model_params['batch_size']) #load the reuters dataset\n #Using pretrained embeddings\n if params_set[\"use_embeddings\"]:\n embeddings = load_glove_embeddings(\"glove/glove.6B.50d.txt\",word_to_id,50)\n else:\n embeddings = None\n attention_model = StructuredSelfAttention(batch_size=train_loader.batch_size,lstm_hid_dim=model_params['lstm_hidden_dimension'],d_a = model_params[\"d_a\"],r=params_set[\"attention_hops\"],vocab_size=len(word_to_id),max_len=MAXLENGTH,_type=1,n_classes=46,use_pretrained_embeddings=params_set[\"use_embeddings\"],embeddings=embeddings)\n \n #Using regularization and gradient clipping at 0.5 (currently unparameterized)\n multiclass_classification(attention_model,train_loader,epochs=params_set[\"epochs\"],use_regularization=params_set[\"use_regularization\"],C=params_set[\"C\"],clip=params_set[\"clip\"])\n classified=True\n #wts = get_activation_wts(multiclass_attention_model,Variable(torch.from_numpy(x_test_pad[:]).type(torch.LongTensor)))\n #print(\"Attention weights for the data in multiclass classification are:\",wts)\nif classified:\n test_last_idx = 100\n wts = get_activation_wts(attention_model,Variable(torch.from_numpy(x_test_pad[:test_last_idx]).type(torch.LongTensor)))\n print(wts.size())\n visualize_attention(wts,x_test_pad[:test_last_idx],word_to_id,filename='attention.html')" ]
[ [ "torch.nn.NLLLoss", "torch.sum", "torch.from_numpy", "torch.nn.BCELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SamuelMarks/botorch
[ "7801e2f56dc447322b2b6c92cab683d8900e4c7f", "7801e2f56dc447322b2b6c92cab683d8900e4c7f", "7801e2f56dc447322b2b6c92cab683d8900e4c7f", "7801e2f56dc447322b2b6c92cab683d8900e4c7f", "7801e2f56dc447322b2b6c92cab683d8900e4c7f" ]
[ "botorch/utils/torch.py", "test/utils/test_testing.py", "test/test_cross_validation.py", "botorch/acquisition/fixed_feature.py", "test/posteriors/test_deterministic.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# NOTE: To be removed once (if) https://github.com/pytorch/pytorch/pull/37385 lands\n\nfrom __future__ import annotations\n\nfrom collections import OrderedDict\n\nimport torch\nfrom torch._six import container_abcs\nfrom torch.nn import Module\n\n\nclass BufferDict(Module):\n r\"\"\"Holds buffers in a dictionary.\n\n BufferDict can be indexed like a regular Python dictionary, but buffers it\n contains are properly registered, and will be visible by all Module methods.\n\n :class:`~torch.nn.BufferDict` is an **ordered** dictionary that respects\n\n * the order of insertion, and\n\n * in :meth:`~torch.nn.BufferDict.update`, the order of the merged ``OrderedDict``\n or another :class:`~torch.nn.BufferDict` (the argument to\n :meth:`~torch.nn.BufferDict.update`).\n\n Note that :meth:`~torch.nn.BufferDict.update` with other unordered mapping\n types (e.g., Python's plain ``dict``) does not preserve the order of the\n merged mapping.\n\n Arguments:\n buffers (iterable, optional): a mapping (dictionary) of\n (string : :class:`~torch.Tensor`) or an iterable of key-value pairs\n of type (string, :class:`~torch.Tensor`)\n\n Example::\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.buffers = nn.BufferDict({\n 'left': torch.randn(5, 10),\n 'right': torch.randn(5, 10)\n })\n\n def forward(self, x, choice):\n x = self.buffers[choice].mm(x)\n return x\n \"\"\"\n\n def __init__(self, buffers=None):\n super(BufferDict, self).__init__()\n if buffers is not None:\n self.update(buffers)\n\n def __getitem__(self, key):\n return self._buffers[key]\n\n def __setitem__(self, key, buffer):\n self.register_buffer(key, buffer)\n\n def __delitem__(self, key):\n del self._buffers[key]\n\n def __len__(self):\n return len(self._buffers)\n\n def __iter__(self):\n return iter(self._buffers.keys())\n\n def __contains__(self, key):\n return key in self._buffers\n\n def clear(self):\n \"\"\"Remove all items from the BufferDict.\"\"\"\n self._buffers.clear()\n\n def pop(self, key):\n r\"\"\"Remove key from the BufferDict and return its buffer.\n\n Arguments:\n key (string): key to pop from the BufferDict\n \"\"\"\n v = self[key]\n del self[key]\n return v\n\n def keys(self):\n r\"\"\"Return an iterable of the BufferDict keys.\"\"\"\n return self._buffers.keys()\n\n def items(self):\n r\"\"\"Return an iterable of the BufferDict key/value pairs.\"\"\"\n return self._buffers.items()\n\n def values(self):\n r\"\"\"Return an iterable of the BufferDict values.\"\"\"\n return self._buffers.values()\n\n def update(self, buffers):\n r\"\"\"Update the :class:`~torch.nn.BufferDict` with the key-value pairs from a\n mapping or an iterable, overwriting existing keys.\n\n .. 
note::\n If :attr:`buffers` is an ``OrderedDict``, a :class:`~torch.nn.BufferDict`,\n or an iterable of key-value pairs, the order of new elements in it is\n preserved.\n\n Arguments:\n buffers (iterable): a mapping (dictionary) from string to\n :class:`~torch.Tensor`, or an iterable of\n key-value pairs of type (string, :class:`~torch.Tensor`)\n \"\"\"\n if not isinstance(buffers, container_abcs.Iterable):\n raise TypeError(\n \"BuffersDict.update should be called with an \"\n \"iterable of key/value pairs, but got \" + type(buffers).__name__\n )\n\n if isinstance(buffers, container_abcs.Mapping):\n if isinstance(buffers, (OrderedDict, BufferDict)):\n for key, buffer in buffers.items():\n self[key] = buffer\n else:\n for key, buffer in sorted(buffers.items()):\n self[key] = buffer\n else:\n for j, p in enumerate(buffers):\n if not isinstance(p, container_abcs.Iterable):\n raise TypeError(\n \"BufferDict update sequence element \"\n \"#\" + str(j) + \" should be Iterable; is\" + type(p).__name__\n )\n if not len(p) == 2:\n raise ValueError(\n \"BufferDict update sequence element \"\n \"#\" + str(j) + \" has length \" + str(len(p)) + \"; 2 is required\"\n )\n self[p[0]] = p[1]\n\n def extra_repr(self):\n child_lines = []\n for k, p in self._buffers.items():\n size_str = \"x\".join(str(size) for size in p.size())\n device_str = \"\" if not p.is_cuda else \" (GPU {})\".format(p.get_device())\n parastr = \"Buffer containing: [{} of size {}{}]\".format(\n torch.typename(p), size_str, device_str\n )\n child_lines.append(\" (\" + k + \"): \" + parastr)\n tmpstr = \"\\n\".join(child_lines)\n return tmpstr\n\n def __call__(self, input):\n raise RuntimeError(\"BufferDict should not be called.\")\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior\n\n\nclass TestMock(BotorchTestCase):\n def test_MockPosterior(self):\n # test basic logic\n mp = MockPosterior()\n self.assertEqual(mp.device.type, \"cpu\")\n self.assertEqual(mp.dtype, torch.float32)\n self.assertEqual(mp.event_shape, torch.Size())\n self.assertEqual(\n MockPosterior(variance=torch.rand(2)).event_shape, torch.Size([2])\n )\n # test passing in tensors\n mean = torch.rand(2)\n variance = torch.eye(2)\n samples = torch.rand(1, 2)\n mp = MockPosterior(mean=mean, variance=variance, samples=samples)\n self.assertEqual(mp.device.type, \"cpu\")\n self.assertEqual(mp.dtype, torch.float32)\n self.assertTrue(torch.equal(mp.mean, mean))\n self.assertTrue(torch.equal(mp.variance, variance))\n self.assertTrue(torch.all(mp.sample() == samples.unsqueeze(0)))\n self.assertTrue(\n torch.all(mp.sample(torch.Size([2])) == samples.repeat(2, 1, 1))\n )\n with self.assertRaises(RuntimeError):\n mp.sample(sample_shape=torch.Size([2]), base_samples=torch.rand(3))\n\n def test_MockModel(self):\n mp = MockPosterior()\n mm = MockModel(mp)\n X = torch.empty(0)\n self.assertEqual(mm.posterior(X), mp)\n self.assertEqual(mm.num_outputs, 0)\n mm.state_dict()\n mm.load_state_dict()\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport warnings\n\nimport torch\nfrom botorch.cross_validation import batch_cross_validation, gen_loo_cv_folds\nfrom botorch.exceptions.warnings import OptimizationWarning\nfrom botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP\nfrom botorch.utils.testing import BotorchTestCase, _get_random_data\nfrom gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n\n\nclass TestFitBatchCrossValidation(BotorchTestCase):\n def test_single_task_batch_cv(self):\n n = 10\n for batch_shape, num_outputs, dtype in itertools.product(\n (torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)\n ):\n tkwargs = {\"device\": self.device, \"dtype\": dtype}\n train_X, train_Y = _get_random_data(\n batch_shape=batch_shape, num_outputs=num_outputs, n=n, **tkwargs\n )\n if num_outputs == 1:\n train_Y = train_Y.squeeze(-1)\n train_Yvar = torch.full_like(train_Y, 0.01)\n noiseless_cv_folds = gen_loo_cv_folds(train_X=train_X, train_Y=train_Y)\n # check shapes\n expected_shape_train_X = batch_shape + torch.Size(\n [n, n - 1, train_X.shape[-1]]\n )\n expected_shape_test_X = batch_shape + torch.Size([n, 1, train_X.shape[-1]])\n self.assertEqual(noiseless_cv_folds.train_X.shape, expected_shape_train_X)\n self.assertEqual(noiseless_cv_folds.test_X.shape, expected_shape_test_X)\n\n expected_shape_train_Y = batch_shape + torch.Size([n, n - 1, num_outputs])\n expected_shape_test_Y = batch_shape + torch.Size([n, 1, num_outputs])\n\n self.assertEqual(noiseless_cv_folds.train_Y.shape, expected_shape_train_Y)\n self.assertEqual(noiseless_cv_folds.test_Y.shape, expected_shape_test_Y)\n self.assertIsNone(noiseless_cv_folds.train_Yvar)\n self.assertIsNone(noiseless_cv_folds.test_Yvar)\n # Test SingleTaskGP\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=OptimizationWarning)\n cv_results = batch_cross_validation(\n model_cls=SingleTaskGP,\n mll_cls=ExactMarginalLogLikelihood,\n cv_folds=noiseless_cv_folds,\n fit_args={\"options\": {\"maxiter\": 1}},\n )\n expected_shape = batch_shape + torch.Size([n, 1, num_outputs])\n self.assertEqual(cv_results.posterior.mean.shape, expected_shape)\n self.assertEqual(cv_results.observed_Y.shape, expected_shape)\n\n # Test FixedNoiseGP\n noisy_cv_folds = gen_loo_cv_folds(\n train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar\n )\n # check shapes\n self.assertEqual(noisy_cv_folds.train_X.shape, expected_shape_train_X)\n self.assertEqual(noisy_cv_folds.test_X.shape, expected_shape_test_X)\n self.assertEqual(noisy_cv_folds.train_Y.shape, expected_shape_train_Y)\n self.assertEqual(noisy_cv_folds.test_Y.shape, expected_shape_test_Y)\n self.assertEqual(noisy_cv_folds.train_Yvar.shape, expected_shape_train_Y)\n self.assertEqual(noisy_cv_folds.test_Yvar.shape, expected_shape_test_Y)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=OptimizationWarning)\n cv_results = batch_cross_validation(\n model_cls=FixedNoiseGP,\n mll_cls=ExactMarginalLogLikelihood,\n cv_folds=noisy_cv_folds,\n fit_args={\"options\": {\"maxiter\": 1}},\n )\n self.assertEqual(cv_results.posterior.mean.shape, expected_shape)\n self.assertEqual(cv_results.observed_Y.shape, expected_shape)\n self.assertEqual(cv_results.observed_Y.shape, expected_shape)\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"\nA wrapper around AquisitionFunctions to fix certain features for optimization.\nThis is useful e.g. for performing contextual optimization.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import List, Union\n\nimport torch\nfrom botorch.acquisition.acquisition import AcquisitionFunction\nfrom torch import Tensor\nfrom torch.nn import Module\n\n\nclass FixedFeatureAcquisitionFunction(AcquisitionFunction):\n \"\"\"A wrapper around AquisitionFunctions to fix a subset of features.\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y) # d = 5\n >>> qEI = qExpectedImprovement(model, best_f=0.0)\n >>> columns = [2, 4]\n >>> values = X[..., columns]\n >>> qEI_FF = FixedFeatureAcquisitionFunction(qEI, 5, columns, values)\n >>> qei = qEI_FF(test_X) # d' = 3\n \"\"\"\n\n def __init__(\n self,\n acq_function: AcquisitionFunction,\n d: int,\n columns: List[int],\n values: Union[Tensor, List[float]],\n ) -> None:\n r\"\"\"Derived Acquisition Function by fixing a subset of input features.\n\n Args:\n acq_function: The base acquisition function, operating on input\n tensors `X_full` of feature dimension `d`.\n d: The feature dimension expected by `acq_function`.\n columns: `d_f < d` indices of columns in `X_full` that are to be\n fixed to the provided values.\n values: The values to which to fix the columns in `columns`. Either\n a full `batch_shape x q x d_f` tensor of values (if values are\n different for each of the `q` input points), or an array-like of\n values that is broadcastable to the input across `t`-batch and\n `q`-batch dimensions, e.g. a list of length `d_f` if values\n are the same across all `t` and `q`-batch dimensions.\n \"\"\"\n Module.__init__(self)\n self.acq_func = acq_function\n self.d = d\n values = torch.as_tensor(values).detach().clone()\n self.register_buffer(\"values\", values)\n # build selector for _construct_X_full\n self._selector = []\n idx_X, idx_f = 0, d - values.shape[-1]\n for i in range(self.d):\n if i in columns:\n self._selector.append(idx_f)\n idx_f += 1\n else:\n self._selector.append(idx_X)\n idx_X += 1\n\n def forward(self, X: Tensor):\n r\"\"\"Evaluate base acquisition function under the fixed features.\n\n Args:\n X: Input tensor of feature dimension `d' < d` such that `d' + d_f = d`.\n\n Returns:\n Base acquisition function evaluated on tensor `X_full` constructed\n by adding `values` in the appropriate places (see\n `_construct_X_full`).\n \"\"\"\n X_full = self._construct_X_full(X)\n return self.acq_func(X_full)\n\n def _construct_X_full(self, X: Tensor) -> Tensor:\n r\"\"\"Constructs the full input for the base acquisition function.\n\n Args:\n X: Input tensor with shape `batch_shape x q x d'` such that\n `d' + d_f = d`.\n\n Returns:\n Tensor `X_full` of shape `batch_shape x q x d`, where\n `X_full[..., i] = values[..., i]` if `i in columns`,\n and `X_full[..., i] = X[..., j]`, with\n `j = i - sum_{l<=i} 1_{l in fixed_colunns}`.\n \"\"\"\n d_prime, d_f = X.shape[-1], self.values.shape[-1]\n if d_prime + d_f != self.d:\n raise ValueError(\n f\"Feature dimension d' ({d_prime}) of input must be \"\n f\"d - d_f ({self.d - d_f}).\"\n )\n # concatenate values to the end\n values = self.values.to(X).expand(*X.shape[:-1], d_f)\n X_perm = torch.cat([X, values], dim=-1)\n # now select the appropriate column order\n return X_perm[..., self._selector]\n", "#!/usr/bin/env python3\n# Copyright 
(c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\n\nimport torch\nfrom botorch.posteriors.deterministic import DeterministicPosterior\nfrom botorch.utils.testing import BotorchTestCase\n\n\nclass TestDeterministicPosterior(BotorchTestCase):\n def test_DeterministicPosterior(self):\n for shape, dtype in itertools.product(\n ((3, 2), (2, 3, 1)), (torch.float, torch.double)\n ):\n values = torch.randn(*shape, device=self.device, dtype=dtype)\n p = DeterministicPosterior(values)\n self.assertEqual(p.device.type, self.device.type)\n self.assertEqual(p.dtype, dtype)\n self.assertEqual(p.event_shape, values.shape)\n self.assertTrue(torch.equal(p.mean, values))\n self.assertTrue(torch.equal(p.variance, torch.zeros_like(values)))\n # test sampling\n samples = p.rsample()\n self.assertTrue(torch.equal(samples, values.unsqueeze(0)))\n samples = p.rsample(torch.Size([2]))\n self.assertTrue(torch.equal(samples, values.expand(2, *values.shape)))\n base_samples = torch.randn(2, *shape, device=self.device, dtype=dtype)\n samples = p.rsample(torch.Size([2]), base_samples)\n self.assertTrue(torch.equal(samples, values.expand(2, *values.shape)))\n with self.assertRaises(RuntimeError):\n samples = p.rsample(\n torch.Size([2]), base_samples.expand(3, *base_samples.shape)\n )\n" ]
[ [ "torch.typename" ], [ "torch.Size", "torch.empty", "torch.eye", "torch.equal", "torch.rand" ], [ "torch.full_like", "torch.Size" ], [ "torch.as_tensor", "torch.nn.Module.__init__", "torch.cat" ], [ "torch.randn", "torch.Size", "torch.zeros_like", "torch.equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zokin/human_body_prior
[ "0278cb45180992e4d39ba1a11601f5ecc53ee148" ]
[ "src/human_body_prior/visualizations/training_visualization.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),\n# acting on behalf of its Max Planck Institute for Intelligent Systems and the\n# Max Planck Institute for Biological Cybernetics. All rights reserved.\n#\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights\n# on this computer program. You can only use this computer program if you have closed a license agreement\n# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and liable to prosecution.\n# Contact: [email protected]\n#\n#\n# If you use this code in a research publication please consider citing the following:\n#\n# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>\n#\n#\n# Code Developed by:\n# Nima Ghorbani <https://nghorbani.github.io/>\n#\n# 2020.12.12\n\ndef pyrenderer(imw=2048, imh=2048):\n\n from body_visualizer.mesh.mesh_viewer import MeshViewer\n import cv2\n\n import numpy as np\n import trimesh\n\n try:\n mv = MeshViewer(width=imw, height=imh, use_offscreen=True)\n except:\n import os\n os.environ['PYOPENGL_PLATFORM'] = 'egl'\n os.environ['EGL_DEVICE_ID'] = os.environ['GPU_DEVICE_ORDINAL'].split(',')[0]\n\n mv = MeshViewer(width=imw, height=imh, use_offscreen=True)\n\n mv.set_cam_trans([0, -0.5, 2.])\n\n def render_an_image(meshes):\n n_all = len(meshes)\n nc = int(np.sqrt(n_all))\n\n out_image = np.zeros([1, 1, 1, mv.width, mv.height, 4])\n\n scale_percent = 100./nc\n width = int(mv.width * scale_percent / 100)\n height = int(mv.height * scale_percent / 100)\n dim = (width, height)\n\n for rId in range(nc):\n for cId in range(nc):\n i = (nc*rId) + cId\n if i>len(meshes): break\n\n mesh = meshes[i]\n\n # mesh.apply_transform(trimesh.transformations.rotation_matrix(np.radians(-90), (1, 0, 0)))\n mesh.vertices -= np.median(np.array(mesh.vertices), axis=0)\n mv.set_dynamic_meshes([mesh])\n img = mv.render(render_wireframe=False, RGBA=True)\n img_resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n\n out_image[0, 0, 0, (rId*width):((rId+1)*width), (cId*height):((cId+1)*height)] = cv2.cvtColor(img_resized, cv2.COLOR_BGRA2RGBA)\n\n return out_image.astype(np.uint8)\n\n return render_an_image\n\ndef vposer_trainer_renderer(bm, num_bodies_to_display=5):\n import numpy as np\n import trimesh\n import torch\n\n from body_visualizer.tools.vis_tools import imagearray2file, colors\n from human_body_prior.tools.omni_tools import copy2cpu as c2c\n from human_body_prior.tools.omni_tools import makepath\n from trimesh import Trimesh as Mesh\n from trimesh.util import concatenate as mesh_cat\n\n renderer = pyrenderer(1024, 1024)\n\n faces = c2c(bm.f)\n\n def render_once(body_parms, body_colors=[colors['grey'], colors['brown-light']], out_fname=None):\n '''\n\n :param body_parms: list of dictionaries of body parameters.\n :param body_colors: list of np arrays of color rgb values\n :param movie_outpath: a mp4 path\n :return:\n '''\n\n if out_fname is not None: makepath(out_fname, isfile=True)\n assert len(body_parms) <= len(body_colors), ValueError('Not enough colors provided for #{} body_parms'.format(len(body_parms)))\n\n bs = body_parms[0]['pose_body'].shape[0]\n\n body_ids = np.random.choice(bs, num_bodies_to_display)\n\n body_evals = [c2c(bm(root_orient=v['root_orient'].view(bs, -1) if 'root_orient' in v else 
torch.zeros(bs, 3).type_as(v['pose_body']),\n pose_body=v['pose_body'].contiguous().view(bs, -1)).v) for v in body_parms]\n num_verts = body_evals[0].shape[1]\n\n render_meshes = []\n for bId in body_ids:\n concat_cur_meshes = None\n for body, body_color in zip(body_evals, body_colors):\n cur_body_mesh = Mesh(body[bId], faces, vertex_colors=np.ones([num_verts, 3]) * body_color)\n concat_cur_meshes = cur_body_mesh if concat_cur_meshes is None else mesh_cat(concat_cur_meshes, cur_body_mesh)\n render_meshes.append(concat_cur_meshes)\n\n img = renderer(render_meshes)\n\n if out_fname is not None: imagearray2file(img, out_fname, fps=10)\n\n\n return\n\n return render_once\n" ]
[ [ "numpy.sqrt", "numpy.random.choice", "torch.zeros", "numpy.ones", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yoohyewony/mcan
[ "5e6fa78ca76d05fa936d473d3cf7ca9c563a354e" ]
[ "core/exec_rubi.py" ]
[ "# --------------------------------------------------------\n# mcan-vqa (Deep Modular Co-Attention Networks)\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Yuhao Cui https://github.com/cuiyuhao1996\n# --------------------------------------------------------\n\nfrom core.data.load_data import DataSet\nfrom core.model.net import Net, QNet\nfrom core.model.optim import get_optim, adjust_lr\nfrom core.data.data_utils import shuffle_list\nfrom utils.vqa import VQA\nfrom utils.vqaEval import VQAEval\n\nimport os, json, torch, datetime, pickle, copy, shutil, time\nimport numpy as np\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torch.nn.functional as F\n\nimport wandb\n\nclass Execution:\n def __init__(self, __C):\n self.__C = __C\n\n print('Loading training set ........')\n self.dataset = DataSet(__C) # img_feat, ques_ix, ans\n\n self.dataset_eval = None\n if __C.EVAL_EVERY_EPOCH: # EVAL_EVERY_EPOCH = TRUE\n __C_eval = copy.deepcopy(__C)\n setattr(__C_eval, 'RUN_MODE', 'val')\n\n print('Loading validation set for per-epoch evaluation ........')\n self.dataset_eval = DataSet(__C_eval)\n\n\n def train(self, dataset, dataset_eval=None):\n\n # Obtain needed information\n data_size = dataset.data_size\n token_size = dataset.token_size\n ans_size = dataset.ans_size\n pretrained_emb = dataset.pretrained_emb\n\n # Define the MCAN model\n net = Net(\n self.__C,\n pretrained_emb,\n token_size,\n ans_size\n )\n net.cuda()\n net.train()\n \n # Define the Question-only model\n qnet = QNet(\n self.__C,\n pretrained_emb,\n token_size,\n ans_size\n )\n qnet.cuda()\n qnet.train()\n \n # Watch net & qnet\n wandb.watch(net)\n wandb.watch(qnet)\n\n # Define the multi-gpu training if needed\n if self.__C.N_GPU > 1:\n net = nn.DataParallel(net, device_ids=self.__C.DEVICES)\n\n # Define the binary cross entropy loss\n # loss_fn = torch.nn.BCELoss(size_average=False).cuda()\n loss_qm = torch.nn.BCELoss(reduction='sum').cuda()\n loss_qo = torch.nn.BCELoss(reduction='sum').cuda()\n\n # Load checkpoint if resume training\n if self.__C.RESUME: # default -> FALSE\n print(' ========== Resume training')\n\n if self.__C.CKPT_PATH is not None:\n print('Warning: you are now using CKPT_PATH args, '\n 'CKPT_VERSION and CKPT_EPOCH will not work')\n\n path = self.__C.CKPT_PATH\n else:\n path = self.__C.CKPTS_PATH + \\\n 'ckpt_' + self.__C.CKPT_VERSION + \\\n '/epoch' + str(self.__C.CKPT_EPOCH) + '.pkl'\n\n # Load the network parameters\n print('Loading ckpt {}'.format(path))\n ckpt = torch.load(path)\n print('Finish!')\n net.load_state_dict(ckpt['state_dict'])\n\n # Load the optimizer paramters\n #params = list(net.parameters()) + list(qnet.parameters())\n optim = get_optim(self.__C, net, data_size, ckpt['lr_base'])\n optim._step = int(data_size / self.__C.BATCH_SIZE * self.__C.CKPT_EPOCH)\n optim.optimizer.load_state_dict(ckpt['optimizer'])\n\n start_epoch = self.__C.CKPT_EPOCH\n\n else:\n if ('ckpt_' + self.__C.VERSION) in os.listdir(self.__C.CKPTS_PATH):\n shutil.rmtree(self.__C.CKPTS_PATH + 'ckpt_' + self.__C.VERSION)\n\n os.mkdir(self.__C.CKPTS_PATH + 'ckpt_' + self.__C.VERSION)\n\n #params = net.parameters() + qnet.parameters()\n optim = get_optim(self.__C, net, data_size)\n optim_q = get_optim(self.__C, qnet, data_size)\n start_epoch = 0\n\n loss_sum = 0\n L_qo_sum = 0\n L_qm_sum = 0\n named_params = list(net.named_parameters()) + list(qnet.named_parameters())\n grad_norm = np.zeros(len(named_params))\n\n # Define multi-thread dataloader\n if self.__C.SHUFFLE_MODE in 
['external']:\n dataloader = Data.DataLoader(\n dataset,\n batch_size=self.__C.BATCH_SIZE,\n shuffle=False,\n num_workers=self.__C.NUM_WORKERS,\n pin_memory=self.__C.PIN_MEM,\n drop_last=True\n )\n else:\n dataloader = Data.DataLoader(\n dataset,\n batch_size=self.__C.BATCH_SIZE,\n shuffle=True,\n num_workers=self.__C.NUM_WORKERS,\n pin_memory=self.__C.PIN_MEM,\n drop_last=True\n )\n\n # Training script\n for epoch in range(start_epoch, self.__C.MAX_EPOCH):\n\n # Save log information\n logfile = open(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.VERSION + '.txt',\n 'a+'\n )\n logfile.write(\n 'nowTime: ' +\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') +\n '\\n'\n )\n logfile.close()\n\n # Learning Rate Decay\n if epoch in self.__C.LR_DECAY_LIST:\n adjust_lr(optim, self.__C.LR_DECAY_R)\n adjust_lr(optim_q, self.__C.LR_DECAY_R)\n\n # Externally shuffle\n if self.__C.SHUFFLE_MODE == 'external':\n shuffle_list(dataset.ans_list)\n\n time_start = time.time()\n # Iteration\n for step, (\n img_feat_iter,\n ques_ix_iter,\n ans_iter\n ) in enumerate(dataloader):\n\n optim.zero_grad()\n optim_q.zero_grad()\n\n img_feat_iter = img_feat_iter.cuda()\n ques_ix_iter = ques_ix_iter.cuda()\n ans_iter = ans_iter.cuda()\n\n for accu_step in range(self.__C.GRAD_ACCU_STEPS):\n\n sub_img_feat_iter = \\\n img_feat_iter[accu_step * self.__C.SUB_BATCH_SIZE:\n (accu_step + 1) * self.__C.SUB_BATCH_SIZE]\n sub_ques_ix_iter = \\\n ques_ix_iter[accu_step * self.__C.SUB_BATCH_SIZE:\n (accu_step + 1) * self.__C.SUB_BATCH_SIZE]\n sub_ans_iter = \\\n ans_iter[accu_step * self.__C.SUB_BATCH_SIZE:\n (accu_step + 1) * self.__C.SUB_BATCH_SIZE]\n\n \n out, q_emb, lang_feat_mask = net(sub_img_feat_iter, sub_ques_ix_iter)\n pred_qo, q_out = qnet(q_emb, lang_feat_mask)\n #print(pred_qo.shape, sub_ans_iter.shape)\n #print(torch.argmax(sub_ans_iter.long(), dim=1))\n ans_idx = torch.argmax(sub_ans_iter.long(), dim=1)\n pred_idx = torch.argmax(pred_qo.long(), dim=1) # predicted answer index from QO\n qo_scale = pred_qo.detach().clone()\n for i in range(self.__C.SUB_BATCH_SIZE):\n if (ans_idx[i] == pred_idx[i]):\n qo_scale[i, :] = torch.ones(3129)\n \n L_qo = loss_qo(q_out, sub_ans_iter)\n L_qm = loss_qm(torch.sigmoid(out*torch.sigmoid(qo_scale)), sub_ans_iter)\n \n #L_qo = loss_qo(q_out, sub_ans_iter)\n #L_qm = loss_qm(torch.sigmoid(out*torch.sigmoid(pred_qo)), sub_ans_iter)\n\n loss = L_qo + L_qm\n \n # only mean-reduction needs be divided by grad_accu_steps\n # removing this line wouldn't change our results because the speciality of Adam optimizer,\n # but would be necessary if you use SGD optimizer.\n # loss /= self.__C.GRAD_ACCU_STEPS\n loss.backward()\n loss_sum += loss.cpu().data.numpy() * self.__C.GRAD_ACCU_STEPS\n L_qo_sum += L_qo.cpu().data.numpy() * self.__C.GRAD_ACCU_STEPS\n L_qm_sum += L_qm.cpu().data.numpy() * self.__C.GRAD_ACCU_STEPS\n \n wandb.log({\"Training loss\": loss.cpu().data.numpy() / self.__C.SUB_BATCH_SIZE,\n \"Question only loss\": L_qo.cpu().data.numpy() / self.__C.SUB_BATCH_SIZE,\n \"Fusion loss\": L_qm.cpu().data.numpy() / self.__C.SUB_BATCH_SIZE}) # Tracking training loss\n\n if self.__C.VERBOSE: # print loss every step -> TRUE\n if dataset_eval is not None:\n mode_str = self.__C.SPLIT['train'] + '->' + self.__C.SPLIT['val']\n else:\n mode_str = self.__C.SPLIT['train'] + '->' + self.__C.SPLIT['test']\n\n print(\"\\r[version %s][epoch %2d][step %4d/%4d][%s] loss: %.4f, lr: %.2e\" % (\n self.__C.VERSION,\n epoch + 1,\n step,\n int(data_size / self.__C.BATCH_SIZE),\n mode_str,\n 
loss.cpu().data.numpy() / self.__C.SUB_BATCH_SIZE,\n optim._rate\n ), end=' ')\n\n # Gradient norm clipping\n if self.__C.GRAD_NORM_CLIP > 0:\n nn.utils.clip_grad_norm_(\n net.parameters(),\n self.__C.GRAD_NORM_CLIP\n )\n\n # Save the gradient information\n for name in range(len(named_params)):\n norm_v = torch.norm(named_params[name][1].grad).cpu().data.numpy() \\\n if named_params[name][1].grad is not None else 0\n grad_norm[name] += norm_v * self.__C.GRAD_ACCU_STEPS\n # print('Param %-3s Name %-80s Grad_Norm %-20s'%\n # (str(grad_wt),\n # params[grad_wt][0],\n # str(norm_v)))\n\n optim.step()\n optim_q.step()\n\n time_end = time.time()\n print('Finished in {}s'.format(int(time_end-time_start)))\n\n # print('')\n epoch_finish = epoch + 1\n\n # Save checkpoint\n state = {\n 'state_dict': net.state_dict(),\n 'optimizer': optim.optimizer.state_dict(),\n 'lr_base': optim.lr_base\n }\n torch.save(\n state,\n self.__C.CKPTS_PATH +\n 'ckpt_' + self.__C.VERSION +\n '/epoch' + str(epoch_finish) +\n '.pkl'\n )\n\n # Logging\n logfile = open(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.VERSION + '.txt',\n 'a+'\n )\n logfile.write(\n 'epoch = ' + str(epoch_finish) +\n ' Q loss = ' + str(L_qo_sum / data_size) +\n ' fusion loss = ' + str(L_qm_sum / data_size) +\n ' loss = ' + str(loss_sum / data_size) +\n '\\n' +\n 'lr = ' + str(optim._rate) +\n '\\n\\n'\n )\n logfile.close()\n\n # Eval after every epoch\n if dataset_eval is not None:\n self.eval(\n dataset_eval,\n state_dict=net.state_dict(),\n valid=True\n )\n\n # if self.__C.VERBOSE:\n # logfile = open(\n # self.__C.LOG_PATH +\n # 'log_run_' + self.__C.VERSION + '.txt',\n # 'a+'\n # )\n # for name in range(len(named_params)):\n # logfile.write(\n # 'Param %-3s Name %-80s Grad_Norm %-25s\\n' % (\n # str(name),\n # named_params[name][0],\n # str(grad_norm[name] / data_size * self.__C.BATCH_SIZE)\n # )\n # )\n # logfile.write('\\n')\n # logfile.close()\n\n loss_sum = 0\n L_qo_sum = 0\n L_qm_sum = 0\n grad_norm = np.zeros(len(named_params))\n\n\n # Evaluation\n def eval(self, dataset, state_dict=None, valid=False):\n\n # Load parameters\n if self.__C.CKPT_PATH is not None:\n print('Warning: you are now using CKPT_PATH args, '\n 'CKPT_VERSION and CKPT_EPOCH will not work')\n\n path = self.__C.CKPT_PATH\n else:\n path = self.__C.CKPTS_PATH + \\\n 'ckpt_' + self.__C.CKPT_VERSION + \\\n '/epoch' + str(self.__C.CKPT_EPOCH) + '.pkl'\n\n val_ckpt_flag = False\n if state_dict is None:\n val_ckpt_flag = True\n print('Loading ckpt {}'.format(path))\n state_dict = torch.load(path)['state_dict']\n print('Finish!')\n\n # Store the prediction list\n qid_list = [ques['question_id'] for ques in dataset.ques_list]\n ans_ix_list = []\n pred_list = []\n\n data_size = dataset.data_size\n token_size = dataset.token_size\n ans_size = dataset.ans_size\n pretrained_emb = dataset.pretrained_emb\n\n net = Net(\n self.__C,\n pretrained_emb,\n token_size,\n ans_size\n )\n net.cuda()\n net.eval()\n\n if self.__C.N_GPU > 1:\n net = nn.DataParallel(net, device_ids=self.__C.DEVICES)\n\n net.load_state_dict(state_dict)\n\n dataloader = Data.DataLoader(\n dataset,\n batch_size=self.__C.EVAL_BATCH_SIZE,\n shuffle=False,\n num_workers=self.__C.NUM_WORKERS,\n pin_memory=True\n )\n\n for step, (\n img_feat_iter,\n ques_ix_iter,\n ans_iter\n ) in enumerate(dataloader):\n print(\"\\rEvaluation: [step %4d/%4d]\" % (\n step,\n int(data_size / self.__C.EVAL_BATCH_SIZE),\n ), end=' ')\n\n img_feat_iter = img_feat_iter.cuda()\n ques_ix_iter = ques_ix_iter.cuda()\n\n pred = net(\n 
img_feat_iter,\n ques_ix_iter\n )\n #print(pred)\n pred_np = pred[0].cpu().data.numpy()\n pred_argmax = np.argmax(pred_np, axis=1)\n\n # Save the answer index\n if pred_argmax.shape[0] != self.__C.EVAL_BATCH_SIZE:\n pred_argmax = np.pad(\n pred_argmax,\n (0, self.__C.EVAL_BATCH_SIZE - pred_argmax.shape[0]),\n mode='constant',\n constant_values=-1\n )\n\n ans_ix_list.append(pred_argmax)\n\n # Save the whole prediction vector\n if self.__C.TEST_SAVE_PRED:\n if pred_np.shape[0] != self.__C.EVAL_BATCH_SIZE:\n pred_np = np.pad(\n pred_np,\n ((0, self.__C.EVAL_BATCH_SIZE - pred_np.shape[0]), (0, 0)),\n mode='constant',\n constant_values=-1\n )\n\n pred_list.append(pred_np)\n\n print('')\n ans_ix_list = np.array(ans_ix_list).reshape(-1)\n\n result = [{\n 'answer': dataset.ix_to_ans[str(ans_ix_list[qix])], # ix_to_ans(load with json) keys are type of string\n 'question_id': int(qid_list[qix])\n }for qix in range(qid_list.__len__())]\n\n # Write the results to result file\n if valid:\n if val_ckpt_flag:\n result_eval_file = \\\n self.__C.CACHE_PATH + \\\n 'result_run_' + self.__C.CKPT_VERSION + \\\n '.json'\n else:\n result_eval_file = \\\n self.__C.CACHE_PATH + \\\n 'result_run_' + self.__C.VERSION + \\\n '.json'\n\n else:\n if self.__C.CKPT_PATH is not None:\n result_eval_file = \\\n self.__C.RESULT_PATH + \\\n 'result_run_' + self.__C.CKPT_VERSION + \\\n '.json'\n else:\n result_eval_file = \\\n self.__C.RESULT_PATH + \\\n 'result_run_' + self.__C.CKPT_VERSION + \\\n '_epoch' + str(self.__C.CKPT_EPOCH) + \\\n '.json'\n\n print('Save the result to file: {}'.format(result_eval_file))\n\n json.dump(result, open(result_eval_file, 'w'))\n\n # Save the whole prediction vector\n if self.__C.TEST_SAVE_PRED:\n\n if self.__C.CKPT_PATH is not None:\n ensemble_file = \\\n self.__C.PRED_PATH + \\\n 'result_run_' + self.__C.CKPT_VERSION + \\\n '.json'\n else:\n ensemble_file = \\\n self.__C.PRED_PATH + \\\n 'result_run_' + self.__C.CKPT_VERSION + \\\n '_epoch' + str(self.__C.CKPT_EPOCH) + \\\n '.json'\n\n print('Save the prediction vector to file: {}'.format(ensemble_file))\n\n pred_list = np.array(pred_list).reshape(-1, ans_size)\n result_pred = [{\n 'pred': pred_list[qix],\n 'question_id': int(qid_list[qix])\n }for qix in range(qid_list.__len__())]\n\n pickle.dump(result_pred, open(ensemble_file, 'wb+'), protocol=-1)\n\n\n # Run validation script\n if valid:\n # create vqa object and vqaRes object\n ques_file_path = self.__C.QUESTION_PATH['test']\n ans_file_path = self.__C.ANSWER_PATH['test']\n\n vqa = VQA(ans_file_path, ques_file_path)\n vqaRes = vqa.loadRes(result_eval_file, ques_file_path)\n\n # create vqaEval object by taking vqa and vqaRes\n vqaEval = VQAEval(vqa, vqaRes, n=2) # n is precision of accuracy (number of places after decimal), default is 2\n\n # evaluate results\n \"\"\"\n If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function\n By default it uses all the question ids in annotation file\n \"\"\"\n vqaEval.evaluate()\n\n # print accuracies\n print(\"\\n\")\n print(\"Overall Accuracy is: %.02f\\n\" % (vqaEval.accuracy['overall']))\n # print(\"Per Question Type Accuracy is the following:\")\n # for quesType in vqaEval.accuracy['perQuestionType']:\n # print(\"%s : %.02f\" % (quesType, vqaEval.accuracy['perQuestionType'][quesType]))\n # print(\"\\n\")\n print(\"Per Answer Type Accuracy is the following:\")\n for ansType in vqaEval.accuracy['perAnswerType']:\n print(\"%s : %.02f\" % (ansType, 
vqaEval.accuracy['perAnswerType'][ansType]))\n print(\"\\n\")\n\n if val_ckpt_flag:\n print('Write to log file: {}'.format(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.CKPT_VERSION + '.txt',\n 'a+')\n )\n\n logfile = open(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.CKPT_VERSION + '.txt',\n 'a+'\n )\n\n else:\n print('Write to log file: {}'.format(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.VERSION + '.txt',\n 'a+')\n )\n\n logfile = open(\n self.__C.LOG_PATH +\n 'log_run_' + self.__C.VERSION + '.txt',\n 'a+'\n )\n\n logfile.write(\"Overall Accuracy is: %.02f\\n\" % (vqaEval.accuracy['overall']))\n for ansType in vqaEval.accuracy['perAnswerType']:\n logfile.write(\"%s : %.02f \" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))\n logfile.write(\"\\n\\n\")\n logfile.close()\n\n\n def run(self, run_mode):\n if run_mode == 'train':\n self.empty_log(self.__C.VERSION)\n self.train(self.dataset, self.dataset_eval)\n\n elif run_mode == 'val':\n self.eval(self.dataset, valid=True)\n\n elif run_mode == 'test':\n self.eval(self.dataset, valid=True)\n\n else:\n exit(-1)\n\n\n def empty_log(self, version):\n print('Initializing log file ........')\n if (os.path.exists(self.__C.LOG_PATH + 'log_run_' + version + '.txt')):\n os.remove(self.__C.LOG_PATH + 'log_run_' + version + '.txt')\n print('Finished!')\n print('')\n\n" ]
[ [ "torch.sigmoid", "torch.norm", "torch.ones", "numpy.pad", "torch.load", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "numpy.argmax", "torch.nn.DataParallel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Quik-e/Agilent-33220a-Remote-Control
[ "279b546b2ebd430fe137966130ad0f8c6410863c" ]
[ "agilent33220a_test.py" ]
[ "import agilent33220a as agi\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport arbitraryfunctions as af\r\n\r\ninstrument=agi.connect_device()\r\n#t=np.linspace(-32768,32767)\r\n#function=np.round(t/6553.6)\r\nfunction=af.mati()\r\nplt.plot(function)\r\nplt.show()\r\nprint(len(function))\r\ninstrument.load_signal(function,\"MATI\") #Solo acepta letras, numeros y '_' el nombre de la funcion\r\ninstrument.output_af(\"MATI\",6,5000,99)\r\ninstrument.output_on()\r\ninstrument.catalog()\r\ninstrument.af_attributes(\"MATI\")\r\n#instrument.erase_af(\"QUIQUE_FUNC\")\r\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dwarf-miner/midas
[ "68ff19da4a1f1a095b9c37e2fd53b77a2e27e562" ]
[ "vision/common.py" ]
[ "#!/usr/bin/env python\r\n\r\n'''\r\nThis module contains some common routines used by other samples.\r\n'''\r\n\r\n# Python 2/3 compatibility\r\nfrom __future__ import print_function\r\nimport sys\r\nPY3 = sys.version_info[0] == 3\r\n\r\nif PY3:\r\n from functools import reduce\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n# built-in modules\r\nimport os\r\nimport itertools as it\r\nfrom contextlib import contextmanager\r\n\r\nimage_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']\r\n\r\nclass Bunch(object):\r\n def __init__(self, **kw):\r\n self.__dict__.update(kw)\r\n def __str__(self):\r\n return str(self.__dict__)\r\n\r\ndef splitfn(fn):\r\n path, fn = os.path.split(fn)\r\n name, ext = os.path.splitext(fn)\r\n return path, name, ext\r\n\r\ndef anorm2(a):\r\n return (a*a).sum(-1)\r\ndef anorm(a):\r\n return np.sqrt( anorm2(a) )\r\n\r\ndef homotrans(H, x, y):\r\n xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]\r\n ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]\r\n s = H[2, 0]*x + H[2, 1]*y + H[2, 2]\r\n return xs/s, ys/s\r\n\r\ndef to_rect(a):\r\n a = np.ravel(a)\r\n if len(a) == 2:\r\n a = (0, 0, a[0], a[1])\r\n return np.array(a, np.float64).reshape(2, 2)\r\n\r\ndef rect2rect_mtx(src, dst):\r\n src, dst = to_rect(src), to_rect(dst)\r\n cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])\r\n tx, ty = dst[0] - src[0] * (cx, cy)\r\n M = np.float64([[ cx, 0, tx],\r\n [ 0, cy, ty],\r\n [ 0, 0, 1]])\r\n return M\r\n\r\n\r\ndef lookat(eye, target, up = (0, 0, 1)):\r\n fwd = np.asarray(target, np.float64) - eye\r\n fwd /= anorm(fwd)\r\n right = np.cross(fwd, up)\r\n right /= anorm(right)\r\n down = np.cross(fwd, right)\r\n R = np.float64([right, down, fwd])\r\n tvec = -np.dot(R, eye)\r\n return R, tvec\r\n\r\ndef mtx2rvec(R):\r\n w, u, vt = cv2.SVDecomp(R - np.eye(3))\r\n p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])\r\n c = np.dot(vt[0], p)\r\n s = np.dot(vt[1], p)\r\n axis = np.cross(vt[0], vt[1])\r\n return axis * np.arctan2(s, c)\r\n\r\ndef draw_str(dst, target, s):\r\n x, y = target\r\n cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)\r\n cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)\r\n\r\nclass Sketcher:\r\n def __init__(self, windowname, dests, colors_func):\r\n self.prev_pt = None\r\n self.windowname = windowname\r\n self.dests = dests\r\n self.colors_func = colors_func\r\n self.dirty = False\r\n self.show()\r\n cv2.setMouseCallback(self.windowname, self.on_mouse)\r\n\r\n def show(self):\r\n cv2.imshow(self.windowname, self.dests[0])\r\n\r\n def on_mouse(self, event, x, y, flags, param):\r\n pt = (x, y)\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n self.prev_pt = pt\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n self.prev_pt = None\r\n\r\n if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:\r\n for dst, color in zip(self.dests, self.colors_func()):\r\n cv2.line(dst, self.prev_pt, pt, color, 5)\r\n self.dirty = True\r\n self.prev_pt = pt\r\n self.show()\r\n\r\n\r\n# palette data from matplotlib/_cm.py\r\n_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),\r\n (1, 0.5, 0.5)),\r\n 'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),\r\n (0.91,0,0), (1, 0, 0)),\r\n 'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),\r\n (1, 0, 0))}\r\n\r\ncmap_data = { 'jet' : _jet_data }\r\n\r\ndef make_cmap(name, n=256):\r\n data = cmap_data[name]\r\n xs = np.linspace(0.0, 1.0, n)\r\n channels = []\r\n eps = 1e-6\r\n for ch_name in 
['blue', 'green', 'red']:\r\n ch_data = data[ch_name]\r\n xp, yp = [], []\r\n for x, y1, y2 in ch_data:\r\n xp += [x, x+eps]\r\n yp += [y1, y2]\r\n ch = np.interp(xs, xp, yp)\r\n channels.append(ch)\r\n return np.uint8(np.array(channels).T*255)\r\n\r\ndef nothing(*arg, **kw):\r\n pass\r\n\r\ndef clock():\r\n return cv2.getTickCount() / cv2.getTickFrequency()\r\n\r\n@contextmanager\r\ndef Timer(msg):\r\n print(msg, '...',)\r\n start = clock()\r\n try:\r\n yield\r\n finally:\r\n print(\"%.2f ms\" % ((clock()-start)*1000))\r\n\r\nclass StatValue:\r\n def __init__(self, smooth_coef = 0.5):\r\n self.value = None\r\n self.smooth_coef = smooth_coef\r\n def update(self, v):\r\n if self.value is None:\r\n self.value = v\r\n else:\r\n c = self.smooth_coef\r\n self.value = c * self.value + (1.0-c) * v\r\n\r\nclass RectSelector:\r\n def __init__(self, win, callback):\r\n self.win = win\r\n self.callback = callback\r\n cv2.setMouseCallback(win, self.onmouse)\r\n self.drag_start = None\r\n self.drag_rect = None\r\n def onmouse(self, event, x, y, flags, param):\r\n x, y = np.int16([x, y]) # BUG\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n self.drag_start = (x, y)\r\n if self.drag_start:\r\n if flags & cv2.EVENT_FLAG_LBUTTON:\r\n xo, yo = self.drag_start\r\n x0, y0 = np.minimum([xo, yo], [x, y])\r\n x1, y1 = np.maximum([xo, yo], [x, y])\r\n self.drag_rect = None\r\n if x1-x0 > 0 and y1-y0 > 0:\r\n self.drag_rect = (x0, y0, x1, y1)\r\n else:\r\n rect = self.drag_rect\r\n self.drag_start = None\r\n self.drag_rect = None\r\n if rect:\r\n self.callback(rect)\r\n def draw(self, vis):\r\n if not self.drag_rect:\r\n return False\r\n x0, y0, x1, y1 = self.drag_rect\r\n cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)\r\n return True\r\n @property\r\n def dragging(self):\r\n return self.drag_rect is not None\r\n\r\n\r\ndef grouper(n, iterable, fillvalue=None):\r\n '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''\r\n args = [iter(iterable)] * n\r\n if PY3:\r\n output = it.zip_longest(fillvalue=fillvalue, *args)\r\n else:\r\n output = it.izip_longest(fillvalue=fillvalue, *args)\r\n return output\r\n\r\ndef mosaic(w, imgs):\r\n '''Make a grid from images.\r\n\r\n w -- number of grid columns\r\n imgs -- images (must have same size and format)\r\n '''\r\n imgs = iter(imgs)\r\n if PY3:\r\n img0 = next(imgs)\r\n else:\r\n img0 = imgs.next()\r\n pad = np.zeros_like(img0)\r\n imgs = it.chain([img0], imgs)\r\n rows = grouper(w, imgs, pad)\r\n return np.vstack(map(np.hstack, rows))\r\n\r\ndef getsize(img):\r\n h, w = img.shape[:2]\r\n return w, h\r\n\r\ndef mdot(*args):\r\n return reduce(np.dot, args)\r\n\r\ndef draw_keypoints(vis, keypoints, color = (0, 255, 255)):\r\n for kp in keypoints:\r\n x, y = kp.pt\r\n cv2.circle(vis, (int(x), int(y)), 2, color)\r\n" ]
[ [ "numpy.dot", "numpy.minimum", "numpy.maximum", "numpy.linspace", "numpy.asarray", "numpy.eye", "numpy.int16", "numpy.arctan2", "numpy.zeros_like", "numpy.float64", "numpy.interp", "numpy.cross", "numpy.ravel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adamomainz/pytorch
[ "09b90612c464c499a6ce6b92e57ba64546ba6adb", "09b90612c464c499a6ce6b92e57ba64546ba6adb" ]
[ "test/fx2trt/converters/acc_op/test_split.py", "test/test_fx_experimental.py" ]
[ "import torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.nn as nn\nfrom torch.testing._internal.common_fx2trt import AccTestCase\nfrom parameterized import parameterized\n\n\nclass TestSplitConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"split_size\", 3, 1),\n (\"sections\", [5, 2, 3], 1),\n ]\n )\n def test_split(self, _, split_size_or_sections, dim):\n class Split(nn.Module):\n def forward(self, x):\n return x.split(split_size_or_sections, dim)[0]\n\n inputs = [torch.randn(1, 10)]\n self.run_test(\n Split(),\n inputs,\n expected_ops={\n acc_ops.split\n if isinstance(split_size_or_sections, int)\n else acc_ops.slice_tensor\n },\n test_explicit_batch_dim=False,\n )\n", "import math\nimport numbers\nimport operator\nimport sys\nimport unittest\nfrom typing import Callable, Dict, Union, List, Optional\n\nimport torch\nimport torch.fx.experimental.optimization as optimization\nfrom torch.fx._symbolic_trace import symbolic_trace\nfrom torch.fx.experimental import merge_matmul\nfrom torch.fx.experimental.accelerator_partitioner import Partitioner\nfrom torch.fx.experimental.normalize import NormalizeOperators, NormalizeArgs\nfrom torch.fx.passes import graph_manipulation\nfrom torch.fx.passes.param_fetch import lift_lowering_attrs_to_nodes\nfrom torch.fx.experimental.partitioner_utils import (\n NodeLatency,\n get_partition_to_latency_mapping,\n get_latency_of_partitioned_graph,\n Device,\n PartitionerConfig,\n PartitionMode,\n)\nfrom torch.fx.experimental.rewriter import RewritingTracer\nfrom torch.fx.experimental.schema_type_annotation import AnnotateTypesWithSchema\nfrom torch.fx.graph_module import GraphModule\nfrom torch.fx.node import Node\nfrom torch.fx.operator_schemas import (\n _torchscript_type_to_python_type,\n normalize_function,\n normalize_module,\n type_matches,\n create_type_hint,\n)\nfrom torch.fx.passes.shape_prop import _extract_tensor_metadata, ShapeProp\nfrom torch.fx.passes.split_module import split_module\nfrom torch.testing._internal.common_device_type import (\n ops,\n onlyCPU,\n instantiate_device_type_tests,\n)\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_nn import module_tests, new_module_tests\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch.testing._internal.jit_utils import JitTestCase\n\ntry:\n import torchvision.models\n from torchvision.models import resnet18\n\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\nskipIfNoMkldnn = unittest.skipIf(\n not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()),\n \"no MKLDNN\",\n)\n\n\ndef symbolic_trace_with_rewrite(root: Union[torch.nn.Module, Callable]) -> GraphModule:\n return GraphModule(\n root if isinstance(root, torch.nn.Module) else torch.nn.Module(),\n RewritingTracer().trace(root),\n )\n\n\nclass TestFXExperimental(JitTestCase):\n def test_serialize_graph(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n self.e = torch.rand(4)\n self.conv = torch.nn.Conv2d(3, 3, 2, bias=False)\n\n def forward(self, a, b, c):\n add_1 = a + b\n conv1 = self.conv(c)\n linear = self.linear(add_1 + conv1)\n add_2 = linear + self.e\n return add_2\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n b = torch.rand(4)\n c = torch.rand(3, 3, 2, 2)\n graph_manipulation.get_size_of_all_nodes(traced, 
[a, b, c])\n\n partitioner = Partitioner()\n devices = [Device(\"dev_0\", 5000, 0), Device(\"dev_1\", 125, 1)]\n partitioner_config = PartitionerConfig(devices, PartitionMode.sparse_nn)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n # Fix for now to add type/shape to output\n for node in traced.graph.nodes:\n if node.op == \"output\":\n node.meta[\"tensor_meta\"] = _extract_tensor_metadata(a)\n for mod in module_with_submodules.modules():\n if isinstance(mod, GraphModule):\n for node in mod.graph.nodes:\n node.meta[\"tensor_meta\"] = _extract_tensor_metadata(a)\n for node in module_with_submodules.graph.nodes:\n node.meta[\"tensor_meta\"] = _extract_tensor_metadata(a)\n\n weights1 = {}\n weights2 = {}\n serialized_graph1 = graph_manipulation.serialize_module(traced, weights1)\n serialized_graph2 = graph_manipulation.serialize_module(\n module_with_submodules, weights2\n )\n assert len(weights1) == 4\n assert len(weights2) == 4\n assert len(serialized_graph1[\"nodes\"]) == 10\n assert len(serialized_graph1[\"weights\"]) == 4\n assert len(serialized_graph1[\"modules\"]) == 0\n assert len(serialized_graph2[\"nodes\"]) == 6\n assert len(serialized_graph2[\"weights\"]) == 4\n assert len(serialized_graph2[\"modules\"]) == 1\n assert serialized_graph1[\"weights\"][\"linear.weight\"][\"shape\"] == \"[4, 4]\"\n assert serialized_graph1[\"weights\"][\"linear.weight\"][\"dtype\"] == \"torch.float32\"\n assert serialized_graph1[\"weights\"][\"linear.weight\"][\"is_quantized\"] is False\n assert serialized_graph1[\"nodes\"][0][\"shape\"] == \"[4]\"\n assert serialized_graph1[\"nodes\"][0][\"dtype\"] == \"torch.float32\"\n assert serialized_graph1[\"nodes\"][0][\"target\"] == \"a\"\n assert serialized_graph1[\"nodes\"][0][\"op_code\"] == \"placeholder\"\n assert serialized_graph1[\"nodes\"][0][\"name\"] == \"a\"\n assert serialized_graph1[\"nodes\"][6][\"args\"][0][\"name\"] == \"add_1\"\n assert serialized_graph1[\"nodes\"][6][\"args\"][0][\"is_node\"] is True\n\n # Test the users of the nodes. 
No users of the last/output node.\n assert serialized_graph2[\"nodes\"][0][\"users\"][0][\"name\"] == \"submod_0\"\n assert serialized_graph2[\"nodes\"][1][\"users\"][0][\"name\"] == \"submod_0\"\n assert serialized_graph2[\"nodes\"][4][\"users\"][0][\"name\"] == \"output\"\n assert serialized_graph2[\"nodes\"][5][\"users\"] == []\n\n # Test quantization info serialization.\n x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])\n q_tensor = torch.quantize_per_tensor(x, 1, 0, torch.qint32)\n q_tensor_channel = torch.quantize_per_channel(\n x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8\n )\n result, _ = graph_manipulation.serialize_tensor_quantization(\n q_tensor, weights={}, pcq_prefix=\"foo\"\n )\n result2, per_channel_dict = graph_manipulation.serialize_tensor_quantization(\n q_tensor_channel, weights={}, pcq_prefix=\"bar\"\n )\n assert result[\"qscheme\"] == \"torch.per_tensor_affine\"\n assert result[\"q_scale\"] == 1.0\n assert result2[\"qscheme\"] == \"torch.per_channel_affine\"\n assert result2[\"q_per_channel_scales\"] == \"bar_per_channel_scales\"\n assert per_channel_dict[\"bar_per_channel_zero_points\"][\"shape\"] == \"[2]\"\n\n def test_find_single_partition(self):\n class TestModule(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(1)\n b = torch.rand(1)\n graph_manipulation.get_size_of_all_nodes(traced, [a, b])\n partitioner = Partitioner()\n devices = [\n Device(\"dev_0\", 125, 0),\n Device(\"dev_1\", 150, 1),\n Device(\"dev_2\", 125, 2),\n ]\n partitioner_config = PartitionerConfig(devices)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(traced(a, b), module_with_submodules(a, b))\n assert dag.nodes[0].logical_device_ids == [1]\n\n def test_lack_of_devices(self):\n class TestModule(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n b = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a, b])\n partitioner = Partitioner()\n devices = [Device(\"dev_0\", 4, 0), Device(\"dev_1\", 4, 1)]\n partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)\n catch_runtime_error = False\n try:\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n except RuntimeError:\n catch_runtime_error = True\n assert catch_runtime_error\n\n def test_large_node_error(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a):\n linear = self.linear(a)\n add = linear + a\n return add\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n partitioner = Partitioner()\n devices = [\n Device(\"dev_0\", 40, 0),\n Device(\"dev_1\", 40, 0),\n Device(\"dev_2\", 40, 0),\n Device(\"dev_3\", 40, 0),\n Device(\"dev_4\", 40, 0),\n ]\n partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)\n catch_runtime_error = False\n try:\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n except RuntimeError:\n catch_runtime_error = True\n assert catch_runtime_error\n\n def test_partition_node_manipulation(self):\n class TestModule(torch.nn.Module):\n def forward(self, a, b):\n add_1 = a + b\n add_2 = add_1 + torch.rand(4)\n add_3 = add_2 + torch.rand(4)\n return add_3\n\n m = TestModule()\n traced = 
symbolic_trace(m)\n a, b = torch.rand(4), torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a, b])\n partitioner = Partitioner()\n devices = [Device(\"dev_0\", 1000, 0)]\n partitioner_config = PartitionerConfig(devices)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n partition = partitioner.partitions[0]\n assert partition.used_mem_bytes == 112\n # Select add_2 node to remove\n selected_node = None\n for node in partition.nodes:\n if node.name == \"add_2\":\n selected_node = node\n partition.remove_node(selected_node)\n assert partition.used_mem_bytes == 80\n\n def test_size_based_partition(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n self.c = torch.rand(4)\n\n def forward(self, a, b):\n add_1 = a + b\n linear = self.linear(add_1)\n add_2 = linear + self.c\n return add_2\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n b = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a, b])\n partitioner = Partitioner()\n devices = [\n Device(\"dev_0\", 125, 0),\n Device(\"dev_1\", 125, 1),\n Device(\"dev_2\", 125, 2),\n ]\n partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(traced(a, b), module_with_submodules(a, b))\n for i, node in enumerate(dag.nodes):\n assert node.logical_device_ids == [i]\n\n def test_partition_device_mapping(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a):\n b = torch.rand(4)\n add_1 = a + b\n linear_1 = self.linear(add_1)\n add_2 = torch.rand(4) + a\n add_3 = add_2 + linear_1\n return add_3\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n partitioner = Partitioner()\n devices = [Device(\"dev_0\", 120, 0), Device(\"dev_1\", 160, 1)]\n partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(traced(a), module_with_submodules(a))\n for i, node in enumerate(dag.nodes):\n if i == 1:\n assert node.logical_device_ids == [1]\n else:\n assert node.logical_device_ids == [0]\n\n def test_sparse_nn_partition(self):\n class MyRecommendationModule(torch.nn.Module):\n def create_mlp(self, num_of_layers: int, input_size: int, output_size: int):\n layers = torch.nn.ModuleList()\n for _ in range(num_of_layers):\n ll = torch.nn.Linear(input_size, output_size)\n layers.append(ll)\n layers.append(torch.nn.ReLU())\n return layers\n\n def __init__(self):\n super(MyRecommendationModule, self).__init__()\n layers = self.create_mlp(4, 4, 4)\n self.bottom_layers = torch.nn.Sequential(*layers)\n layers = self.create_mlp(3, 24, 24)\n self.top_layers = torch.nn.Sequential(*layers)\n self.embedding_layers = torch.nn.ModuleList()\n el = torch.nn.EmbeddingBag(500000, 4, mode=\"sum\", sparse=True)\n self.embedding_layers.append(el)\n for i in range(3):\n el = torch.nn.EmbeddingBag(1000000, 4, mode=\"sum\", sparse=True)\n self.embedding_layers.append(el)\n el = torch.nn.EmbeddingBag(500000, 4, mode=\"sum\", sparse=True)\n self.embedding_layers.append(el)\n\n def forward(self, a, b, offset):\n x = self.bottom_layers(a)\n y = []\n c = 
[]\n for i in range(len(self.embedding_layers)):\n temp = torch.randint(10, (8,))\n c.append(temp + b)\n for i in range(len(self.embedding_layers)):\n if i % 2 == 0:\n y.append(self.embedding_layers[i](c[i], offset))\n else:\n y.append(\n self.embedding_layers[i](torch.randint(10, (8,)), offset)\n )\n z = torch.cat([x] + y, dim=1)\n p = self.top_layers(z)\n return p\n\n m = MyRecommendationModule()\n a = torch.rand(2, 4)\n b = torch.randint(10, (8,))\n offset = torch.randint(1, (2,))\n traced = symbolic_trace(m)\n graph_manipulation.get_size_of_all_nodes(traced, [a, b, offset])\n devices = [\n Device(\"dev_0\", 33000000, 0),\n Device(\"dev_1\", 33000000, 1),\n Device(\"dev_2\", 33000000, 2),\n ]\n partitioner_config = PartitionerConfig(devices, PartitionMode.sparse_nn)\n partitioner = Partitioner()\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(traced(a, b, offset), module_with_submodules(a, b, offset))\n assert len(module_with_submodules.graph.nodes) == 24\n\n def test_partition_latency(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super(TestModule, self).__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a):\n add_1 = a + torch.rand(4)\n add_2 = add_1 + torch.rand(4)\n linear_1 = self.linear(add_1)\n add_3 = add_2 + linear_1\n add_4 = add_2 + add_3\n return add_4\n\n def get_node_to_latency_mapping(fx_module: GraphModule):\n \"\"\"Given a fx module, generate node latency for each node\n based on the size of each node\n \"\"\"\n node_to_latency_mapping: Dict[Node, NodeLatency] = {}\n for node in fx_module.graph.nodes:\n if node.op not in {\"output\", \"placeholder\", \"get_attr\"}:\n if node.size_bytes.total_size == node.size_bytes.output_size:\n node_to_latency_mapping[node] = NodeLatency(\n node.size_bytes.total_size, 2.0 * node.size_bytes.total_size\n )\n else:\n node_to_latency_mapping[node] = NodeLatency(\n node.size_bytes.total_size, node.size_bytes.output_size\n )\n return node_to_latency_mapping\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n node_to_latency_mapping = get_node_to_latency_mapping(traced)\n devices = [Device(\"dev_0\", 200, 0), Device(\"dev_1\", 200, 1)]\n partitioner = Partitioner()\n partitioner_config = PartitionerConfig(devices)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n self.assertEqual(traced(a), module_with_submodules(a))\n partitions = partitioner.partitions\n partition_to_latency_mapping = get_partition_to_latency_mapping(\n partitions, node_to_latency_mapping\n )\n for p in partition_to_latency_mapping:\n if p.partition_id == 0:\n assert partition_to_latency_mapping[p] == (128.0, 80.0, 160.0)\n else:\n assert partition_to_latency_mapping[p] == (16.0, 32.0, 32.0)\n transfer_rate_bytes_per_sec = 2\n critical_path_latency_sec = get_latency_of_partitioned_graph(\n partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec\n )\n assert critical_path_latency_sec == 208.0\n\n def test_cost_aware_partition(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a):\n add_1 = a + torch.rand(4)\n add_2 = add_1 + torch.rand(4)\n linear_1 = self.linear(add_1)\n add_3 = add_2 + torch.rand(4)\n add_4 = add_2 + linear_1\n add_5 = add_3 + add_4\n return add_5\n\n def 
get_node_to_latency_mapping(fx_module: GraphModule):\n node_to_latency_mapping: Dict[Node, Nodelatency] = {}\n for node in fx_module.graph.nodes:\n if node.op not in {\"output\", \"placeholder\", \"get_attr\"}:\n if node.size_bytes.total_size == node.size_bytes.output_size:\n node_to_latency_mapping[node] = NodeLatency(\n node.size_bytes.total_size, 1\n )\n else:\n node_to_latency_mapping[node] = NodeLatency(\n node.size_bytes.total_size, node.size_bytes.output_size\n )\n return node_to_latency_mapping\n\n m = MyModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n devices = [\n Device(\"dev_0\", 125, 0),\n Device(\"dev_1\", 125, 1),\n Device(\"dev_2\", 125, 2),\n Device(\"dev_3\", 125, 3),\n ]\n node_to_latency_mapping = get_node_to_latency_mapping(traced)\n partitioner_config = PartitionerConfig(\n devices,\n mode=PartitionMode.cost_aware,\n transfer_rate_bytes_per_sec=2,\n node_to_latency_mapping=node_to_latency_mapping,\n )\n partitioner = Partitioner()\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(traced(a), module_with_submodules(a))\n partitions = partitioner.partitions\n partition_to_latency_mapping = get_partition_to_latency_mapping(\n partitions, node_to_latency_mapping\n )\n critical_path_latency_sec = get_latency_of_partitioned_graph(\n partitions,\n partition_to_latency_mapping,\n partitioner_config.transfer_rate_bytes_per_sec,\n )\n assert critical_path_latency_sec == 160.0\n\n def test_kl_based_partition(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super(TestModule, self).__init__()\n self.linear = torch.nn.Linear(4, 4)\n self.b = torch.rand(4)\n self.c = torch.rand(4)\n self.d = torch.rand(4)\n\n def forward(self, a):\n add_1 = a + self.b\n add_2 = add_1 + self.c\n linear_1 = self.linear(add_1)\n add_3 = add_2 + linear_1\n add_4 = add_2 + self.d\n add_5 = add_3 + add_4\n return add_4\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n node_to_latency_mapping = get_node_to_latency_mapping(traced)\n transfer_rate_bytes_per_sec = 2\n devices = [\n Device(\"dev_0\", 200, 0),\n Device(\"dev_1\", 200, 1),\n Device(\"dev_2\", 200, 2),\n Device(\"dev_3\", 200, 3),\n ]\n partitioner = Partitioner()\n partitioner_config = PartitionerConfig(\n devices,\n mode=PartitionMode.kl_based,\n transfer_rate_bytes_per_sec=transfer_rate_bytes_per_sec,\n node_to_latency_mapping=node_to_latency_mapping,\n )\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n self.assertEqual(traced(a), module_with_submodules(a))\n dag = ret.dag\n assert dag.nodes[0] == 176\n assert dag.nodes[1] == 112\n partition_to_latency_mapping = get_partition_to_latency_mapping(\n partitioner.partitions, node_to_latency_mapping\n )\n cost = get_latency_of_partitioned_graph(\n partitioner.partitions,\n partition_to_latency_mapping,\n transfer_rate_bytes_per_sec,\n )\n assert cost == 208.0\n\n def test_aot_based_partition(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super(TestModule, self).__init__()\n self.b = torch.rand(4)\n self.c = torch.rand(4)\n\n def forward(self, a):\n add_1 = a + self.b\n add_2 = self.c + add_1\n return add_2\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n node_to_partition_id = {}\n partition_to_logical_devices = {}\n 
count = 0\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n for node in traced.graph.nodes:\n if node.op not in {\"placeholder\", \"get_attr\", \"output\"}:\n node_to_partition_id[node] = count\n partition_to_logical_devices[count] = [0]\n count += 1\n devices = [Device(\"dev_0\", 200, 0)]\n partitioner_config = PartitionerConfig(\n devices=devices,\n mode=PartitionMode.aot_based,\n node_to_partition_mapping=node_to_partition_id,\n partition_to_logical_device_mapping=partition_to_logical_devices,\n )\n partitioner = Partitioner()\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n dag = ret.dag\n self.assertEqual(module_with_submodules(a), traced(a))\n for node in dag.nodes:\n assert node.size_bytes == 48\n assert node.logical_device_ids == [0]\n\n def test_replace_target_nodes_with(self):\n class testModule(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = testModule()\n traced = symbolic_trace(m)\n input1 = torch.randn(1)\n input2 = torch.randn(1)\n assert (input1 + input2) == traced(input1, input2)\n graph_manipulation.replace_target_nodes_with(\n fx_module=traced,\n old_op=\"call_function\",\n old_target=operator.add,\n new_op=\"call_function\",\n new_target=operator.mul,\n )\n assert (input1 * input2) == traced(input1, input2)\n\n def test_saturate_host(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super(TestModule, self).__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a):\n add_1 = a + torch.rand(4)\n add_2 = add_1 + torch.rand(4)\n linear_1 = self.linear(add_1)\n add_3 = add_2 + linear_1\n add_4 = add_2 + add_3\n return add_4\n\n m = TestModule()\n traced = symbolic_trace(m)\n a = torch.rand(4)\n graph_manipulation.get_size_of_all_nodes(traced, [a])\n devices = [\n Device(\"dev_0\", 200, 0),\n Device(\"dev_1\", 200, 1),\n Device(\"dev_2\", 100, 2),\n Device(\"dev_3\", 100, 3),\n Device(\"dev_4\", 200, 4),\n Device(\"dev_5\", 100, 5),\n ]\n partitioner = Partitioner()\n # Without host saturation, the model will be split into two partitions.\n # dev_0 holds partition 0 of 192 bytes and dev_1 holds partition 1 of 48 bytes.\n partitioner_config = PartitionerConfig(devices, saturate_host=True)\n ret = partitioner.partition_graph(traced, m, partitioner_config)\n module_with_submodules = ret.module_with_submodules\n self.assertEqual(traced(a), module_with_submodules(a))\n\n partitions = partitioner.partitions\n self.assertEqual(len(partitions), 2)\n # With host saturation, partition 1 will be replicated to dev_4, and partition 2\n # will be replicated to dev_2.\n self.assertEqual(partitions[0].logical_device_ids, [0, 4])\n self.assertEqual(partitions[1].logical_device_ids, [1, 2])\n\n @skipIfNoTorchVision\n def test_conv_bn_fusion(self):\n rn18 = resnet18().eval()\n traced = symbolic_trace(rn18)\n fused = optimization.fuse(traced)\n\n self.assertTrue(\n all(not isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())\n )\n\n N, C, H, W = 20, 3, 224, 224\n inp = torch.randn(N, C, H, W)\n\n self.assertEqual(fused(inp), rn18(inp))\n\n def test_call_to_assert_no_msg(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n assert a == b\n return a + b\n\n m = M()\n traced = symbolic_trace_with_rewrite(m)\n\n # Make sure the graph is well-formed\n traced.graph.lint()\n\n # Check the IR to make sure there's a call_function node with target == \"Assert\"\n self.assertTrue(\n any(\n node.op == \"call_function\" and node.target == torch._assert\n 
for node in traced.graph.nodes\n )\n )\n\n # Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to\n traced(3, 3)\n with self.assertRaisesRegex(AssertionError, \"\"):\n traced(3, 5)\n\n # Confirm that the output is correct\n self.assertEqual(traced(3, 3), m(3, 3))\n\n def test_call_to_assert_with_msg(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n assert a == b, \"test message\"\n return a + b\n\n m = M()\n traced = symbolic_trace_with_rewrite(m)\n\n # Make sure the graph is well-formed\n traced.graph.lint()\n\n # Check the IR to make sure there's a call_function node with target == \"Assert\"\n self.assertTrue(\n any(\n node.op == \"call_function\" and node.target == torch._assert\n for node in traced.graph.nodes\n )\n )\n\n # Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to\n traced(3, 3)\n with self.assertRaisesRegex(AssertionError, \"test message\"):\n traced(3, 5)\n\n # Confirm that the output is correct\n self.assertEqual(traced(3, 3), m(3, 3))\n\n def test_call_to_assert_with_empty_msg(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n assert a == b, \"\"\n return a + b\n\n m = M()\n traced = symbolic_trace_with_rewrite(m)\n\n # Make sure the graph is well-formed\n traced.graph.lint()\n\n # Check the IR to make sure there's a call_function node with target == \"Assert\"\n self.assertTrue(\n any(\n node.op == \"call_function\" and node.target == torch._assert\n for node in traced.graph.nodes\n )\n )\n\n # Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to\n traced(3, 3)\n with self.assertRaisesRegex(AssertionError, \"\"):\n traced(3, 5)\n\n # Confirm that the output is correct\n self.assertEqual(traced(3, 3), m(3, 3))\n\n def test_call_to_assert_with_multiline_message(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n error_msg = \"\"\"\nAn error message with\nterrible spacing\n \"\"\"\n assert a == b, error_msg\n return a + b\n\n m = M()\n traced = symbolic_trace_with_rewrite(m)\n\n # Make sure the graph is well-formed\n traced.graph.lint()\n\n # Check the IR to make sure there's a call_function node with target == \"Assert\"\n self.assertTrue(\n any(\n node.op == \"call_function\" and node.target == torch._assert\n for node in traced.graph.nodes\n )\n )\n\n # Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to\n error_msg = \"\"\"\nAn error message with\nterrible spacing\n \"\"\"\n traced(3, 3)\n with self.assertRaisesRegex(AssertionError, error_msg):\n traced(3, 5)\n\n # Confirm that the output is correct\n self.assertEqual(traced(3, 3), m(3, 3))\n\n def test_subgraph_creation(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x, y):\n z = self.linear(x + self.param).clamp(min=0.0, max=1.0)\n w = self.linear(y).clamp(min=0.0, max=1.0)\n return z + w\n\n # symbolically trace model\n my_module = MyModule()\n my_module_traced = symbolic_trace(my_module)\n\n # random mod partitioning\n partition_counter = 0\n NPARTITIONS = 3\n\n # Add some random meta info to make sure it is kept around.\n for node in my_module_traced.graph.nodes:\n if node.op != \"output\":\n node.meta[\"test_meta_info\"] = True\n\n def mod_partition(node: Node):\n nonlocal partition_counter\n partition = partition_counter % NPARTITIONS\n partition_counter = 
(partition_counter + 1) % NPARTITIONS\n return partition\n\n # split module in module with submodules\n module_with_submodules = split_module(\n my_module_traced, my_module, mod_partition\n )\n\n # Check that test_meta_info was still on all nodes.\n submodules = dict(module_with_submodules.named_modules())\n for node in module_with_submodules.graph.nodes:\n if node.op == \"call_module\":\n submod = submodules[node.target]\n self.assertTrue(isinstance(submod, torch.fx.GraphModule))\n for submod_node in submod.graph.nodes:\n if submod_node.op != \"output\":\n stored_op = submod_node.meta.get(\"test_meta_info\")\n self.assertTrue(stored_op is not None and stored_op)\n\n x = torch.rand(3, 4)\n y = torch.rand(3, 4)\n\n orig_out = my_module_traced(x, y)\n submodules_out = module_with_submodules(x, y)\n\n self.assertEqual(orig_out, submodules_out)\n\n @skipIfNoTorchVision\n def test_subgraph_trivial_resnet(self):\n # Smoke test trivially splitting resnet into 1 partition works\n # There was an issue before causing submodule names to be aliased\n m = resnet18()\n traced = symbolic_trace(m)\n a = torch.rand(64, 3, 7, 7)\n module_with_submodules = split_module(traced, m, lambda node: 0)\n module_with_submodules(a)\n\n def test_normalize_binary_operators(self):\n ops_to_test = {\n torch.add,\n torch.mul,\n torch.sub,\n torch.div,\n torch.floor_divide,\n torch.remainder,\n torch.eq,\n torch.ne,\n torch.lt,\n torch.le,\n torch.gt,\n torch.ge,\n }\n\n # Test Tensor/Tensor callsite\n for op in ops_to_test:\n\n class WrapperMod(torch.nn.Module):\n def forward(self, x, y):\n return op(x, y)\n\n traced = symbolic_trace(WrapperMod())\n normalized = NormalizeOperators(traced).transform()\n x, y = torch.randn(3, 4), torch.randn(3, 4)\n torch.testing.assert_close(traced(x, y), normalized(x, y))\n self.assertFalse(\n any(n.target in ops_to_test for n in normalized.graph.nodes)\n )\n\n # Test Tensor/scalar callsite\n for op in ops_to_test:\n\n class WrapperMod(torch.nn.Module):\n def forward(self, x):\n return op(x, 42)\n\n traced = symbolic_trace(WrapperMod())\n normalized = NormalizeOperators(traced).transform()\n x = torch.randn(3, 4)\n torch.testing.assert_close(traced(x), normalized(x))\n self.assertFalse(\n any(n.target in ops_to_test for n in normalized.graph.nodes)\n )\n\n @skipIfNoTorchVision\n def test_normalize_args(self):\n m = resnet18()\n\n class FunctionalTracer(torch.fx.Tracer):\n def is_leaf_module(\n self, m: torch.nn.Module, module_qualified_name: str\n ) -> bool:\n # `leaves` contains the set of standard `nn.Modules` that are not\n # currently symbolically traceable. 
Ideally this set would be empty\n leaves = set([torch.nn.BatchNorm2d])\n return type(m) in leaves\n\n traced = torch.fx.GraphModule(m, FunctionalTracer().trace(m))\n\n input = torch.randn(5, 3, 224, 224)\n ref_outs = traced(input)\n\n ShapeProp(traced).propagate(input)\n traced = NormalizeArgs(traced).transform()\n\n modules = dict(traced.named_modules())\n\n for node in traced.graph.nodes:\n if node.op == \"call_function\" and node.target != operator.add:\n self.assertEqual(len(node.args), 0)\n elif node.op == \"call_module\":\n submod_class = modules[node.target].__class__\n nn_class = getattr(torch.nn, submod_class.__name__)\n if submod_class == nn_class:\n self.assertEqual(len(node.args), 0)\n traced(input)\n self.assertEqual(traced(input), ref_outs)\n\n def test_normalize_modules_exhaustive(self):\n \"\"\"\n Exhaustively test `Node.normalized_arguments` on all standard\n torch.nn Module classes\n \"\"\"\n for test_params in module_tests + new_module_tests:\n if \"constructor\" not in test_params:\n constructor = getattr(torch.nn, test_params[\"module_name\"])\n else:\n constructor = test_params[\"constructor\"]\n\n if \"constructor_args\" not in test_params:\n args = ()\n else:\n args = test_params[\"constructor_args\"]\n\n mod = constructor(*args)\n # Skip modules that are not standard `torch.nn`\n # instances, including functionals. (functionals\n # are tested in test_normalize_args)\n if mod.__class__.__name__ not in dir(torch.nn):\n continue\n\n if \"input_fn\" not in test_params:\n inputs = torch.randn(test_params[\"input_size\"])\n else:\n inputs = test_params[\"input_fn\"]()\n\n if not isinstance(inputs, (tuple, list)):\n inputs = (inputs,)\n\n params = \", \".join(f\"v{i}\" for i in range(len(inputs)))\n\n # Generate a class to wrap this standard `nn.Module` instance\n test_classname = f\"Test{mod.__class__.__name__}\"\n test_mod_code = f\"\"\"\nclass {test_classname}(torch.nn.Module):\n def __init__(self, mod):\n super().__init__()\n self.mod = mod\n\n def forward(self, {params}):\n return self.mod({params})\n \"\"\"\n\n gbls = {\"torch\": torch}\n exec(test_mod_code, gbls)\n\n test_instance = gbls[test_classname](mod)\n traced = symbolic_trace(test_instance)\n\n # Use `Node.normalized_arguments` to get a new set of arguments\n # to feed to the Module. Then, rewrite the node to only take\n # in those arguments as kwargs\n modules = dict(traced.named_modules())\n for node in traced.graph.nodes:\n if node.op == \"call_module\":\n submod_class = modules[node.target].__class__\n nn_class = getattr(torch.nn, submod_class.__name__)\n if submod_class == nn_class:\n normalized_args = node.normalized_arguments(traced)\n normalized_args2 = normalize_module(\n traced, node.target, node.args, node.kwargs\n )\n assert normalized_args == normalized_args2\n assert normalized_args\n node.args = normalized_args.args\n node.kwargs = normalized_args.kwargs\n\n traced.recompile()\n\n # These Modules have an RNG in their forward, so testing\n # correctness by comparing outputs is not correct. 
Skip that\n # check for these\n stochastic_modules = {\"FractionalMaxPool2d\", \"FractionalMaxPool3d\", \"RReLU\"}\n\n if mod.__class__.__name__ not in stochastic_modules:\n self.assertEqual(traced(*inputs), mod(*inputs))\n\n traced = NormalizeArgs(symbolic_trace(test_instance)).transform()\n modules = dict(traced.named_modules())\n for node in traced.graph.nodes:\n if node.op == \"call_module\":\n submod_class = modules[node.target].__class__\n nn_class = getattr(torch.nn, submod_class.__name__)\n if submod_class == nn_class:\n self.assertEqual(len(node.args), 0)\n\n def test_normalize_args_preserve_meta(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, a):\n return torch.add(a, 3)\n\n m = MyModule()\n traced = symbolic_trace(m)\n\n for node in traced.graph.nodes:\n if node.op == \"call_function\" and node.target == torch.add:\n node.meta[\"my_key\"] = 7\n break\n else:\n self.fail(\"Didn't find call_function torch.add\")\n\n input = torch.randn(2, 3)\n ShapeProp(traced).propagate(input)\n traced = NormalizeArgs(traced).transform()\n\n for node in traced.graph.nodes:\n if node.op == \"call_function\" and node.target == torch.add:\n self.assertTrue(\"my_key\" in node.meta)\n self.assertEqual(node.meta[\"my_key\"], 7)\n break\n else:\n self.fail(\"Didn't find call_function torch.add\")\n\n @skipIfNoTorchVision\n def test_annotate_returns_with_schema(self):\n m = resnet18()\n\n traced_modules = symbolic_trace(m)\n traced_modules_annotated = AnnotateTypesWithSchema(traced_modules).transform()\n for node in traced_modules_annotated.graph.nodes:\n if node.type is None:\n check = (node.op, node.target)\n self.assertTrue(\n check\n in {\n (\"placeholder\", \"x\"),\n (\"call_function\", operator.add),\n (\"call_function\", torch.flatten),\n (\"output\", \"output\"),\n }\n )\n\n # Smoke test torchscript compilation since now we're emitting type annotations\n torch.jit.script(traced_modules_annotated)\n\n class FunctionalTracer(torch.fx.Tracer):\n def is_leaf_module(\n self, m: torch.nn.Module, module_qualified_name: str\n ) -> bool:\n # `leaves` contains the set of standard `nn.Modules` that are not\n # currently symbolically traceable. 
Ideally this set would be empty\n leaves = set([torch.nn.BatchNorm2d])\n return type(m) in leaves\n\n traced_functionals = torch.fx.GraphModule(m, FunctionalTracer().trace(m))\n\n traced_functionals_annotated = AnnotateTypesWithSchema(\n traced_functionals\n ).transform()\n for node in traced_functionals_annotated.graph.nodes:\n if node.type is None:\n check = (node.op, node.target)\n excluded_nodes = {\n (\"placeholder\", \"x\"),\n (\"call_function\", torch.conv2d),\n # Return type differs based on boolean dispatch :(\n (\"call_function\", torch.nn.functional.max_pool2d),\n (\"call_function\", operator.add),\n (\"call_function\", torch.flatten),\n (\"output\", \"output\"),\n }\n self.assertTrue(check in excluded_nodes)\n\n # Smoke test torchscript compilation since now we're emitting type annotations\n torch.jit.script(traced_functionals_annotated)\n\n def test_subgraph_uniquename(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, a, b, c, d):\n add_1 = a + b\n add_2 = add_1 + c\n linear_1 = self.linear(add_1)\n add_3 = add_2 + d\n add_4 = add_2 + linear_1\n add_5 = add_3 + add_4\n return add_5\n\n a, b, c, d = torch.ones(4), torch.ones(4), torch.ones(4), torch.ones(4)\n mm = MyModule()\n traced = symbolic_trace(mm)\n\n def split_cb(node: torch.fx.Node):\n if node.name == \"a\" or node.name == \"b\" or node.name == \"add\":\n return 0\n else:\n return 1\n\n module_with_submodule = split_module(traced, mm, split_cb)\n self.assertEqual(module_with_submodule(a, b, c, d), traced(a, b, c, d))\n\n def test_traceable_function_with_nonstandard_name(self):\n def foo(x):\n return torch.relu(x)\n\n traced = symbolic_trace_with_rewrite(foo)\n\n def test_to_folder(self):\n class Test(torch.nn.Module):\n def __init__(self):\n super(Test, self).__init__()\n self.W = torch.nn.Parameter(torch.randn(2))\n self.seq = torch.nn.Sequential(torch.nn.BatchNorm1d(2, 2))\n self.linear = torch.nn.Linear(2, 2)\n self.attr = torch.randn(2)\n self.register_buffer(\"attr2\", torch.randn(2))\n\n def forward(self, x):\n return self.linear(self.seq(self.W + self.attr + self.attr2 + x))\n\n mod = symbolic_trace(Test())\n module_name = \"Foo\"\n import tempfile\n from pathlib import Path\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_dir = Path(tmp_dir)\n mod.to_folder(tmp_dir, module_name)\n # Recipe taken from here:\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(\n module_name, tmp_dir / \"__init__.py\"\n )\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n spec.loader.exec_module(module)\n t = torch.randn(2, 2)\n self.assertEqual(module.Foo()(t), mod(t))\n\n def test_fetch(self):\n attrs_for_lowering: Dict[str, List[str]] = {\n \"torch.nn.modules.conv.Conv2d\": [\n \"weight\",\n \"bias\",\n \"kernel_size\",\n \"stride\",\n \"padding\",\n \"dilation\",\n \"groups\",\n \"padding_mode\",\n ],\n \"torch.nn.modules.batchnorm.BatchNorm2d\": [\n \"weight\",\n \"bias\",\n \"running_mean\",\n \"running_var\",\n \"eps\",\n ],\n }\n\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 3, 2)\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, a):\n a = self.conv(a)\n a += a\n return self.bn(a)\n\n mod = TestModule()\n traced = symbolic_trace(mod)\n lift_lowering_attrs_to_nodes(traced)\n\n for node in 
traced.graph.nodes:\n if node.op == \"call_module\":\n assert hasattr(node, \"attrs_for_lowering\")\n para_list = attrs_for_lowering[node.attrs_for_lowering[\"name\"]]\n\n # node.attrs_for_lowering has an addition field of class name\n assert len(para_list) + 1 == len(node.attrs_for_lowering)\n for p_name in para_list:\n assert p_name in node.attrs_for_lowering\n\n def test_merge_matmuls(self):\n \"\"\"\n A collection of test cases for torch.fx.experimental.merge_matmul,\n a graph transformation that merges matrix multiplication operations.\n \"\"\"\n # Utility function for counting matmuls for test assertions.\n def _count_matmuls(mod):\n gm = torch.fx.symbolic_trace(mod)\n\n num_matmuls = 0\n for node in gm.graph.nodes:\n if node.target == torch.matmul:\n num_matmuls += 1\n\n return num_matmuls\n\n # Simple test case in which there are two matmuls of the same size to merge.\n class SimpleMergeMatmulModule(torch.nn.Module):\n def __init__(self, rhs):\n super().__init__()\n self.rhs = rhs\n\n def forward(self, x, y):\n a = torch.matmul(x, self.rhs)\n b = torch.matmul(y, self.rhs)\n return a + b\n\n # Initialize inputs.\n a = torch.randn(3, 3)\n b = torch.randn(3, 3)\n\n # Initialize RHS for matmuls.\n rhs = torch.randn(3, 4)\n\n # Construct SimpleMergeMatmulModule and call merge_matmul on it.\n module = SimpleMergeMatmulModule(rhs)\n opt_module = merge_matmul.merge_matmul(module)\n\n # Numerical correctness check.\n before = module(a, b)\n after = opt_module(a, b)\n before.allclose(after)\n\n # Basic graph structure check; original module should have 2 matmuls\n # and optimized module should have 1.\n self.assertEqual(_count_matmuls(module), 2)\n self.assertEqual(_count_matmuls(opt_module), 1)\n\n # Test case in which there are multiple matmuls of different sizes to merge.\n class FiveMergeMatmulModule(torch.nn.Module):\n def __init__(self, rhs):\n super().__init__()\n self.rhs = rhs\n\n def forward(self, a, b, c, d, e):\n s = torch.tensor([])\n matmuls = []\n\n # For some reason using a list comprehension or for-loop for this\n # doesn't work.\n matmuls.append(torch.matmul(a, self.rhs))\n matmuls.append(torch.matmul(b, self.rhs))\n matmuls.append(torch.matmul(c, self.rhs))\n matmuls.append(torch.matmul(d, self.rhs))\n matmuls.append(torch.matmul(e, self.rhs))\n\n for m in matmuls:\n s += torch.sum(m)\n\n return s\n\n # Initialize inputs.\n inputs = [torch.randn(2 * i + 1, 5) for i in range(5)]\n\n # Initialize RHS.\n rhs = torch.randn(5, 4)\n\n # Construct FiveMergeMatmulModule and call merge_matmul on it.\n module = FiveMergeMatmulModule(rhs)\n opt_module = merge_matmul.merge_matmul(module)\n\n # Numerical correctness check.\n before = module(*inputs)\n after = opt_module(*inputs)\n before.allclose(after)\n\n # Basic graph structure check; original module should have len(inputs) matmuls\n # and optimized module should have 1.\n self.assertEqual(_count_matmuls(module), len(inputs))\n self.assertEqual(_count_matmuls(opt_module), 1)\n\n # Simple test case in which two matmuls cannot be merged due to a data dependency between\n # the LHS operands.\n class UnmergeableMatmulModule(torch.nn.Module):\n def __init__(self, rhs):\n super().__init__()\n self.rhs = rhs\n\n def forward(self, x):\n a = torch.matmul(x, self.rhs)\n a_abs = torch.abs(a)\n b = torch.matmul(a_abs.transpose(1, 0), self.rhs)\n return b\n\n # Initialize inputs.\n a = torch.randn(3, 3)\n\n # Initialize RHS for matmuls.\n rhs = torch.randn(3, 4)\n\n # Construct UnmergeableMatmulModule and call merge_matmul on it.\n module = 
UnmergeableMatmulModule(rhs)\n opt_module = merge_matmul.merge_matmul(module)\n\n # Numerical correctness check.\n before = module(a)\n after = opt_module(a)\n before.allclose(after)\n\n # Basic graph structure check; the number of matrix multiplcations should not have changed.\n self.assertEqual(_count_matmuls(module), 2)\n self.assertEqual(_count_matmuls(opt_module), 2)\n\n def test_type_matches(self):\n should_be_equal = [\n (int, type(5)),\n (numbers.Number, type(5)),\n (numbers.Number, type(5.0)),\n (int, type(torch.float)),\n (Union[int, float], type(5)),\n (Union[int, float], type(5.0)),\n (List[int], type(5)),\n (List[int], create_type_hint([int, int])),\n (List[int], create_type_hint((int, int))),\n (List[torch.Tensor], create_type_hint([torch.Tensor, torch.Tensor])),\n (\n List[torch.Tensor],\n create_type_hint([torch.nn.Parameter, torch.nn.Parameter]),\n ),\n (torch.Tensor, torch.nn.Parameter),\n (List[torch.Tensor], create_type_hint([torch.nn.Parameter, torch.Tensor])),\n (List[torch.Tensor], create_type_hint([torch.Tensor, torch.nn.Parameter])),\n (List[torch.Tensor], create_type_hint((torch.Tensor, torch.Tensor))),\n (\n List[torch.Tensor],\n create_type_hint((torch.nn.Parameter, torch.nn.Parameter)),\n ),\n (torch.Tensor, torch.nn.Parameter),\n (List[torch.Tensor], create_type_hint((torch.nn.Parameter, torch.Tensor))),\n (List[torch.Tensor], create_type_hint((torch.Tensor, torch.nn.Parameter))),\n (Optional[List[torch.Tensor]], List[torch.Tensor]),\n (Optional[List[int]], List[int]),\n ]\n for sig_type, arg_type in should_be_equal:\n self.assertTrue(type_matches(sig_type, arg_type))\n\n should_fail = [\n (int, float),\n (Union[int, float], str),\n (List[torch.Tensor], List[int]),\n ]\n\n for sig_type, arg_type in should_fail:\n self.assertFalse(type_matches(sig_type, arg_type))\n\n @skipIfNoMkldnn\n def test_optimize_for_inference_cpu(self):\n import torch.nn as nn\n\n class Foo(nn.Module):\n def __init__(self):\n super().__init__()\n layers = []\n layers2 = []\n for _ in range(10):\n layers.append(nn.Conv2d(3, 3, 1))\n layers.append(nn.BatchNorm2d(3))\n layers.append(nn.ReLU())\n\n layers2.append(nn.Conv2d(3, 3, 1))\n layers2.append(nn.BatchNorm2d(3))\n layers2.append(nn.ReLU())\n self.model = nn.Sequential(*layers)\n self.model2 = nn.Sequential(*layers2)\n\n def forward(self, x):\n return self.model(x) + self.model2(x)\n\n N, C, H, W, = (\n 1,\n 3,\n 224,\n 224,\n )\n inp = torch.randn(N, C, H, W)\n with torch.no_grad():\n model = Foo().eval()\n optimized_model = optimization.optimize_for_inference(model)\n torch.testing.assert_close(model(inp), optimized_model(inp))\n\n optimized_model2 = optimization.optimize_for_inference(\n model, pass_config={\"remove_dropout\": False}\n )\n torch.testing.assert_close(model(inp), optimized_model2(inp))\n\n @skipIfNoTorchVision\n @skipIfNoMkldnn\n def test_optimize_for_inference_cpu_torchvision(self):\n models = [\n torchvision.models.resnet18,\n torchvision.models.resnet50,\n torchvision.models.densenet121,\n torchvision.models.shufflenet_v2_x1_0,\n torchvision.models.vgg16,\n torchvision.models.mobilenet_v2,\n torchvision.models.mnasnet1_0,\n torchvision.models.resnext50_32x4d,\n ]\n with torch.no_grad():\n for model_type in models:\n model = model_type()\n C, H, W, = (\n 3,\n 224,\n 224,\n )\n inp = torch.randn(3, C, H, W)\n model(inp)\n model.eval()\n inp = torch.randn(1, C, H, W)\n heuristic = optimization.gen_mkl_autotuner(inp, iters=0, warmup=0)\n optimized_model = optimization.optimize_for_inference(model)\n\n orig_out = 
model(inp)\n new_out = optimized_model(inp)\n torch.testing.assert_close(orig_out, new_out)\n\n\nclass TestNormalizeOperators(JitTestCase):\n @onlyCPU\n @ops(op_db, allowed_dtypes=(torch.float,))\n def test_normalize_operator_exhaustive(self, device, dtype, op):\n # Sorted and one entry on each line to minimize merge conflicts.\n op_skip = {\n # See: https://github.com/pytorch/pytorch/issues/64997\n \"block_diag\",\n \"broadcast_tensors\",\n \"contiguous\",\n \"einsum\",\n \"expand\",\n \"expand_as\",\n \"fill_\",\n \"T\", # Implemented with a lambda\n \"H\", # Implemented with a lambda\n \"mT\", # Implemented with a lambda\n \"mH\", # Implemented with a lambda\n \"gradient\",\n \"igamma\",\n \"igammac\",\n \"index_put\",\n \"nn.functional.conv2d\",\n \"nn.functional.dropout\",\n \"polygamma\",\n \"special.polygamma\",\n \"repeat\",\n \"reshape_as\",\n \"resize_\",\n \"resize_as_\",\n \"special.zeta\",\n \"to_sparse\",\n \"view\",\n \"view_as\",\n \"unfold\",\n \"where\",\n \"zero_\",\n \"__getitem__\",\n \"__radd__\",\n \"__rsub__\",\n \"__rmul__\",\n \"__rdiv__\",\n \"__rmod__\",\n \"__rpow__\",\n '__rand__',\n '__ror__',\n '__rxor__',\n \"__rmatmul__\",\n }\n\n # Unsupported input types\n if op.name in op_skip:\n return\n\n # These ops currently don't trace in FX for various reasons (i.e. they take a list of tensors)\n fx_fail = {\"cat\", \"stack\", \"hstack\", \"vstack\", \"dstack\", \"linalg.multi_dot\"}\n sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)\n for sample_input in sample_inputs_itr:\n unsupported_arg_type = False\n arg_values = [sample_input.input] + list(sample_input.args)\n kwarg_values = sample_input.kwargs\n arg_types = []\n kwarg_types = {}\n\n def jit_infer_type(v):\n inferred_arg_type = torch._C._jit_try_infer_type(v)\n assert inferred_arg_type.success()\n t = _torchscript_type_to_python_type(inferred_arg_type.type())\n return t\n\n for v in arg_values:\n if isinstance(v, torch.Tensor):\n arg_types.append(type(v))\n else:\n if isinstance(v, complex):\n # Complex type not supported in FX\n unsupported_arg_type = True\n arg_types.append(jit_infer_type(v))\n\n for k, v in kwarg_values.items():\n if isinstance(v, torch.Tensor):\n kwarg_types[k] = type(v)\n else:\n if isinstance(v, complex):\n # Complex type not supported in FX\n unsupported_arg_type = True\n kwarg_types[k] = jit_infer_type(v)\n\n if unsupported_arg_type:\n continue\n # Test normalize_function by itself\n ref_out = op.op(*arg_values, **kwarg_values)\n norm_args_and_kwargs = normalize_function(\n op.op, arg_values, kwarg_values, arg_types, kwarg_types\n )\n if norm_args_and_kwargs is None:\n raise RuntimeError(\n \"\"\"\n FX failed to normalize op - add the op to the op_skip list.\n A common reason is if your OpInfo was implemented with a lambda\n - otherwise, file an issue\n \"\"\"\n )\n test_out = op.op(*norm_args_and_kwargs.args, **norm_args_and_kwargs.kwargs)\n self.assertEqual(test_out, ref_out)\n\n # Test normalized_arguments as part of FX\n if op.name in fx_fail:\n continue\n param_names = []\n param_values = []\n fx_args = []\n for idx, v in enumerate(arg_values):\n if isinstance(v, torch.Tensor):\n param_names.append(f\"arg_{idx}\")\n param_values.append(v)\n fx_args.append(param_names[-1])\n else:\n fx_args.append(f\"{repr(v)}\")\n\n for k, v in kwarg_values.items():\n if isinstance(v, torch.Tensor):\n param_names.append(k)\n param_values.append(v)\n fx_args.append(f\"{k} = {k}\")\n else:\n fx_args.append(f\"{k} = {repr(v)}\")\n\n code = f\"\"\"\nclass 
TestModule(torch.nn.Module):\n def forward(self, {', '.join(param_names)}):\n return torch.{op.name}({', '.join(fx_args)})\n \"\"\"\n\n g = {\"torch\": torch, \"inf\": math.inf}\n exec(code, g)\n TestModule = g[\"TestModule\"]\n\n m = TestModule()\n traced = torch.fx.symbolic_trace(m)\n ref_out = traced(*param_values)\n\n for node in traced.graph.nodes:\n if node.op == \"call_function\":\n normalized_args = node.normalized_arguments(\n traced, arg_types, kwarg_types\n )\n assert normalized_args\n node.args = normalized_args.args\n node.kwargs = normalized_args.kwargs\n traced.recompile()\n\n test_out = traced(*param_values)\n self.assertEqual(test_out, ref_out)\n\n\ninstantiate_device_type_tests(TestNormalizeOperators, globals())\n\nif __name__ == \"__main__\":\n run_tests()\n" ]
[ [ "torch.randn" ], [ "torch.abs", "torch._C._jit_try_infer_type", "torch.randint", "torch.cat", "torch.fx.experimental.partitioner_utils.get_latency_of_partitioned_graph", "torch.sum", "torch.fx.passes.param_fetch.lift_lowering_attrs_to_nodes", "torch.fx.operator_schemas.type_matches", "torch.fx.experimental.optimization.fuse", "torch.no_grad", "torch.nn.EmbeddingBag", "torch.jit.script", "torch.fx.experimental.schema_type_annotation.AnnotateTypesWithSchema", "torch.ones", "torch.add", "torch.randn", "torch.fx.experimental.normalize.NormalizeArgs", "torch.tensor", "torch.fx.symbolic_trace", "torch.relu", "torch.fx.passes.shape_prop._extract_tensor_metadata", "torch.quantize_per_tensor", "torch.rand", "torch.backends.mkldnn.is_available", "torch.fx.operator_schemas.create_type_hint", "torch.testing.assert_close", "torch.fx.experimental.merge_matmul.merge_matmul", "torch.fx.passes.graph_manipulation.serialize_module", "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.fx.experimental.accelerator_partitioner.Partitioner", "torch.fx.passes.shape_prop.ShapeProp", "torch.fx.operator_schemas.normalize_function", "torch.nn.Conv2d", "torch.nn.ModuleList", "torch.nn.ReLU", "torch.fx.experimental.partitioner_utils.NodeLatency", "torch.nn.Module", "torch.fx.experimental.partitioner_utils.Device", "torch.fx.passes.graph_manipulation.replace_target_nodes_with", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.testing._internal.common_utils.run_tests", "torch.testing._internal.common_device_type.ops", "torch.fx.experimental.partitioner_utils.PartitionerConfig", "torch.fx.operator_schemas.normalize_module", "torch.fx.experimental.rewriter.RewritingTracer", "torch.fx.experimental.partitioner_utils.get_partition_to_latency_mapping", "torch.matmul", "torch.fx.experimental.normalize.NormalizeOperators", "torch.fx.experimental.optimization.gen_mkl_autotuner", "torch.fx.passes.split_module.split_module", "torch.fx.experimental.optimization.optimize_for_inference", "torch.fx.passes.graph_manipulation.get_size_of_all_nodes", "torch.fx.passes.graph_manipulation.serialize_tensor_quantization", "torch.fx._symbolic_trace.symbolic_trace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhenfisher/scikit-learn
[ "be765c59542ced1e0fccbf531bef6705919474be" ]
[ "sklearn/neighbors/_base.py" ]
[ "\"\"\"Base and mixin classes for nearest neighbors\"\"\"\n# Authors: Jake Vanderplas <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Sparseness support by Lars Buitinck\n# Multi-output support by Arnaud Joly <[email protected]>\n#\n# License: BSD 3 clause (C) INRIA, University of Amsterdam\nfrom functools import partial\n\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nimport numbers\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, issparse\nimport joblib\nfrom joblib import Parallel, effective_n_jobs\n\nfrom ._ball_tree import BallTree\nfrom ._kd_tree import KDTree\nfrom ..base import BaseEstimator, MultiOutputMixin\nfrom ..base import is_classifier\nfrom ..metrics import pairwise_distances_chunked\nfrom ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS\nfrom ..utils import (\n check_array,\n gen_even_slices,\n _to_object_array,\n)\nfrom ..utils.deprecation import deprecated\nfrom ..utils.multiclass import check_classification_targets\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import check_non_negative\nfrom ..utils.fixes import delayed\nfrom ..utils.fixes import parse_version\nfrom ..exceptions import DataConversionWarning, EfficiencyWarning\n\nVALID_METRICS = dict(\n ball_tree=BallTree.valid_metrics,\n kd_tree=KDTree.valid_metrics,\n # The following list comes from the\n # sklearn.metrics.pairwise doc string\n brute=(\n list(PAIRWISE_DISTANCE_FUNCTIONS.keys())\n + [\n \"braycurtis\",\n \"canberra\",\n \"chebyshev\",\n \"correlation\",\n \"cosine\",\n \"dice\",\n \"hamming\",\n \"jaccard\",\n \"kulsinski\",\n \"mahalanobis\",\n \"matching\",\n \"minkowski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"seuclidean\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"sqeuclidean\",\n \"yule\",\n \"wminkowski\",\n ]\n ),\n)\n\n\nVALID_METRICS_SPARSE = dict(\n ball_tree=[],\n kd_tree=[],\n brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {\"haversine\", \"nan_euclidean\"}),\n)\n\n\ndef _check_weights(weights):\n \"\"\"Check to make sure weights are valid\"\"\"\n if weights not in (None, \"uniform\", \"distance\") and not callable(weights):\n raise ValueError(\n \"weights not recognized: should be 'uniform', \"\n \"'distance', or a callable function\"\n )\n\n return weights\n\n\ndef _get_weights(dist, weights):\n \"\"\"Get the weights from an array of distances and a parameter ``weights``\n\n Parameters\n ----------\n dist : ndarray\n The input distances.\n\n weights : {'uniform', 'distance' or a callable}\n The kind of weighting used.\n\n Returns\n -------\n weights_arr : array of the same shape as ``dist``\n If ``weights == 'uniform'``, then returns None.\n \"\"\"\n if weights in (None, \"uniform\"):\n return None\n elif weights == \"distance\":\n # if user attempts to classify a point that was zero distance from one\n # or more training points, those training points are weighted as 1.0\n # and the other points as 0.0\n if dist.dtype is np.dtype(object):\n for point_dist_i, point_dist in enumerate(dist):\n # check if point_dist is iterable\n # (ex: RadiusNeighborClassifier.predict may set an element of\n # dist to 1e-6 to represent an 'outlier')\n if hasattr(point_dist, \"__contains__\") and 0.0 in point_dist:\n dist[point_dist_i] = point_dist == 0.0\n else:\n dist[point_dist_i] = 1.0 / point_dist\n else:\n with np.errstate(divide=\"ignore\"):\n dist = 1.0 / dist\n inf_mask = np.isinf(dist)\n inf_row = np.any(inf_mask, axis=1)\n dist[inf_row] = inf_mask[inf_row]\n return dist\n elif 
callable(weights):\n return weights(dist)\n else:\n raise ValueError(\n \"weights not recognized: should be 'uniform', \"\n \"'distance', or a callable function\"\n )\n\n\ndef _is_sorted_by_data(graph):\n \"\"\"Returns whether the graph's non-zero entries are sorted by data\n\n The non-zero entries are stored in graph.data and graph.indices.\n For each row (or sample), the non-zero entries can be either:\n - sorted by indices, as after graph.sort_indices();\n - sorted by data, as after _check_precomputed(graph);\n - not sorted.\n\n Parameters\n ----------\n graph : sparse matrix of shape (n_samples, n_samples)\n Neighbors graph as given by `kneighbors_graph` or\n `radius_neighbors_graph`. Matrix should be of format CSR format.\n\n Returns\n -------\n res : bool\n Whether input graph is sorted by data.\n \"\"\"\n assert graph.format == \"csr\"\n out_of_order = graph.data[:-1] > graph.data[1:]\n line_change = np.unique(graph.indptr[1:-1] - 1)\n line_change = line_change[line_change < out_of_order.shape[0]]\n return out_of_order.sum() == out_of_order[line_change].sum()\n\n\ndef _check_precomputed(X):\n \"\"\"Check precomputed distance matrix\n\n If the precomputed distance matrix is sparse, it checks that the non-zero\n entries are sorted by distances. If not, the matrix is copied and sorted.\n\n Parameters\n ----------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n\n Returns\n -------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n \"\"\"\n if not issparse(X):\n X = check_array(X)\n check_non_negative(X, whom=\"precomputed distance matrix.\")\n return X\n else:\n graph = X\n\n if graph.format not in (\"csr\", \"csc\", \"coo\", \"lil\"):\n raise TypeError(\n \"Sparse matrix in {!r} format is not supported due to \"\n \"its handling of explicit zeros\".format(graph.format)\n )\n copied = graph.format != \"csr\"\n graph = check_array(graph, accept_sparse=\"csr\")\n check_non_negative(graph, whom=\"precomputed distance matrix.\")\n\n if not _is_sorted_by_data(graph):\n warnings.warn(\n \"Precomputed sparse input was not sorted by data.\", EfficiencyWarning\n )\n if not copied:\n graph = graph.copy()\n\n # if each sample has the same number of provided neighbors\n row_nnz = np.diff(graph.indptr)\n if row_nnz.max() == row_nnz.min():\n n_samples = graph.shape[0]\n distances = graph.data.reshape(n_samples, -1)\n\n order = np.argsort(distances, kind=\"mergesort\")\n order += np.arange(n_samples)[:, None] * row_nnz[0]\n order = order.ravel()\n graph.data = graph.data[order]\n graph.indices = graph.indices[order]\n\n else:\n for start, stop in zip(graph.indptr, graph.indptr[1:]):\n order = np.argsort(graph.data[start:stop], kind=\"mergesort\")\n graph.data[start:stop] = graph.data[start:stop][order]\n graph.indices[start:stop] = graph.indices[start:stop][order]\n return graph\n\n\ndef _kneighbors_from_graph(graph, n_neighbors, return_distance):\n \"\"\"Decompose a nearest neighbors sparse graph into distances and indices\n\n Parameters\n ----------\n graph : sparse matrix of shape (n_samples, n_samples)\n Neighbors graph as given by `kneighbors_graph` or\n `radius_neighbors_graph`. 
Matrix should be of format CSR format.\n\n n_neighbors : int\n Number of neighbors required for each sample.\n\n return_distance : bool\n Whether or not to return the distances.\n\n Returns\n -------\n neigh_dist : ndarray of shape (n_samples, n_neighbors)\n Distances to nearest neighbors. Only present if `return_distance=True`.\n\n neigh_ind : ndarray of shape (n_samples, n_neighbors)\n Indices of nearest neighbors.\n \"\"\"\n n_samples = graph.shape[0]\n assert graph.format == \"csr\"\n\n # number of neighbors by samples\n row_nnz = np.diff(graph.indptr)\n row_nnz_min = row_nnz.min()\n if n_neighbors is not None and row_nnz_min < n_neighbors:\n raise ValueError(\n \"%d neighbors per samples are required, but some samples have only\"\n \" %d neighbors in precomputed graph matrix. Decrease number of \"\n \"neighbors used or recompute the graph with more neighbors.\"\n % (n_neighbors, row_nnz_min)\n )\n\n def extract(a):\n # if each sample has the same number of provided neighbors\n if row_nnz.max() == row_nnz_min:\n return a.reshape(n_samples, -1)[:, :n_neighbors]\n else:\n idx = np.tile(np.arange(n_neighbors), (n_samples, 1))\n idx += graph.indptr[:-1, None]\n return a.take(idx, mode=\"clip\").reshape(n_samples, n_neighbors)\n\n if return_distance:\n return extract(graph.data), extract(graph.indices)\n else:\n return extract(graph.indices)\n\n\ndef _radius_neighbors_from_graph(graph, radius, return_distance):\n \"\"\"Decompose a nearest neighbors sparse graph into distances and indices\n\n Parameters\n ----------\n graph : sparse matrix of shape (n_samples, n_samples)\n Neighbors graph as given by `kneighbors_graph` or\n `radius_neighbors_graph`. Matrix should be of format CSR format.\n\n radius : float\n Radius of neighborhoods which should be strictly positive.\n\n return_distance : bool\n Whether or not to return the distances.\n\n Returns\n -------\n neigh_dist : ndarray of shape (n_samples,) of arrays\n Distances to nearest neighbors. 
Only present if `return_distance=True`.\n\n neigh_ind : ndarray of shape (n_samples,) of arrays\n Indices of nearest neighbors.\n \"\"\"\n assert graph.format == \"csr\"\n\n no_filter_needed = bool(graph.data.max() <= radius)\n\n if no_filter_needed:\n data, indices, indptr = graph.data, graph.indices, graph.indptr\n else:\n mask = graph.data <= radius\n if return_distance:\n data = np.compress(mask, graph.data)\n indices = np.compress(mask, graph.indices)\n indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]\n\n indices = indices.astype(np.intp, copy=no_filter_needed)\n\n if return_distance:\n neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))\n neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))\n\n if return_distance:\n return neigh_dist, neigh_ind\n else:\n return neigh_ind\n\n\nclass NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for nearest neighbors estimators.\"\"\"\n\n @abstractmethod\n def __init__(\n self,\n n_neighbors=None,\n radius=None,\n algorithm=\"auto\",\n leaf_size=30,\n metric=\"minkowski\",\n p=2,\n metric_params=None,\n n_jobs=None,\n ):\n\n self.n_neighbors = n_neighbors\n self.radius = radius\n self.algorithm = algorithm\n self.leaf_size = leaf_size\n self.metric = metric\n self.metric_params = metric_params\n self.p = p\n self.n_jobs = n_jobs\n\n def _check_algorithm_metric(self):\n if self.algorithm not in [\"auto\", \"brute\", \"kd_tree\", \"ball_tree\"]:\n raise ValueError(\"unrecognized algorithm: '%s'\" % self.algorithm)\n\n if self.algorithm == \"auto\":\n if self.metric == \"precomputed\":\n alg_check = \"brute\"\n elif callable(self.metric) or self.metric in VALID_METRICS[\"ball_tree\"]:\n alg_check = \"ball_tree\"\n else:\n alg_check = \"brute\"\n else:\n alg_check = self.algorithm\n\n if callable(self.metric):\n if self.algorithm == \"kd_tree\":\n # callable metric is only valid for brute force and ball_tree\n raise ValueError(\n \"kd_tree does not support callable metric '%s'\"\n \"Function call overhead will result\"\n \"in very poor performance.\"\n % self.metric\n )\n elif self.metric not in VALID_METRICS[alg_check]:\n raise ValueError(\n \"Metric '%s' not valid. Use \"\n \"sorted(sklearn.neighbors.VALID_METRICS['%s']) \"\n \"to get valid options. \"\n \"Metric can also be a callable function.\" % (self.metric, alg_check)\n )\n\n if self.metric_params is not None and \"p\" in self.metric_params:\n if self.p is not None:\n warnings.warn(\n \"Parameter p is found in metric_params. \"\n \"The corresponding parameter from __init__ \"\n \"is ignored.\",\n SyntaxWarning,\n stacklevel=3,\n )\n effective_p = self.metric_params[\"p\"]\n else:\n effective_p = self.p\n\n if self.metric in [\"wminkowski\", \"minkowski\"] and effective_p < 1:\n raise ValueError(\"p must be greater or equal to one for minkowski metric\")\n\n def _fit(self, X, y=None):\n if self._get_tags()[\"requires_y\"]:\n if not isinstance(X, (KDTree, BallTree, NeighborsBase)):\n X, y = self._validate_data(X, y, accept_sparse=\"csr\", multi_output=True)\n\n if is_classifier(self):\n # Classification targets require a specific format\n if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:\n if y.ndim != 1:\n warnings.warn(\n \"A column-vector y was passed when a \"\n \"1d array was expected. 
Please change \"\n \"the shape of y to (n_samples,), for \"\n \"example using ravel().\",\n DataConversionWarning,\n stacklevel=2,\n )\n\n self.outputs_2d_ = False\n y = y.reshape((-1, 1))\n else:\n self.outputs_2d_ = True\n\n check_classification_targets(y)\n self.classes_ = []\n self._y = np.empty(y.shape, dtype=int)\n for k in range(self._y.shape[1]):\n classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes)\n\n if not self.outputs_2d_:\n self.classes_ = self.classes_[0]\n self._y = self._y.ravel()\n else:\n self._y = y\n\n else:\n if not isinstance(X, (KDTree, BallTree, NeighborsBase)):\n X = self._validate_data(X, accept_sparse=\"csr\")\n\n self._check_algorithm_metric()\n if self.metric_params is None:\n self.effective_metric_params_ = {}\n else:\n self.effective_metric_params_ = self.metric_params.copy()\n\n effective_p = self.effective_metric_params_.get(\"p\", self.p)\n if self.metric in [\"wminkowski\", \"minkowski\"]:\n self.effective_metric_params_[\"p\"] = effective_p\n\n self.effective_metric_ = self.metric\n # For minkowski distance, use more efficient methods where available\n if self.metric == \"minkowski\":\n p = self.effective_metric_params_.pop(\"p\", 2)\n if p < 1:\n raise ValueError(\n \"p must be greater or equal to one for minkowski metric\"\n )\n elif p == 1:\n self.effective_metric_ = \"manhattan\"\n elif p == 2:\n self.effective_metric_ = \"euclidean\"\n elif p == np.inf:\n self.effective_metric_ = \"chebyshev\"\n else:\n self.effective_metric_params_[\"p\"] = p\n\n if isinstance(X, NeighborsBase):\n self._fit_X = X._fit_X\n self._tree = X._tree\n self._fit_method = X._fit_method\n self.n_samples_fit_ = X.n_samples_fit_\n return self\n\n elif isinstance(X, BallTree):\n self._fit_X = X.data\n self._tree = X\n self._fit_method = \"ball_tree\"\n self.n_samples_fit_ = X.data.shape[0]\n return self\n\n elif isinstance(X, KDTree):\n self._fit_X = X.data\n self._tree = X\n self._fit_method = \"kd_tree\"\n self.n_samples_fit_ = X.data.shape[0]\n return self\n\n if self.metric == \"precomputed\":\n X = _check_precomputed(X)\n # Precomputed matrix X must be squared\n if X.shape[0] != X.shape[1]:\n raise ValueError(\n \"Precomputed matrix must be square.\"\n \" Input is a {}x{} matrix.\".format(X.shape[0], X.shape[1])\n )\n self.n_features_in_ = X.shape[1]\n\n n_samples = X.shape[0]\n if n_samples == 0:\n raise ValueError(\"n_samples must be greater than 0\")\n\n if issparse(X):\n if self.algorithm not in (\"auto\", \"brute\"):\n warnings.warn(\"cannot use tree with sparse input: using brute force\")\n if self.effective_metric_ not in VALID_METRICS_SPARSE[\n \"brute\"\n ] and not callable(self.effective_metric_):\n raise ValueError(\n \"Metric '%s' not valid for sparse input. \"\n \"Use sorted(sklearn.neighbors.\"\n \"VALID_METRICS_SPARSE['brute']) \"\n \"to get valid options. 
\"\n \"Metric can also be a callable function.\" % (self.effective_metric_)\n )\n self._fit_X = X.copy()\n self._tree = None\n self._fit_method = \"brute\"\n self.n_samples_fit_ = X.shape[0]\n return self\n\n self._fit_method = self.algorithm\n self._fit_X = X\n self.n_samples_fit_ = X.shape[0]\n\n if self._fit_method == \"auto\":\n # A tree approach is better for small number of neighbors or small\n # number of features, with KDTree generally faster when available\n if (\n self.metric == \"precomputed\"\n or self._fit_X.shape[1] > 15\n or (\n self.n_neighbors is not None\n and self.n_neighbors >= self._fit_X.shape[0] // 2\n )\n ):\n self._fit_method = \"brute\"\n else:\n if self.effective_metric_ in VALID_METRICS[\"kd_tree\"]:\n self._fit_method = \"kd_tree\"\n elif (\n callable(self.effective_metric_)\n or self.effective_metric_ in VALID_METRICS[\"ball_tree\"]\n ):\n self._fit_method = \"ball_tree\"\n else:\n self._fit_method = \"brute\"\n\n if self._fit_method == \"ball_tree\":\n self._tree = BallTree(\n X,\n self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_,\n )\n elif self._fit_method == \"kd_tree\":\n self._tree = KDTree(\n X,\n self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_,\n )\n elif self._fit_method == \"brute\":\n self._tree = None\n else:\n raise ValueError(\"algorithm = '%s' not recognized\" % self.algorithm)\n\n if self.n_neighbors is not None:\n if self.n_neighbors <= 0:\n raise ValueError(\"Expected n_neighbors > 0. Got %d\" % self.n_neighbors)\n elif not isinstance(self.n_neighbors, numbers.Integral):\n raise TypeError(\n \"n_neighbors does not take %s value, enter integer value\"\n % type(self.n_neighbors)\n )\n\n return self\n\n def _more_tags(self):\n # For cross-validation routines to split data correctly\n return {\"pairwise\": self.metric == \"precomputed\"}\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated( # type: ignore\n \"Attribute `_pairwise` was deprecated in \"\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\"\n )\n @property\n def _pairwise(self):\n # For cross-validation routines to split data correctly\n return self.metric == \"precomputed\"\n\n\ndef _tree_query_parallel_helper(tree, *args, **kwargs):\n \"\"\"Helper for the Parallel calls in KNeighborsMixin.kneighbors\n\n The Cython method tree.query is not directly picklable by cloudpickle\n under PyPy.\n \"\"\"\n return tree.query(*args, **kwargs)\n\n\nclass KNeighborsMixin:\n \"\"\"Mixin for k-neighbors searches\"\"\"\n\n def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):\n \"\"\"Reduce a chunk of distances to the nearest neighbors\n\n Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`\n\n Parameters\n ----------\n dist : ndarray of shape (n_samples_chunk, n_samples)\n The distance matrix.\n\n start : int\n The index in X which the first row of dist corresponds to.\n\n n_neighbors : int\n Number of neighbors required for each sample.\n\n return_distance : bool\n Whether or not to return the distances.\n\n Returns\n -------\n dist : array of shape (n_samples_chunk, n_neighbors)\n Returned only if `return_distance=True`.\n\n neigh : array of shape (n_samples_chunk, n_neighbors)\n The neighbors indices.\n \"\"\"\n sample_range = np.arange(dist.shape[0])[:, None]\n neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)\n neigh_ind = neigh_ind[:, :n_neighbors]\n # argpartition doesn't guarantee sorted order, so we sort again\n 
neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]\n if return_distance:\n if self.effective_metric_ == \"euclidean\":\n result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind\n else:\n result = dist[sample_range, neigh_ind], neigh_ind\n else:\n result = neigh_ind\n return result\n\n def kneighbors(self, X=None, n_neighbors=None, return_distance=True):\n \"\"\"Find the K-neighbors of a point.\n\n Returns indices of and distances to the neighbors of each point.\n\n Parameters\n ----------\n X : array-like, shape (n_queries, n_features), \\\n or (n_queries, n_indexed) if metric == 'precomputed', \\\n default=None\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n\n n_neighbors : int, default=None\n Number of neighbors required for each sample. The default is the\n value passed to the constructor.\n\n return_distance : bool, default=True\n Whether or not to return the distances.\n\n Returns\n -------\n neigh_dist : ndarray of shape (n_queries, n_neighbors)\n Array representing the lengths to points, only present if\n return_distance=True.\n\n neigh_ind : ndarray of shape (n_queries, n_neighbors)\n Indices of the nearest points in the population matrix.\n\n Examples\n --------\n In the following example, we construct a NearestNeighbors\n class from an array representing our data set and ask who's\n the closest point to [1,1,1]\n\n >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=1)\n >>> neigh.fit(samples)\n NearestNeighbors(n_neighbors=1)\n >>> print(neigh.kneighbors([[1., 1., 1.]]))\n (array([[0.5]]), array([[2]]))\n\n As you can see, it returns [[0.5]], and [[2]], which means that the\n element is at distance 0.5 and is the third element of samples\n (indexes start at 0). You can also query for multiple points:\n\n >>> X = [[0., 1., 0.], [1., 0., 1.]]\n >>> neigh.kneighbors(X, return_distance=False)\n array([[1],\n [2]]...)\n \"\"\"\n check_is_fitted(self)\n\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n elif n_neighbors <= 0:\n raise ValueError(\"Expected n_neighbors > 0. 
Got %d\" % n_neighbors)\n elif not isinstance(n_neighbors, numbers.Integral):\n raise TypeError(\n \"n_neighbors does not take %s value, enter integer value\"\n % type(n_neighbors)\n )\n\n if X is not None:\n query_is_train = False\n if self.metric == \"precomputed\":\n X = _check_precomputed(X)\n else:\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n else:\n query_is_train = True\n X = self._fit_X\n # Include an extra neighbor to account for the sample itself being\n # returned, which is removed later\n n_neighbors += 1\n\n n_samples_fit = self.n_samples_fit_\n if n_neighbors > n_samples_fit:\n raise ValueError(\n \"Expected n_neighbors <= n_samples, \"\n \" but n_samples = %d, n_neighbors = %d\" % (n_samples_fit, n_neighbors)\n )\n\n n_jobs = effective_n_jobs(self.n_jobs)\n chunked_results = None\n if self._fit_method == \"brute\" and self.metric == \"precomputed\" and issparse(X):\n results = _kneighbors_from_graph(\n X, n_neighbors=n_neighbors, return_distance=return_distance\n )\n\n elif self._fit_method == \"brute\":\n reduce_func = partial(\n self._kneighbors_reduce_func,\n n_neighbors=n_neighbors,\n return_distance=return_distance,\n )\n\n # for efficiency, use squared euclidean distances\n if self.effective_metric_ == \"euclidean\":\n kwds = {\"squared\": True}\n else:\n kwds = self.effective_metric_params_\n\n chunked_results = list(\n pairwise_distances_chunked(\n X,\n self._fit_X,\n reduce_func=reduce_func,\n metric=self.effective_metric_,\n n_jobs=n_jobs,\n **kwds,\n )\n )\n\n elif self._fit_method in [\"ball_tree\", \"kd_tree\"]:\n if issparse(X):\n raise ValueError(\n \"%s does not work with sparse matrices. Densify the data, \"\n \"or set algorithm='brute'\"\n % self._fit_method\n )\n old_joblib = parse_version(joblib.__version__) < parse_version(\"0.12\")\n if old_joblib:\n # Deal with change of API in joblib\n parallel_kwargs = {\"backend\": \"threading\"}\n else:\n parallel_kwargs = {\"prefer\": \"threads\"}\n chunked_results = Parallel(n_jobs, **parallel_kwargs)(\n delayed(_tree_query_parallel_helper)(\n self._tree, X[s], n_neighbors, return_distance\n )\n for s in gen_even_slices(X.shape[0], n_jobs)\n )\n else:\n raise ValueError(\"internal: _fit_method not recognized\")\n\n if chunked_results is not None:\n if return_distance:\n neigh_dist, neigh_ind = zip(*chunked_results)\n results = np.vstack(neigh_dist), np.vstack(neigh_ind)\n else:\n results = np.vstack(chunked_results)\n\n if not query_is_train:\n return results\n else:\n # If the query data is the same as the indexed data, we would like\n # to ignore the first nearest neighbor of every sample, i.e\n # the sample itself.\n if return_distance:\n neigh_dist, neigh_ind = results\n else:\n neigh_ind = results\n\n n_queries, _ = X.shape\n sample_range = np.arange(n_queries)[:, None]\n sample_mask = neigh_ind != sample_range\n\n # Corner case: When the number of duplicates are more\n # than the number of neighbors, the first NN will not\n # be the sample, but a duplicate.\n # In that case mask the first duplicate.\n dup_gr_nbrs = np.all(sample_mask, axis=1)\n sample_mask[:, 0][dup_gr_nbrs] = False\n neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))\n\n if return_distance:\n neigh_dist = np.reshape(\n neigh_dist[sample_mask], (n_queries, n_neighbors - 1)\n )\n return neigh_dist, neigh_ind\n return neigh_ind\n\n def kneighbors_graph(self, X=None, n_neighbors=None, mode=\"connectivity\"):\n \"\"\"Compute the (weighted) graph of k-Neighbors for points in X.\n\n Parameters\n 
----------\n X : array-like of shape (n_queries, n_features), \\\n or (n_queries, n_indexed) if metric == 'precomputed', \\\n default=None\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n For ``metric='precomputed'`` the shape should be\n (n_queries, n_indexed). Otherwise the shape should be\n (n_queries, n_features).\n\n n_neighbors : int, default=None\n Number of neighbors for each sample. The default is the value\n passed to the constructor.\n\n mode : {'connectivity', 'distance'}, default='connectivity'\n Type of returned matrix: 'connectivity' will return the\n connectivity matrix with ones and zeros, in 'distance' the\n edges are distances between points, type of distance\n depends on the selected metric parameter in\n NearestNeighbors class.\n\n Returns\n -------\n A : sparse-matrix of shape (n_queries, n_samples_fit)\n `n_samples_fit` is the number of samples in the fitted data.\n `A[i, j]` gives the weight of the edge connecting `i` to `j`.\n The matrix is of CSR format.\n\n See Also\n --------\n NearestNeighbors.radius_neighbors_graph: Computes a graph of neighbors.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=2)\n >>> neigh.fit(X)\n NearestNeighbors(n_neighbors=2)\n >>> A = neigh.kneighbors_graph(X)\n >>> A.toarray()\n array([[1., 0., 1.],\n [0., 1., 1.],\n [1., 0., 1.]])\n \"\"\"\n check_is_fitted(self)\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n\n # check the input only in self.kneighbors\n\n # construct CSR matrix representation of the k-NN graph\n if mode == \"connectivity\":\n A_ind = self.kneighbors(X, n_neighbors, return_distance=False)\n n_queries = A_ind.shape[0]\n A_data = np.ones(n_queries * n_neighbors)\n\n elif mode == \"distance\":\n A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)\n A_data = np.ravel(A_data)\n\n else:\n raise ValueError(\n 'Unsupported mode, must be one of \"connectivity\" '\n 'or \"distance\" but got \"%s\" instead' % mode\n )\n\n n_queries = A_ind.shape[0]\n n_samples_fit = self.n_samples_fit_\n n_nonzero = n_queries * n_neighbors\n A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)\n\n kneighbors_graph = csr_matrix(\n (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)\n )\n\n return kneighbors_graph\n\n\ndef _tree_query_radius_parallel_helper(tree, *args, **kwargs):\n \"\"\"Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors\n\n The Cython method tree.query_radius is not directly picklable by\n cloudpickle under PyPy.\n \"\"\"\n return tree.query_radius(*args, **kwargs)\n\n\nclass RadiusNeighborsMixin:\n \"\"\"Mixin for radius-based neighbors searches\"\"\"\n\n def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):\n \"\"\"Reduce a chunk of distances to the nearest neighbors\n\n Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`\n\n Parameters\n ----------\n dist : ndarray of shape (n_samples_chunk, n_samples)\n The distance matrix.\n\n start : int\n The index in X which the first row of dist corresponds to.\n\n radius : float\n The radius considered when making the nearest neighbors search.\n\n return_distance : bool\n Whether or not to return the distances.\n\n Returns\n -------\n dist : list of ndarray of shape (n_samples_chunk,)\n Returned only if `return_distance=True`.\n\n neigh : list of ndarray of shape 
(n_samples_chunk,)\n The neighbors indices.\n \"\"\"\n neigh_ind = [np.where(d <= radius)[0] for d in dist]\n\n if return_distance:\n if self.effective_metric_ == \"euclidean\":\n dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]\n else:\n dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]\n results = dist, neigh_ind\n else:\n results = neigh_ind\n return results\n\n def radius_neighbors(\n self, X=None, radius=None, return_distance=True, sort_results=False\n ):\n \"\"\"Finds the neighbors within a given radius of a point or points.\n\n Return the indices and distances of each point from the dataset\n lying in a ball with size ``radius`` around the points of the query\n array. Points lying on the boundary are included in the results.\n\n The result points are *not* necessarily sorted by distance to their\n query point.\n\n Parameters\n ----------\n X : array-like of (n_samples, n_features), default=None\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n\n radius : float, default=None\n Limiting distance of neighbors to return. The default is the value\n passed to the constructor.\n\n return_distance : bool, default=True\n Whether or not to return the distances.\n\n sort_results : bool, default=False\n If True, the distances and indices will be sorted by increasing\n distances before being returned. If False, the results may not\n be sorted. If `return_distance=False`, setting `sort_results=True`\n will result in an error.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n neigh_dist : ndarray of shape (n_samples,) of arrays\n Array representing the distances to each point, only present if\n `return_distance=True`. The distance values are computed according\n to the ``metric`` constructor parameter.\n\n neigh_ind : ndarray of shape (n_samples,) of arrays\n An array of arrays of indices of the approximate nearest points\n from the population matrix that lie within a ball of size\n ``radius`` around the query points.\n\n Examples\n --------\n In the following example, we construct a NeighborsClassifier\n class from an array representing our data set and ask who's\n the closest point to [1, 1, 1]:\n\n >>> import numpy as np\n >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(radius=1.6)\n >>> neigh.fit(samples)\n NearestNeighbors(radius=1.6)\n >>> rng = neigh.radius_neighbors([[1., 1., 1.]])\n >>> print(np.asarray(rng[0][0]))\n [1.5 0.5]\n >>> print(np.asarray(rng[1][0]))\n [1 2]\n\n The first array returned contains the distances to all points which\n are closer than 1.6, while the second array returned contains their\n indices. 
In general, multiple points can be queried at the same time.\n\n Notes\n -----\n Because the number of neighbors of each point is not necessarily\n equal, the results for multiple query points cannot be fit in a\n standard data array.\n For efficiency, `radius_neighbors` returns arrays of objects, where\n each object is a 1D array of indices or distances.\n \"\"\"\n check_is_fitted(self)\n\n if X is not None:\n query_is_train = False\n if self.metric == \"precomputed\":\n X = _check_precomputed(X)\n else:\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n else:\n query_is_train = True\n X = self._fit_X\n\n if radius is None:\n radius = self.radius\n\n if self._fit_method == \"brute\" and self.metric == \"precomputed\" and issparse(X):\n results = _radius_neighbors_from_graph(\n X, radius=radius, return_distance=return_distance\n )\n\n elif self._fit_method == \"brute\":\n # for efficiency, use squared euclidean distances\n if self.effective_metric_ == \"euclidean\":\n radius *= radius\n kwds = {\"squared\": True}\n else:\n kwds = self.effective_metric_params_\n\n reduce_func = partial(\n self._radius_neighbors_reduce_func,\n radius=radius,\n return_distance=return_distance,\n )\n\n chunked_results = pairwise_distances_chunked(\n X,\n self._fit_X,\n reduce_func=reduce_func,\n metric=self.effective_metric_,\n n_jobs=self.n_jobs,\n **kwds,\n )\n if return_distance:\n neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)\n neigh_dist_list = sum(neigh_dist_chunks, [])\n neigh_ind_list = sum(neigh_ind_chunks, [])\n neigh_dist = _to_object_array(neigh_dist_list)\n neigh_ind = _to_object_array(neigh_ind_list)\n results = neigh_dist, neigh_ind\n else:\n neigh_ind_list = sum(chunked_results, [])\n results = _to_object_array(neigh_ind_list)\n\n if sort_results:\n if not return_distance:\n raise ValueError(\n \"return_distance must be True if sort_results is True.\"\n )\n for ii in range(len(neigh_dist)):\n order = np.argsort(neigh_dist[ii], kind=\"mergesort\")\n neigh_ind[ii] = neigh_ind[ii][order]\n neigh_dist[ii] = neigh_dist[ii][order]\n results = neigh_dist, neigh_ind\n\n elif self._fit_method in [\"ball_tree\", \"kd_tree\"]:\n if issparse(X):\n raise ValueError(\n \"%s does not work with sparse matrices. 
Densify the data, \"\n \"or set algorithm='brute'\"\n % self._fit_method\n )\n\n n_jobs = effective_n_jobs(self.n_jobs)\n delayed_query = delayed(_tree_query_radius_parallel_helper)\n if parse_version(joblib.__version__) < parse_version(\"0.12\"):\n # Deal with change of API in joblib\n parallel_kwargs = {\"backend\": \"threading\"}\n else:\n parallel_kwargs = {\"prefer\": \"threads\"}\n\n chunked_results = Parallel(n_jobs, **parallel_kwargs)(\n delayed_query(\n self._tree, X[s], radius, return_distance, sort_results=sort_results\n )\n for s in gen_even_slices(X.shape[0], n_jobs)\n )\n if return_distance:\n neigh_ind, neigh_dist = tuple(zip(*chunked_results))\n results = np.hstack(neigh_dist), np.hstack(neigh_ind)\n else:\n results = np.hstack(chunked_results)\n else:\n raise ValueError(\"internal: _fit_method not recognized\")\n\n if not query_is_train:\n return results\n else:\n # If the query data is the same as the indexed data, we would like\n # to ignore the first nearest neighbor of every sample, i.e\n # the sample itself.\n if return_distance:\n neigh_dist, neigh_ind = results\n else:\n neigh_ind = results\n\n for ind, ind_neighbor in enumerate(neigh_ind):\n mask = ind_neighbor != ind\n\n neigh_ind[ind] = ind_neighbor[mask]\n if return_distance:\n neigh_dist[ind] = neigh_dist[ind][mask]\n\n if return_distance:\n return neigh_dist, neigh_ind\n return neigh_ind\n\n def radius_neighbors_graph(\n self, X=None, radius=None, mode=\"connectivity\", sort_results=False\n ):\n \"\"\"Computes the (weighted) graph of Neighbors for points in X\n\n Neighborhoods are restricted the points at a distance lower than\n radius.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default=None\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n\n radius : float, default=None\n Radius of neighborhoods. The default is the value passed to the\n constructor.\n\n mode : {'connectivity', 'distance'}, default='connectivity'\n Type of returned matrix: 'connectivity' will return the\n connectivity matrix with ones and zeros, in 'distance' the\n edges are distances between points, type of distance\n depends on the selected metric parameter in\n NearestNeighbors class.\n\n sort_results : bool, default=False\n If True, in each row of the result, the non-zero entries will be\n sorted by increasing distances. If False, the non-zero entries may\n not be sorted. Only used with mode='distance'.\n\n .. 
versionadded:: 0.22\n\n Returns\n -------\n A : sparse-matrix of shape (n_queries, n_samples_fit)\n `n_samples_fit` is the number of samples in the fitted data.\n `A[i, j]` gives the weight of the edge connecting `i` to `j`.\n The matrix is of CSR format.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from sklearn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(radius=1.5)\n >>> neigh.fit(X)\n NearestNeighbors(radius=1.5)\n >>> A = neigh.radius_neighbors_graph(X)\n >>> A.toarray()\n array([[1., 0., 1.],\n [0., 1., 0.],\n [1., 0., 1.]])\n\n See Also\n --------\n kneighbors_graph\n \"\"\"\n check_is_fitted(self)\n\n # check the input only in self.radius_neighbors\n\n if radius is None:\n radius = self.radius\n\n # construct CSR matrix representation of the NN graph\n if mode == \"connectivity\":\n A_ind = self.radius_neighbors(X, radius, return_distance=False)\n A_data = None\n elif mode == \"distance\":\n dist, A_ind = self.radius_neighbors(\n X, radius, return_distance=True, sort_results=sort_results\n )\n A_data = np.concatenate(list(dist))\n else:\n raise ValueError(\n 'Unsupported mode, must be one of \"connectivity\", '\n 'or \"distance\" but got %s instead' % mode\n )\n\n n_queries = A_ind.shape[0]\n n_samples_fit = self.n_samples_fit_\n n_neighbors = np.array([len(a) for a in A_ind])\n A_ind = np.concatenate(list(A_ind))\n if A_data is None:\n A_data = np.ones(len(A_ind))\n A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))\n\n return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))\n" ]
[ [ "numpy.split", "numpy.sqrt", "numpy.cumsum", "numpy.dtype", "numpy.all", "numpy.any", "numpy.where", "numpy.hstack", "scipy.sparse.issparse", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.diff", "numpy.argpartition", "numpy.ravel", "numpy.zeros", "scipy.sparse.csr_matrix", "numpy.errstate", "numpy.argsort", "numpy.compress", "numpy.empty", "numpy.ones", "numpy.isinf", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mirceast/continuous-control
[ "d209b45b3db9eab10ca68cbd07e0e0a713fb071a" ]
[ "misc.py" ]
[ "import os\nimport numpy as np\nimport time\nfrom collections import deque\nimport glob\n\nimport matplotlib.pyplot as plt\nimport torch\n\n\ndef ddpg(agent, env, n_episodes=1000, max_t=1000, scores_window=100, progress_every=2, save_every=60, folder=None):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n scores_deque = deque(maxlen=scores_window)\n scores = []\n mean_scores = []\n t_start = time.time()\n best_score = -np.inf\n\n progress_t = time.time()\n saved_t = time.time()\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations[0]\n\n agent.reset()\n score = 0\n\n t_episode = time.time()\n for t in range(max_t):\n action = agent.act(state)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n if progress_every > 0 and time.time() - progress_t >= progress_every:\n print('\\rAverage score: {:.2f}\\tTime: {}'.format(\n np.mean(scores_deque), seconds_to_time_str(time.time() - t_start)), end=\"\")\n progress_t = time.time()\n if save_every > 0 and time.time() - saved_t >= save_every:\n saved_t = time.time()\n save_agent(agent, scores=scores, mean_scores=mean_scores, agent_name='',\n train_time=seconds_to_time_str(time.time() - t_start), folder=folder)\n\n scores_deque.append(score)\n scores.append(score)\n mean_scores.append(np.mean(scores_deque))\n\n if np.mean(scores_deque) >= 30:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(\n i_episode-100, np.mean(scores_deque)))\n save_agent(agent, scores=scores, mean_scores=mean_scores, agent_name='SOLVED',\n train_time=seconds_to_time_str(time.time() - t_start), folder=folder)\n break\n\n if np.mean(scores_deque) > best_score:\n best_score = np.mean(scores_deque)\n save_agent(agent, scores=scores, mean_scores=mean_scores, agent_name='',\n train_time=seconds_to_time_str(time.time() - t_start), folder=folder)\n\n return scores\n\n\ndef find_state_mag(env, max_t=1000, n_episodes=1000):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n action_size = brain.vector_action_space_size\n states = []\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n num_agents = len(env_info.agents)\n state = env_info.vector_observations[0]\n for t in range(max_t):\n states.append(state)\n actions = np.random.randn(num_agents, action_size)\n actions = np.clip(actions, -1, 1)\n env_info = env.step(actions)[brain_name]\n state = env_info.vector_observations[0]\n done = env_info.local_done[0]\n if done:\n break\n states = np.array(states)\n states = np.abs(states)\n return np.mean(states, axis=0), np.std(states, axis=0)\n\n\ndef seconds_to_time_str(t):\n if t < 0:\n raise Exception(\"Negative time?\")\n if t < 60:\n return \"{:02d} seconds\".format(int(t))\n elif t >= 60 and t < 3600:\n return \"{:04.1f} minutes\".format(t/60)\n elif t >= 3600:\n return \"{:04.1f} hours\".format(t/3600)\n\n\ndef save_agent(agent, scores=None, mean_scores=None, agent_name='', train_time='', folder=None):\n # Make sure save folder exists\n if folder is None:\n folder = os.getcwd()\n if not os.path.isdir(folder):\n os.makedirs(folder)\n # Figure out the root of the resulting file names\n if agent_name != \"\":\n name = \"agent_\" + agent_name + \"_\"\n else:\n name = \"agent_\"\n\n if 
train_time != \"\":\n name = name + \"train_time_\" + train_time.replace(\" \", \"_\") + \"_\"\n\n # Save agent weights\n save_path = os.path.join(folder, name + \"checkpoint_actor.pth\")\n torch.save(agent.actor_local.state_dict(), save_path)\n save_path = os.path.join(folder, name + \"checkpoint_critic.pth\")\n torch.save(agent.critic_local.state_dict(), save_path)\n\n # Save agent scores\n if scores is not None:\n save_path = os.path.join(folder, name + \"scores.np\")\n np.savetxt(save_path, scores)\n if mean_scores is not None:\n save_path = os.path.join(folder, name + \"mean_scores.np\")\n np.savetxt(save_path, mean_scores)\n\n\ndef load_agent(agent_name=\"\", train_time=\"last\", folder=None):\n if folder is None:\n folder = os.getcwd()\n if agent_name != \"\":\n name = \"agent_\" + agent_name + \"_\"\n else:\n name = \"agent_\"\n if train_time != \"last\":\n name = name + \"train_time_\" + train_time.replace(\" \", \"_\") + \"_\"\n else:\n files = glob.glob(os.path.join(folder, \"agent*mean_scores.np\"))\n files.sort(key=os.path.getmtime)\n files = files[-1]\n files = os.path.split(files)[1]\n name = files.split(\"mean_scores\")[0]\n path_scores = os.path.join(folder, name + \"scores.np\")\n path_mean_scores = path_scores.replace(\"_scores\", \"_mean_scores\")\n scores = np.loadtxt(path_scores)\n mean_scores = np.loadtxt(path_mean_scores)\n\n actor_dict = torch.load(os.path.join(\n folder, name + \"checkpoint_actor.pth\"))\n critic_dict = torch.load(os.path.join(\n folder, name + \"checkpoint_critic.pth\"))\n\n return scores, mean_scores, actor_dict, critic_dict\n\n\ndef load_folders(folders, train_time=\"last\"):\n scores = []\n mean_scores = []\n actor_dicts = []\n critic_dicts = []\n for i in range(len(folders)):\n score, mean_score, actor_dict, critic_dict = load_agent(\n train_time=train_time, folder=folders[i])\n scores.append(score)\n mean_scores.append(mean_score)\n actor_dicts.append(actor_dict)\n critic_dicts.append(critic_dict)\n return mean_scores, scores, actor_dicts, critic_dicts\n\n\ndef show_plots(mean_scores, scores, labels=None, max_episodes=None, only_mean=False, legend_outside=False):\n if max_episodes == None:\n # Find max number of episodes\n max_episodes = 0\n for i in range(len(mean_scores)):\n if len(mean_scores[i]) > max_episodes:\n max_episodes = len(mean_scores[i])\n\n fig, ax = plt.subplots()\n cmap = plt.cm.get_cmap(\"jet\", max([len(mean_scores), 2]))\n for i in range(len(mean_scores)):\n if labels is not None:\n label = labels[i]\n else:\n label = None\n mean_score = mean_scores[i]\n score = scores[i]\n if len(mean_score) < max_episodes:\n mean_score = np.concatenate(\n (mean_score, np.nan * np.ones(max_episodes-len(mean_score))))\n score = np.concatenate(\n (score, np.nan * np.ones(max_episodes-len(score))))\n if not only_mean:\n ax.plot(np.arange(1, max_episodes+1),\n score, alpha=0.3, color=cmap(i))\n ax.plot(np.arange(1, max_episodes+1), mean_score,\n label=label, color=cmap(i), linewidth=2)\n if labels is not None:\n if legend_outside:\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n else:\n ax.legend()\n ax.set_xlabel(\"# episodes\")\n ax.grid()\n" ]
[ [ "numpy.abs", "numpy.clip", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.std", "numpy.mean", "numpy.random.randn", "numpy.savetxt", "numpy.array", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alex123012/biotite
[ "5702c6eb4e9a577954177788815b0f517c111c12", "5702c6eb4e9a577954177788815b0f517c111c12", "5702c6eb4e9a577954177788815b0f517c111c12", "5702c6eb4e9a577954177788815b0f517c111c12", "5702c6eb4e9a577954177788815b0f517c111c12" ]
[ "src/biotite/structure/io/pdb/file.py", "tests/sequence/align/test_matrix.py", "tests/structure/test_pdbqt.py", "tests/application/test_tantan.py", "src/biotite/structure/compare.py" ]
[ "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\n__name__ = \"biotite.structure.io.pdb\"\n__author__ = \"Patrick Kunzmann, Daniel Bauer\"\n__all__ = [\"PDBFile\"]\n\nimport warnings\nimport numpy as np\nfrom ...atoms import AtomArray, AtomArrayStack\nfrom ...bonds import BondList, connect_via_residue_names\nfrom ...box import vectors_from_unitcell, unitcell_from_vectors\nfrom ....file import TextFile, InvalidFileError\nfrom ..general import _guess_element as guess_element\nfrom ...error import BadStructureError\nfrom ...filter import filter_first_altloc, filter_highest_occupancy_altloc, \\\n filter_solvent\nfrom .hybrid36 import encode_hybrid36, decode_hybrid36, max_hybrid36_number\nimport copy\nfrom warnings import warn\n\n\n_atom_records = {\"hetero\" : (0, 6),\n \"atom_id\" : (6, 11),\n \"atom_name\" : (12, 16),\n \"alt_loc\" : (16, ),\n \"res_name\" : (17, 20),\n \"chain_id\" : (21, ),\n \"res_id\" : (22, 26),\n \"ins_code\" : (26, ),\n \"coord_x\" : (30, 38),\n \"coord_y\" : (38, 46),\n \"coord_z\" : (46, 54),\n \"occupancy\" : (54, 60),\n \"temp_f\" : (60, 66),\n \"element\" : (76, 78),\n \"charge\" : (78, 80),}\n\n\nclass PDBFile(TextFile):\n r\"\"\"\n This class represents a PDB file.\n \n The usage of PDBxFile is encouraged in favor of this class.\n \n This class only provides support for reading/writing the pure atom\n information (*ATOM*, *HETATM*, *MODEL* and *ENDMDL* records). *TER*\n records cannot be written.\n \n See also\n --------\n PDBxFile\n \n Examples\n --------\n Load a `\\\\*.pdb` file, modify the structure and save the new\n structure into a new file:\n \n >>> import os.path\n >>> file = PDBFile.read(os.path.join(path_to_structures, \"1l2y.pdb\"))\n >>> array_stack = file.get_structure()\n >>> array_stack_mod = rotate(array_stack, [1,2,3])\n >>> file = PDBFile()\n >>> file.set_structure(array_stack_mod)\n >>> file.write(os.path.join(path_to_directory, \"1l2y_mod.pdb\"))\n \"\"\"\n def get_model_count(self):\n \"\"\"\n Get the number of models contained in the PDB file.\n\n Returns\n -------\n model_count : int\n The number of models.\n \"\"\"\n model_count = 0\n for line in self.lines:\n if line.startswith(\"MODEL\"):\n model_count += 1\n \n if model_count == 0:\n # It could be an empty file or a file with a single model,\n # where the 'MODEL' line is missing\n for line in self.lines:\n if line.startswith((\"ATOM\", \"HETATM\")):\n return 1\n return 0\n else:\n return model_count\n \n\n def get_coord(self, model=None):\n \"\"\"\n Get only the coordinates of the PDB file.\n \n Parameters\n ----------\n model : int, optional\n If this parameter is given, the function will return a\n 2D coordinate array from the atoms corresponding to the\n given model number (starting at 1).\n Negative values are used to index models starting from the\n last model insted of the first model.\n If this parameter is omitted, an 2D coordinate array\n containing all models will be returned, even if\n the structure contains only one model.\n \n Returns\n -------\n coord : ndarray, shape=(m,n,3) or shape=(n,2), dtype=float\n The coordinates read from the ATOM and HETATM records of the\n file.\n \n Notes\n -----\n Note that :func:`get_coord()` may output more coordinates than\n the atom array (stack) from the corresponding\n :func:`get_structure()` call has.\n The reason for this is, that :func:`get_structure()` filters\n *altloc* IDs, while `get_coord()` does not.\n \n 
Examples\n --------\n Read an :class:`AtomArrayStack` from multiple PDB files, where\n each PDB file contains the same atoms but different positions.\n This is an efficient approach when a trajectory is spread into\n multiple PDB files, as done e.g. by the *Rosetta* modeling\n software. \n\n For the purpose of this example, the PDB files are created from\n an existing :class:`AtomArrayStack`.\n \n >>> import os.path\n >>> from tempfile import gettempdir\n >>> file_names = []\n >>> for i in range(atom_array_stack.stack_depth()):\n ... pdb_file = PDBFile()\n ... pdb_file.set_structure(atom_array_stack[i])\n ... file_name = os.path.join(gettempdir(), f\"model_{i+1}.pdb\")\n ... pdb_file.write(file_name)\n ... file_names.append(file_name)\n >>> print(file_names)\n ['...model_1.pdb', '...model_2.pdb', ..., '...model_38.pdb']\n\n Now the PDB files are used to create an :class:`AtomArrayStack`,\n where each model represents a different model.\n\n Construct a new :class:`AtomArrayStack` with annotations taken\n from one of the created files used as template and coordinates\n from all of the PDB files.\n\n >>> template_file = PDBFile.read(file_names[0])\n >>> template = template_file.get_structure()\n >>> coord = []\n >>> for i, file_name in enumerate(file_names):\n ... pdb_file = PDBFile.read(file_name)\n ... coord.append(pdb_file.get_coord(model=1))\n >>> new_stack = from_template(template, np.array(coord))\n\n The newly created :class:`AtomArrayStack` should now be equal to\n the :class:`AtomArrayStack` the PDB files were created from.\n\n >>> print(np.allclose(new_stack.coord, atom_array_stack.coord))\n True\n \"\"\"\n # Line indices where a new model starts\n model_start_i = np.array([i for i in range(len(self.lines))\n if self.lines[i].startswith(\"MODEL\")],\n dtype=int)\n # Line indices with ATOM or HETATM records\n atom_line_i = np.array([i for i in range(len(self.lines)) if\n self.lines[i].startswith((\"ATOM\", \"HETATM\"))],\n dtype=int)\n # Structures containing only one model may omit MODEL record\n # In these cases model starting index is set to 0\n if len(model_start_i) == 0:\n model_start_i = np.array([0])\n \n if model is None:\n depth = len(model_start_i)\n length = self._get_model_length(model_start_i, atom_line_i)\n coord_i = atom_line_i\n \n else:\n last_model = len(model_start_i)\n if model == 0:\n raise ValueError(\"The model index must not be 0\")\n # Negative models mean index starting from last model\n model = last_model + model + 1 if model < 0 else model\n\n if model < last_model:\n line_filter = ( ( atom_line_i >= model_start_i[model-1] ) &\n ( atom_line_i < model_start_i[model ] ) )\n elif model == last_model:\n line_filter = (atom_line_i >= model_start_i[model-1])\n else:\n raise ValueError(\n f\"The file has {last_model} models, \"\n f\"the given model {model} does not exist\"\n )\n coord_i = atom_line_i[line_filter]\n length = len(coord_i)\n \n # Fill in coordinates\n if model is None:\n coord = np.zeros((depth, length, 3), dtype=np.float32)\n m = 0\n i = 0\n for line_i in atom_line_i:\n if m < len(model_start_i)-1 and line_i > model_start_i[m+1]:\n m += 1\n i = 0\n line = self.lines[line_i]\n coord[m,i,0] = float(line[30:38])\n coord[m,i,1] = float(line[38:46])\n coord[m,i,2] = float(line[46:54])\n i += 1\n return coord\n \n else:\n coord = np.zeros((length, 3), dtype=np.float32)\n for i, line_i in enumerate(coord_i):\n line = self.lines[line_i]\n coord[i,0] = float(line[30:38])\n coord[i,1] = float(line[38:46])\n coord[i,2] = float(line[46:54])\n return 
coord\n\n\n def get_structure(self, model=None, altloc=\"first\", extra_fields=[],\n include_bonds=False):\n \"\"\"\n Get an :class:`AtomArray` or :class:`AtomArrayStack` from the PDB file.\n \n This function parses standard base-10 PDB files as well as\n hybrid-36 PDB.\n \n Parameters\n ----------\n model : int, optional\n If this parameter is given, the function will return an\n :class:`AtomArray` from the atoms corresponding to the given\n model number (starting at 1).\n Negative values are used to index models starting from the\n last model insted of the first model.\n If this parameter is omitted, an :class:`AtomArrayStack`\n containing all models will be returned, even if the\n structure contains only one model.\n altloc : {'first', 'occupancy', 'all'}\n This parameter defines how *altloc* IDs are handled:\n - ``'first'`` - Use atoms that have the first\n *altloc* ID appearing in a residue.\n - ``'occupancy'`` - Use atoms that have the *altloc* ID\n with the highest occupancy for a residue.\n - ``'all'`` - Use all atoms.\n Note that this leads to duplicate atoms.\n When this option is chosen, the ``altloc_id``\n annotation array is added to the returned structure.\n extra_fields : list of str, optional\n The strings in the list are optional annotation categories\n that should be stored in the output array or stack.\n These are valid values:\n ``'atom_id'``, ``'b_factor'``, ``'occupancy'`` and\n ``'charge'``.\n include_bonds : bool, optional\n If set to true, a :class:`BondList` will be created for the\n resulting :class:`AtomArray` containing the bond information\n from the file.\n All bonds have :attr:`BondType.ANY`, since the PDB format\n does not support bond orders.\n \n Returns\n -------\n array : AtomArray or AtomArrayStack\n The return type depends on the `model` parameter.\n \"\"\"\n # Line indices where a new model starts\n model_start_i = np.array([i for i in range(len(self.lines))\n if self.lines[i].startswith((\"MODEL\"))],\n dtype=int)\n # Line indices with ATOM or HETATM records\n atom_line_i = np.array([i for i in range(len(self.lines)) if\n self.lines[i].startswith((\"ATOM\", \"HETATM\"))],\n dtype=int)\n # Structures containing only one model may omit MODEL record\n # In these cases model starting index is set to 0\n if len(model_start_i) == 0:\n model_start_i = np.array([0])\n \n if model is None:\n depth = len(model_start_i)\n length = self._get_model_length(model_start_i, atom_line_i)\n array = AtomArrayStack(depth, length)\n # Line indices for annotation determination\n # Annotation is determined from model 1,\n # therefore from ATOM records before second MODEL record\n if len(model_start_i) == 1:\n annot_i = atom_line_i\n else:\n annot_i = atom_line_i[atom_line_i < model_start_i[1]]\n # Line indices for coordinate determination\n coord_i = atom_line_i\n \n else:\n last_model = len(model_start_i)\n if model == 0:\n raise ValueError(\"The model index must not be 0\")\n # Negative models mean index starting from last model\n model = last_model + model + 1 if model < 0 else model\n\n if model < last_model:\n line_filter = ( ( atom_line_i >= model_start_i[model-1] ) &\n ( atom_line_i < model_start_i[model ] ) )\n elif model == last_model:\n line_filter = (atom_line_i >= model_start_i[model-1])\n else:\n raise ValueError(\n f\"The file has {last_model} models, \"\n f\"the given model {model} does not exist\"\n )\n annot_i = coord_i = atom_line_i[line_filter]\n array = AtomArray(len(coord_i))\n \n # Create mandatory and optional annotation arrays\n chain_id = 
np.zeros(array.array_length(), array.chain_id.dtype)\n res_id = np.zeros(array.array_length(), array.res_id.dtype)\n ins_code = np.zeros(array.array_length(), array.ins_code.dtype)\n res_name = np.zeros(array.array_length(), array.res_name.dtype)\n hetero = np.zeros(array.array_length(), array.hetero.dtype)\n atom_name = np.zeros(array.array_length(), array.atom_name.dtype)\n element = np.zeros(array.array_length(), array.element.dtype)\n\n atom_id_raw = np.zeros(array.array_length(), \"U5\")\n charge_raw = np.zeros(array.array_length(), \"U2\")\n occupancy = np.zeros(array.array_length(), float)\n b_factor = np.zeros(array.array_length(), float)\n\n altloc_id = np.zeros(array.array_length(), dtype=\"U1\")\n\n # Fill annotation array\n # i is index in array, line_i is line index\n for i, line_i in enumerate(annot_i):\n line = self.lines[line_i]\n \n chain_id[i] = line[21].upper().strip()\n res_id[i] = decode_hybrid36(line[22:26])\n ins_code[i] = line[26].strip()\n res_name[i] = line[17:20].strip()\n hetero[i] = (False if line[0:4] == \"ATOM\" else True)\n atom_name[i] = line[12:16].strip()\n element[i] = line[76:78].strip()\n\n altloc_id[i] = line[16]\n\n atom_id_raw[i] = line[6:11]\n charge_raw[i] = line[78:80]\n occupancy[i] = float(line[54:60].strip())\n b_factor[i] = float(line[60:66].strip())\n \n if include_bonds or \\\n (extra_fields is not None and \"atom_id\" in extra_fields):\n # The atom IDs are only required in these two cases\n atom_id = np.array(\n [decode_hybrid36(raw_id.item()) for raw_id in atom_id_raw],\n dtype=int\n )\n else:\n atom_id = None\n\n \n # Add annotation arrays to atom array (stack)\n array.chain_id = chain_id\n array.res_id = res_id\n array.ins_code = ins_code\n array.res_name = res_name\n array.hetero = hetero\n array.atom_name = atom_name\n array.element = element\n\n for field in (extra_fields if extra_fields is not None else []):\n if field == \"atom_id\":\n # Copy is necessary to avoid double masking in \n # later altloc ID filtering\n array.set_annotation(\"atom_id\", atom_id.copy())\n elif field == \"charge\":\n array.set_annotation(\"charge\", np.array(\n [0 if raw_number == \" \" else\n (-float(raw_number) if sign == \"-\" else float(raw_number))\n for raw_number, sign in charge_raw],\n dtype=int\n ))\n elif field == \"occupancy\":\n array.set_annotation(\"occupancy\", occupancy)\n elif field == \"b_factor\":\n array.set_annotation(\"b_factor\", b_factor)\n else:\n raise ValueError(f\"Unknown extra field: {field}\")\n\n # Replace empty strings for elements with guessed types\n # This is used e.g. for PDB files created by Gromacs\n if \"\" in array.element:\n rep_num = 0\n for idx in range(len(array.element)):\n if not array.element[idx]:\n atom_name = array.atom_name[idx]\n array.element[idx] = guess_element(atom_name)\n rep_num += 1\n warn(\"{} elements were guessed from atom_name.\".format(rep_num))\n \n # Fill in coordinates\n if isinstance(array, AtomArray):\n for i, line_i in enumerate(coord_i):\n line = self.lines[line_i]\n array.coord[i,0] = float(line[30:38])\n array.coord[i,1] = float(line[38:46])\n array.coord[i,2] = float(line[46:54])\n \n elif isinstance(array, AtomArrayStack):\n m = 0\n i = 0\n for line_i in atom_line_i:\n if m < len(model_start_i)-1 and line_i > model_start_i[m+1]:\n m += 1\n i = 0\n line = self.lines[line_i]\n array.coord[m,i,0] = float(line[30:38])\n array.coord[m,i,1] = float(line[38:46])\n array.coord[m,i,2] = float(line[46:54])\n i += 1\n\n # Fill in box vectors\n # PDB does not support changing box dimensions. 
CRYST1 is a one-time\n # record so we can extract it directly\n for line in self.lines:\n if line.startswith(\"CRYST1\"):\n try:\n len_a = float(line[6:15])\n len_b = float(line[15:24])\n len_c = float(line[24:33])\n alpha = np.deg2rad(float(line[33:40]))\n beta = np.deg2rad(float(line[40:47]))\n gamma = np.deg2rad(float(line[47:54]))\n box = vectors_from_unitcell(\n len_a, len_b, len_c, alpha, beta, gamma\n )\n except ValueError:\n # File contains invalid 'CRYST1' record\n warnings.warn(\n \"File contains invalid 'CRYST1' record, box is ignored\"\n )\n box = None\n\n if isinstance(array, AtomArray):\n array.box = box\n else:\n array.box = np.repeat(\n box[np.newaxis, ...], array.stack_depth(), axis=0\n )\n break \n\n # Filter altloc IDs\n if altloc == \"occupancy\":\n filter = filter_highest_occupancy_altloc(\n array, altloc_id, occupancy\n )\n array = array[..., filter]\n atom_id = atom_id[filter] if atom_id is not None else None\n elif altloc == \"first\":\n filter = filter_first_altloc(array, altloc_id)\n array = array[..., filter]\n atom_id = atom_id[filter] if atom_id is not None else None\n elif altloc == \"all\":\n array.set_annotation(\"altloc_id\", altloc_id)\n else:\n raise ValueError(f\"'{altloc}' is not a valid 'altloc' option\")\n \n # Read bonds\n if include_bonds:\n bond_list = self._get_bonds(atom_id)\n bond_list = bond_list.merge(connect_via_residue_names(\n array,\n # The information for non-hetero residues and water\n # are not part of CONECT records\n (~array.hetero) | filter_solvent(array)\n ))\n # Remove bond order from inter residue bonds for consistency\n bond_list.remove_bond_order()\n array.bonds = bond_list \n \n return array\n\n\n\n\n def set_structure(self, array, hybrid36=False):\n \"\"\"\n Set the :class:`AtomArray` or :class:`AtomArrayStack` for the\n file.\n \n This makes also use of the optional annotation arrays\n ``'atom_id'``, ``'b_factor'``, ``'occupancy'`` and ``'charge'``.\n If the atom array (stack) contains the annotation ``'atom_id'``,\n these values will be used for atom numbering instead of\n continuous numbering.\n \n Parameters\n ----------\n array : AtomArray or AtomArrayStack\n The array or stack to be saved into this file. 
If a stack\n is given, each array in the stack is saved as separate\n model.\n hybrid36: bool, optional\n Defines wether the file should be written in hybrid-36\n format.\n \n Notes\n -----\n If `array` has an associated :class:`BondList`, ``CONECT``\n records are also written for all non-water hetero residues\n and all inter-residue connections.\n \"\"\"\n annot_categories = array.get_annotation_categories()\n hetero = [\"ATOM\" if e == False else \"HETATM\" for e in array.hetero]\n # Check for optional annotation categories\n if \"atom_id\" in annot_categories:\n atom_id = array.atom_id\n else:\n atom_id = np.arange(1, array.array_length()+1)\n if \"b_factor\" in annot_categories:\n b_factor = array.b_factor\n else:\n b_factor = np.zeros(array.array_length())\n if \"occupancy\" in annot_categories:\n occupancy = array.occupancy\n else:\n occupancy = np.ones(array.array_length())\n if \"charge\" in annot_categories:\n charge = [ str(np.abs(charge)) + \"+\" if charge > 0 else\n (str(np.abs(charge)) + \"-\" if charge < 0 else\n \"\")\n for charge in array.get_annotation(\"charge\")]\n else:\n charge = [\"\"] * array.array_length()\n\n # Do checks on atom array (stack)\n if hybrid36:\n max_atoms, max_residues \\\n = max_hybrid36_number(5), max_hybrid36_number(4)\n else:\n max_atoms, max_residues = 99999, 9999\n if array.array_length() > max_atoms:\n warn(f\"More then {max_atoms:,} atoms per model\")\n if (array.res_id > max_residues).any():\n warn(f\"Residue IDs exceed {max_residues:,}\")\n if np.isnan(array.coord).any():\n raise ValueError(\"Coordinates contain 'NaN' values\")\n if any([len(name) > 1 for name in array.chain_id]):\n raise ValueError(\"Some chain IDs exceed 1 character\")\n if any([len(name) > 3 for name in array.res_name]):\n raise ValueError(\"Some residue names exceed 3 characters\")\n if any([len(name) > 4 for name in array.atom_name]):\n raise ValueError(\"Some atom names exceed 4 characters\")\n\n if hybrid36:\n pdb_atom_id = [encode_hybrid36(i, 5).rjust(5) for i in atom_id]\n pdb_res_id = [encode_hybrid36(i, 4).rjust(4) for i in array.res_id]\n else:\n # Atom IDs are supported up to 99999,\n # but negative IDs are also possible\n pdb_atom_id = np.where(\n atom_id > 0,\n ((atom_id - 1) % 99999) + 1,\n atom_id\n )\n pdb_atom_id = [\"{:>5d}\".format(i) for i in pdb_atom_id]\n # Residue IDs are supported up to 9999,\n # but negative IDs are also possible\n pdb_res_id = np.where(\n array.res_id > 0,\n ((array.res_id - 1) % 9999) + 1,\n array.res_id\n )\n pdb_res_id = [\"{:>4d}\".format(i) for i in pdb_res_id]\n\n if isinstance(array, AtomArray):\n self.lines = [None] * array.array_length()\n for i in range(array.array_length()):\n self.lines[i] = (\"{:6}\".format(hetero[i]) + \n pdb_atom_id[i] +\n \" \" +\n \"{:4}\".format(array.atom_name[i]) +\n \" \" +\n \"{:3}\".format(array.res_name[i]) +\n \" \" +\n \"{:1}\".format(array.chain_id[i]) +\n pdb_res_id[i] +\n \"{:1}\".format(array.ins_code[i]) +\n (\" \" * 3) +\n \"{:>8.3f}\".format(array.coord[i,0]) +\n \"{:>8.3f}\".format(array.coord[i,1]) +\n \"{:>8.3f}\".format(array.coord[i,2]) +\n \"{:>6.2f}\".format(occupancy[i]) +\n \"{:>6.2f}\".format(b_factor[i]) +\n (\" \" * 10) + \n \"{:>2}\".format(array.element[i]) +\n \"{:2}\".format(charge[i])\n )\n \n elif isinstance(array, AtomArrayStack):\n self.lines = []\n\n # The entire information, but the coordinates,\n # is equal for each model\n # Therefore template lines are created\n # which are afterwards applied for each model\n templines = [None] * array.array_length()\n for 
i in range(array.array_length()):\n templines[i] = (\"{:6}\".format(hetero[i]) + \n pdb_atom_id[i] +\n \" \" +\n \"{:4}\".format(array.atom_name[i]) +\n \" \" +\n \"{:3}\".format(array.res_name[i]) +\n \" \" +\n \"{:1}\".format(array.chain_id[i]) +\n pdb_res_id[i] +\n \"{:1}\".format(array.ins_code[i]) +\n (\" \" * 27) +\n \"{:>6.2f}\".format(occupancy[i]) +\n \"{:>6.2f}\".format(b_factor[i]) +\n (\" \" * 10) +\n \"{:>2}\".format(array.element[i]) +\n \"{:2}\".format(charge[i])\n )\n for i in range(array.stack_depth()):\n #Fill in coordinates for each model\n self.lines.append(\"MODEL {:>8d}\".format(i+1))\n modellines = copy.copy(templines)\n for j, line in enumerate(modellines):\n # Insert coordinates\n line = (line[:30]\n + \"{:>8.3f}{:>8.3f}{:>8.3f}\".format(\n array.coord[i,j,0],\n array.coord[i,j,1],\n array.coord[i,j,2])\n + line[54:] )\n modellines[j] = line\n self.lines.extend(modellines)\n self.lines.append(\"ENDMDL\")\n\n # Prepend a single CRYST1 record if we have box information\n if array.box is not None:\n box = array.box\n if len(box.shape) == 3:\n box = box[0]\n unitcell = unitcell_from_vectors(box)\n self.lines.insert(0, \"CRYST1\" +\n \"{:>9.3f}\".format(unitcell[0]) +\n \"{:>9.3f}\".format(unitcell[1]) +\n \"{:>9.3f}\".format(unitcell[2]) +\n \"{:>7.2f}\".format(np.rad2deg(unitcell[3])) +\n \"{:>7.2f}\".format(np.rad2deg(unitcell[4])) +\n \"{:>7.2f}\".format(np.rad2deg(unitcell[5])) +\n \" P 1 1\")\n \n # Add CONECT records if bonds are present\n if array.bonds is not None:\n # Only non-water hetero records and connections between\n # residues are added to the records\n hetero_indices = np.where(array.hetero & ~filter_solvent(array))[0]\n bond_array = array.bonds.as_array()\n bond_array = bond_array[\n np.isin(bond_array[:,0], hetero_indices) |\n np.isin(bond_array[:,1], hetero_indices) |\n (array.res_id [bond_array[:,0]] != array.res_id [bond_array[:,1]]) |\n (array.chain_id[bond_array[:,0]] != array.chain_id[bond_array[:,1]])\n ]\n self._set_bonds(\n BondList(array.array_length(), bond_array), atom_id\n )\n\n\n def _get_model_length(self, model_start_i, atom_line_i):\n \"\"\"\n Determine length of models and check that all models\n have equal length.\n \"\"\"\n n_models = len(model_start_i)\n length = None\n for model_i in range(len(model_start_i)):\n model_start = model_start_i[model_i]\n model_stop = model_start_i[model_i+1] if model_i+1 < n_models \\\n else len(self.lines)\n model_length = np.count_nonzero(\n (atom_line_i >= model_start) & (atom_line_i < model_stop)\n )\n if length is None:\n length = model_length\n if model_length != length:\n raise InvalidFileError(\n f\"Model {model_i+1} has {model_length} atoms, \"\n f\"but model 1 has {length} atoms, must be equal\"\n )\n return length\n \n\n def _get_bonds(self, atom_ids):\n conect_lines = [line for line in self.lines\n if line.startswith(\"CONECT\")]\n \n # Mapping from atom ids to indices in an AtomArray\n atom_id_to_index = np.zeros(atom_ids[-1]+1, dtype=int)\n for i, id in enumerate(atom_ids):\n atom_id_to_index[id] = i\n\n bonds = []\n for line in conect_lines:\n center_id = atom_id_to_index[int(line[6 : 11])]\n for i in range(11, 31, 5):\n id_string = line[i : i+5]\n try:\n id = atom_id_to_index[int(id_string)]\n except ValueError:\n # String is empty -> no further IDs\n break\n bonds.append((center_id, id))\n \n # The length of the 'atom_ids' array\n # is equal to the length of the AtomArray\n return BondList(len(atom_ids), np.array(bonds, dtype=np.uint32))\n\n\n def _set_bonds(self, bond_list, 
atom_ids):\n # Bond type is unused since PDB does not support bond orders\n bonds, _ = bond_list.get_all_bonds()\n\n for center_i, bonded_indices in enumerate(bonds):\n n_added = 0\n for bonded_i in bonded_indices:\n if bonded_i == -1:\n # Reached padding values\n break\n if n_added == 0:\n # Add new record\n line = f\"CONECT{atom_ids[center_i]:>5d}\"\n line += f\"{atom_ids[bonded_i]:>5d}\"\n n_added += 1\n if n_added == 4:\n # Only a maximum of 4 bond partners can be put\n # into a single line\n # If there are more, use an extra record\n n_added = 0\n self.lines.append(line)\n if n_added > 0:\n self.lines.append(line)", "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\nimport numpy as np\nimport pytest\nimport biotite.sequence as seq\nimport biotite.sequence.align as align\n\n\[email protected](\"db_entry\", [entry for entry\n in align.SubstitutionMatrix.list_db()\n if entry not in [\"NUC\",\"GONNET\"]])\ndef test_matrices(db_entry):\n \"\"\"\n Test for exceptions when reading matrix files.\n \"\"\"\n alph1 = seq.ProteinSequence.alphabet\n alph2 = seq.ProteinSequence.alphabet\n matrix = align.SubstitutionMatrix(alph1, alph2, db_entry)\n\ndef test_matrix_str():\n \"\"\"\n Test conversion of substitution matrix to string via a small\n constructed test case.\n \"\"\"\n alph1 = seq.Alphabet(\"abc\")\n alph2 = seq.Alphabet(\"def\")\n score_matrix = np.arange(9).reshape((3,3))\n matrix = align.SubstitutionMatrix(alph1, alph2, score_matrix)\n assert str(matrix) == \"\\n\".join(\n [\" d e f\",\n \"a 0 1 2\",\n \"b 3 4 5\",\n \"c 6 7 8\"]\n )", "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\nimport warnings\nfrom tempfile import TemporaryFile\nimport glob\nfrom os.path import join\nimport pytest\nimport numpy as np\nimport biotite.structure as struc\nimport biotite.structure.io.pdbqt as pdbqt\nimport biotite.structure.io.pdbx as pdbx\nfrom ..util import data_dir\n\n\[email protected](\n \"path\", glob.glob(join(data_dir(\"structure\"), \"*.cif\"))\n)\ndef test_array_conversion(path):\n pdbx_file = pdbx.PDBxFile.read(path)\n ref_structure = pdbx.get_structure(\n pdbx_file, model=1, extra_fields=[\"charge\"]\n )\n ref_structure.bonds = struc.connect_via_residue_names(ref_structure)\n\n pdbqt_file = pdbqt.PDBQTFile()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # Ignore warnings about atoms not parametrized \n mask = pdbqt.set_structure(pdbqt_file, ref_structure)\n ref_structure = ref_structure[mask]\n temp = TemporaryFile(\"r+\")\n pdbqt_file.write(temp)\n\n temp.seek(0)\n pdbqt_file = pdbqt.PDBQTFile.read(temp)\n test_structure = pdbqt.get_structure(pdbqt_file, model=1)\n temp.close()\n\n assert np.allclose(test_structure.coord, ref_structure.coord)\n for category in test_structure.get_annotation_categories():\n if category == \"element\":\n # PDBQT uses special atom types, which replace the usual\n # elements\n # -> there cannot be equality of the 'element' annotation\n continue\n try:\n assert np.array_equal(\n test_structure.get_annotation(category),\n ref_structure.get_annotation(category)\n )\n except AssertionError:\n print(f\"Inequality in '{category}' category\")\n raise\n", "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. 
Please see 'LICENSE.rst' for further\n# information.\n\nimport numpy as np\nimport pytest\nimport biotite.sequence as seq\nimport biotite.sequence.align as align\nfrom biotite.application.tantan import TantanApp\nfrom ..util import is_not_installed\n\[email protected]\ndef simple_matrix():\n alph = seq.NucleotideSequence.alphabet_unamb\n return align.SubstitutionMatrix(\n alph, alph, np.array(\n [[ 1, -1, -1, -1],\n [-1, 1, -1, -1],\n [-1, -1, 1, -1],\n [-1, -1, -1, 1]]\n )\n )\n\n\[email protected](\n is_not_installed(\"tantan\"), reason=\"tantan is not installed\"\n)\[email protected](\"use_custom_matrix\", [False, True])\ndef test_nucleotide(simple_matrix, use_custom_matrix):\n \"\"\"\n Test masking a nucleotide sequence based on a known example.\n \"\"\"\n seq_string = \"TGCAAGCTATTAGGCTTAGGTCAGTGCttaagcttaggtcagtgcAACATA\"\n sequence = seq.NucleotideSequence(seq_string)\n\n if use_custom_matrix:\n matrix = simple_matrix\n else:\n matrix = None\n\n test_mask = TantanApp.mask_repeats(sequence, matrix)\n\n ref_mask = [True if char.islower() else False for char in seq_string]\n\n assert len(test_mask) == len(ref_mask)\n assert np.all(test_mask.tolist() == ref_mask)\n\n\[email protected](\n is_not_installed(\"tantan\"), reason=\"tantan is not installed\"\n)\[email protected](\"use_custom_matrix\", [False, True])\ndef test_protein(use_custom_matrix):\n \"\"\"\n Test masking a protein sequence based on a known example.\n \"\"\"\n seq_string = \"MAPKINASekinasekinase\"\n sequence = seq.ProteinSequence(seq_string)\n\n if use_custom_matrix:\n matrix = align.SubstitutionMatrix.std_protein_matrix()\n else:\n matrix = None\n\n test_mask = TantanApp.mask_repeats(sequence, matrix)\n\n ref_mask = [True if char.islower() else False for char in seq_string]\n\n assert len(test_mask) == len(ref_mask)\n assert np.all(test_mask.tolist() == ref_mask)", "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\n\"\"\"\nThis module provides functions for calculation of characteristic values when\ncomparing multiple structures with each other.\n\"\"\"\n\n__name__ = \"biotite.structure\"\n__author__ = \"Patrick Kunzmann\"\n__all__ = [\"rmsd\", \"rmsf\", \"average\"]\n\nimport numpy as np\nfrom .atoms import Atom, AtomArray, AtomArrayStack, coord\nfrom .util import vector_dot\n\n\ndef rmsd(reference, subject):\n r\"\"\"\n Calculate the RMSD between two structures.\n \n The *root-mean-square-deviation* (RMSD) indicates the overall\n deviation of each model of a structure to a reference structure.\n It is defined as:\n \n .. 
math:: RMSD = \\sqrt{ \\frac{1}{n} \\sum\\limits_{i=1}^n (x_i - x_{ref,i})^2}\n \n Parameters\n ----------\n reference : AtomArray or ndarray, dtype=float, shape=(n,3)\n The reference structure.\n Alternatively, coordinates can be provided directly as\n :class:`ndarray`.\n subject : AtomArray or AtomArrayStack or ndarray, dtype=float, shape=(n,3) or shape=(m,n,3)\n Structure(s) to be compared with `reference`.\n Alternatively, coordinates can be provided directly as\n :class:`ndarray`.\n \n Returns\n -------\n rmsd : float or ndarray, dtype=float, shape=(m,)\n RMSD between subject and reference.\n If subject is an :class:`AtomArray` a float is returned.\n If subject is an :class:`AtomArrayStack` a :class:`ndarray`\n containing the RMSD for each model is returned.\n \n See Also\n --------\n rmsf\n\n Notes\n -----\n This function does not superimpose the subject to its reference.\n In most cases :func:`superimpose()` should be called prior to this\n function.\n\n Examples\n --------\n\n Calculate the RMSD of all models to the first model:\n\n >>> superimposed, _ = superimpose(atom_array, atom_array_stack)\n >>> rms = rmsd(atom_array, superimposed)\n >>> print(np.around(rms, decimals=3))\n [0.000 1.928 2.103 2.209 1.806 2.172 2.704 1.360 2.337 1.818 1.879 2.471\n 1.939 2.035 2.167 1.789 1.653 2.348 2.247 2.529 1.583 2.115 2.131 2.050\n 2.512 2.666 2.206 2.397 2.328 1.868 2.316 1.984 2.124 1.761 2.642 1.721\n 2.571 2.579]\n \"\"\"\n return np.sqrt(np.mean(_sq_euclidian(reference, subject), axis=-1))\n\n\ndef rmsf(reference, subject):\n r\"\"\"\n Calculate the RMSF between two structures.\n\n The *root-mean-square-fluctuation* (RMSF) indicates the positional\n deviation of a structure to a reference structure, averaged over all\n models.\n Usually the reference structure, is the average over all models.\n The RMSF is defined as:\n \n .. 
math:: RMSF(i) = \\sqrt{ \\frac{1}{T} \\sum\\limits_{t=1}^T (x_i(t) - x_{ref,i}(t))^2}\n \n Parameters\n ----------\n reference : AtomArray or ndarray, dtype=float, shape=(n,3)\n The reference structure.\n Alternatively, coordinates can be provided directly as\n :class:`ndarray`.\n subject : AtomArrayStack or ndarray, dtype=float, shape=(m,n,3)\n Structures to be compared with `reference`.\n The time *t* is represented by the models in the\n :class:`AtomArrayStack`.\n Alternatively, coordinates can be provided directly as\n :class:`ndarray`.\n \n Returns\n -------\n rmsf : ndarray, dtype=float, shape=(n,)\n RMSF between subject and reference structure.\n Each element gives the RMSF for the atom at the respective\n index.\n \n See Also\n --------\n rmsd\n\n Notes\n -----\n This function does not superimpose the subject to its reference.\n In most cases :func:`superimpose()` should be called prior to this\n function.\n\n Examples\n --------\n\n Calculate the :math:`C_\\alpha` RMSF of all models to the average\n model:\n\n >>> ca = atom_array_stack[:, atom_array_stack.atom_name == \"CA\"]\n >>> ca_average = average(ca)\n >>> ca, _ = superimpose(ca_average, ca)\n >>> print(rmsf(ca_average, ca))\n [1.372 0.360 0.265 0.261 0.288 0.204 0.196 0.306 0.353 0.238 0.266 0.317\n 0.358 0.448 0.586 0.369 0.332 0.396 0.410 0.968]\n \"\"\"\n return np.sqrt(np.mean(_sq_euclidian(reference, subject), axis=-2))\n\n\ndef average(atoms):\n \"\"\"\n Calculate an average structure.\n \n The average structure has the average coordinates\n of the input models.\n \n Parameters\n ----------\n atoms : AtomArrayStack or ndarray, dtype=float, shape=(m,n,3)\n The structure models to be averaged.\n Alternatively, coordinates can be provided directly as\n :class:`ndarray`.\n \n Returns\n -------\n average : AtomArray or ndarray, dtype=float, shape=(n,3)\n Structure with averaged atom coordinates.\n If `atoms` is a :class:`ndarray` and :class:`ndarray` is also\n returned.\n \n See Also\n --------\n rmsd, rmsf\n \n Notes\n -----\n The calculated average structure is not suitable for visualization\n or geometric calculations, since bond lengths and angles will\n deviate from meaningful values.\n This method is rather useful to provide a reference structure for\n calculation of e.g. the RMSD or RMSF. 
\n \"\"\"\n coords = coord(atoms)\n if coords.ndim != 3:\n raise TypeError(\n \"Expected an AtomArrayStack or an ndarray with shape (m,n,3)\"\n )\n mean_coords = np.mean(coords, axis=0)\n if isinstance(atoms, AtomArrayStack):\n mean_array = atoms[0].copy()\n mean_array.coord = mean_coords\n return mean_array\n else:\n return mean_coords\n\n\ndef _sq_euclidian(reference, subject):\n \"\"\"\n Calculate squared euclidian distance between atoms in two\n structures.\n \n Parameters\n ----------\n reference : AtomArray or ndarray, dtype=float, shape=(n,3)\n Reference structure.\n subject : AtomArray or AtomArrayStack or ndarray, dtype=float, shape=(n,3) or shape=(m,n,3)\n Structure(s) whose atoms squared euclidian distance to\n `reference` is measured.\n \n Returns\n -------\n ndarray, dtype=float, shape=(n,) or shape=(m,n)\n Squared euclidian distance between subject and reference.\n If subject is an :class:`AtomArray` a 1-D array is returned.\n If subject is an :class:`AtomArrayStack` a 2-D array is\n returned.\n In this case the first dimension indexes the AtomArray.\n \"\"\"\n reference_coord = coord(reference)\n subject_coord = coord(subject)\n if reference_coord.ndim != 2:\n raise TypeError(\n \"Expected an AtomArray or an ndarray with shape (n,3) as reference\"\n )\n dif = subject_coord - reference_coord\n return vector_dot(dif, dif)" ]
[ [ "numpy.abs", "numpy.isnan", "numpy.rad2deg", "numpy.count_nonzero", "numpy.array", "numpy.zeros", "numpy.where", "numpy.isin" ], [ "numpy.arange" ], [ "numpy.allclose" ], [ "numpy.array" ], [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
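The rmsd/rmsf docstrings in the biotite.structure comparison code above define RMSD as the square root of the mean squared per-atom distance. A minimal NumPy sketch of that formula, using made-up coordinates rather than anything taken from the record:

import numpy as np

reference = np.zeros((4, 3))                        # four atoms at the origin
subject = np.ones((4, 3))                           # each atom displaced by (1, 1, 1)
diff = subject - reference
rmsd = np.sqrt(np.mean(np.sum(diff**2, axis=-1)))   # sqrt(1/n * sum of squared distances)
# every per-atom squared distance is 3.0, so rmsd == sqrt(3) ~= 1.732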
JungHoonJung/2021MD
[ "29bfae7a750217d50654e4973a2be6fb0d968bdf", "29bfae7a750217d50654e4973a2be6fb0d968bdf", "29bfae7a750217d50654e4973a2be6fb0d968bdf", "29bfae7a750217d50654e4973a2be6fb0d968bdf" ]
[ "examples-master/python_examples/mc_sc_module.py", "examples-master/python_examples/md_lj_module.py", "examples-master/python_examples/sample_mean.py", "examples-master/python_examples/md_nvt_lj_le.py" ]
[ "#!/usr/bin/env python3\n# mc_sc_module.py\n\n#------------------------------------------------------------------------------------------------#\n# This software was written in 2016/17 #\n# by Michael P. Allen <[email protected]>/<[email protected]> #\n# and Dominic J. Tildesley <[email protected]> (\"the authors\"), #\n# to accompany the book \"Computer Simulation of Liquids\", second edition, 2017 (\"the text\"), #\n# published by Oxford University Press (\"the publishers\"). #\n# #\n# LICENCE #\n# Creative Commons CC0 Public Domain Dedication. #\n# To the extent possible under law, the authors have dedicated all copyright and related #\n# and neighboring rights to this software to the PUBLIC domain worldwide. #\n# This software is distributed without any warranty. #\n# You should have received a copy of the CC0 Public Domain Dedication along with this software. #\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #\n# #\n# DISCLAIMER #\n# The authors and publishers make no warranties about the software, and disclaim liability #\n# for all uses of the software, to the fullest extent permitted by applicable law. #\n# The authors and publishers do not recommend use of this software for any purpose. #\n# It is made freely available, solely to clarify points made in the text. When using or citing #\n# the software, you should not imply endorsement by the authors or publishers. #\n#------------------------------------------------------------------------------------------------#\n\n\"\"\"Overlap and move routines for MC simulation, hard spherocylinders.\"\"\"\n\nfast = True # Change this to replace NumPy overlap evaluation with slower Python\nlength = 5.0 # Cylinder length L (in units where D=1) used throughout the module\n\ndef introduction():\n \"\"\"Prints out introductory statements at start of run.\"\"\"\n\n import numpy as np\n \n vmol = np.pi * ( 0.25*length + 1/6 ) # Spherocylinder volume\n\n print('Hard spherocylinder potential')\n print(\"{:40}{:15.6f}\".format('Spherocylinder L/D ratio', length))\n print(\"{:40}{:15.6f}\".format('Spherocylinder volume/D**3', vmol))\n print('Diameter, D = 1')\n print('Energy, kT = 1')\n if fast:\n print('Fast NumPy overlap routine')\n else:\n print('Slow Python overlap routine')\n\ndef conclusion():\n \"\"\"Prints out concluding statements at end of run.\"\"\"\n\n print('Program ends')\n\ndef overlap ( box, r, e ):\n \"\"\"Takes in box and coordinate & orientation arrays, and signals any overlap.\"\"\"\n\n # Actual calculation is performed by function overlap_1\n\n n, d = r.shape\n assert d==3, 'Dimension error for r in overlap'\n assert d==e.shape[1], 'Dimension error for e in overlap'\n assert n==e.shape[0], \"{}{:d}{:d}\".format('Dimension error for e in overlap',n,e.shape[0])\n\n for i in range(n-1):\n if overlap_1 ( r[i,:], e[i,:], box, r[i+1:,:], e[i+1:,:] ):\n return True # Immediate return on detection of overlap\n\n return False\n\ndef overlap_1 ( ri, ei, box, r, e ):\n \"\"\"Takes in coordinates and orientations of a molecules and signals any overlap.\n\n Values of box and partner coordinate array are supplied.\n \"\"\"\n\n import numpy as np\n\n # In general, r will be a subset of the complete set of simulation coordinates\n # and none of its rows should be identical to ri\n\n # It is assumed that positions are in units where box = 1\n\n nj, d = r.shape\n assert d==3, 'Dimension error for r in overlap_1'\n assert d==e.shape[1], 'Dimension error for e in overlap_1'\n assert nj==e.shape[0], \"{}{:d}{:d}\".format('Dimension 
error for e in overlap_1',nj,e.shape[0])\n assert ri.size==3, 'Dimension error for ri in overlap_1'\n assert ei.size==3, 'Dimension error for ei in overlap_1'\n\n range = 1.0 + length\n assert range <= box/2.0, \"{}{:15.6f}{:15.6f}\".format('Box too small', box, range)\n range_box_sq = ( range / box ) ** 2 # Squared range in box=1 units\n box_sq = box**2 # Squared box length\n \n if fast:\n rij = ri - r # Get all separation vectors from partners\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2,axis=1) # Squared separations\n rij_sq = rij_sq * box_sq # Now in D=1 units\n rij = rij * box # Now in D=1 units\n rei = np.sum ( rij*ei, axis=1 ) # All dot products\n rej = np.sum ( rij*e, axis=1 ) # All dot products\n eij = np.sum ( ei *e, axis=1 ) # All dot products\n sij_sq = all_dist_sq ( rij_sq, rei, rej, eij ) # Squared distance between line segments\n return np.any(sij_sq<1.0)\n\n # Otherwise use slow method\n for j,rj in enumerate(r):\n rij = ri - rj # Separation vector\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n if rij_sq > range_box_sq: # Check no possibility of overlap\n continue\n rij_sq = rij_sq * box_sq # Now in D=1 units\n rij = rij * box # Now in D=1 units\n rei = np.dot ( rij, ei )\n rej = np.dot ( rij, e[j,:] )\n eij = np.dot ( ei, e[j,:] )\n\n sij_sq = dist_sq ( rij_sq, rei, rej, eij ) # Squared distance between line segments\n if sij_sq<1.0:\n return True # Overlap detected, return immediately\n\n return False\n\ndef n_overlap ( box, r, e ):\n \"\"\"Takes in box and coordinate and orientation arrays, and counts overlaps.\"\"\"\n\n # This routine is used in the calculation of pressure\n # Actual calculation is performed by function n_overlap_1\n\n n, d = r.shape\n assert d==3, 'Dimension error for r in n_overlap'\n assert d==e.shape[1], 'Dimension error for e in n_overlap'\n assert n==e.shape[0], \"{}{:d}{:d}\".format('Dimension error for e in n_overlap',n,e.shape[0])\n\n n_ovr = 0\n for i in range(n-1):\n n_ovr = n_ovr + n_overlap_1 ( r[i,:], e[i,:], box, r[i+1:,:], e[i+1:,:] )\n\n return n_ovr\n\ndef n_overlap_1 ( ri, ei, box, r, e ):\n \"\"\"Takes in coordinates and orientations of a molecule and counts overlaps.\n\n Values of box and partner coordinate array are supplied.\n Fast or slow algorithm selected.\n \"\"\"\n\n import numpy as np\n\n # In general, r will be a subset of the complete set of simulation coordinates\n # and none of its rows should be identical to ri\n\n # It is assumed that positions are in units where box = 1\n\n nj, d = r.shape\n assert d==3, 'Dimension error for r in n_overlap_1'\n assert d==e.shape[1], 'Dimension error for e in n_overlap_1'\n assert nj==e.shape[0], \"{}{:d}{:d}\".format('Dimension error for e in n_overlap_1',nj,e.shape[0])\n assert ri.size==3, 'Dimension error for ri in n_overlap_1'\n assert ei.size==3, 'Dimension error for ei in n_overlap_1'\n\n range = 1.0 + length\n assert range <= box/2.0, \"{}{:15.6f}{:15.6f}\".format('Box too small', box, range)\n range_box_sq = ( range / box ) ** 2 # Squared range in box=1 units\n box_sq = box**2 # Squared box length\n\n if fast:\n rij = ri - r # Get all separation vectors from partners\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2,axis=1) # Squared separations\n rij_sq = rij_sq * box_sq # Now in D=1 units\n rij = rij * box # Now in D=1 units\n rei = np.sum ( rij*ei, axis=1 ) # All dot products\n rej = np.sum ( rij*e, 
axis=1 ) # All dot products\n eij = np.sum ( ei *e, axis=1 ) # All dot products\n sij_sq = all_dist_sq ( rij_sq, rei, rej, eij ) # Squared distance between line segments\n return np.count_nonzero(sij_sq<1.0)\n\n # Otherwise use slow method\n n_ovr = 0\n for j,rj in enumerate(r):\n rij = ri - rj # Separation vector\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n if rij_sq > range_box_sq: # Check no possibility of overlap\n continue\n rij_sq = rij_sq * box_sq # Now in D=1 units\n rij = rij * box # Now in D=1 units\n rei = np.dot ( rij, ei )\n rej = np.dot ( rij, e[j,:] )\n eij = np.dot ( ei, e[j,:] )\n\n sij_sq = dist_sq ( rij_sq, rei, rej, eij ) # Squared distance between line segments\n if sij_sq<1.0:\n n_ovr = n_ovr + 1\n\n return n_ovr\n\ndef dist_sq ( rij_sq, rei, rej, eij ):\n import numpy as np\n\n tol = 1.0e-6\n ell2 = length/2.0 # Half length\n sin_sq = 1.0 - eij**2 # Squared sine of angle between line segments\n\n if sin_sq < tol: \n ci = -rei\n cj = rej\n else:\n ci = ( - rei + eij * rej ) / sin_sq\n cj = ( rej - eij * rei ) / sin_sq\n\n ai = np.fabs ( ci )\n aj = np.fabs ( cj )\n if ai > ell2:\n ci = ell2*np.sign(ci)\n if aj > ell2:\n cj = ell2*np.sign(cj)\n\n if ai > aj:\n cj = rej + ci * eij\n else:\n ci = -rei + cj * eij\n\n ai = np.fabs ( ci )\n aj = np.fabs ( cj )\n if ai > ell2:\n ci = ell2*np.sign(ci)\n if aj > ell2:\n cj = ell2*np.sign(cj)\n\n di = 2.0 * rei + ci - cj * eij\n dj = -2.0 * rej + cj - ci * eij\n\n return rij_sq + ci * di + cj * dj # Squared distance between line segments\n \ndef all_dist_sq ( rij_sq, rei, rej, eij ):\n import numpy as np\n \n tol = 1.0e-6\n ell2 = length/2.0 # Half length\n sin_sq = 1.0 - eij**2 # Squared sines of angles between line segments\n mask = sin_sq>tol\n ci = np.where ( mask, (-rei+eij*rej)/sin_sq, -rei )\n cj = np.where ( mask, ( rej-eij*rei)/sin_sq, rej )\n\n ai = np.fabs ( ci )\n aj = np.fabs ( cj )\n ci = np.where ( ai>ell2, ell2*np.sign(ci), ci )\n cj = np.where ( aj>ell2, ell2*np.sign(cj), cj )\n mask = ai>aj\n cj = np.where ( mask, rej+ci*eij, cj )\n mask = np.logical_not(mask)\n ci = np.where ( mask, -rei+cj*eij, ci )\n\n ai = np.fabs ( ci )\n aj = np.fabs ( cj )\n ci = np.where ( ai>ell2, ell2*np.sign(ci), ci )\n cj = np.where ( aj>ell2, ell2*np.sign(cj), cj )\n\n di = 2.0 * rei + ci - cj * eij\n dj = -2.0 * rej + cj - ci * eij\n\n return rij_sq + ci * di + cj * dj # Squared distances between line segments\n", "#!/usr/bin/env python3\n# md_lj_module.py\n\n#------------------------------------------------------------------------------------------------#\n# This software was written in 2016/17 #\n# by Michael P. Allen <[email protected]>/<[email protected]> #\n# and Dominic J. Tildesley <[email protected]> (\"the authors\"), #\n# to accompany the book \"Computer Simulation of Liquids\", second edition, 2017 (\"the text\"), #\n# published by Oxford University Press (\"the publishers\"). #\n# #\n# LICENCE #\n# Creative Commons CC0 Public Domain Dedication. #\n# To the extent possible under law, the authors have dedicated all copyright and related #\n# and neighboring rights to this software to the PUBLIC domain worldwide. #\n# This software is distributed without any warranty. #\n# You should have received a copy of the CC0 Public Domain Dedication along with this software. #\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. 
#\n# #\n# DISCLAIMER #\n# The authors and publishers make no warranties about the software, and disclaim liability #\n# for all uses of the software, to the fullest extent permitted by applicable law. #\n# The authors and publishers do not recommend use of this software for any purpose. #\n# It is made freely available, solely to clarify points made in the text. When using or citing #\n# the software, you should not imply endorsement by the authors or publishers. #\n#------------------------------------------------------------------------------------------------#\n\n\"\"\"Force routine for MD simulation, Lennard-Jones atoms.\"\"\"\n\nfast = True # Change this to replace NumPy force evaluation with slower Python\n\nclass PotentialType:\n \"\"\"A composite variable for interactions.\"\"\"\n\n def __init__(self, cut, pot, vir, lap, ovr):\n self.cut = cut # the potential energy cut (but not shifted) at r_cut\n self.pot = pot # the potential energy cut-and-shifted at r_cut\n self.vir = vir # the virial\n self.lap = lap # the Laplacian\n self.ovr = ovr # a flag indicating overlap (i.e. pot too high to use)\n\n def __add__(self, other):\n cut = self.cut + other.cut\n pot = self.pot + other.pot\n vir = self.vir + other.vir\n lap = self.lap + other.lap\n ovr = self.ovr or other.ovr\n\n return PotentialType(cut,pot,vir,lap,ovr)\n\ndef introduction():\n \"\"\"Prints out introductory statements at start of run.\"\"\"\n \n print('Lennard-Jones potential')\n print('Cut-and-shifted version for dynamics')\n print('Cut (but not shifted) version also calculated')\n print('Diameter, sigma = 1')\n print('Well depth, epsilon = 1')\n if fast:\n print('Fast NumPy force routine')\n else:\n print('Slow Python force routine')\n\ndef conclusion():\n \"\"\"Prints out concluding statements at end of run.\"\"\"\n\n print('Program ends')\n\ndef force ( box, r_cut, r ):\n \"\"\"Takes in box, cutoff range, and coordinate array, and calculates forces and potentials etc.\"\"\"\n\n import numpy as np\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n\n n, d = r.shape\n assert d==3, 'Dimension error in force'\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Calculate potential at cutoff\n sr2 = 1.0 / r_cut**2 # in sigma=1 units\n sr6 = sr2 ** 3\n sr12 = sr6 **2\n pot_cut = sr12 - sr6 # Without numerical factor 4\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( cut=0.0, pot=0.0, vir=0.0, lap=0.0, ovr=False )\n\n if fast:\n for i in range(n-1):\n rij = r[i,:]-r[i+1:,:] # Separation vectors for j>i\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2,axis=1) # Squared separations for j>1\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n cut = sr12 - sr6 # LJ pair potential (cut but not shifted)\n vir = cut + sr12 # LJ pair virial\n pot = np.where ( in_range, cut-pot_cut, 0.0 ) # LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ pair Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,np.newaxis] # LJ pair forces\n total = total + PotentialType ( cut=np.sum(cut), pot=np.sum(pot),\n 
vir=np.sum(vir), lap=np.sum(lap), ovr=np.any(ovr) )\n f[i,:] = f[i,:] + np.sum(fij,axis=0)\n f[i+1:,:] = f[i+1:,:] - fij\n\n else:\n for i in range(n-1): # Outer loop\n for j in range(i+1,n): # Inner loop\n rij = r[i,:]-r[j,:] # Separation vector\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n cut = sr12 - sr6 # LJ pair potential (cut but not shifted)\n vir = cut + sr12 # LJ pair virial\n pot = cut - pot_cut # LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ pair Laplacian\n fij = rij * vir * sr2 # LJ pair forces\n\n total = total + PotentialType ( cut=cut, pot=pot, vir=vir, lap=lap, ovr=ovr )\n f[i,:] = f[i,:] + fij\n f[j,:] = f[j,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.cut = total.cut * 4.0 # 4*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f\n\ndef hessian ( box, r_cut, r, f ):\n \"\"\"Calculates Hessian function (for 1/N correction to config temp).\"\"\"\n\n import numpy as np\n\n # This routine is only needed in a constant-energy ensemble\n # It is assumed that positions are in units where box = 1\n # but the result is given in units where sigma = 1 and epsilon = 1\n # It is assumed that forces have already been calculated in array f\n\n n, d = r.shape\n assert d==3, 'Dimension error in hessian'\n assert np.all ( r.shape==f.shape ), 'Dimension mismatch in hessian'\n\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n hes = 0.0\n\n if fast:\n for i in range(n-1):\n rij = r[i,:] - r[i+1:,:] # Separation vectors\n rij = rij - np.rint ( rij ) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2,axis=1) # Squared separations for j>1\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n fij = f[i,:] - f[i+1:,:] # Differences in forces\n\n ff = np.sum(fij*fij,axis=1)\n rf = np.sum(rij*fij,axis=1)\n sr2 = np.where ( in_range, 1.0 / rij_sq, 0.0 ) # Only where in range\n sr6 = sr2 ** 3\n sr8 = sr6 * sr2\n sr10 = sr8 * sr2\n v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8\n v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10\n hes = hes + np.sum(v1 * ff) + np.sum(v2 * rf**2)\n\n else:\n for i in range(n-1):\n for j in range(i+1,n):\n rij = r[i,:] - r[j,:] # Separation vector\n rij = rij - np.rint ( rij ) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum ( rij**2 ) # Squared separation\n\n if rij_sq < r_cut_box_sq:\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n fij = f[i,:] - f[j,:] # Difference in forces\n\n ff = np.dot(fij,fij)\n rf = np.dot(rij,fij)\n sr2 = 1.0 / rij_sq\n sr6 = sr2 ** 3\n sr8 = sr6 * sr2\n sr10 = sr8 * sr2\n v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8\n v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10\n hes = hes + v1 * ff + v2 * rf**2\n\n return hes\n\n", "#!/usr/bin/env python3\n# sample_mean.py\n\n#------------------------------------------------------------------------------------------------#\n# This software was written in 
2016/17 #\n# by Michael P. Allen <[email protected]>/<[email protected]> #\n# and Dominic J. Tildesley <[email protected]> (\"the authors\"), #\n# to accompany the book \"Computer Simulation of Liquids\", second edition, 2017 (\"the text\"), #\n# published by Oxford University Press (\"the publishers\"). #\n# #\n# LICENCE #\n# Creative Commons CC0 Public Domain Dedication. #\n# To the extent possible under law, the authors have dedicated all copyright and related #\n# and neighboring rights to this software to the PUBLIC domain worldwide. #\n# This software is distributed without any warranty. #\n# You should have received a copy of the CC0 Public Domain Dedication along with this software. #\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #\n# #\n# DISCLAIMER #\n# The authors and publishers make no warranties about the software, and disclaim liability #\n# for all uses of the software, to the fullest extent permitted by applicable law. #\n# The authors and publishers do not recommend use of this software for any purpose. #\n# It is made freely available, solely to clarify points made in the text. When using or citing #\n# the software, you should not imply endorsement by the authors or publishers. #\n#------------------------------------------------------------------------------------------------#\n\n\"\"\"sample-mean program to illustrate Monte Carlo evaluation of an integral.\"\"\"\n\n# No parameters need be supplied by the user. The exact value of the integral is 5/3.\n# For details, see Chapter 4 of the text.\n\nimport numpy as np\n\nprint('sample_mean')\nnp.random.seed()\n\nr_0 = np.array([1.0,2.0],dtype=np.float_)\na_0 = np.prod ( r_0 )\n\nf = 0.0\ntau_max = 1000000\n\nfor tau in range(tau_max):\n zeta = np.random.rand(2) # uniform in range (0,1)\n r = zeta * r_0 # uniform in xy rectangle\n if r[1] < ( 2.0 - 2.0*r[0] ) :\n f += (1.0+r[1]) # value of z in xy triangle\n\nv = a_0 * f / tau_max\nprint ( \"{}{:10.5f}\".format('Estimate =', v) )\n\n", "#!/usr/bin/env python3\n# md_nvt_lj_le.py\n\n#------------------------------------------------------------------------------------------------#\n# This software was written in 2016/17 #\n# by Michael P. Allen <[email protected]>/<[email protected]> #\n# and Dominic J. Tildesley <[email protected]> (\"the authors\"), #\n# to accompany the book \"Computer Simulation of Liquids\", second edition, 2017 (\"the text\"), #\n# published by Oxford University Press (\"the publishers\"). #\n# #\n# LICENCE #\n# Creative Commons CC0 Public Domain Dedication. #\n# To the extent possible under law, the authors have dedicated all copyright and related #\n# and neighboring rights to this software to the PUBLIC domain worldwide. #\n# This software is distributed without any warranty. #\n# You should have received a copy of the CC0 Public Domain Dedication along with this software. #\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #\n# #\n# DISCLAIMER #\n# The authors and publishers make no warranties about the software, and disclaim liability #\n# for all uses of the software, to the fullest extent permitted by applicable law. #\n# The authors and publishers do not recommend use of this software for any purpose. #\n# It is made freely available, solely to clarify points made in the text. When using or citing #\n# the software, you should not imply endorsement by the authors or publishers. 
#\n#------------------------------------------------------------------------------------------------#\n\n\"\"\"Molecular dynamics, NVT ensemble, Lees-Edwards boundaries.\"\"\"\n\ndef calc_variables ( ):\n \"\"\"Calculates all variables of interest.\n \n They are collected and returned as a list, for use in the main program.\n \"\"\"\n\n import numpy as np\n import math\n from averages_module import msd, VariableType\n\n # Preliminary calculations (n,r,v,f,total are taken from the calling program)\n vol = box**3 # Volume\n rho = n / vol # Density\n kin = 0.5*np.sum(v**2) # Kinetic energy\n fsq = np.sum ( f**2 ) # Total squared force\n tmp = 2.0 * kin / (3*n-3) # Remove three degrees of freedom for momentum conservation\n kyx = np.sum(v[:,0]*v[:,1]) / vol # Kinetic part of off-diagonal pressure tensor\n eng = kin + total.pot # Total energy\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Internal energy per atom\n # Total KE plus total PE divided by N\n e_s = VariableType ( nam = 'E/N', val = eng/n )\n\n # Pressure\n # Ideal gas contribution plus total virial divided by V\n p_s = VariableType ( nam = 'P', val = rho*tmp + total.vir/vol )\n\n # Kinetic temperature\n t_k = VariableType ( nam = 'T kinetic', val = tmp )\n\n # Configurational temperature\n # Total squared force divided by total Laplacian\n t_c = VariableType ( nam = 'T config', val = fsq/total.lap )\n\n # Shear viscosity\n if np.fabs(strain_rate)<tol: # Guard against simulation with zero strain rate\n eta = VariableType ( nam = 'Shear viscosity', val = 0.0 )\n else:\n eta = VariableType ( nam = 'Shear viscosity', val = -(kyx+total.pyx/vol) / strain_rate )\n\n # MSD of conserved kinetic energy\n conserved_msd = VariableType ( nam = 'Conserved MSD', val = kin/n,\n method = msd, e_format = True, instant = False )\n\n # Collect together into a list for averaging\n return [ e_s, p_s, t_k, t_c, eta, conserved_msd ]\n\ndef a_propagator ( t ):\n \"\"\"A propagator. t is the time over which to propagate (typically dt/2).\"\"\"\n\n global r, strain\n import numpy as np\n\n x = t * strain_rate # Change in strain (dimensionless)\n\n r[:,0] = r[:,0] + x * r[:,1] # Extra strain term\n r = r + t * v / box # Drift half-step (positions in box=1 units)\n strain = strain + x # Advance strain and hence boundaries\n strain = strain - np.rint ( strain ) # Keep strain within (-0.5,0.5)\n\n r[:,0] = r[:,0] - np.rint ( r[:,1] ) * strain # Extra PBC correction (box=1 units)\n r = r - np.rint ( r ) # Periodic boundaries (box=1 units)\n\ndef b1_propagator ( t ): \n \"\"\"B1 propagator. t is the time over which to propagate (typically dt/2).\"\"\"\n\n global v\n import numpy as np\n\n x = t * strain_rate # Change in strain (dimensionless)\n\n c1 = x * np.sum ( v[:,0]*v[:,1] ) / np.sum ( v**2 )\n c2 = ( x**2 ) * np.sum ( v[:,1]**2 ) / np.sum ( v**2 )\n g = 1.0 / np.sqrt ( 1.0 - 2.0*c1 + c2 )\n\n v[:,0] = v[:,0] - x*v[:,1]\n v = g * v\n\ndef b2_propagator ( t ):\n \"\"\"B2 propagator. 
t is the time over which to propagate (typically dt).\"\"\"\n\n global v\n import numpy as np\n\n alpha = np.sum ( f*v ) / np.sum ( v**2 )\n beta = np.sqrt ( np.sum ( f**2 ) / np.sum ( v**2 ) )\n h = ( alpha + beta ) / ( alpha - beta )\n e = np.exp ( -beta * t )\n\n dt_factor = ( 1 + h - e - h / e ) / ( ( 1 - h ) * beta )\n prefactor = ( 1 - h ) / ( e - h / e )\n\n v = prefactor * ( v + dt_factor * f )\n \n# Takes in a configuration of atoms (positions, velocities)\n# Cubic periodic boundary conditions, with Lees-Edwards shear\n# Conducts molecular dynamics, SLLOD algorithm, with isokinetic thermostat\n# Refs: Pan et al J Chem Phys 122 094114 (2005)\n\n# Reads several variables and options from standard input using JSON format\n# Leave input empty \"{}\" to accept supplied defaults\n\n# Positions r are divided by box length after reading in and we assume mass=1 throughout\n# However, input configuration, output configuration, most calculations, and all results \n# are given in simulation units defined by the model\n# For example, for Lennard-Jones, sigma = 1, epsilon = 1\n\n# Despite the program name, there is nothing here specific to Lennard-Jones\n# The model is defined in md_lj_le_module\n\nimport json\nimport sys\nimport numpy as np\nimport math\nfrom config_io_module import read_cnf_atoms, write_cnf_atoms\nfrom averages_module import run_begin, run_end, blk_begin, blk_end, blk_add\nfrom md_lj_le_module import introduction, conclusion, force, PotentialType\n\ncnf_prefix = 'cnf.'\ninp_tag = 'inp'\nout_tag = 'out'\nsav_tag = 'sav'\ntol = 1.0e-6\n\nprint('md_nvt_lj_le')\nprint('Molecular dynamics, constant-NVT ensemble, Lees-Edwards')\nprint('Particle mass=1 throughout')\n\n# Read parameters in JSON format\ntry:\n nml = json.load(sys.stdin)\nexcept json.JSONDecodeError:\n print('Exiting on Invalid JSON format')\n sys.exit()\n\n# Set default values, check keys and typecheck values\ndefaults = {\"nblock\":10, \"nstep\":10000, \"dt\":0.005, \"strain_rate\":0.04}\nfor key, val in nml.items():\n if key in defaults:\n assert type(val) == type(defaults[key]), key+\" has the wrong type\"\n else:\n print('Warning', key, 'not in ',list(defaults.keys()))\n\n# Set parameters to input values or defaults\nnblock = nml[\"nblock\"] if \"nblock\" in nml else defaults[\"nblock\"]\nnstep = nml[\"nstep\"] if \"nstep\" in nml else defaults[\"nstep\"]\ndt = nml[\"dt\"] if \"dt\" in nml else defaults[\"dt\"]\nstrain_rate = nml[\"strain_rate\"] if \"strain_rate\" in nml else defaults[\"strain_rate\"]\n\nintroduction()\n\n# Write out parameters\nprint( \"{:40}{:15d} \".format('Number of blocks', nblock) )\nprint( \"{:40}{:15d} \".format('Number of steps per block', nstep) )\nprint( \"{:40}{:15.6f}\".format('Time step', dt) )\nprint( \"{:40}{:15.6f}\".format('Strain rate', strain_rate) )\n\n# Insist that strain be zero (i.e. 
an integer) at end of each block\nstrain = strain_rate * dt * nstep\nstrain = strain - np.rint ( strain )\nassert np.fabs(strain) < tol, 'Strain must be zero at end of block'\n\n# Read in initial configuration\nn, box, r, v = read_cnf_atoms ( cnf_prefix+inp_tag, with_v=True)\nprint( \"{:40}{:15d} \".format('Number of particles', n) )\nprint( \"{:40}{:15.6f}\".format('Box length', box) )\nprint( \"{:40}{:15.6f}\".format('Density', n/box**3) )\nstrain = 0.0 # Assume for simplicity that this is true\nr = r / box # Convert positions to box units\nr[:,0] = r[:,0] - np.rint ( r[:,1] ) * strain # Extra correction (box=1 units)\nr = r - np.rint ( r ) # Periodic boundaries\nvcm = np.sum ( v, axis=0 ) / n # Centre-of mass velocity\nv = v - vcm # Set COM velocity to zero\n\n# Initial forces, potential, etc plus overlap check\ntotal, f = force ( box, strain, r )\nassert not total.ovr, 'Overlap in initial configuration'\n\n# Initialize arrays for averaging and write column headings\nrun_begin ( calc_variables() )\n\nfor blk in range(1,nblock+1): # Loop over blocks\n\n blk_begin()\n\n for stp in range(nstep): # Loop over steps\n\n # Isokinetic SLLOD algorithm (Pan et al)\n\n a_propagator ( dt/2 )\n b1_propagator ( dt/2 )\n\n total, f = force ( box, strain, r ) # Force evaluation\n assert not total.ovr, 'Overlap in configuration'\n\n b2_propagator ( dt )\n b1_propagator ( dt/2 )\n a_propagator ( dt/2 )\n\n blk_add ( calc_variables() )\n\n blk_end(blk) # Output block averages\n sav_tag = str(blk).zfill(3) if blk<1000 else 'sav' # Number configuration by block\n write_cnf_atoms ( cnf_prefix+sav_tag, n, box, r*box, v ) # Save configuration\n\nrun_end ( calc_variables() )\n\ntotal, f = force ( box, strain, r ) # Force evaluation\nassert not total.ovr, 'Overlap in final configuration'\n\nwrite_cnf_atoms ( cnf_prefix+out_tag, n, box, r*box, v ) # Save configuration\nconclusion()\n" ]
[ [ "numpy.logical_not", "numpy.dot", "numpy.rint", "numpy.sign", "numpy.any", "numpy.count_nonzero", "numpy.where", "numpy.sum", "numpy.fabs" ], [ "numpy.dot", "numpy.rint", "numpy.all", "numpy.zeros_like", "numpy.any", "numpy.where", "numpy.sum" ], [ "numpy.prod", "numpy.array", "numpy.random.rand", "numpy.random.seed" ], [ "numpy.sqrt", "numpy.rint", "numpy.exp", "numpy.sum", "numpy.fabs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
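sample_mean.py above estimates the integral of z = 1 + y over the triangle bounded by the axes and the line y = 2 - 2x by sampling the enclosing 1 x 2 rectangle; the exact value is 5/3. A vectorised NumPy sketch of the same estimator (the seed and sample count are arbitrary choices, not taken from the script):

import numpy as np

rng = np.random.default_rng(0)          # seeded only so the sketch is reproducible
r_0 = np.array([1.0, 2.0])
a_0 = np.prod(r_0)                      # area of the bounding rectangle
zeta = rng.random((1_000_000, 2)) * r_0
inside = zeta[:, 1] < 2.0 - 2.0 * zeta[:, 0]
v = a_0 * np.sum(1.0 + zeta[inside, 1]) / zeta.shape[0]
# v fluctuates around the exact value 5/3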
syedhamzazaidi/CompGCN
[ "76de7466b18ee39416fd9fc0d45996f0caa60186" ]
[ "helper.py" ]
[ "import numpy as np, sys, os, random, pdb, json, uuid, time, argparse\nfrom pprint import pprint\nimport logging, logging.config\nfrom collections import defaultdict as ddict\nfrom ordered_set import OrderedSet\n\n# PyTorch related imports\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.init import xavier_normal_\nfrom torch.utils.data import DataLoader\nfrom torch.nn import Parameter\nfrom torch_scatter import scatter_add\n\nnp.set_printoptions(precision=4)\n\ndef set_gpu(gpus):\n\t\"\"\"\n\tSets the GPU to be used for the run\n\n\tParameters\n\t----------\n\tgpus: List of GPUs to be used for the run\n\t\n\tReturns\n\t-------\n\t\t\n\t\"\"\"\n\tos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus\n\ndef get_logger(name, log_dir, config_dir):\n\t\"\"\"\n\tCreates a logger object\n\n\tParameters\n\t----------\n\tname: Name of the logger file\n\tlog_dir: Directory where logger file needs to be stored\n\tconfig_dir: Directory from where log_config.json needs to be read\n\t\n\tReturns\n\t-------\n\tA logger object which writes to both file and stdout\n\t\t\n\t\"\"\"\n\tconfig_dict = json.load(open( config_dir + 'log_config.json'))\n\tconfig_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n\tlogging.config.dictConfig(config_dict)\n\tlogger = logging.getLogger(name)\n\n\tstd_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n\tconsoleHandler = logging.StreamHandler(sys.stdout)\n\tconsoleHandler.setFormatter(logging.Formatter(std_out_format))\n\tlogger.addHandler(consoleHandler)\n\n\treturn logger\n\ndef get_combined_results(left_results, right_results):\n\tresults = {}\n\tcount = float(left_results['count'])\n\n\tresults['left_mr']\t= round(left_results ['mr'] /count, 5)\n\tresults['left_mrr']\t= round(left_results ['mrr']/count, 5)\n\tresults['right_mr']\t= round(right_results['mr'] /count, 5)\n\tresults['right_mrr']\t= round(right_results['mrr']/count, 5)\n\tresults['mr']\t\t= round((left_results['mr'] + right_results['mr']) /(2*count), 5)\n\tresults['mrr']\t\t= round((left_results['mrr'] + right_results['mrr'])/(2*count), 5)\n\n\tfor k in range(10):\n\t\tresults['left_hits@{}'.format(k+1)]\t= round(left_results ['hits@{}'.format(k+1)]/count, 5)\n\t\tresults['right_hits@{}'.format(k+1)]\t= round(right_results['hits@{}'.format(k+1)]/count, 5)\n\t\tresults['hits@{}'.format(k+1)]\t\t= round((left_results['hits@{}'.format(k+1)] + right_results['hits@{}'.format(k+1)])/(2*count), 5)\n\treturn results\n\ndef get_param(shape):\n\tparam = Parameter(torch.Tensor(*shape)); \t\n\txavier_normal_(param.data)\n\treturn param\n\ndef com_mult(a, b):\n\tr1, i1 = a[..., 0], a[..., 1]\n\tr2, i2 = b[..., 0], b[..., 1]\n\treturn torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim = -1)\n\ndef conj(a):\n\ta[..., 1] = -a[..., 1]\n\treturn a\n\ndef cconv(a, b):\n\treturn torch.irfft(com_mult(torch.rfft(a, 1), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\ndef ccorr(a, b):\n\treturn torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))" ]
[ [ "torch.Tensor", "numpy.set_printoptions", "torch.nn.init.xavier_normal_", "torch.rfft", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
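The ccorr helper in CompGCN's helper.py computes circular correlation via the legacy torch.rfft/torch.irfft interface, which newer PyTorch releases no longer provide. The operation itself is IFFT(conj(FFT(a)) * FFT(b)); a NumPy sketch checking this against the direct definition (values are random and purely illustrative):

import numpy as np

rng = np.random.default_rng(1)
a, b = rng.random(4), rng.random(4)
via_fft = np.fft.irfft(np.conj(np.fft.rfft(a)) * np.fft.rfft(b), n=a.shape[-1])
direct = np.array([np.sum(a * np.roll(b, -i)) for i in range(a.shape[-1])])
assert np.allclose(via_fft, direct)     # ccorr(a, b)[i] == sum_k a[k] * b[(k + i) mod n]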
BendeguzToth/torchagents
[ "172a19e740406c650129da2a489943b13999b710" ]
[ "torchagents/agent.py" ]
[ "\"\"\"\nThis file implements the base agent class.\n\"\"\"\n\n# Standard libraries\nfrom abc import ABC, abstractmethod\nfrom typing import TypeVar\n\n# Third-party dependencies\nimport torch\n\n# Type definitions\nAction = TypeVar(\"Action\")\n# Need this for type hints, true base\n# class is hidden.\nScheduler = TypeVar(\"Scheduler\")\n\n\nclass Agent(ABC):\n \"\"\"\n Base agent class. All other agents inherit\n from this class.\n \"\"\"\n def __init__(self, tensor_specs: dict):\n \"\"\"\n Ctor.\n :param tensor_specs: A dictionary with optional\n pytorch tensor parameters. Keys can be for example 'dtype' and\n 'device'.\n \"\"\"\n self.tensor_specs = tensor_specs\n\n def tensor(self, *args) -> torch.Tensor:\n \"\"\"\n Utility function for creating a torch tensor. It will always\n append the arguments from tensor_specs to the parameters, so\n it is not needed to specify them every time you create a new\n tensor.\n :param args: Arguments to the tensor.\n :return: New pytorch tensor.\n \"\"\"\n return torch.tensor(*args, **self.tensor_specs)\n\n @abstractmethod\n def train_step(self, *args) -> Action:\n \"\"\"\n Step function called when training. Returns an action and\n (potentially) updates its parameters.\n :return: An action to take.\n \"\"\"\n\n @abstractmethod\n def __call__(self, *args) -> Action:\n \"\"\"\n Step function in inference mode. Returns an action but does not\n train.\n :return: An action to take.\n \"\"\"\n\n @abstractmethod\n def save(self, path: str) -> None:\n \"\"\"\n Saves the current parameters of the agent\n to the specified file.\n :param path: Path to output file.\n \"\"\"\n\n @abstractmethod\n def load(self, path: str) -> None:\n \"\"\"\n Loads agent from a saved checkpoint.\n :param path: Path to the checkpoint file.\n \"\"\"\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
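Agent in torchagents/agent.py is abstract, so it is only usable through a subclass implementing train_step, __call__, save and load. A minimal hypothetical subclass is sketched below; RandomAgent and its behaviour are invented for illustration and are not part of the torchagents package, and the import path is assumed from the record's file_path.

import torch
from torchagents.agent import Agent     # assumed importable; path taken from the record above

class RandomAgent(Agent):
    """Illustrative agent that ignores observations and acts uniformly at random."""
    def __init__(self, n_actions: int, tensor_specs: dict):
        super().__init__(tensor_specs)
        self.n_actions = n_actions

    def train_step(self, observation) -> int:
        # No learning happens for a random policy; just sample an action.
        return int(torch.randint(self.n_actions, (1,)).item())

    def __call__(self, observation) -> int:
        return int(torch.randint(self.n_actions, (1,)).item())

    def save(self, path: str) -> None:
        pass    # nothing to persist for a parameter-free policy

    def load(self, path: str) -> None:
        pass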
AlastairWiseman/ODE
[ "3fdfc18e8376dab8042c300db7bda91ad27c7c78" ]
[ "LinearStabilityDomains.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 11 13:59:10 2017\r\n\r\n@author: Alastair Wiseman\r\n\"\"\"\r\nimport matplotlib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nimport matplotlib.lines as mlines\r\n\r\n#set up Latex labeling\r\nmatplotlib.rcParams['text.usetex'] = True\r\nmatplotlib.rcParams['text.latex.unicode'] = True\r\n\r\ndef RKStabilityFunction (z, RKMatrix, RKWeights):\r\n #z, a complex number\r\n #RKMatrix and RKweights, a Runge-Kutta matrix and its correspoding\r\n #weights\r\n\r\n return (np.abs(1 + z * np.dot(RKWeights, np.dot(np.linalg.inv(\r\n np.identity(len(RKWeights)) - z * RKMatrix), np.ones\r\n ((len(RKWeights),1))))))\r\n\r\ndef RKLSDPlotter (RKMatrix, RKWeights, RStart = -5, REnd = 5, ImStart = -5,\r\n ImEnd = 5, meshDivisions = 100, legend = True):\r\n #RKMatrix and RKweights, a Runge-Kutta matrix and its correspoding\r\n #weights\r\n #RStart, REnd the bounds on the real components of points plotted\r\n #ImStart, ImEnd the bounds on the real components of points plotted\r\n #meshDivisions, the number of subdivisions of the real/imaginary axes\r\n #legend, set False if you don't want a key\r\n\r\n #setup grid for function evaluations\r\n A = np.linspace(RStart, REnd, meshDivisions)\r\n B = 1j * np.linspace(ImStart, ImEnd, meshDivisions)\r\n p, q = np.meshgrid(A,B)\r\n C = p + q\r\n \r\n #evaluate the Runge-Kutta stability function on the grid\r\n for i in xrange(meshDivisions):\r\n for j in xrange(meshDivisions):\r\n C[i][j] = RKStabilityFunction(C[i][j], RKMatrix, RKWeights)\r\n \r\n #Initialize a Figure\r\n fig = plt.figure()\r\n \r\n #Add Axes to Figure\r\n ax = fig.add_subplot(111)\r\n \r\n #plot the boundary and region of linear stability\r\n ax.contour(p,q*1j,C, [1], colors = 'C0')\r\n ax.contourf(p,q*1j,C, [0,1], alpha = 0.1, colors = 'C0')\r\n \r\n #setup legend\r\n LSD = mpatches.Rectangle((0, 0), 1, 1, fc=\"C0\",alpha=0.1)\r\n LSDB = mlines.Line2D([], [], color='C0')\r\n handles = [LSD, LSDB]\r\n labels = ['LSD', 'Boundary']\r\n if legend == True:\r\n ax.legend(handles, labels)\r\n \r\n #setup plot window\r\n ax = plt.gca()\r\n # making the top and right spine invisible:\r\n ax.spines['top'].set_color('none')\r\n ax.spines['right'].set_color('none')\r\n # moving bottom spine up to y=0 position:\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.spines['bottom'].set_position(('data',0))\r\n # moving left spine to the right to position x == 0:\r\n ax.yaxis.set_ticks_position('left')\r\n ax.spines['left'].set_position(('data',0))\r\n ax.grid(b = 'on')\r\n plt.axes().set_aspect('equal', 'datalim')\r\n plt.show()\r\n return\r\n\r\ndef LMMStabilityFunction (w, YCoefficients, FCoefficients):\r\n #w, a complex number\r\n #YCoefficients, the \"alpha\" coefficents, a_0, ... ,a_k\r\n #FCoefficients, the \"beta\" coefficients, b_0, ... b_k\r\n\r\n #setup numerator/denominator\r\n\r\n rho = 0.0\r\n sigma = 0.0\r\n\r\n #calculate numerator and denominator for boundary locus method\r\n\r\n for i in xrange(len(YCoefficients)):\r\n rho += (w ** i) * YCoefficients[i]\r\n sigma += (w ** i) * FCoefficients[i]\r\n\r\n return (rho / sigma)\r\n \r\ndef SchurCriterion (z, YCoefficients, FCoefficients):\r\n #z, a complex number\r\n #YCoefficients, the \"alpha\" coefficents, a_0, ... ,a_k\r\n #FCoefficients, the \"beta\" coefficients, b_0, ... 
b_k\r\n \r\n poly = YCoefficients - z * FCoefficients\r\n \r\n #reduce polynomial to the order 1 case\r\n while len(poly) > 2:\r\n #check coefficient condition\r\n if (np.abs(poly[-1]) > np.abs(poly[0])):\r\n #get conjugate reciprical polynomial\r\n polyConj = np.conjugate(poly)[:: -1]\r\n #setup blank polynomial with order one less\r\n polyTemp = 1j * np.zeros(len(poly) - 1)\r\n #evaluate the next polynomial in the sequence\r\n for i in xrange(len(polyTemp)):\r\n polyTemp[i] = polyConj[0] * poly[1 + i] - poly[0] * polyConj[1 + i]\r\n \r\n poly = polyTemp\r\n else:\r\n return False\r\n \r\n #check if roots of the order 1 polynomial are in the unit circle \r\n if np.abs(- poly[0] / poly[1]) < 1:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef LMMLSDPlotter (YCoefficients, FCoefficients, steps = 100):\r\n #YCoefficients, the \"alpha\" coefficents, a_0, ... ,a_k\r\n #FCoefficients, the \"beta\" coefficients, b_0, ... b_k\r\n\r\n #setup values on the unit circle centered at 0 + 0i\r\n A = np.linspace(- np.pi, np.pi, steps)\r\n A = np.exp(A * 1j)\r\n \r\n #setup vector to hold function values\r\n B = 1j * np.zeros(steps)\r\n \r\n #evaluate the boundary locus (on the unit circle)\r\n for i in xrange(steps):\r\n B[i] = LMMStabilityFunction(A[i], YCoefficients, FCoefficients)\r\n\r\n #plot the boundary locus\r\n plt.plot(B.real,B.imag, '-', color = 'C0')\r\n return\r\n\r\n\r\n\r\ndef LMMLSDPlotterSchur (YCoefficients, FCoefficients, RStart = -10, REnd = 10,\r\n ImStart = -10, ImEnd = 10, meshDivisions = 100):\r\n #YCoefficients, the \"alpha\" coefficents, a_0, ... ,a_k\r\n #FCoefficients, the \"beta\" coefficients, b_0, ... b_k\r\n #RStart, REnd the bounds on the real components of points plotted\r\n #ImStart, ImEnd the bounds on the real components of points plotted\r\n #meshDivisions, the number of subdivisions of the real/imaginary axes\r\n\r\n #setup grid points for function evaluations\r\n A = np.linspace(RStart, REnd, meshDivisions)\r\n B = 1j * np.linspace(ImStart, ImEnd, meshDivisions)\r\n p, q = np.meshgrid(A,B)\r\n C= p + q\r\n \r\n #evaluate Schur criterion on the previously setup grid\r\n for i in xrange(meshDivisions):\r\n for j in xrange(meshDivisions):\r\n C[i][j] = (int(SchurCriterion(C[i][j], YCoefficients,\r\n FCoefficients)))\r\n\r\n \r\n #plot region for where the polynomial passes the Schur criterion\r\n plt.contourf(p, q * 1j, C, [0.9, 1.1], alpha = 0.1, colors = 'C0')\r\n return\r\n \r\ndef LMMLSDPlotterComplete (YCoefficients, FCoefficients, RStart = -10, \r\n REnd = 10,ImStart = -10, ImEnd = 10, \r\n meshDivisions = 100, boundaryDivisions = 100,\r\n legend = True):\r\n #YCoefficients, the \"alpha\" coefficents, a_0, ... ,a_k\r\n #FCoefficients, the \"beta\" coefficients, b_0, ... 
b_k\r\n #RStart, REnd the bounds on the real components of points plotted\r\n #ImStart, ImEnd the bounds on the real components of points plotted\r\n #meshDivisions, the number of subdivisions of the real/imaginary axes\r\n #boundaryDivisions, the number of points where the boundary curve is \r\n #evaluated at\r\n #legend, set False if you don't want a key\r\n \r\n #Initialize a Figure\r\n fig = plt.figure()\r\n \r\n #Add Axes to Figure\r\n ax = fig.add_subplot(111)\r\n \r\n #get boundary\r\n LMMLSDPlotter(YCoefficients, FCoefficients, boundaryDivisions)\r\n \r\n #get filled region\r\n LMMLSDPlotterSchur(YCoefficients, FCoefficients, RStart, REnd,ImStart,\r\n ImEnd, meshDivisions)\r\n #setup legend\r\n LSD = mpatches.Rectangle((0, 0), 1, 1, fc=\"C0\",alpha=0.1)\r\n LSDB = mlines.Line2D([], [], color='C0')\r\n handles = [LSD, LSDB]\r\n labels = ['LSD', 'Boundary']\r\n if legend == True:\r\n ax.legend(handles, labels)\r\n \r\n #setup plot window\r\n ax = plt.gca()\r\n # making the top and right spine invisible:\r\n ax.spines['top'].set_color('none')\r\n ax.spines['right'].set_color('none')\r\n # moving bottom spine up to y=0 position:\r\n ax.xaxis.set_ticks_position('bottom')\r\n ax.spines['bottom'].set_position(('data',0))\r\n # moving left spine to the right to position x == 0:\r\n ax.yaxis.set_ticks_position('left')\r\n ax.spines['left'].set_position(('data',0))\r\n ax.grid(b = 'on')\r\n plt.axes().set_aspect('equal', 'datalim')\r\n plt.show()" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.contourf", "numpy.abs", "numpy.linspace", "matplotlib.patches.Rectangle", "matplotlib.lines.Line2D", "numpy.conjugate", "matplotlib.pyplot.plot", "matplotlib.pyplot.axes", "numpy.exp", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
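RKStabilityFunction in LinearStabilityDomains.py evaluates |R(z)| = |1 + z b^T (I - zA)^(-1) 1|. For forward Euler (A = [[0]], b = [1]) this reduces to |1 + z|, so the linear stability domain is the unit disc centred at z = -1. A small NumPy check of that special case (the choice of z is arbitrary):

import numpy as np

A = np.array([[0.0]])                   # Runge-Kutta matrix of forward Euler
b = np.array([1.0])                     # corresponding weights
z = -1.0 + 0.5j
R = 1.0 + z * (b @ np.linalg.inv(np.eye(1) - z * A) @ np.ones(1))
# R == 1 + z for forward Euler; here |R| == 0.5, so z lies inside the stability domain
print(abs(R))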
Tencent/TPAT
[ "6380a44ed1c2c35c97dc30768835197bfb79eeb1" ]
[ "python/plugin_template_params.py" ]
[ "##############################\n# author : qianqiu\n# email : [email protected]\n# time : 2022.1.7\n##############################\nimport os\nimport onnx\nfrom onnx import shape_inference\nimport onnx_graphsurgeon as gs\nimport onnxruntime as ort\nimport numpy as np\nfrom type_mapping import (\n tvm_to_c_type_mapping,\n python_to_trt_type_mapping,\n plugin_type_size,\n)\n\n\nclass PluginTemplateParams(object):\n \"\"\"\n Generate useable params for tensorRT plugin.\n \"\"\"\n\n def __init__(\n self,\n cuda_kernel,\n onnx_path,\n tuning_name,\n one_node_model=\"submodel.onnx\",\n ):\n self._kernel_generate = cuda_kernel\n self._onnx_path = onnx_path\n self._one_node_model = one_node_model\n self._tuning_name = tuning_name\n\n self._onnx_input_order = []\n self._input_dict = {}\n self._tvm_executor_order = {}\n self._allocate_size = []\n self._data_type = []\n self._cuda_kernel_order = {}\n self._gpu_thread_config = {}\n self._tvm_func_order = []\n self._nums_input = 0\n self._nums_output = 0\n self._workspace_size = 0\n self._output_type = []\n self._cuda_func_order = []\n self._tvm_constant = {}\n self._tvm_workspace_constant = {}\n self._onnx_output_shape = []\n self._onnx_tensor_type = []\n self._storage_id = []\n self._allocate_global_memory = {}\n self._plugin_config = None\n\n self.infer_for_output_shape()\n self.parse()\n self.align_onnx_and_tvm_input(self._one_node_model)\n self.match_address_for_eid()\n self.cuda_kernel_config()\n os.remove(self._one_node_model)\n\n # Parse Constant.\n def parse_constant_params(self, constant_params):\n tvm_constant = {}\n for key, value in constant_params.items():\n tvm_constant[key] = value.flatten()\n return tvm_constant\n\n # Parse device functions params order.\n def parse_device_funcs_params(self, device_funcs_inorder):\n cuda_kernel_order = {}\n for device_func_inorder in device_funcs_inorder:\n if len(device_func_inorder) == 0:\n continue\n tvm_device_func = device_func_inorder.split()\n cuda_kernel_order[tvm_device_func[0]] = tvm_device_func[1:]\n return cuda_kernel_order\n\n # Parse device functions thread config.\n def parse_device_funcs_thread_config(self, device_funcs_thread_config):\n gpu_thread_config = {}\n cuda_func_order = []\n for device_func_thread_config in device_funcs_thread_config:\n if len(device_func_thread_config) == 0:\n continue\n config = device_func_thread_config.split()\n cuda_func_name = config[0]\n gpu_thread_config[cuda_func_name] = config[1:]\n cuda_func_order.append(cuda_func_name)\n return gpu_thread_config, cuda_func_order\n\n # Parse global memory allocated in device side.\n def parse_device_allocate_global_memory(self, device_allocate_global_memory):\n allocate_global_memory = {}\n for allocate_memory in device_allocate_global_memory:\n if len(allocate_memory) == 0:\n continue\n allocate = allocate_memory.split()\n allocate_global_memory[allocate[0]] = allocate[1:]\n return allocate_global_memory\n\n # Parse variables storage index.\n def parse_storageid(self, storageid):\n storage_id = []\n storage_slot = {}\n for sid in storageid:\n if len(sid) == 0:\n continue\n storage_id = sid.split()\n storage_slot = {}.fromkeys(sid).keys()\n return storage_id, storage_slot\n\n # Parse numbers of input.\n def parse_nums_input(self, nums_input):\n real_nums_input = int(nums_input) - int(len(self._tvm_constant))\n return real_nums_input\n\n # Parse numbers of output.\n def parse_nums_output(self, nums_output):\n real_nums_output = int(nums_output)\n return real_nums_output\n\n # Parse datatype of variables in 
memory.\n def parse_workspace_dtype(self, workspaces_dtype):\n return workspaces_dtype.split()\n\n # Parse size of variables in memory.\n def parse_workspace_size(self, workspace_size):\n return workspace_size.split()\n\n def parse_func_inorder(self, funcs_inorder):\n \"\"\"\n Parse the order of host functions.\n \"\"\"\n func_call = {}\n tvm_executor_order = {}\n tvm_func_order = []\n for host_func_inorder in funcs_inorder:\n if len(host_func_inorder) == 0:\n continue\n tvm_host_func = host_func_inorder.split()\n if tvm_host_func[0] not in tvm_executor_order.keys():\n tvm_executor_order[tvm_host_func[0]] = tvm_host_func[1:]\n tvm_func_order.append(tvm_host_func[0])\n func_call[tvm_host_func[0]] = 0\n else:\n func_call[tvm_host_func[0]] += 1\n func_name = tvm_host_func[0] + \"_\" + str(func_call[tvm_host_func[0]])\n tvm_executor_order[func_name] = tvm_host_func[1:]\n tvm_func_order.append(func_name)\n return tvm_executor_order, tvm_func_order\n\n # Parse\n def parse(self):\n constant_params = self._kernel_generate.constant_param\n device_funcs_inorder = self._kernel_generate.device_funcs_inorder.split(\"\\n\")\n device_funcs_thread_config = (\n self._kernel_generate.device_funcs_thread_config.split(\"\\n\")\n )\n device_allocate_global_memory = (\n self._kernel_generate.device_allocate_global_memory.split(\"\\n\")\n )\n num_inputs = self._kernel_generate.num_inputs\n num_outputs = self._kernel_generate.num_outputs\n workspace_dtype = self._kernel_generate.workspace_dtype\n workspace_size = self._kernel_generate.workspace_size\n funcs_inorder = self._kernel_generate.func_inorder.split(\"\\n\")\n storage_id = self._kernel_generate.storageid.split(\"\\n\")\n\n self._tvm_constant = self.parse_constant_params(constant_params)\n self._cuda_kernel_order = self.parse_device_funcs_params(device_funcs_inorder)\n (\n self._gpu_thread_config,\n self._cuda_func_order,\n ) = self.parse_device_funcs_thread_config(device_funcs_thread_config)\n\n self._nums_input = self.parse_nums_input(num_inputs)\n self._nums_output = self.parse_nums_output(num_outputs)\n self._data_type = self.parse_workspace_dtype(workspace_dtype)\n\n self._allocate_size = self.parse_workspace_size(workspace_size)\n self._tvm_executor_order, self._tvm_func_order = self.parse_func_inorder(\n funcs_inorder\n )\n self._cuda_source_code = self._kernel_generate.cuda_source_code\n self._storage_id, self.storage_slot = self.parse_storageid(storage_id)\n self._allocate_global_memory = self.parse_device_allocate_global_memory(\n device_allocate_global_memory\n )\n\n def infer_for_output_shape(self):\n \"\"\"\n infer for output shape\n \"\"\"\n model = onnx.load(self._onnx_path)\n inferred_model = shape_inference.infer_shapes(model)\n graph = gs.import_onnx(inferred_model)\n nodes = graph.nodes\n tensors = graph.tensors()\n tuning_nodes = [node for node in nodes if node.name == self._tuning_name]\n assert len(tuning_nodes) != 0\n tuning_node = tuning_nodes[0]\n for inp in tuning_node.inputs:\n if inp.__class__ == gs.Constant:\n self._onnx_tensor_type.append(\n python_to_trt_type_mapping[inp.dtype.__name__]\n )\n else:\n self._onnx_tensor_type.append(\n python_to_trt_type_mapping[inp.dtype.name]\n )\n\n for oup in tuning_node.outputs:\n self._onnx_tensor_type.append(python_to_trt_type_mapping[oup.dtype.name])\n\n graph.outputs = [\n tensors[oup.name].to_variable(dtype=oup.dtype, shape=oup.shape)\n for oup in tuning_node.outputs\n ]\n graph.cleanup()\n submodel = gs.export_onnx(graph)\n dummy_model = \"dummy_model.onnx\"\n onnx.save(submodel, 
dummy_model)\n session = ort.InferenceSession(dummy_model)\n outname = [output.name for output in session.get_outputs()]\n dummy_input = {}\n for gi in graph.inputs:\n dummy_input[gi.name] = (np.random.random(gi.shape) + 1).astype(gi.dtype)\n dummy_output = session.run(outname, dummy_input)\n for i in range(len(dummy_output)):\n self._onnx_output_shape.append(dummy_output[i].shape)\n os.remove(dummy_model)\n\n def align_onnx_and_tvm_input(self, onnx_path):\n \"\"\"\n align onnx and tvm input. Because tvm let constants in the after of variables params.\n \"\"\"\n model = onnx.load(onnx_path)\n graph = model.graph\n nodes = graph.node\n onnx_inputs = graph.input\n init_order = {}\n for node in nodes:\n op_inputs = node.input\n for i in range(len(op_inputs)):\n init_order[op_inputs[i]] = i\n for i in onnx_inputs:\n self._onnx_input_order.append(init_order[i.name])\n\n def match_address_for_eid(self):\n \"\"\"\n The memory address used by functions params.\n \"\"\"\n workspace = 0\n input_slot_dict = {}\n for i in range(self._nums_output):\n eid = self._kernel_generate.graph_module.get_output_eid(i)\n idx = int(self._storage_id[eid])\n self._output_type.append(python_to_trt_type_mapping[self._data_type[eid]])\n self._input_dict[str(eid)] = \"outputs[\" + str(i) + \"]\"\n input_slot_dict[idx] = self._input_dict[str(eid)]\n duplicate_allocate = {}\n for i in range(len(self._allocate_size)):\n idx = int(self._storage_id[i])\n if idx not in duplicate_allocate.keys():\n duplicate_allocate[idx] = 0\n duplicate_allocate[idx] = max(\n int(self._allocate_size[i]), int(duplicate_allocate[idx])\n )\n\n for i in range(len(self._allocate_size)):\n idx = int(self._storage_id[i])\n if idx in input_slot_dict.keys():\n self._input_dict[str(i)] = input_slot_dict[idx]\n continue\n if i < self._nums_input:\n self._input_dict[str(i)] = (\n \"inputs[\" + str(self._onnx_input_order[i]) + \"]\"\n )\n elif i < len(self._allocate_size) - self._nums_output:\n if i == self._nums_input:\n self._input_dict[str(i)] = \"workspace\"\n else:\n self._input_dict[str(i)] = \"(workspace + \" + str(workspace) + \")\"\n workspace += int(duplicate_allocate[idx])\n self._workspace_size = workspace\n if (\n self._input_dict[str(i)] not in self._tvm_workspace_constant.keys()\n and str(idx) in self._tvm_constant.keys()\n ):\n # self._tvm_workspace_constant[self._input_dict[str(i)]] = None\n self._tvm_workspace_constant[self._input_dict[str(i)]] = (\n self._tvm_constant[str(idx)],\n tvm_to_c_type_mapping[self._data_type[i]],\n int(i),\n )\n input_slot_dict[idx] = self._input_dict[str(i)]\n if len(self._allocate_global_memory) != 0:\n for key, value in self._allocate_global_memory.items():\n self._input_dict[key] = (\n \"(\"\n + tvm_to_c_type_mapping[value[0]]\n + \"*)(workspace + \"\n + str(workspace)\n + \")\"\n )\n workspace += int(value[1]) * plugin_type_size[value[0]]\n self._workspace_size = workspace\n\n def cuda_kernel_config(self):\n \"\"\"\n Grid. Block. Thread. 
size.\n \"\"\"\n output = \"\"\n output_json = {}\n cuda_func_call = {}\n for i in range(len(self._cuda_func_order)):\n cuda_func_name = self._cuda_func_order[i]\n import re\n\n func_name = re.sub(\"_kernel\\d+\", \"\", cuda_func_name, count=1)\n if cuda_func_name not in output_json.keys():\n output_json[cuda_func_name] = {}\n cuda_func_call[cuda_func_name] = 0\n multi_cuda_func_name = cuda_func_name\n else:\n cuda_func_call[cuda_func_name] += 1\n func_name = func_name + \"_\" + str(cuda_func_call[cuda_func_name])\n multi_cuda_func_name = (\n cuda_func_name + \"_\" + str(cuda_func_call[cuda_func_name])\n )\n output_json[multi_cuda_func_name] = {}\n\n output_json[multi_cuda_func_name][\"grid_dim\"] = self._gpu_thread_config[\n cuda_func_name\n ][0].strip(\"grid=\")\n output_json[multi_cuda_func_name][\"block_dim\"] = self._gpu_thread_config[\n cuda_func_name\n ][1].strip(\"block=\")\n output += (\n cuda_func_name\n + \"\\n\"\n + str(self._gpu_thread_config[cuda_func_name])\n + \"\\n\"\n )\n\n kernel_param_order = self._cuda_kernel_order[cuda_func_name]\n\n tvm_param_order = self._tvm_executor_order[func_name]\n enqueue_params = \"\"\n for j in range(len(kernel_param_order)):\n # output += self._input_dict[int(tvm_param_order[int(kernel_param_order[j])])]\n if kernel_param_order[j].isdigit():\n # enqueue_params += self._input_dict[str(tvm_param_order[int(kernel_param_order[j])])]\n output += self._input_dict[\n str(tvm_param_order[int(kernel_param_order[j])])\n ]\n eid = tvm_param_order[int(kernel_param_order[j])]\n enqueue_params += (\n \"(\"\n + tvm_to_c_type_mapping[self._data_type[int(eid)]]\n + \"*)\"\n + self._input_dict[str(eid)]\n )\n else:\n if kernel_param_order[j] in self._input_dict.keys():\n enqueue_params += self._input_dict[kernel_param_order[j]]\n if j == len(kernel_param_order) - 1:\n output += \"\\n\"\n else:\n output += \", \"\n enqueue_params += \", \"\n output_json[multi_cuda_func_name][\"enqueue_params\"] = enqueue_params\n self._plugin_config = output_json\n\n @property\n def kernel_order(self):\n return self._cuda_func_order\n\n @property\n def plugin_config(self):\n return self._plugin_config\n\n @property\n def workspace_size(self):\n return self._workspace_size\n\n @property\n def output_num(self):\n return self._nums_output\n\n @property\n def output_type(self):\n return self._output_type\n\n @property\n def output_shape(self):\n return self._onnx_output_shape\n\n @property\n def tensor_type(self):\n return self._onnx_tensor_type\n\n @property\n def workspace_init(self):\n return self._tvm_workspace_constant\n\n @property\n def cuda_source_code(self):\n return self._cuda_source_code\n\n @property\n def plugin_name(self):\n return self._kernel_generate.plugin_name\n\n @property\n def onnx_op_type(self):\n return self._kernel_generate.onnx_op_type\n\n @property\n def storage_id(self):\n return self._storage_id\n" ]
[ [ "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nahanoo/StrucFollower
[ "b61d87f3e65720b3c721b695b3a7484535ec8dfa" ]
[ "deletion_detection/deletion_detection.py" ]
[ "from os.path import join, exists\nfrom os import mkdir, remove\nfrom io import StringIO\nfrom subprocess import call, run as r, DEVNULL, STDOUT\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nimport pandas as pd\nfrom .plotting import plot_alignment, plot_genbank\nimport pysam\n\n\nclass Deletion():\n def __init__(self, args):\n # Input file paths\n self.out_dir = args.out_dir\n self.reference_gbk = args.ancestral\n self.mutant_fasta = args.mutant\n # Bam file path\n self.bam = join(self.out_dir, \"aligned.sorted.bam\")\n\n # Chunk parameters\n self.step = 100\n self.window = 500\n\n # List contigs\n self.mutant_contigs = [contig for contig in SeqIO.parse(\n self.mutant_fasta, 'fasta')]\n self.reference_contigs = [contig for contig in SeqIO.parse(\n self.reference_gbk, 'genbank')]\n self.reference_features = self.parse_genbank()\n # Dumping input reference as fasta for mapping\n self.reference_fasta = join(self.out_dir, 'reference.fasta')\n with open(join(self.out_dir, 'reference.fasta'), 'w') as handle:\n SeqIO.write(self.reference_contigs, handle, 'fasta')\n\n # Output dataframes\n self.no_coverage = pd.DataFrame(\n columns=['chromosome', 'position', 'length'])\n self.deletions = pd.DataFrame(\n columns=['chromosome', 'position',\n 'length', 'chromosome_origin', 'position_origin'])\n self.deletions_annotated = pd.DataFrame(\n columns=['chromosome', 'position',\n 'length', 'chromosome_origin', 'position_origin', 'products'])\n self.plasmids = pd.DataFrame(\n columns=['chromosome', 'position', 'length'])\n self.plasmids_annotated = pd.DataFrame(\n columns=['chromosome', 'position', 'length', 'products'])\n\n # List storing files paths to trash\n self.trash = []\n self.trash.append(self.reference_fasta)\n\n # Create plot directory\n if not exists(join(self.out_dir, 'plots')):\n mkdir(join(self.out_dir, 'plots'))\n\n def parse_genbank(self):\n \"\"\"Parses all features of a genbanka and stores locations\n and products in dictionary\"\"\"\n genbank = {contig.id: {} for contig in self.reference_contigs}\n for contig in self.reference_contigs:\n for feature in contig.features:\n # Some features don't have all desired keys\n try:\n start = feature.location.start\n end = feature.location.end\n product = feature.qualifiers['product']\n genbank[contig.id][(start, end)] = product[0]\n except KeyError:\n pass\n return genbank\n\n def chunker(self, seq, window_size, step):\n \"\"\"Creates chunks of a sequence. 
window_size defines\n chunk size and step the amount of basepairs the window\n is moved forward.\"\"\"\n # List which stores all chunks\n seqs = []\n seqlen = len(seq)\n self.step = step\n for counter, q in enumerate(range(0, seqlen, step)):\n # Returns ether entire sequence or window depending on sequence length\n j = seqlen if q + window_size > seqlen else q + window_size\n chunk_id = seq.id\n chunk_seq = seq.seq[q:j]\n # Add chunk id to sequence id\n chunk = SeqRecord(seq=chunk_seq,id=chunk_id + \".\" + str(counter))\n seqs.append(chunk)\n if j == seqlen:\n break\n return seqs\n\n def chunk_assembly(self):\n \"\"\"Chunks an assembly of multiple contigs into different \n chunks using a sliding window algorightm (see chunker function).\"\"\"\n assembly_chunks = []\n for contig in self.mutant_contigs:\n # Creates chunks of every contig\n assembly_chunks += self.chunker(contig, self.window, self.step)\n self.chunks = join(self.out_dir,\n \"chunked_sequences.fasta\")\n # Dumps chunks to fasta\n with open(self.chunks, \"w\") as handle:\n SeqIO.write(assembly_chunks, handle, \"fasta\")\n # Delete chunked sequence\n self.trash.append(self.chunks)\n\n def mapper(self, reference, reads, out):\n \"\"\"Maps long accurate sequences to references with minimap2.\"\"\"\n cmd = [\n \"minimap2\",\n \"-ax\",\n \"asm5\",\n reference,\n reads,\n \">\",\n out,\n ]\n bam = out.replace('.sam', '.sorted.bam')\n # Calling minimap and surpressing stdout\n call(\" \".join(cmd), shell=True, stdout=DEVNULL,\n stderr=STDOUT)\n cmd = ['samtools', 'sort', '-o', bam, out]\n # Calling samtools and surpressing stdout\n call(\" \".join(cmd), shell=True, stdout=DEVNULL,\n stderr=STDOUT)\n cmd = ['samtools', 'index', bam]\n # Calling samtools and surpressing stdout\n call(\" \".join(cmd), shell=True, stdout=DEVNULL,\n stderr=STDOUT)\n # Files to trash\n if out not in self.trash:\n self.trash.append(out)\n self.trash.append(bam)\n self.trash.append(bam+'.bai')\n\n def map_chunks(self):\n \"\"\"Maps chunked sequences to reference\"\"\"\n self.mapper(self.reference_fasta, join(self.out_dir, \"chunked_sequences.fasta\"),\n join(self.out_dir, \"aligned.sam\"))\n\n def get_deletions(self):\n \"\"\"Gets all regions with 0 coverage and seperates between\n in strand deletion or deleted plasmids.\"\"\"\n cmd = ['samtools', 'depth', '-aa', '-Q', '0', self.bam]\n process = r(cmd, capture_output=True)\n df = pd.read_csv(StringIO(process.stdout.decode()), sep='\\t')\n df.columns = ['chromosome', 'position', 'coverage']\n # Masks for regions with no coverage\n df = df[df['coverage'] == 0]\n # Concats positions into start position + length\n self.concat_deletions(df)\n # Switching to 0 based index\n self.no_coverage['position'] = self.no_coverage['position'] - 1\n a = pysam.AlignmentFile(self.bam)\n for i, row in self.no_coverage.iterrows():\n c, p, l = row\n if l == a.get_reference_length(c):\n # Entire plasmid is deleted\n self.plasmids.loc[len(self.plasmids)] = [c, p, l]\n else:\n self.deletions.loc[len(self.deletions)] = [None, None, l, c, p]\n\n def concat_deletions(self, df):\n \"\"\"Concats following positions into deletions\n with length equals n following positions.\"\"\"\n i = -1\n prev_pos = 0\n prev_contig = None\n for contig, pos in zip(df['chromosome'], df['position']):\n if (prev_contig == contig) & (pos - 1 == prev_pos):\n self.no_coverage.at[i, 'length'] += 1\n else:\n i += 1\n self.no_coverage.loc[i] = [contig, pos, 1]\n prev_pos = pos\n prev_contig = contig\n\n def get_location(self):\n \"\"\"Extracts sequence from 
reference around detected deletion.\n Sequence is aligned to mutant to find position in the mutant.\"\"\"\n # Switching to dict style\n reference_contigs = {\n contig.id: contig for contig in self.reference_contigs}\n i = 0\n deletions = pd.DataFrame(columns=self.deletions.columns)\n for c, p, l in zip(self.deletions['chromosome_origin'],\n self.deletions['position_origin'], self.deletions['length']):\n a = pysam.AlignmentFile(self.bam)\n # Getting correct paddins considering pos possiblye\n # being at the start or the end of a contig\n padding = 2000\n if p - padding < 0:\n start = 0\n else:\n start = p - padding\n if p + padding > a.get_reference_length(c):\n end = a.get_reference_length(c)\n else:\n end = p + padding\n\n # Getting sequence around deletion in reference\n seq = reference_contigs[c][start:p]\n seq += reference_contigs[c][p+l:end]\n seq_id = c + '.' + str(p)\n seq.id = seq_id\n\n # Location of deletion in sequence\n rel_pos = p - start\n\n # Mapping files\n tmp_seq = join(self.out_dir, 'tmp_seq.fasta')\n tmp_sam = join(self.out_dir, 'tmp_seq.sam')\n tmp_bam = tmp_sam.replace('.sam', '.sorted.bam')\n with open(tmp_seq, 'w') as handle:\n SeqIO.write(seq, handle, 'fasta')\n\n # Mapping to mutant\n self.mapper(self.mutant_fasta, tmp_seq, tmp_sam)\n\n a = pysam.AlignmentFile(tmp_bam)\n for read in a:\n if not (read.is_unmapped):\n # Idetifying positions\n deletions.loc[i] = [\n read.reference_name, read.reference_start + rel_pos, l, c, p]\n i += 1\n self.deletions = deletions\n # Deleting tmp_seq\n self.trash.append(tmp_seq)\n\n def annotate(self):\n \"\"\"Annotates the dataframes of deleted plasmids and\n in strand deletions.\"\"\"\n i = 0\n for c, p, l in zip(self.plasmids['chromosome'], self.plasmids['position'], self.plasmids['length']):\n products = self.annotate_position(c, p, l)\n for product in products:\n self.plasmids_annotated.loc[i] = [c, p, l, product]\n i += 1\n\n i = 0\n for counter, row in self.deletions.iterrows():\n c, p, l, c_o, p_o = row\n products = self.annotate_position(c_o, p_o, l)\n for product in products:\n self.deletions_annotated.loc[i] = [c, p, l, c_o, p_o, product]\n i += 1\n\n def annotate_position(self, c, p, l):\n \"\"\"Returns products in a region in a genbank.\"\"\"\n products = []\n for (start, end), product in self.reference_features[c].items():\n if not set(range(start, end)).isdisjoint(range(p, p+l)):\n products.append(product)\n return products\n\n def dump(self):\n \"\"\"Dumping all dataframes.\"\"\"\n self.no_coverage.to_csv(\n join(self.out_dir, 'no_coverage.tsv'), sep='\\t', index=False)\n\n self.deletions.to_csv(\n join(self.out_dir, 'deletions.tsv'), sep='\\t', index=False)\n self.deletions_annotated.to_csv(\n join(self.out_dir, 'deletions.annotated.tsv'), sep='\\t', index=False)\n\n self.plasmids.to_csv(\n join(self.out_dir, 'plasmids.tsv'), sep='\\t', index=False)\n self.plasmids_annotated.to_csv(\n join(self.out_dir, 'plasmids.annotated.tsv'), sep='\\t', index=False)\n\n def plot_deletions(self):\n \"\"\"Plots alignments.\"\"\"\n out = join(self.out_dir, 'plots', 'alignments')\n if not exists(out):\n mkdir(out)\n\n for chromosome, position, length in zip(self.deletions['chromosome_origin'], \n self.deletions['position_origin'], self.deletions['length']):\n plot_alignment(self.bam, chromosome, position, length, self.window, out)\n\n def plot_annotation(self):\n \"\"\"Plots annotation\"\"\"\n out = join(self.out_dir, 'plots', 'annotations')\n if not exists(out):\n mkdir(out)\n for i, row in self.deletions.iterrows():\n 
plot_genbank(\n self.reference_contigs, row['chromosome_origin'], row['position_origin'],\n row['position_origin']+row['length'], out)\n\n def clean(self):\n \"\"\"Deletes temporary files.\"\"\"\n for item in self.trash:\n remove(item)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
joramwessels/torcs-client
[ "257441138ebcce3928f100f28e58d9d5a7221d8a" ]
[ "ffnn_gears.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport time\nimport sys\nimport numpy as np\nimport argparse\nimport driver_support\nfrom os import listdir\nfrom os.path import isfile, join\nfrom collections import defaultdict\n\n\ngear_number = 9 # 0 = reverse, 1 = neutral, 2=1st gear, etc. to 7th gear\n\nclass Gear_switcher(nn.Module):\n\n\tdef __init__(self, hidden_dimension):\n\t\tsuper(Gear_switcher, self).__init__()\n\t\tn_states = 1 + gear_number\n\t\tn_actions = gear_number\n\t\tself.layer_1 = nn.Linear(n_states, hidden_dimension)\n\t\tself.layer_2 = nn.Linear(hidden_dimension, n_actions)\n\n\tdef forward(self, inputs):\n\t\tout = self.layer_1(inputs)\n\t\tout = nn.functional.relu(out)\n\t\tout = self.layer_2(out)\n\t\treturn out\n\ndef gear_to_tensor(gear_value):\n\tgear_value += 1\n\treturn torch.LongTensor([gear_value])\n\ndef to_tensor(carstate):\n\t#gear, rpm\n\treturn torch.FloatTensor(driver_support.binerize_input(value=carstate.gear, mapping=get_gear_map()) + [carstate.rpm])\n\ndef prediction_to_action(prediction):\n\t# the index is the gear\n\tindex = prediction.data.numpy().argmax()\n\tindex -= 1\n\treturn index\n\ndef get_gear_map():\n\tgear_to_index_map = dict()\n\tfor x in range(-1, gear_number - 1):\n\t\tgear_to_index_map[str(x)] = x + 1\n\treturn gear_to_index_map\n\ndef evaluate(model, data):\n\t\"\"\"Evaluate a model on a data set.\"\"\"\n\tcorrect = 0.0\n\n\tfor y_true, state in data:\n\t\ty_true = int(y_true[0])\n\t\tlookup_tensor = Variable(torch.FloatTensor(state))\n\t\tscores = model(lookup_tensor)\n\t\taction = prediction_to_action(scores)\n\n\t\tif action == y_true:\n\t\t\tcorrect += 1\n\n\tprint(\"percent correct={}\".format(correct/len(data)))\n\ndef split_data_set(data_set, eval_perc=0.2):\n\ttotal = len(data_set)\n\tsplit = int(total*eval_perc)\n\ttrain = data_set[:split]\n\tevaluate = data_set[split:]\n\treturn train, evaluate\n\ndef create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):\n\t# Read in the data\n\ttraining = []\n\tfor file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:\n\t\ttraining += list(driver_support.read_lliaw_dataset_gear_gear_rpm_spe(file_in))\n\n\n\tmodel = Gear_switcher(hidden_dimension)\n\ttraining = driver_support.binerize_data_input(data=training, index=0, mapping=get_gear_map())\n\ttraining, evalu = split_data_set(training)\n\tprint(model)\n\tevaluate(model, evalu)\n\toptimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n\tloss = nn.CrossEntropyLoss()\n\n\tfor ITER in range(epochs):\n\n\t\ttrain_loss = 0.0\n\t\tstart = time.time()\n\t\tlowest_gear = 10\n\t\thighest_gear = 0\n\t\tlast_state = None\n\n\t\tfor y_true, state in training:\n\t\t\tif last_state == None:\n\t\t\t\tlast_state = state\n\t\t\t\tcontinue\n\n\t\t\tcorrect_gear = int(y_true[0])\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\tin_state = Variable(torch.FloatTensor(last_state))\n\t\t\ty_pred = model(in_state).view(1, gear_number)\n\t\t\ty_true = Variable(gear_to_tensor(correct_gear))\n\n\t\t\t#print(y_true, prediction_to_action(y_pred))\n\n\t\t\toutput = loss(y_pred, y_true)\n\t\t\ttrain_loss += output.data[0]\n\n\t\t\t# backward pass\n\t\t\toutput.backward()\n\n\t\t\t# update weights\n\t\t\toptimizer.step()\n\t\t\tlast_state = state\n\n\t\tprint(\"last prediction made:pred={}, actual={}\".format(prediction_to_action(y_pred), y_true))\n\t\tprint(\"iter %r: train loss/action=%.4f, 
time=%.2fs\" %(ITER, train_loss/len(training), time.time()-start))\n\tevaluate(model, evalu)\n\ttorch.save(model.state_dict(), out_file)\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--epochs', type=int)\n\tparser.add_argument('--hidden', type=int)\n\tparser.add_argument('--learn', type=float)\n\tparser.add_argument('--in_file', type=str)\n\tparser.add_argument('--out_file', type=str)\n\targs = parser.parse_args()\n\tcreate_model(args.out_file, args.in_file, args.learn, args.epochs, args.hidden)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.LongTensor", "torch.nn.Linear", "torch.nn.functional.relu", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hsivan/automon
[ "222b17651533bdb2abce7de36a80156ab7b9cc21", "222b17651533bdb2abce7de36a80156ab7b9cc21", "222b17651533bdb2abce7de36a80156ab7b9cc21" ]
[ "experiments/test_ablation_study_quadratic_inverse.py", "automon/gm/gm_variance_node.py", "automon/common_coordinator.py" ]
[ "from automon import AutomonNode, AutomonCoordinator, RlvNode, RlvCoordinator, SlackType, SyncType\nfrom test_utils.functions_to_monitor import func_quadratic_inverse\nfrom test_utils.data_generator import DataGeneratorQuadraticInverse\nfrom experiments.visualization.plot_monitoring_stats_ablation_study import plot_monitoring_stats_graph_and_barchart\nfrom test_utils.test_utils import start_test, end_test, run_test, get_config, write_config_to_file\nfrom test_utils.stats_analysis_utils import plot_monitoring_stats\nimport logging\nfrom test_utils.object_factory import get_objects\nimport numpy as np\nfrom experiments.visualization.plot_quadratic_inverse_surface import draw_f_contour_and_node_trail, draw_f\nlogging = logging.getLogger('automon')\n\nif __name__ == \"__main__\":\n try:\n test_folder = start_test(\"ablation_study_quadratic_inverse\")\n np.random.seed(0)\n\n conf = get_config(num_nodes=4, num_iterations=1020, sliding_window_size=20, d=2, error_bound=0.02,\n slack_type=SlackType.Drift.value, sync_type=SyncType.LazyLRU.value, neighborhood_size=3)\n write_config_to_file(test_folder, conf)\n\n data_generator = DataGeneratorQuadraticInverse(num_iterations=conf[\"num_iterations\"], num_nodes=conf[\"num_nodes\"],\n d=conf[\"d\"], test_folder=test_folder, sliding_window_size=conf[\"sliding_window_size\"])\n\n logging.info(\"\\n###################### Start quadratic inverse RLV test (no ADCD no slack) ######################\")\n data_generator.reset()\n coordinator, nodes = get_objects(RlvNode, RlvCoordinator, conf, func_quadratic_inverse)\n coordinator.coordinator_name = \"no ADCD no slack\"\n run_test(data_generator, coordinator, nodes, test_folder)\n\n logging.info(\"\\n###################### Start quadratic inverse RLV test (no ADCD) ######################\")\n data_generator.reset()\n coordinator, nodes = get_objects(RlvNode, RlvCoordinator, conf, func_quadratic_inverse)\n coordinator.coordinator_name = \"no ADCD\"\n coordinator.slack_type = SlackType.Drift\n coordinator.sync_type = SyncType.LazyLRU\n run_test(data_generator, coordinator, nodes, test_folder)\n\n logging.info(\"\\n###################### Start quadratic inverse AutoMon test ######################\")\n data_generator.reset()\n coordinator, nodes = get_objects(AutomonNode, AutomonCoordinator, conf, func_quadratic_inverse)\n run_test(data_generator, coordinator, nodes, test_folder)\n\n plot_monitoring_stats(test_folder)\n draw_f_contour_and_node_trail(data_generator.data, test_folder)\n draw_f(test_folder)\n plot_monitoring_stats_graph_and_barchart(test_folder, \"quadratic_inverse\", test_folder + \"/\")\n\n finally:\n end_test()\n", "import numpy as np\nfrom automon.gm.gm_common_node import GmCommonNode\nimport scipy as sp\nfrom scipy.optimize import NonlinearConstraint\n\n# Implementation according to https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6877240\n\n\ndef func_q_x0_distance(q, x0):\n return np.linalg.norm(q - x0, 2) ** 2\n\n\ndef func_q_on_parabola(q):\n return q[1] - q[0]**2\n\n\nclass GmVarianceNode(GmCommonNode):\n \n def __init__(self, idx, func_to_monitor=None, d=2, domain=None):\n # func_to_monitor must be func_variance; however we keep function implementations outside of automon core.\n assert (d == 2) # The local vector is the first and second momentum\n GmCommonNode.__init__(self, idx, d=d, domain=domain, func_to_monitor=func_to_monitor)\n\n def _calc_parabola(self, thresh, x):\n # Calculates y = x**2 + thresh\n y = x**2 + thresh\n return y\n \n def 
_calc_q_x_for_negative_discriminant(self, R, Q):\n theta = np.arccos(R / np.sqrt(-Q**3))\n # There are 3 real roots\n roots = 2 * np.sqrt(-Q) * np.cos(theta / 3 + np.array([0, 2, 4]) * np.pi / 3)\n # Choose the root that is closest to self.x0[0], and set q_x to it\n closest_root_index = np.argmin(np.abs(self.x0[0] - roots))\n q_x = roots[closest_root_index]\n self.roots = roots\n return q_x\n \n def _calc_q_x_for_positive_discriminant(self, R, Q):\n discriminant_sqrt = np.sqrt(R**2 + Q**3)\n q_x = np.cbrt(R + discriminant_sqrt) + np.cbrt(R - discriminant_sqrt)\n self.roots = np.array([q_x])\n return q_x \n \n def _calc_q_numerically(self):\n constraint = NonlinearConstraint(func_q_on_parabola, self.u_thresh, self.u_thresh)\n q = sp.optimize.minimize(func_q_x0_distance, self.x0, args=self.x0, constraints=(constraint))\n return q\n \n def _calc_q(self):\n # Taken from: https://proofwiki.org/wiki/Cardano%27s_Formula\n Q = ((2 * self.u_thresh + 1 - 2 * self.x0[1]) / 2) / 3\n R = -(-0.5 * self.x0[0]) / 2\n discriminant = R**2 + Q**3\n\n if discriminant < 0:\n q_x = self._calc_q_x_for_negative_discriminant(R, Q)\n # Make sure that when the discriminant is negative then self.x0[1]\n # is above the lowest point of the parabola.\n parabola_min_y = self.u_thresh\n assert(self.x0[1] > parabola_min_y)\n else:\n q_x = self._calc_q_x_for_positive_discriminant(R, Q)\n # Note that the discriminant can be positive and still self.x0[1]\n # is above the lowest point of the parabola. However, it is always\n # negative when self.x0[1] is below the lowest point of the parabola.\n \n q_y = self._calc_parabola(self.u_thresh, q_x)\n q = np.array([q_x, q_y])\n \n q_numeric = self._calc_q_numerically()\n assert(np.all(q - q_numeric.x < 1e-4))\n \n return q\n \n def _below_safe_zone_upper_bound(self, x):\n x0_minus_q = self.x0 - self.q\n x_minus_q = x - self.q\n b_inside_safe_zone = True if x0_minus_q @ x_minus_q >= 0 else False\n return b_inside_safe_zone\n \n def _above_safe_zone_lower_bound(self, x):\n f = self.func_to_monitor(x)\n b_inside_safe_zone = True if f >= self.l_thresh else False\n return b_inside_safe_zone\n", "import enum\nimport numpy as np\nimport logging\nfrom timeit import default_timer as timer\nimport threading\nfrom automon.common_messages import MessageType, ViolationOrigin, parse_message_violation, parse_message_local_vector_info, \\\n prepare_message_sync, prepare_message_lazy_sync, prepare_message_get_local_vector, message_to_message_list\n\n\nlogging = logging.getLogger(__name__)\n\n\nclass SlackType(enum.Enum):\n # When no slack is used each node monitors its local value x.\n NoSlack = 0\n # If using drift slack then each local node checks that x0+drift is in\n # the safe zone.\n # Drift is: x-x0_local, and x is the local vector x.\n # The node is oblivious to the slack used and just checks that x-slack is in the safe zone.\n # Therefore, the slack given to the node is x0_local-x0.\n Drift = 1\n\n\nclass SyncType(enum.Enum):\n # Sync all nodes.\n Eager = 0\n # Add random nodes to the set S of synced nodes, until all nodes are in the safe zone.\n LazyRandom = 1\n # Add nodes to the set S according the LRU order.\n LazyLRU = 2\n \n def is_lazy(self):\n return self != SyncType.Eager\n\n\n# This class is used to separate the statistics that are collected during experiments from the core code of the coordinator.\nclass Statistics:\n\n def __init__(self):\n # Statistics that can be collected only when b_simulation is True\n self.real_function_value = []\n self.function_approximation_error = []\n 
self.cumulative_msg_for_broadcast_disabled = []\n self.cumulative_msg_for_broadcast_enabled = []\n self.cumulative_fallback_to_eager_sync = [0]\n\n # General statistics\n self.full_sync_history_times = []\n self.collect_local_vectors_latencies = []\n\n # Message statistics\n self.total_violations_msg_counter = 0\n self.sync_broadcast_msg_counter = 0\n self.sync_msg_counter = 0\n self.get_node_local_vector_msg_counter = 0\n self.get_node_local_vector_broadcast_msg_counter = 0\n self.node_return_local_vector_msg_counter = 0\n self.bytes_sent = 0\n self.bytes_received = 0\n\n # Violation statistics that can be collected only when b_simulation is True\n self.true_violations_msg_counter = 0\n self.false_global_violation_msg_counter = 0\n self.false_local_violation_msg_counter = 0\n self.missed_violations_counter = 0\n self.rounds_with_violation_counter = 0\n\n # Violation statistics\n self.violation_origin_outside_safe_zone = 0\n self.violation_origin_outside_domain = 0\n self.violation_origin_faulty_safe_zone = 0\n\n # For regression test\n self.full_sync_history = []\n\n def update_sync_statistics(self, f_at_global_x, f_at_x0, b_violation, b_eager_sync):\n # Keep the real function value and the error for statistics\n self.real_function_value.append(f_at_global_x)\n # The difference between f(x0) (f at the reference point from the last sync) and the real f(global_x) at the moment\n self.function_approximation_error.append(np.abs(f_at_global_x - f_at_x0))\n self.cumulative_msg_for_broadcast_enabled.append(self._total_msgs_for_enabled_broadcast())\n self.cumulative_msg_for_broadcast_disabled.append(self._total_msgs_for_disabled_broadcast())\n\n self.rounds_with_violation_counter += int(b_violation)\n self.cumulative_fallback_to_eager_sync.append(self.cumulative_fallback_to_eager_sync[-1] + int(b_eager_sync))\n\n def update_sync_messages_statistics(self, num_synced_nodes):\n # If broadcast is supported, then count single msg for the entire node group\n self.sync_broadcast_msg_counter += 1\n # Otherwise, count single messages\n self.sync_msg_counter += num_synced_nodes\n\n def update_get_node_local_vector_messages_statistics(self, num_asked_nodes):\n # If broadcast is supported, then count single msg for the entire node group\n self.get_node_local_vector_broadcast_msg_counter += 1\n # Otherwise, count single messages\n self.get_node_local_vector_msg_counter += num_asked_nodes\n\n def update_node_local_vector_info_messages_statistics(self, num_responding_nodes):\n # Update the counter that counts the responses\n self.node_return_local_vector_msg_counter += num_responding_nodes\n\n def update_violation_messages_statistics(self, violation_origin):\n self.total_violations_msg_counter += 1\n self.violation_origin_outside_safe_zone += int(violation_origin == ViolationOrigin.SafeZone)\n self.violation_origin_outside_domain += int(violation_origin == ViolationOrigin.Domain)\n self.violation_origin_faulty_safe_zone += int(violation_origin == ViolationOrigin.FaultySafeZone)\n\n def _total_msgs_for_enabled_broadcast(self):\n total_msg = self.total_violations_msg_counter + self.sync_broadcast_msg_counter + self.get_node_local_vector_broadcast_msg_counter + self.node_return_local_vector_msg_counter\n return total_msg\n\n def _total_msgs_for_disabled_broadcast(self):\n total_msg = self.total_violations_msg_counter + self.sync_msg_counter + self.get_node_local_vector_msg_counter + self.node_return_local_vector_msg_counter\n return total_msg\n\n def dump_stats(self, test_folder, coordinator_name):\n 
logging.info(\"Coordinator \" + coordinator_name + \" statistics:\")\n logging.info(\"True violations msg counter \" + str(self.true_violations_msg_counter))\n logging.info(\"False Global violations msg counter \" + str(self.false_local_violation_msg_counter))\n logging.info(\"False Local violations msg counter \" + str(self.false_local_violation_msg_counter))\n logging.info(\"Sync broadcast msg counter \" + str(self.sync_broadcast_msg_counter))\n logging.info(\"Sync msg counter \" + str(self.sync_msg_counter))\n logging.info(\"Get node statistics broadcast msg counter \" + str(self.get_node_local_vector_broadcast_msg_counter))\n logging.info(\"Get node statistics msg counter \" + str(self.get_node_local_vector_msg_counter))\n logging.info(\"Missed violations counter \" + str(self.missed_violations_counter))\n logging.info(\"Rounds with violations counter \" + str(self.rounds_with_violation_counter))\n logging.info(\"Total violations msg counter \" + str(self.total_violations_msg_counter))\n logging.info(\"Node return statistics msg counter \" + str(self.node_return_local_vector_msg_counter))\n logging.info(\"Total msgs broadcast enabled \" + str(self._total_msgs_for_enabled_broadcast()) + \", and disabled \" + str(self._total_msgs_for_disabled_broadcast()))\n logging.info(\"Num violations caused by local vector outside safe zone \" + str(self.violation_origin_outside_safe_zone))\n logging.info(\"Num violations caused by local vector outside domain \" + str(self.violation_origin_outside_domain))\n logging.info(\"Num violations caused by faulty safe zone \" + str(self.violation_origin_faulty_safe_zone))\n logging.info(\"Bytes sent \" + str(self.bytes_sent))\n logging.info(\"Bytes received \" + str(self.bytes_received))\n\n logging.info(\"Full sync history len \" + str(len(self.full_sync_history_times)))\n if len(self.full_sync_history_times) > 1:\n logging.info(\"Avg full sync time (ignore first time) \" + str(np.mean(self.full_sync_history_times[1:])))\n logging.info(\"Std full sync time (ignore first time) \" + str(np.std(self.full_sync_history_times[1:])))\n\n logging.info(\"Avg collect local vectors latency \" + str(np.mean(self.collect_local_vectors_latencies)))\n logging.info(\"Std collect local vectors latency \" + str(np.std(self.collect_local_vectors_latencies)))\n logging.info(\"Max collect local vectors latency \" + str(np.max(self.collect_local_vectors_latencies, initial=0)))\n\n if test_folder is not None:\n with open(test_folder + \"/results.txt\", \"a\") as f:\n f.write(\"\\n\\nCoordinator \" + coordinator_name + \" statistics:\")\n f.write(\"\\nTrue violations \" + str(self.true_violations_msg_counter))\n f.write(\"\\nFalse Global violations \" + str(self.false_global_violation_msg_counter))\n f.write(\"\\nFalse Local violations \" + str(self.false_local_violation_msg_counter))\n f.write(\"\\nSync broadcast msg counter \" + str(self.sync_broadcast_msg_counter))\n f.write(\"\\nSync msg counter \" + str(self.sync_msg_counter))\n f.write(\"\\nGet node statistics broadcast msg counter \" + str(self.get_node_local_vector_broadcast_msg_counter))\n f.write(\"\\nGet node statistics msg counter \" + str(self.get_node_local_vector_msg_counter))\n f.write(\"\\nMissed violations counter \" + str(self.missed_violations_counter))\n f.write(\"\\nRounds with violations counter \" + str(self.rounds_with_violation_counter))\n f.write(\"\\nTotal violations msg counter \" + str(self.total_violations_msg_counter))\n f.write(\"\\nNode return statistics msg counter \" + 
str(self.node_return_local_vector_msg_counter))\n f.write(\"\\nTotal msgs broadcast enabled \" + str(self._total_msgs_for_enabled_broadcast()) + \", and disabled \" + str(self._total_msgs_for_disabled_broadcast()))\n f.write(\"\\nNum violations caused by local vector outside safe zone \" + str(self.violation_origin_outside_safe_zone))\n f.write(\"\\nNum violations caused by local vector outside domain \" + str(self.violation_origin_outside_domain))\n f.write(\"\\nNum violations caused by faulty safe zone \" + str(self.violation_origin_faulty_safe_zone))\n f.write(\"\\nBytes sent \" + str(self.bytes_sent))\n f.write(\"\\nBytes received \" + str(self.bytes_received))\n f.write(\"\\nFull sync history len \" + str(len(self.full_sync_history_times)))\n if len(self.full_sync_history_times) > 1:\n f.write(\"\\nAvg full sync time (ignore first time) \" + str(np.mean(self.full_sync_history_times[1:])))\n f.write(\"\\nStd full sync time (ignore first time) \" + str(np.std(self.full_sync_history_times[1:])))\n f.write(\"\\nAvg collect local vectors latency \" + str(np.mean(self.collect_local_vectors_latencies)))\n f.write(\"\\nStd collect local vectors latency \" + str(np.std(self.collect_local_vectors_latencies)))\n f.write(\"\\nMax collect local vectors latency \" + str(np.max(self.collect_local_vectors_latencies, initial=0)))\n\n # Write \"over time\" arrays to files. Ignore the first value that is of the initial sync (and initial x0).\n file_prefix = test_folder + \"/\" + coordinator_name\n with open(file_prefix + \"_real_function_value.csv\", 'wb') as f:\n np.savetxt(f, self.real_function_value)\n with open(file_prefix + \"_function_approximation_error.csv\", 'wb') as f:\n np.savetxt(f, self.function_approximation_error)\n with open(file_prefix + \"_cumulative_msgs_broadcast_enabled.csv\", 'wb') as f:\n np.savetxt(f, self.cumulative_msg_for_broadcast_enabled)\n with open(file_prefix + \"_cumulative_msgs_broadcast_disabled.csv\", 'wb') as f:\n np.savetxt(f, self.cumulative_msg_for_broadcast_disabled)\n with open(file_prefix + \"_cumulative_fallback_to_eager_sync.csv\", 'wb') as f:\n np.savetxt(f, self.cumulative_fallback_to_eager_sync)\n with open(file_prefix + \"_full_sync_times.csv\", 'wb') as f:\n np.savetxt(f, self.full_sync_history_times)\n\n def get_msg_counters(self):\n return [self.true_violations_msg_counter,\n self.false_global_violation_msg_counter,\n self.false_local_violation_msg_counter,\n self.sync_broadcast_msg_counter,\n self.sync_msg_counter,\n self.get_node_local_vector_broadcast_msg_counter,\n self.get_node_local_vector_msg_counter,\n self.missed_violations_counter,\n self.rounds_with_violation_counter,\n self.total_violations_msg_counter,\n self.node_return_local_vector_msg_counter,\n self._total_msgs_for_enabled_broadcast(),\n self._total_msgs_for_disabled_broadcast()]\n\n\nclass State(enum.Enum):\n # From Idle state can move to LazySync or FullSync\n Idle = 0\n # From LazySync can move to Idle (if was able to resolve violations) or to FullSync (if failed to resolve violations).\n LazySync = 1\n # From FullSync moves to Idle after receiving LocalVectorInfo messages from all the nodes.\n FullSync = 2\n\n\nclass CommonCoordinator:\n \n def __init__(self, verifier, num_nodes, error_bound=2, slack_type=SlackType.Drift, sync_type=SyncType.Eager,\n lazy_sync_max_S=0.5, b_violation_strict=True, coordinator_name=\"Common\"):\n self.coordinator_name = coordinator_name\n # Relevant only for simulation. 
Indicates whether this type of coordinator tolerates false negative events (missed violations).\n self.b_violation_strict = b_violation_strict\n # Flag that indicates if the current run is simulation or not. The test manager sets to True, after initialization, if running as simulation.\n self.b_simulation = False\n\n self.lock = threading.Semaphore()\n \n self.verifier = verifier # Node that is used in lazy sync (to verify constraints) and for violation statistics.\n self.func_to_monitor = verifier.func_to_monitor\n self.error_bound = error_bound\n self.slack_type = slack_type\n self.sync_type = sync_type\n assert(not (slack_type == SlackType.NoSlack and sync_type.is_lazy()))\n self.lazy_sync_max_S = lazy_sync_max_S\n self.num_nodes = num_nodes\n\n CommonCoordinator._init(self)\n logging.info(self.coordinator_name + \" coordinator initialization: d \" + str(self.d) + \", error_bound \" + str(error_bound) + \", num_nodes \" + str(num_nodes) +\n \", slack_type \" + str(slack_type) + \", sync_type \" + str(sync_type) + \", lazy_sync_max_S \" + str(lazy_sync_max_S))\n\n def _init(self):\n self.iteration = 0\n self.state = State.Idle\n self.indices_of_nodes_asked_for_local_vector = []\n self.verifier._init()\n self.x0 = self.verifier.get_local_vector()\n self.d = self.x0.shape[0]\n self.u_thresh = 0\n self.l_thresh = 0\n self.b_faulty_safe_zone = False\n self.b_violation = False\n self.b_eager_sync = False\n\n # Nodes\n self.nodes_x0_local = np.zeros((self.num_nodes, self.d))\n # Indicates if node sent its local vector in the current iteration.\n # It could be due to violation msg from this node, or during lazy sync process.\n # It tells the coordinator, during eager sync for example, that it does not need to collect the local vector from this node.\n self.b_nodes_have_updated_local_vector = np.zeros(self.num_nodes, dtype=bool)\n self.nodes_slack = np.zeros((self.num_nodes, self.d))\n self.b_nodes_have_violation = np.zeros(self.num_nodes, dtype=bool)\n self.b_nodes_have_violation_prev_iteration = self.b_nodes_have_violation.copy()\n self.nodes_lazy_lru_sync_counter = np.zeros(self.num_nodes)\n # Keep for each node its constraint version. 
After eager sync all the nodes should hold the latest version.\n # After lazy sync only the nodes in S should hold the latest version and the rest of the nodes an older version.\n # Messages between the coordinator and the nodes include these versions.\n self.nodes_constraint_version = np.zeros(self.num_nodes, dtype=int)\n\n # Collect statistics during experiment\n self.statistics = Statistics()\n\n def _global_vector_inside_admissible_region(self):\n # Check if the global x, which is the one in the verifier (which uses no slack) is inside the admissible region.\n # This verification is used for statistics such as number of true violations, false local violations, false global violations, etc.\n global_x = self.verifier.get_local_vector()\n f_at_x = self.func_to_monitor(global_x)\n return self.l_thresh <= f_at_x <= self.u_thresh\n\n def _global_vector_inside_effective_safe_zone(self):\n # Check if the global x, which is the one in the verifier (which uses no slack) is inside the effective safe zone\n # (inside domain, bounds, safe zone).\n # This verification is used for statistics such as number of true violations, false local violations, false global violations, etc.\n global_x = self.verifier.get_local_vector()\n return self.verifier.inside_effective_safe_zone(global_x)\n \n def _log_violation_type(self, node_idx):\n # Find the type and origin of the violation and write it to log file and update statistics\n\n b_inside_admissible_region = self._global_vector_inside_admissible_region()\n b_inside_safe_zone = self._global_vector_inside_effective_safe_zone()\n # This is a \"true\" violation if global x is not in the admissible region\n b_true_violation = not b_inside_admissible_region\n # This is a \"false global\" violation if global x is not in the safe zone but inside the admissible region\n b_false_global_violation = not b_inside_safe_zone and b_inside_admissible_region\n # This is a \"false local\" violation if global x is inside the safe zone\n b_false_local_violation = b_inside_safe_zone\n \n if self.b_violation_strict:\n assert(b_true_violation + b_false_global_violation + b_false_local_violation == 1)\n else:\n # Do not assert, just log the error. 
This is needed in AutomonCoordinator and RlvCoordinator, when this error can happen.\n if b_true_violation + b_false_global_violation + b_false_local_violation != 1:\n logging.warning(\"Iteration \" + str(self.iteration) + \": b_true_violation \" + str(b_true_violation) + \", b_false_global_violation \" + str(b_false_global_violation) + \", b_false_local_violation \" + str(b_false_local_violation))\n\n self.statistics.true_violations_msg_counter += int(b_true_violation)\n self.statistics.false_global_violation_msg_counter += int(b_false_global_violation)\n self.statistics.false_local_violation_msg_counter += int(b_false_local_violation)\n\n violation_type_str = \"\"\n if b_true_violation:\n violation_type_str = \"True Violation\"\n if b_false_global_violation:\n violation_type_str = \"False Global Violation\" if violation_type_str == \"\" else violation_type_str + \" and False Global Violation\"\n if b_false_local_violation:\n violation_type_str = \"False Local Violation\" if violation_type_str == \"\" else violation_type_str + \" and False Global Violation\"\n logging.debug(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" notify \" + violation_type_str)\n\n def _notify_violation(self, node_idx, violation_origin):\n self.b_nodes_have_violation[node_idx] = True\n self.b_violation = True # For statistics of iterations with violations\n\n if self.b_simulation:\n self._log_violation_type(node_idx)\n\n if violation_origin == ViolationOrigin.FaultySafeZone:\n # Should perform full sync to resolve the issue\n logging.warning(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" notify faulty safe zone violation. Trigger full sync.\")\n self.b_faulty_safe_zone = True\n\n def _prepare_message_get_local_vector_for_node_group(self, nodes_indices):\n messages_out = []\n\n # Get stats from nodes with outdated statistics\n indices_of_nodes_asked_for_local_vector = [node_idx for node_idx in nodes_indices if not self.b_nodes_have_updated_local_vector[node_idx]]\n # Wait for local vectors of these outdated nodes\n self.indices_of_nodes_asked_for_local_vector = indices_of_nodes_asked_for_local_vector\n\n if len(indices_of_nodes_asked_for_local_vector) > 0:\n logging.info(\"Iteration \" + str(self.iteration) + \": Coordinator about to ask \" + str(len(indices_of_nodes_asked_for_local_vector)) + \" nodes for statistics. 
Nodes \" + str(indices_of_nodes_asked_for_local_vector))\n self.statistics.update_get_node_local_vector_messages_statistics(len(indices_of_nodes_asked_for_local_vector))\n \n for node_idx in indices_of_nodes_asked_for_local_vector:\n logging.debug(\"Iteration \" + str(self.iteration) + \": Coordinator asks node \" + str(node_idx) + \" for statistics\")\n message_out = prepare_message_get_local_vector(node_idx, self.nodes_constraint_version[node_idx])\n messages_out.append((node_idx, message_out))\n\n return messages_out\n\n def _update_local_vector_info(self, node_idx, x):\n self.nodes_x0_local[node_idx] = x\n self.b_nodes_have_updated_local_vector[node_idx] = True\n if node_idx in self.indices_of_nodes_asked_for_local_vector:\n self.indices_of_nodes_asked_for_local_vector.remove(node_idx)\n\n def _eager_sync(self):\n # Collect all local statistic vectors from all the nodes and compute new x0 and local constrains.\n # Set all nodes with the new x0 value and constraints\n messages_out = self._prepare_message_get_local_vector_for_node_group(list(range(self.num_nodes)))\n return messages_out\n\n def _finish_eager_sync(self):\n start = timer()\n self.b_eager_sync = True\n\n # Already collect all local statistic vectors from all the nodes.\n # Compute new x0 and local constrains.\n # Set all nodes with the new x0 value and constraints.\n new_x0, _ = self._evaluate_x0_and_slack(list(range(self.num_nodes)))\n\n if self.b_simulation:\n # Sanity check: verify that new_x0 is the same one as the verifier x (which is the global vector)\n global_x = self.verifier.get_local_vector()\n assert (np.all(global_x - new_x0 < 1e-10))\n else:\n # This action is not required as the global vector x of the verifier is not used in a real distributed experiment.\n self.verifier.x = new_x0.copy()\n logging.debug(\"Iteration \" + str(self.iteration) + \": About to sync the value \" + str(new_x0))\n\n self.x0 = new_x0\n # Updating the thresholds to make sure that that the new x0 is inside the safe zone.\n self._update_l_u_threshold()\n\n # Update the slacks to all nodes, and sync all nodes\n self._allocate_slacks(self.x0, list(range(self.num_nodes)))\n messages_out = self._sync_nodes(list(range(self.num_nodes)), sync_type=\"full\")\n\n # Sync also verifier. Since verifier.x equals new_x0, no slack is ever needed.\n self._sync_verifier()\n # new_x0 must be inside the safe zone. We can make sure by checking that verifier.x\n # is inside the safe zone since verifier.x equals new_x0.\n assert (self._global_vector_inside_effective_safe_zone())\n\n self.b_faulty_safe_zone = False\n\n end = timer()\n\n self.statistics.full_sync_history.append((self.iteration, new_x0)) # For testing: keep the iteration and the new x0\n self.statistics.full_sync_history_times.append(end - start)\n if self.iteration == 0:\n # This is the first full sync after windows of all nodes are full. 
Should ignore all violations up until now.\n self.statistics.total_violations_msg_counter = 0\n self.statistics.violation_origin_outside_safe_zone = 0\n self.statistics.violation_origin_outside_domain = 0\n\n return messages_out\n \n def _lazy_sync(self):\n b_eager_sync_fallback = False\n S_max_size = np.round(self.lazy_sync_max_S * self.num_nodes)\n\n # Before asking collecting the local vectors of extra nodes, try first to resolve violations with the nodes with violations.\n # This is only relevant if a violation was reported after the previous call to _lazy_sync().\n if not np.alltrue(self.b_nodes_have_violation_prev_iteration == self.b_nodes_have_violation):\n S = np.nonzero(self.b_nodes_have_violation)[0]\n if len(S) <= S_max_size:\n S_x0, S_slack = self._evaluate_x0_and_slack(S)\n if self.verifier.inside_effective_safe_zone(S_x0 - S_slack):\n logging.info(\"Iteration \" + str(self.iteration) + \": Resolved violations only with violating nodes\")\n if len(S) == 1:\n logging.error(\"Iteration \" + str(self.iteration) + \": Used a single node in lazy sync\")\n raise Exception\n self.b_nodes_have_updated_local_vector = self.b_nodes_have_violation.copy()\n # The violation is resolved using the nodes in S. No need to ask for more local vectors and can move to _finish_lazy_sync step.\n return [], b_eager_sync_fallback\n\n self.b_nodes_have_violation_prev_iteration = self.b_nodes_have_violation.copy()\n\n S = np.nonzero(self.b_nodes_have_updated_local_vector)[0]\n\n # Now try to resolve violations with the nodes that provide their local vectors (due to violations or as part of LOCAL_VECTOR_INFO message)\n if len(S) <= S_max_size:\n S_x0, S_slack = self._evaluate_x0_and_slack(S)\n if self.verifier.inside_effective_safe_zone(S_x0 - S_slack):\n # The violation is resolved using the nodes in S. No need to ask for more local vectors and can move to _finish_lazy_sync step.\n return [], b_eager_sync_fallback\n\n # Could not resolve violations with the nodes that provide their local vectors.\n\n if len(S) >= S_max_size:\n logging.info(\"Iteration \" + str(self.iteration) + \": Fallback to eager sync from lazy sync\")\n messages_out = self._eager_sync()\n b_eager_sync_fallback = True\n # Reset the LRU counters of all nodes\n self.nodes_lazy_lru_sync_counter = np.zeros(self.num_nodes)\n return messages_out, b_eager_sync_fallback\n\n # Add nodes to S until the convex combination of the vectors (x_i-s_i) is in the safe zone\n\n S_not = np.nonzero(np.logical_not(self.b_nodes_have_updated_local_vector))[0]\n if self.sync_type == SyncType.LazyRandom:\n # Arrange S_not (the nodes without violations) in random order\n np.random.shuffle(S_not)\n if self.sync_type == SyncType.LazyLRU:\n # Arrange S_not (the nodes without violations) according to LRU\n S_not_lru_counters = self.nodes_lazy_lru_sync_counter[S_not]\n S_not = S_not[S_not_lru_counters.argsort()]\n\n node_idx = S_not[0]\n messages_out = self._prepare_message_get_local_vector_for_node_group([node_idx])\n return messages_out, b_eager_sync_fallback\n\n def _finish_lazy_sync(self):\n S = np.nonzero(self.b_nodes_have_updated_local_vector)[0]\n logging.info(\"Iteration \" + str(self.iteration) + \": Used \" + str(len(S)) + \" nodes in lazy sync. 
Nodes \" + str(S))\n S_x0, S_slack = self._evaluate_x0_and_slack(S)\n # Allocate slack and sync nodes\n self._allocate_slacks(S_x0 - S_slack, S)\n messages_out = self._sync_nodes(S, sync_type=\"lazy\")\n # Update the LRU counters of the nodes in S\n self.nodes_lazy_lru_sync_counter[S] += 1\n return messages_out\n\n def _check_missed_violations(self):\n # Check for missed violations (false negative). It is only possible to have missed violations in AutomonCoordinator\n # in case the coordinator didn't find the real min/max eigenvalue, and in RlvCoordinator.\n # In that case there is violation of the admissible region, but no violation from any of the nodes.\n # We check it here, since this function is called after each round of set_new_data_point() for all the nodes.\n if (not np.any(self.b_nodes_have_violation)) and (not self._global_vector_inside_admissible_region()):\n self.statistics.missed_violations_counter += 1\n if self.b_violation_strict:\n logging.error(\"Iteration \" + str(self.iteration) + \": Found true violation without any node violation when running in strict mode.\")\n raise Exception\n logging.warning(\"Iteration \" + str(self.iteration) + \": Found true violation without any node violation.\")\n\n # Override by inherent class. The specific coordinator specifies here its special condition for full sync.\n # By default, there is no special condition for eager sync and the coordinator uses lazy sync and falls to full sync when resolving violation fails.\n def _is_eager_sync_required(self):\n return False\n\n def _resolve_violation(self):\n b_eager_sync = True\n\n if self.b_faulty_safe_zone:\n messages_out = self._eager_sync()\n elif self._is_eager_sync_required():\n messages_out = self._eager_sync()\n elif self.sync_type == SyncType.Eager:\n messages_out = self._eager_sync()\n elif self.sync_type.is_lazy():\n messages_out, b_eager_sync = self._lazy_sync() # Returns indication if there was a fallback to eager sync or not\n else:\n logging.error(\"Iteration \" + str(self.iteration) + \": Unexpected sync type \" + str(self.sync_type))\n raise Exception\n\n return messages_out, b_eager_sync\n\n def _evaluate_x0_and_slack(self, nodes_indices):\n x0 = np.zeros(self.d)\n slack = np.zeros(self.d)\n\n for node_idx in nodes_indices:\n x0 += self.nodes_x0_local[node_idx]\n slack += self.nodes_slack[node_idx]\n\n x0 = x0 / len(nodes_indices)\n slack = slack / len(nodes_indices)\n\n return x0, slack\n\n def _allocate_slacks(self, x0, nodes_indices):\n for node_idx in nodes_indices:\n slack = np.zeros_like(x0) # self.slack_type == SlackType.NoSlack\n if self.slack_type == SlackType.Drift:\n slack = self.nodes_x0_local[node_idx] - x0\n self.nodes_slack[node_idx] = slack\n \n assert(np.isclose(np.sum(self.nodes_slack), 0))\n \n def _sync_nodes(self, nodes_indices, sync_type=\"full\"):\n messages_out = []\n logging.info(\"Iteration \" + str(self.iteration) + \": Coordinator about to sync \" + str(len(nodes_indices)) + \" nodes. 
Nodes \" + str(nodes_indices))\n self.statistics.update_sync_messages_statistics(len(nodes_indices))\n \n for node_idx in nodes_indices:\n logging.debug(\"Iteration \" + str(self.iteration) + \": Coordinator syncs node \" + str(node_idx))\n message_out = self._sync_node(node_idx, sync_type)\n messages_out.append((node_idx, message_out))\n self.b_nodes_have_violation[node_idx] = False\n\n # After sync there shouldn't be nodes with violations\n assert not np.any(self.b_nodes_have_violation)\n self.b_nodes_have_violation_prev_iteration = self.b_nodes_have_violation.copy()\n\n return messages_out\n\n # Override by inherent class if sync requires additional parameters\n def _sync_verifier(self):\n # Since verifier.x equals new_x0, no slack is ever needed.\n self.verifier.sync(self.x0, np.zeros_like(self.x0), self.l_thresh, self.u_thresh)\n\n # Override by inherent class if sync requires additional parameters\n def _sync_node(self, node_idx, sync_type=\"full\"):\n self.nodes_constraint_version[node_idx] = self.iteration + 1\n if sync_type == \"full\":\n message_out = prepare_message_sync(node_idx, self.nodes_constraint_version[node_idx], self.x0, self.nodes_slack[node_idx], self.l_thresh, self.u_thresh)\n else:\n message_out = prepare_message_lazy_sync(node_idx, self.nodes_constraint_version[node_idx], self.nodes_slack[node_idx])\n return message_out\n \n def _update_l_u_threshold(self):\n f = self.func_to_monitor(self.x0)\n self.l_thresh = f - self.error_bound\n self.u_thresh = f + self.error_bound\n logging.debug(\"Iteration \" + str(self.iteration) + \": About to sync the thresholds \" + str(self.l_thresh) + \",\" + str(self.u_thresh))\n\n # This function should be called after every data round by the test util loop (in a simulation, not in a real distributed experiment). This is for statistics only.\n def update_statistics(self):\n self.iteration += 1\n self._check_missed_violations()\n\n self.statistics.update_sync_statistics(self.func_to_monitor(self.verifier.get_local_vector()),\n self.func_to_monitor(self.x0), self.b_violation, self.b_eager_sync)\n self.b_violation = False\n self.b_eager_sync = False\n\n def _handle_violation_message(self, message_list):\n num_updates = 0\n\n for node_idx, payload in message_list:\n constraint_version, violation_origin, local_vector = parse_message_violation(payload, self.d)\n self.statistics.update_violation_messages_statistics(violation_origin)\n if constraint_version != self.nodes_constraint_version[node_idx]:\n logging.warning(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" reported violation \" + str(violation_origin) + \" with an old constraint version \" + str(constraint_version) + \" (current is \" + str(self.nodes_constraint_version[node_idx]) + \"). Ignoring.\")\n continue\n\n if self.state == State.Idle:\n self.start_collecting_local_vectors = timer()\n if not self.b_simulation:\n # TODO: remove. This sleep adds latency that simulates the network latency. 
This sleep after the first violation in a sync round\n # enables all the nodes to receive their data and update their local vectors in this data update round, before the coordinator\n # asks for their local vectors as part of the sync process.\n # In a real distributed experiment the network latency should be enough (under the assumption that all the nodes receive their\n # data at about the same time in each data round).\n #time.sleep(0.02) # 20 milliseconds\n pass\n\n logging.info(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" notify violation \" + str(violation_origin) + \" with constraint version \" + str(constraint_version))\n if self.b_nodes_have_violation[node_idx]:\n logging.error(\"Iteration \" + str(self.iteration) + \": Got violation from node \" + str(node_idx) + \" when there is a pending violation for this node\")\n raise Exception\n self._notify_violation(node_idx, violation_origin)\n self._update_local_vector_info(node_idx, local_vector)\n num_updates += 1\n\n return num_updates\n\n def _handle_local_vector_info_message(self, message_list):\n num_updates = 0\n\n for node_idx, payload in message_list:\n self.statistics.update_node_local_vector_info_messages_statistics(1)\n constraint_version, local_vector = parse_message_local_vector_info(payload, self.d)\n # First, check if the iteration number in the message equals self.iteration. If not, the message is from\n # old iteration and should be ignored.\n if constraint_version != self.nodes_constraint_version[node_idx]:\n logging.warning(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" returns to coordinator with statistics with an old constraint version \" + str(constraint_version) + \" (current is \" + self.nodes_constraint_version[node_idx] + \"). Ignoring.\")\n continue\n # Second, check if the local vector of this node was already updated. It can happen if the coordinator\n # asked for this node's local vector as part of LazySync but before it got the vector from the node,\n # the node had already reported violation (with its local vector) to the coordinator.\n # In that case, do nothing.\n if node_idx not in self.indices_of_nodes_asked_for_local_vector:\n logging.info(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" returns to coordinator with statistics, but vector was already updated\")\n continue\n logging.info(\"Iteration \" + str(self.iteration) + \": Node \" + str(node_idx) + \" returns to coordinator with statistics\")\n self._update_local_vector_info(node_idx, local_vector)\n num_updates += 1\n\n return num_updates\n\n def _state_machine(self, message_type, message_list):\n messages_out = []\n\n num_updates = self._handle_violation_message(message_list) if message_type == MessageType.Violation else self._handle_local_vector_info_message(message_list)\n if num_updates == 0:\n return messages_out\n\n # If len(self.indices_of_nodes_asked_for_local_vector) > 0, the coordinator must wait for the rest of the nodes in indices_of_nodes_asked_for_local_vector list\n # to send their local vectors. 
Otherwise, it can try to move to the next state.\n\n if len(self.indices_of_nodes_asked_for_local_vector) == 0 and not self.state == State.FullSync:\n # All the nodes in indices_of_nodes_asked_for_local_vector list sent their local vectors back to the coordinator.\n # If state is Idle calling to self._resolve_violation() starts the sync process, lazy or eager.\n # If state is FullSync then calling to self._resolve_violation() does nothing and returns empty message (so just skip the call in this case to prevent confusing logging).\n # If state is Idle or LazySync then calling to self._resolve_violation() asks for the next nodes for their local vectors.\n messages_out, b_eager_sync = self._resolve_violation()\n if b_eager_sync:\n self.state = State.FullSync\n else:\n self.state = State.LazySync\n\n # Calling to self._resolve_violation() may change self.indices_of_nodes_asked_for_local_vector, therefore, must check again for its length.\n\n if len(self.indices_of_nodes_asked_for_local_vector) == 0:\n self.statistics.collect_local_vectors_latencies.append(timer() - self.start_collecting_local_vectors)\n\n if self.state == State.FullSync:\n messages_out = self._finish_eager_sync()\n elif self.state == State.LazySync:\n messages_out = self._finish_lazy_sync()\n self.state = State.Idle\n self.b_nodes_have_updated_local_vector = np.zeros(self.num_nodes, dtype=bool)\n if not self.b_simulation:\n # In a real distributed experiment the iterations are the sync rounds, and every sync round ends here, with a call to finish_sync().\n # In simulation, however, the iterations are the data update rounds, and iteration increase happens in update_statistics()\n # that is called by the test manager even if no violation occurred during this data update round.\n self.iteration += 1\n\n return messages_out\n\n def dump_stats(self, test_folder):\n self.statistics.dump_stats(test_folder, self.coordinator_name)\n return self.statistics.full_sync_history, self.statistics.get_msg_counters()\n\n # For compatibility with both simulation and real distributed experiment (that uses messages), this method is the\n # only entry point of the coordinator (except dump_stats function that is called directly).\n def parse_message(self, messages: bytes):\n with self.lock:\n self.statistics.bytes_received += len(messages)\n message_type, message_list = message_to_message_list(messages)\n\n if message_type == MessageType.Violation or message_type == MessageType.LocalVectorInfo:\n messages_out = self._state_machine(message_type, message_list)\n else:\n logging.error(\"Iteration \" + str(self.iteration) + \": Unexpected message type \" + str(message_type))\n raise Exception\n\n for _, message_out in messages_out:\n self.statistics.bytes_sent += len(message_out)\n\n return messages_out\n" ]
[ [ "numpy.random.seed" ], [ "numpy.sqrt", "numpy.abs", "numpy.linalg.norm", "numpy.all", "numpy.cbrt", "scipy.optimize.minimize", "scipy.optimize.NonlinearConstraint", "numpy.array" ], [ "numpy.logical_not", "numpy.abs", "numpy.nonzero", "numpy.random.shuffle", "numpy.round", "numpy.all", "numpy.std", "numpy.alltrue", "numpy.zeros_like", "numpy.any", "numpy.mean", "numpy.max", "numpy.savetxt", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ijauregiCMCRC/Shared_Attention_for_APE
[ "c51ee9bd75c66906bddcd59d271f48e62dbd2636" ]
[ "onmt/modules/Embeddings.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom onmt.modules import BottleLinear, Elementwise\nfrom onmt.Utils import aeq\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, dropout, dim, max_len=5000):\n pe = torch.arange(0, max_len).unsqueeze(1).expand(max_len, dim)\n div_term = 1 / torch.pow(10000, torch.arange(0, dim * 2, 2) / dim)\n pe = pe * div_term.expand_as(pe)\n pe[:, 0::2] = torch.sin(pe[:, 0::2])\n pe[:, 1::2] = torch.cos(pe[:, 1::2])\n pe = Variable(pe.unsqueeze(1))\n super(PositionalEncoding, self).__init__()\n self.register_buffer('pe', pe)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, emb):\n emb = emb + self.pe[:emb.size(0), :1, :emb.size(2)].expand_as(emb)\n emb = self.dropout(emb)\n return emb\n\n\nclass Embeddings(nn.Module):\n \"\"\"\n Words embeddings dictionary for encoder/decoder.\n\n Args:\n word_vec_size (int): size of the dictionary of embeddings.\n position_encoding (bool): use a sin to mark relative words positions.\n feat_merge (string): merge action for the features embeddings:\n concat, sum or mlp.\n feat_vec_exponent (float): when using '-feat_merge concat', feature\n embedding size is N^feat_dim_exponent, where N is the\n number of values of feature takes.\n feat_vec_size (int): embedding dimension for features when using\n '-feat_merge mlp'\n dropout (float): dropout probability.\n word_padding_idx (int): padding index for words in the embeddings.\n feats_padding_idx ([int]): padding index for a list of features\n in the embeddings.\n word_vocab_size (int): size of dictionary of embeddings for words.\n feat_vocab_sizes ([int], optional): list of size of dictionary\n of embeddings for each feature.\n \"\"\"\n def __init__(self, word_vec_size, position_encoding, feat_merge,\n feat_vec_exponent, feat_vec_size, dropout,\n word_padding_idx, feat_padding_idx,\n word_vocab_size, feat_vocab_sizes=[]):\n\n self.word_padding_idx = word_padding_idx\n\n # Dimensions and padding for constructing the word embedding matrix\n vocab_sizes = [word_vocab_size]\n emb_dims = [word_vec_size]\n pad_indices = [word_padding_idx]\n\n # Dimensions and padding for feature embedding matrices\n # (these have no effect if feat_vocab_sizes is empty)\n if feat_merge == 'sum':\n feat_dims = [word_vec_size] * len(feat_vocab_sizes)\n elif feat_vec_size > 0:\n feat_dims = [feat_vec_size] * len(feat_vocab_sizes)\n else:\n feat_dims = [int(vocab ** feat_vec_exponent)\n for vocab in feat_vocab_sizes]\n vocab_sizes.extend(feat_vocab_sizes)\n emb_dims.extend(feat_dims)\n pad_indices.extend(feat_padding_idx)\n\n # The embedding matrix look-up tables. The first look-up table\n # is for words. Subsequent ones are for features, if any exist.\n emb_params = zip(vocab_sizes, emb_dims, pad_indices)\n embeddings = [nn.Embedding(vocab, dim, padding_idx=pad)\n for vocab, dim, pad in emb_params]\n emb_luts = Elementwise(feat_merge, embeddings)\n\n # The final output size of word + feature vectors. This can vary\n # from the word vector size if and only if features are defined.\n # This is the attribute you should access if you need to know\n # how big your embeddings are going to be.\n self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'\n else word_vec_size)\n\n # The sequence of operations that converts the input sequence\n # into a sequence of embeddings. At minimum this consists of\n # looking up the embeddings for each word and feature in the\n # input. 
Model parameters may require the sequence to contain\n # additional operations as well.\n super(Embeddings, self).__init__()\n self.make_embedding = nn.Sequential()\n self.make_embedding.add_module('emb_luts', emb_luts)\n\n if feat_merge == 'mlp':\n in_dim = sum(emb_dims)\n out_dim = word_vec_size\n mlp = nn.Sequential(BottleLinear(in_dim, out_dim), nn.ReLU())\n self.make_embedding.add_module('mlp', mlp)\n\n if position_encoding:\n pe = PositionalEncoding(dropout, self.embedding_size)\n self.make_embedding.add_module('pe', pe)\n\n @property\n def word_lut(self):\n return self.make_embedding[0][0]\n\n @property\n def emb_luts(self):\n return self.make_embedding[0]\n\n def load_pretrained_vectors(self, emb_file, fixed):\n if emb_file:\n pretrained = torch.load(emb_file)\n self.word_lut.weight.data.copy_(pretrained)\n if fixed:\n self.word_lut.weight.requires_grad = False\n\n def forward(self, input):\n \"\"\"\n Return the embeddings for words, and features if there are any.\n Args:\n input (LongTensor): len x batch x nfeat\n Return:\n emb (FloatTensor): len x batch x self.embedding_size\n \"\"\"\n in_length, in_batch, nfeat = input.size()\n aeq(nfeat, len(self.emb_luts))\n\n emb = self.make_embedding(input)\n\n out_length, out_batch, emb_size = emb.size()\n aeq(in_length, out_length)\n aeq(in_batch, out_batch)\n aeq(emb_size, self.embedding_size)\n\n return emb\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.sin", "torch.load", "torch.nn.Embedding", "torch.arange", "torch.nn.ReLU", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MichaelSchreier/pyod
[ "10a29b9999cb1fc340f8d5335850952cdb4b5b46" ]
[ "pyod/models/mcd.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Outlier Detection with Minimum Covariance Determinant (MCD)\n\"\"\"\n# Author: Yue Zhao <[email protected]>\n# License: BSD 2 clause\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom sklearn.covariance import MinCovDet\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.validation import check_array\n\nfrom .base import BaseDetector\n\n__all__ = ['MCD']\n\n\nclass MCD(BaseDetector):\n \"\"\"Detecting outliers in a Gaussian distributed dataset using\n Minimum Covariance Determinant (MCD): robust estimator of covariance.\n\n The Minimum Covariance Determinant covariance estimator is to be applied\n on Gaussian-distributed data, but could still be relevant on data\n drawn from a unimodal, symmetric distribution. It is not meant to be used\n with multi-modal data (the algorithm used to fit a MinCovDet object is\n likely to fail in such a case).\n One should consider projection pursuit methods to deal with multi-modal\n datasets.\n\n First fit a minimum covariance determinant model and then compute the\n Mahalanobis distance as the outlier degree of the data\n\n See :cite:`rousseeuw1999fast,hardin2004outlier` for details.\n\n Parameters\n ----------\n contamination : float in (0., 0.5), optional (default=0.1)\n The amount of contamination of the data set,\n i.e. the proportion of outliers in the data set. Used when fitting to\n define the threshold on the decision function.\n\n store_precision : bool\n Specify if the estimated precision is stored.\n\n assume_centered : Boolean\n If True, the support of the robust location and the covariance\n estimates is computed, and a covariance estimate is recomputed from\n it, without centering the data.\n Useful to work with data whose mean is significantly equal to\n zero but is not exactly zero.\n If False, the robust location and covariance are directly computed\n with the FastMCD algorithm without additional treatment.\n\n support_fraction : float, 0 < support_fraction < 1\n The proportion of points to be included in the support of the raw\n MCD estimate. 
Default is None, which implies that the minimum\n value of support_fraction will be used within the algorithm:\n [n_sample + n_features + 1] / 2\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Attributes\n ----------\n raw_location_ : array-like, shape (n_features,)\n The raw robust estimated location before correction and re-weighting.\n\n raw_covariance_ : array-like, shape (n_features, n_features)\n The raw robust estimated covariance before correction and re-weighting.\n\n raw_support_ : array-like, shape (n_samples,)\n A mask of the observations that have been used to compute\n the raw robust estimates of location and shape, before correction\n and re-weighting.\n\n location_ : array-like, shape (n_features,)\n Estimated robust location\n\n covariance_ : array-like, shape (n_features, n_features)\n Estimated robust covariance matrix\n\n precision_ : array-like, shape (n_features, n_features)\n Estimated pseudo inverse matrix.\n (stored only if store_precision is True)\n\n support_ : array-like, shape (n_samples,)\n A mask of the observations that have been used to compute\n the robust estimates of location and shape.\n\n decision_scores_ : numpy array of shape (n_samples,)\n The outlier scores of the training data.\n The higher, the more abnormal. Outliers tend to have higher\n scores. This value is available once the detector is\n fitted. Mahalanobis distances of the training set (on which\n `:meth:`fit` is called) observations.\n\n threshold_ : float\n The threshold is based on ``contamination``. It is the\n ``n_samples * contamination`` most abnormal samples in\n ``decision_scores_``. The threshold is calculated for generating\n binary outlier labels.\n\n labels_ : int, either 0 or 1\n The binary labels of the training data. 0 stands for inliers\n and 1 for outliers/anomalies. It is generated by applying\n ``threshold_`` on ``decision_scores_``.\n \"\"\"\n\n def __init__(self, contamination=0.1, store_precision=True,\n assume_centered=False, support_fraction=None,\n random_state=None):\n super(MCD, self).__init__(contamination=contamination)\n self.store_precision = store_precision\n self.assume_centered = assume_centered\n self.support_fraction = support_fraction\n self.random_state = random_state\n\n # noinspection PyIncorrectDocstring\n def fit(self, X, y=None):\n \"\"\"Fit detector. y is ignored in unsupervised methods.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The input samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted estimator.\n \"\"\"\n # Validate inputs X and y (optional)\n X = check_array(X)\n self._set_n_classes(y)\n\n self.detector_ = MinCovDet(store_precision=self.store_precision,\n assume_centered=self.assume_centered,\n support_fraction=self.support_fraction,\n random_state=self.random_state)\n self.detector_.fit(X=X, y=y)\n\n # Use mahalanabis distance as the outlier score\n self.decision_scores_ = self.detector_.dist_\n self._process_decision_scores()\n return self\n\n def decision_function(self, X):\n \"\"\"Predict raw anomaly score of X using the fitted detector.\n\n The anomaly score of an input sample is computed based on different\n detector algorithms. 
For consistency, outliers are assigned with\n larger anomaly scores.\n\n Parameters\n ----------\n X : numpy array of shape (n_samples, n_features)\n The training input samples. Sparse matrices are accepted only\n if they are supported by the base estimator.\n\n Returns\n -------\n anomaly_scores : numpy array of shape (n_samples,)\n The anomaly score of the input samples.\n \"\"\"\n check_is_fitted(self, ['decision_scores_', 'threshold_', 'labels_'])\n X = check_array(X)\n\n # Computer mahalanobis distance of the samples\n return self.detector_.mahalanobis(X)\n\n @property\n def raw_location_(self):\n \"\"\"The raw robust estimated location before correction and\n re-weighting.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.raw_location_\n\n @property\n def raw_covariance_(self):\n \"\"\"The raw robust estimated location before correction and\n re-weighting.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.raw_covariance_\n\n @property\n def raw_support_(self):\n \"\"\"A mask of the observations that have been used to compute\n the raw robust estimates of location and shape, before correction\n and re-weighting.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.raw_support_\n\n @property\n def location_(self):\n \"\"\"Estimated robust location.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.location_\n\n @property\n def covariance_(self):\n \"\"\"Estimated robust covariance matrix.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.covariance_\n\n @property\n def precision_(self):\n \"\"\" Estimated pseudo inverse matrix.\n (stored only if store_precision is True)\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.precision_\n\n @property\n def support_(self):\n \"\"\"A mask of the observations that have been used to compute\n the robust estimates of location and shape.\n\n Decorator for scikit-learn MinCovDet attributes.\n \"\"\"\n return self.detector_.support_\n" ]
[ [ "sklearn.covariance.MinCovDet", "sklearn.utils.validation.check_is_fitted", "sklearn.utils.validation.check_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hugobb/OnlineAttacks
[ "5cc971eba014e625ec43f67f6c5eadf713c4141c", "5cc971eba014e625ec43f67f6c5eadf713c4141c" ]
[ "online_attacks/classifiers/mnist/models.py", "online_attacks/classifiers/cifar/models/__init__.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom enum import Enum\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport os\n\n\nclass MnistModel(Enum):\n MODEL_A = \"modelA\"\n MODEL_B = \"modelB\"\n MODEL_C = \"modelC\"\n MODEL_D = \"modelD\"\n MADRY_MODEL = \"madry\"\n\n def __str__(self):\n return self.value\n\n\nclass modelA(nn.Module):\n def __init__(self):\n super().__init__()\n self.num_classes = 10\n\n self.conv1 = nn.Conv2d(1, 64, 5)\n self.conv2 = nn.Conv2d(64, 64, 5)\n self.dropout1 = nn.Dropout(0.25)\n self.fc1 = nn.Linear(64 * 20 * 20, 128)\n self.dropout2 = nn.Dropout(0.5)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = self.dropout1(x)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n x = self.dropout2(x)\n x = self.fc2(x)\n return x\n\n\nclass modelB(nn.Module):\n def __init__(self):\n super().__init__()\n self.num_classes = 10\n\n self.dropout1 = nn.Dropout(0.2)\n self.conv1 = nn.Conv2d(1, 64, 8)\n self.conv2 = nn.Conv2d(64, 128, 6)\n self.conv3 = nn.Conv2d(128, 128, 5)\n self.dropout2 = nn.Dropout(0.5)\n self.fc = nn.Linear(128 * 12 * 12, 10)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.dropout1(x)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = self.dropout2(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\nclass modelC(nn.Module):\n def __init__(self):\n super().__init__()\n self.num_classes = 10\n\n self.conv1 = nn.Conv2d(1, 128, 3)\n self.conv2 = nn.Conv2d(128, 64, 3)\n self.fc1 = nn.Linear(64 * 5 * 5, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = torch.tanh(self.conv1(x))\n x = F.max_pool2d(x, 2)\n x = torch.tanh(self.conv2(x))\n x = F.max_pool2d(x, 2)\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass modelD(nn.Module):\n def __init__(self):\n super().__init__()\n self.num_classes = 10\n\n self.fc1 = nn.Linear(1 * 28 * 28, 300)\n self.dropout1 = nn.Dropout(0.5)\n self.fc2 = nn.Linear(300, 300)\n self.dropout2 = nn.Dropout(0.5)\n self.fc3 = nn.Linear(300, 300)\n self.dropout3 = nn.Dropout(0.5)\n self.fc4 = nn.Linear(300, 300)\n self.dropout4 = nn.Dropout(0.5)\n self.fc5 = nn.Linear(300, 10)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x.view(x.size(0), -1)\n x = F.relu(self.fc1(x))\n x = self.dropout1(x)\n x = F.relu(self.fc2(x))\n x = self.dropout2(x)\n x = F.relu(self.fc3(x))\n x = self.dropout3(x)\n x = F.relu(self.fc4(x))\n x = self.dropout4(x)\n x = self.fc5(x)\n return x\n\n\n__mnist_model_dict__ = {\n MnistModel.MODEL_A: modelA,\n MnistModel.MODEL_B: modelB,\n MnistModel.MODEL_C: modelC,\n MnistModel.MODEL_D: modelD,\n}\n\n\ndef make_mnist_model(model: MnistModel) -> nn.Module:\n return __mnist_model_dict__[model]()\n\n\ndef load_mnist_classifier(\n model_type: MnistModel,\n name: str = None,\n model_dir: str = None,\n device=None,\n eval=False,\n) -> nn.Module:\n if model_type == MnistModel.MADRY_MODEL:\n from online_attacks.classifiers.madry import load_madry_model\n\n filename = os.path.join(model_dir, \"mnist\", model_type.value, \"%s\" % name)\n if os.path.exists(filename):\n model = load_madry_model(\"mnist\", filename)\n else:\n raise OSError(\"File %s not found !\" % filename)\n\n # Hack to be 
able to use some attacker class\n model.num_classes = 10\n\n elif model_type in __mnist_model_dict__:\n model = make_mnist_model(model_type)\n if name is not None:\n filename = os.path.join(\n model_dir, \"mnist\", model_type.value, \"%s.pth\" % name\n )\n if os.path.exists(filename):\n state_dict = torch.load(filename, map_location=torch.device(\"cpu\"))\n model.load_state_dict(state_dict)\n else:\n raise OSError(\"File %s not found !\" % filename)\n\n else:\n raise ValueError()\n\n if eval:\n model.eval()\n\n return model.to(device)\n", "from enum import Enum\nimport torch\nimport torch.nn as nn\nimport os\n\nfrom .vgg import VGG\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .resnet import ResNet18\nfrom .densenet import densenet_cifar\nfrom .googlenet import GoogLeNet\nfrom .wide_resnet import Wide_ResNet\n\n\nclass CifarModel(Enum):\n VGG_16 = \"VGG16\"\n RESNET_18 = \"res18\"\n DENSE_121 = \"dense121\"\n GOOGLENET = \"googlenet\"\n WIDE_RESNET = \"wide_resnet\"\n MADRY_MODEL = \"madry\"\n\n def __str__(self):\n return self.value\n\n\n__cifar_model_dict__ = {\n CifarModel.VGG_16: VGG,\n CifarModel.RESNET_18: ResNet18,\n CifarModel.DENSE_121: densenet_cifar,\n CifarModel.GOOGLENET: GoogLeNet,\n CifarModel.WIDE_RESNET: Wide_ResNet,\n}\n\n\ndef make_cifar_model(model: CifarModel) -> nn.Module:\n return __cifar_model_dict__[model]()\n\n\ndef load_cifar_classifier(\n model_type: CifarModel,\n name: str = None,\n model_dir: str = None,\n device=None,\n eval=False,\n) -> nn.Module:\n if model_type == CifarModel.MADRY_MODEL:\n from online_attacks.classifiers.madry import load_madry_model\n\n filename = os.path.join(model_dir, \"cifar\", model_type.value, \"%s\" % name)\n if os.path.exists(filename):\n model = load_madry_model(\"cifar\", filename)\n else:\n raise OSError(\"File %s not found !\" % filename)\n\n elif model_type in __cifar_model_dict__:\n model = make_cifar_model(model_type)\n if name is not None:\n filename = os.path.join(\n model_dir, \"cifar\", model_type.value, \"%s.pth\" % name\n )\n if os.path.exists(filename):\n state_dict = torch.load(filename, map_location=torch.device(\"cpu\"))\n model.load_state_dict(state_dict)\n else:\n raise OSError(\"File not found !\")\n\n else:\n raise ValueError()\n\n if eval:\n model.eval()\n\n # Hack to be able to use some attacker class\n model.num_classes = 10\n\n return model.to(device)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Linear", "torch.device", "torch.nn.functional.max_pool2d" ], [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aokad/GenomonToolkit
[ "d9fbe2dcd91271e47eab8b91fff792e3f7b7320c" ]
[ "subcode.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 05 16:44:30 2015\n\n@brief: Common functions\n@author: okada\n\n$Id: subcode.py 199 2017-07-10 04:27:19Z aokada $\n$Rev: 199 $\n\"\"\"\n\n# ファイルがあるかチェック\ndef path_check(path, config):\n \n if config != None and config.has_option(\"MAIN\", \"path_check\"):\n if config.getboolean(\"MAIN\", \"path_check\") == False:\n return True\n \n import os\n return os.path.exists(path)\n\n# 現在時刻で文字列を作る (yyyy/mm/dd_HH/MM/SS)\ndef date_str():\n import datetime\n now = datetime.datetime.now()\n return \"%04d%02d%02d_%02d%02d%02d\" % (now.year, now.month, now.day, now.hour, now.minute, now.second)\n\n# ログファイル書き込み\n# path ログファイルのパス\n# mode ファイルオープンモード(\"w\",\"a\")\n# text 出力したい文字列\n# date (True/False) Trueの時は現在時刻を先頭につける\n# printer (True/False) Trueの時はコンソールにも出力する\n\ndef write_log(path, mode, text, date, printer):\n import datetime\n \n t = \"\"\n if date == True:\n now = datetime.datetime.now()\n t = \"{0:0>4d}/{1:0>2d}/{2:0>2d} {3:0>2d}:{4:0>2d}:{5:0>2d}: \".format(\n now.year, now.month, now.day, now.hour, now.minute, now.second)\n\n f = open(path, mode)\n f.write(t + text + \"\\n\")\n f.close()\n\n if printer == True:\n print (t + text)\n\n# configのメタデータ設定値と一致するかチェック\n# config ConfigParser\n# option configのオプション、sectionはMETADATA固定\n# value 現在の値\n\ndef conf_match(config, option, value):\n\n # 設定項目がない場合はTrueで返す\n if config == None:\n return True\n if not config.has_option(\"METADATA\", option):\n return True\n if config.get(\"METADATA\", option) == \"\":\n return True\n \n # 設定値が複数ある場合は,で区切って入力してある\n # どれか一つと一致すればTrueで返す\n if value in config.get('METADATA', option).split(\",\"):\n return True\n \n # 設定値がboolean型の場合\n try:\n if value == config.getboolean('METADATA', option):\n return True\n except Exception:\n pass\n \n # 設定値がちゃんと設定してあり、一致しない場合のみFalseで返す\n return False\n\n# metaデータを読み込む\n# このとき、チェックリストやチェック項目を合わせみる\n# OKのものは\"data\"で、NGのものは\"invalid\"で返す\n\ndef load_metadata(metadata, bam_dir=None, config=None, check_result=\"\"):\n import json\n \n # チェックリストNGのbamリストを作成\n black = []\n reasons = []\n if check_result != \"\":\n f = open(check_result)\n for row in f.readlines():\n cells = row.rstrip(\"\\r\\n\").split(\",\")\n if cells[1] != \"OK\":\n black.append(cells[0])\n reasons.append(cells[1])\n f.close()\n\n read_data = json.load(open(metadata))\n data = []\n invalid = []\n for obj in read_data:\n \n analysis_id = obj[\"file_id\"]\n \n # ファイルが存在しない\n if path_check(bam_dir + \"/\" + analysis_id + \"/\" + obj[\"file_name\"], config) == False:\n invalid.append([analysis_id, \"not exist bam\"])\n continue\n \n # metadata 不一致\n if not conf_match(config, 'analyte_type', obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"analyte_type\"]):\n invalid.append([analysis_id, \"analyte_type:%s\" % (obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"analyte_type\"]) ])\n continue\n if not conf_match(config, 'is_ffpe', obj[\"cases\"][0][\"samples\"][0][\"is_ffpe\"]):\n invalid.append([analysis_id, \"is_ffpe:%s\" % (obj[\"cases\"][0][\"samples\"][0][\"is_ffpe\"]) ])\n continue\n if not conf_match(config, 'experimental_strategy', obj[\"experimental_strategy\"]):\n invalid.append([analysis_id, \"experimental_strategy:%s\" % (obj[\"experimental_strategy\"]) ])\n continue\n if not conf_match(config, 'platform', obj[\"platform\"]):\n invalid.append([analysis_id, \"platform:%s\" % (obj[\"platform\"]) ])\n continue\n\n # チェックリストNGのものは除外\n if analysis_id in black:\n invalid.append([analysis_id, reasons[black.index(analysis_id)]])\n 
continue\n \n data.append(obj)\n return {\"data\": data, \"invalid\": invalid}\n\ndef id_to_samplecode(id):\n if id == 1: return \"TP\"\n if id == 2: return \"TR\"\n if id == 3: return \"TB\"\n if id == 4: return \"TRBM\"\n if id == 5: return \"TAP\"\n if id == 6: return \"TM\"\n if id == 7: return \"TAM\"\n if id == 8: return \"THOC\"\n if id == 9: return \"TBM\"\n if id == 10:return \"NB\"\n if id == 11:return \"NT\"\n if id == 12:return \"NBC\"\n if id == 13:return \"NEBV\"\n if id == 14:return \"NBM\"\n if id == 20:return \"CELL\"\n if id == 40:return \"TRB\"\n if id == 50:return \"CELL\"\n if id == 60:return \"XP\"\n if id == 61:return \"XCL\"\n\n return \"\"\n \ndef json_to_pandas(jdata):\n import pandas\n \n tmp = [] \n for obj in jdata:\n tmp.append([obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"][0][\"submitter_id\"],\n obj[\"cases\"][0][\"project\"][\"project_id\"].replace(\"TCGA-\", \"\"),\n id_to_samplecode(int(obj[\"cases\"][0][\"samples\"][0][\"sample_type_id\"])),\n int(obj[\"cases\"][0][\"samples\"][0][\"sample_type_id\"]),\n obj[\"file_name\"],\n obj[\"file_id\"],\n obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"][0][\"updated_datetime\"],\n ])\n\n pdata = pandas.DataFrame(tmp)\n pdata.columns = [\"barcode\", \"disease\", \"sample_type\", \"sample_type_id\", \"filename\", \"analysis_id\", \"updated\"]\n\n return pdata\n\ndef view1(files):\n import json\n \n for file_path in files:\n for obj in json.load(open(file_path)):\n if len(obj[\"associated_entities\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"associated_entities\"])))\n \n if len(obj[\"cases\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"cases\"])))\n if len(obj[\"cases\"][0][\"samples\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"cases\"][0][\"samples\"])))\n if len(obj[\"cases\"][0][\"samples\"][0][\"portions\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"cases\"][0][\"samples\"][0][\"portions\"])))\n if len(obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"])))\n if len(obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"]) > 1: \n print (\"%s\\n\" % (len(obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"])))\n \n if obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"][0][\"submitter_id\"] != obj[\"associated_entities\"][0][\"entity_submitter_id\"]:\n print (\"%s, %s\\n\" % (obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"][0][\"submitter_id\"], obj[\"associated_entities\"][0][\"entity_submitter_id\"]))\n\ndef view2(files, output):\n import json\n \n fw = open(output, \"w\")\n fw.write(\"Project_id,barcode,Project,TSS(Tissue source site),Participant,Sample+Vial,Portion+Analyte,Plate,Center,updated,analyte_type,isFFPE,\")\n fw.write(\"composition,created_datetime,current_weight,days_to_collection,days_to_sample_procurement,freezing_method,initial_weight,intermediate_dimension,is_ffpe,longest_dimension,oct_embedded,preservation_method,shortest_dimension,state,time_between_clamping_and_freezing,time_between_excision_and_freezing,tissue_type,tumor_code,tumor_code_id,tumor_descriptor\\n\")\n for file_path in files:\n for obj in json.load(open(file_path)):\n barcode = obj[\"associated_entities\"][0][\"entity_submitter_id\"]\n fw.write(\"%s,%s,%s,%s,%s,%s,%s\\n\" % (\n obj[\"cases\"][0][\"project\"][\"project_id\"], \n barcode, \n 
barcode.replace(\"-\", \",\"),\n obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"aliquots\"][0][\"updated_datetime\"],\n obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"analytes\"][0][\"analyte_type\"],\n obj[\"cases\"][0][\"samples\"][0][\"portions\"][0][\"is_ffpe\"],\n \"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\" % (\n obj[\"cases\"][0][\"samples\"][0][\"composition\"],\n obj[\"cases\"][0][\"samples\"][0][\"created_datetime\"],\n obj[\"cases\"][0][\"samples\"][0][\"current_weight\"],\n obj[\"cases\"][0][\"samples\"][0][\"days_to_collection\"],\n obj[\"cases\"][0][\"samples\"][0][\"days_to_sample_procurement\"],\n obj[\"cases\"][0][\"samples\"][0][\"freezing_method\"],\n obj[\"cases\"][0][\"samples\"][0][\"initial_weight\"],\n obj[\"cases\"][0][\"samples\"][0][\"intermediate_dimension\"],\n obj[\"cases\"][0][\"samples\"][0][\"is_ffpe\"],\n obj[\"cases\"][0][\"samples\"][0][\"longest_dimension\"],\n obj[\"cases\"][0][\"samples\"][0][\"oct_embedded\"],\n obj[\"cases\"][0][\"samples\"][0][\"preservation_method\"],\n obj[\"cases\"][0][\"samples\"][0][\"shortest_dimension\"],\n obj[\"cases\"][0][\"samples\"][0][\"state\"],\n obj[\"cases\"][0][\"samples\"][0][\"time_between_clamping_and_freezing\"],\n obj[\"cases\"][0][\"samples\"][0][\"time_between_excision_and_freezing\"],\n obj[\"cases\"][0][\"samples\"][0][\"tissue_type\"],\n obj[\"cases\"][0][\"samples\"][0][\"tumor_code\"],\n obj[\"cases\"][0][\"samples\"][0][\"tumor_code_id\"],\n obj[\"cases\"][0][\"samples\"][0][\"tumor_descriptor\"],\n )\n ))\n fw.close()\n \nif __name__ == \"__main__\":\n pass\n \n #view1([\"metadata.all_p1.json\",\"metadata.all_p2.json\",\"metadata.all_p3.json\"])\n #view2([\"metadata.all_p1.json\",\"metadata.all_p2.json\",\"metadata.all_p3.json\"], \"./barcodes.csv\")\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
phcanalytics/ibd_flare_model
[ "78da4640ab5a3a5d174d604d764f46059ee66dce" ]
[ "scripts/sql_query/06_cdiff_query.py" ]
[ "\"\"\"\n--------------------------------------------------------------------------------\nTitle: Query to persons in the IBD cohort with a clostridium difficile diagnosis\nAuthor: Ryan Gan\nDate Created: 2019-06-28\n\nThis script will generate permanent SQL table persons in our IBD cohort with a \ndiagnosis of C-diff in case we decide to exclude\n--------------------------------------------------------------------------------\n\"\"\"\n# load teradatasql to establish connection to teradata\nimport teradatasql\n# import jinja2 for looping in sql query\nfrom jinja2 import Template\n# os \nimport os\n# import timeit for timing\nimport timeit\n# import pandas to save a table\nimport pandas as pd\n\n\n# read hidden login info; not the best way to do this but it works\nlogin = [line.rstrip('\\n') for line in open('../sql_query/login.txt')]\n\n# new connection info for teradatasql client\nusertd = login[2]\npasswordtd = login[3]\nhost = login[4]\n\n# create connection to teradata\ncon = teradatasql.connect('{\"host\": \"' + host + '\",' + \\\n '\"user\": \"' + usertd + '\",' + \\\n '\"password\": \"' + passwordtd + '\",' + \\\n '\"logmech\":\"LDAP\"}')\n\n\n# create cursor to submit scripts\ncur = con.cursor()\n\n\"\"\"\nQuery of fecal cal to create ibd_flare.fecal_cal data lab\n\"\"\"\n\nfile_path = './sql_templates/cdiff/ibd_flare.cdiff.sql'\n\nwith open(file_path) as f:\n query = f.read()\n print(query)\n \n# execute query\ntry:\n print('Attempting to submit query')\n cur.execute(query)\n print('Finished query')\nexcept:\n start_time = timeit.default_timer()\n print('Table alread exists; dropping table and trying again.')\n cur.execute('DROP TABLE ' + q)\n cur.execute(query)\n print('Finished query')\n \n \n\"\"\"\nCount up how many outpatient corticosteroid events may be misclassified\n\"\"\"\n\ncdiff_statistics = pd.read_sql(\"\"\"\n SELECT COUNT(*) AS cortsteroid_count,\n COUNT(cdiff_flag) AS cdiff_count,\n cdiff_count*100.0/cortsteroid_count AS prop_cdiff\n FROM\n (\n SELECT a.*,\n b.diag_date AS cdiff_date, \n b.cdiff_flag\n FROM ibd_flare.analysis AS a\n LEFT JOIN ibd_flare.cdiff AS b \n ON (a.ptid = b.ptid) AND \n (a.flare_date >= b.diag_date - 7 AND a.flare_date <= b.diag_date + 7)\n WHERE a.flare_cat = 'outpat_corticosteroid_flare'\n ) AS temp;\n \"\"\", con)\n\n# save to results folder\n\nprint('Outpatient corticosteroid events with possible C-Diff misclassification\\n\\n', \n cdiff_statistics, \n file=open(\"../../results/cdiff_stats.txt\", \"w\"))" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
vt-sailbot/sailbot-20
[ "b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52", "b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52" ]
[ "src/buoy_detection/Distance_Calculator.py", "src/tracking/kalman_filter.py" ]
[ "import numpy as np\nimport cv2\nfrom src.buoy_detection.Depth_Map_Calculator import Depth_Map\nfrom math import sin, cos, asin, atan2, pi\nimport os\nclass DistanceCalculator():\n path = os.path.realpath(__file__)[:-len(os.path.basename(__file__))] + \"stereo_calibration.npz\"\n\n def __init__(self, calibration_directory = path, camera_baseline=.4064, camera_numbers=(2,3), DRAW_IMAGE = False):\n\n self.DRAW_IMAGE = DRAW_IMAGE\n self.depth_map_calculator = Depth_Map(calibration_directory, baseline = camera_baseline, camera_numbers=camera_numbers, DRAW_IMAGE = DRAW_IMAGE)\n\n def isBuoyInImage(self, left_frame):\n \"\"\"\n Returns boolean value of if a large orange contour (buoy) is found in a frame\n :param left_frame: the frame from the main camera\n :return: boolean if buoy is found\n \"\"\"\n kernel_close = np.ones((2, 2))\n kernel_open = np.ones((12, 12))\n\n hsv = cv2.cvtColor(left_frame, cv2.COLOR_BGR2HSV)\n\n # mask = cv2.inRange(hsv, RED_ORANGE_LOW, RED_ORANGE_HIGH)\n mask = cv2.inRange(hsv, (10, 100, 20), (15, 255, 255))\n mask_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open)\n mask = mask_open\n mask_close = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_close)\n mask = mask_close\n contours, __ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return len(contours) > 0\n\n def findBuoyPixels(self, left_frame):\n \"\"\"\n Determine if the left camera has an image of the of buoy in it using color. The calibration setup\n has the left camera as the primary camera, so the disparity map pixels are equivalent to the ones in the disparity map.\n\n :return:\n The pixels in which we see the buoy\n \"\"\"\n\n kernel_close = np.ones((2, 2))\n kernel_open = np.ones((12, 12))\n\n frame_copy = left_frame\n\n hsv = cv2.cvtColor(left_frame, cv2.COLOR_BGR2HSV)\n\n # mask = cv2.inRange(hsv, RED_ORANGE_LOW, RED_ORANGE_HIGH)\n mask = cv2.inRange(hsv, (10, 100, 20), (15, 255, 255))\n mask_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open)\n mask = mask_open\n mask_close = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_close)\n mask = mask_close\n contours, __ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n biggest = sorted(contours, key=cv2.contourArea)[-1]\n if self.DRAW_IMAGE:\n cv2.drawContours(left_frame, contours, -1, (0, 255, 0), 3)\n x, y, w, h = cv2.boundingRect(biggest)\n cv2.rectangle(left_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n moment = cv2.moments(biggest)\n return int(moment['m10'] / moment['m00']), int(moment['m01'] / moment['m00'])\n raise Exception(\"No buoy found in image\")\n return None\n\n def getDisparityValue(self, disparity_frame, xPixel, yPixel):\n \"\"\"\n Gets the disparity value from the disparity matrix if it is near an edge. 
Otherwise, gets an average of disparity values from surrounding pixels\n\n :param xPixel: the x coordinate in the disparity map\n :param yPixel: the y coordinate in the disparity map\n :return: the disparity value\n \"\"\"\n\n if disparity_frame is None:\n print(\"disparity is None\")\n return None\n\n #If the detected center pixel is near the edges, return just the disparity of that one pixel\n if xPixel <= 1 or yPixel <= 1 or xPixel >= 639 or yPixel >= 479:\n return disparity_frame[xPixel][yPixel]\n\n #Otherwise, return the average of the surrounding pixels for a little more accuracy\n array = disparity_frame[xPixel - 1: xPixel + 1, yPixel - 1: yPixel + 1]\n return sum(array)/array.size\n\n def getDistance(self, disparity_value):\n \"\"\"\n Get the distance of the given disparity value using the two cameras' offsets, as well as the focal length and the calculated dispairty value\n :param disparity_value: the value of the disparity map that we use to calculate distance\n\n :return:\n the distance in meters of the given disparity\n \"\"\"\n #D:= Distance of point in real world,\n #b:= base offset, (the distance *between* your cameras)\n #f:= focal length of camera,\n #d:= disparity:\n\n #D = b*f/d\n return self.baseline*self.depth_map_calculator.focal_length/disparity_value\n\n\n\n def getBearingFromxPixel(self, xPixel, real_bearing, cameras_rotation=0):\n \"\"\"\n\n :param xPixel: the pixel in the x direction in which we see the buoy\n :param real_bearing: the real bearing of the boat as read by the airmar\n :param cameras_rotation: the rotation of the two cameras around the central axis (this is currently not implemented, so defaults to 0)\n :return: the predicted bearing of the buoy taking into consideration the real bearing of the boat\n \"\"\"\n distance_from_center = xPixel - self.depth_map_calculator.image_size[0]/2\n relative_bearing = distance_from_center*self.depth_map_calculator.pixel_degrees\n\n camera_bearing = real_bearing + cameras_rotation\n new_bearing = camera_bearing + relative_bearing\n return ((new_bearing % 360) + 360) % 360\n\n\n def getBuoyGPSLocation(self, boat_lat, boat_lon, distance, bearing):\n \"\"\"\n Gets the predicted gps location of the buoy based on the current gps location, angle to the buoy, and the distance to the buoy\n\n The math was found from here:\n https://stackoverflow.com/questions/19352921/how-to-use-direction-angle-and-speed-to-calculate-next-times-latitude-and-longi\n\n :param boat_lat: the current latitude of the boat\n :param boat_lon: the current longitude of the boat\n :param distance: the predicted distance to the buoy in meters\n :param bearing: the bearing (angle) to the buoy in radians\n :return:\n \"\"\"\n earth_radius = 6371000\n distance = distance / earth_radius\n # First attempt (might work?)\n # buoy_lat = asin(sin(boat_lat) * cos(distance) + cos(boat_lat) * sin(distance) * cos(bearing))\n # d_lon = atan2(sin(bearing) * sin(distance) * cos(boat_lat), cos(distance) - sin(boat_lat) * sin(buoy_lat))\n # buoy_lon = ((boat_lon - d_lon + pi) % 2 * pi) - pi\n # return np.rad2deg(buoy_lat), np.rad2deg(buoy_lon)\n\n\n # Here is another version if the previous isn't working well\n lat2 = asin(sin(boat_lat) * cos(distance) + cos(boat_lat) * sin(distance) * cos(bearing))\n a = atan2(sin(bearing) * sin(distance) * cos(boat_lat), cos(distance) - sin(boat_lat) * sin(lat2))\n lon2 = boat_lon + a\n lon2 = (lon2 + 3 * pi) % (2 * pi) - pi\n\n return np.rad2deg(lat2), np.rad2deg(lon2)\n", "import numpy as np\nimport filterpy.kalman as 
kalman\n\nfrom src.utils.time_in_millis import time_in_millis\n\nfrom src.tracking.config_reader import read_kalman_config\n\nclass KalmanFilter():\n def __init__(self, pos, vel, pos_sigma=None, vel_sigma=None):\n \"\"\"Initialize kalman filter\n Inputs:\n pos -- position of object (polar)\n vel -- velocity of object (polar)\n pos_sigma -- uncertainty of position of object (polar)\n vel_sigma -- uncertainty of veloicty of object (polar)\n \"\"\"\n\n kalman_config = read_kalman_config()\n\n self.state = np.append(pos, vel).astype(np.float32) # create state vector (elements are r, bear, v_r, v_bear)\n if pos_sigma is None:\n pos_sigma = np.array([kalman_config['r_sigma'], kalman_config['theta_sigma']]).astype(np.float32)\n if vel_sigma is None:\n vel_sigma = np.array([kalman_config['r_hat_sigma'], kalman_config['theta_hat_sigma']]).astype(np.float32)\n self.covar = np.diag(np.append(pos_sigma, vel_sigma)).astype(np.float32) # create covariance matrix (matrix of certainties of measurements)\n self.measurement_covar = np.eye(self.covar.shape[0]).astype(np.float32)\n\n self.process_noise = np.eye(self.state.shape[0]).astype(np.float32) # initalize process noise\n\n self.last_time_changed = time_in_millis()\n self.delta_t = 0\n\n # create state transition matrix\n self.state_trans = np.array([[1., 0, self.delta_t, 0],\n [0, 1., 0, self.delta_t],\n [0, 0, 1., 0],\n [0, 0, 0, 1.]])\n\n self.measurement_trans = np.eye(self.state.size) # create measurement transition matrix\n\n def predict(self):\n \"\"\"Predicts next state of object\n Side Effects:\n self.state_trans -- calls _update_trans_matrix which updates transition matrix\n self.state -- updates state through kalman predict\n self.covar -- updates uncertainty matrix through kalman predict\n \"\"\"\n self._update_trans_matrix() # update state transition matrix with update delta_t\n self._update_process_noise()\n self.state, self.covar = kalman.predict(x=self.state, P=self.covar, F=self.state_trans, Q=self.process_noise)\n\n self._adjust_wraparound()\n\n def update(self, pos, vel):\n \"\"\"Update object position and filter\n Inputs:\n pos -- position of object (cartesian)\n vel -- veloicty of object (cartesian)\n# hist_score -- certainty score based on object history (used as scale factor for measurement covariance) (range 1 - 1.05)\n \"\"\"\n measurement = np.append(pos, vel).astype(np.float32)\n\n self.state, self.covar = kalman.update(x=self.state, P=self.covar, z=measurement, R=self.measurement_covar, H=self.measurement_trans)\n\n def _update_trans_matrix(self):\n \"\"\"Updates transition matrix for time delta since last prediction\n Side Effects:\n self.state_trans -- updates velocity coefficients in position equations\n self.last_time_changed -- updates last time changed to reflect that state has changed\n self.delta_t -- updates delta between current time and last time changed (used for predict)\n \"\"\"\n self.delta_t = (time_in_millis() - self.last_time_changed) / 1000.\n\n # update delta_t in state transition matrix\n self.state_trans[0, 2] = self.delta_t\n self.state_trans[1, 3] = self.delta_t\n\n self.last_time_changed = time_in_millis()\n\n def _update_process_noise(self):\n \"\"\"\n Updates process noise using distance from origin of object and velocity of obj\n Side Effects:\n self.process_noise -- updates using range and object velocity\n \"\"\"\n process_noise = np.diag(np.ones(4))\n\n # bearing noise increases as distance from origin DECREASES (small changes in position result in large bearing changes)\n bearing_scale_fac = 
0.5 + 50*(np.power(self.state[0], -2)) # arbitrary choice for numerator\n vel_scale_fac = [1 + (abs(vel)) for vel in self.state[2:4]]\n process_noise[0::2, 0::2] *= self.delta_t * vel_scale_fac[0]\n process_noise[1::2, 1::2] *= bearing_scale_fac * vel_scale_fac[1] * self.delta_t\n\n self.process_noise = process_noise\n\n def _adjust_wraparound(self):\n \"\"\" \n Wraps data from -180 to 180\n Side Effects: \n self.state -- wraps bearing\n \"\"\"\n if self.state[1] > 180:\n self.state[1] = -180. + (self.state[1] % 180)\n" ]
[ [ "numpy.rad2deg", "numpy.ones" ], [ "numpy.power", "numpy.eye", "numpy.ones", "numpy.append", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toddrme2178/OpenMDAO
[ "379cc6216d13d380e11cb3a46f03960981de4660", "379cc6216d13d380e11cb3a46f03960981de4660", "379cc6216d13d380e11cb3a46f03960981de4660", "379cc6216d13d380e11cb3a46f03960981de4660", "379cc6216d13d380e11cb3a46f03960981de4660" ]
[ "openmdao/components/structured_metamodel_util/scipy_interp.py", "openmdao/recorders/tests/sqlite_recorder_test_utils.py", "openmdao/drivers/tests/test_doe_driver.py", "openmdao/components/tests/test_vector_magnitude_comp.py", "openmdao/drivers/pyoptsparse_driver.py" ]
[ "\"\"\"Grid interpolation using scipy splines.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom six.moves import range\n\nfrom scipy import __version__ as scipy_version\ntry:\n from scipy.interpolate._bsplines import make_interp_spline as _make_interp_spline\nexcept ImportError:\n def _make_interp_spline(*args, **kwargs):\n msg = \"'MetaModelStructuredComp' requires scipy>=0.19, but the currently\" \\\n \" installed version is %s.\" % scipy_version\n raise RuntimeError(msg)\n\nimport numpy as np\n\nfrom openmdao.components.structured_metamodel_util.grid_interp_base import GridInterpBase\n\n\nclass ScipyGridInterp(GridInterpBase):\n \"\"\"\n Interpolation on a regular grid in arbitrary dimensions.\n\n The data must be defined on a regular grid; the grid spacing however may be uneven. First,\n third and fifth order spline interpolation are supported. After setting up the interpolator\n object, the interpolation order (*slinear*, *cubic*, and *quintic*) may be chosen at each\n evaluation. Additionally, gradients are provided for the spline interpolation methods.\n\n Attributes\n ----------\n bounds_error : bool\n If True, when interpolated values are requested outside of the domain of the input data,\n a ValueError is raised. If False, then the methods are allowed to extrapolate.\n Default is True (raise an exception).\n grid : tuple\n Collection of points that determine the regular grid.\n order : string\n Name of interpolation order.\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n _all_gradients : ndarray\n Cache of computed gradients.\n _interp_config : dict\n Configuration object that stores the number of points required for each interpolation\n method.\n _ki : list\n Interpolation order to be used in each dimension.\n _xi : ndarray\n Cache of current evaluation point.\n \"\"\"\n\n def __init__(self, points, values, interp_method=\"slinear\", bounds_error=True):\n \"\"\"\n Initialize instance of interpolation class.\n\n Parameters\n ----------\n points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )\n The points defining the regular grid in n dimensions.\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n interp_method : str, optional\n Name of interpolation method.\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the domain of the input\n data, a ValueError is raised. 
If False, then the methods are allowed to extrapolate.\n Default is True (raise an exception).\n \"\"\"\n super(ScipyGridInterp, self).__init__(points, values, interp_method=interp_method,\n bounds_error=bounds_error)\n\n # ScipyGridInterp supports automatic order reduction.\n self._ki = []\n # Order is the number of required points minus one.\n k = self._interp_config[interp_method] - 1\n for p in points:\n n_p = len(p)\n self._ki.append(k)\n if n_p <= k:\n self._ki[-1] = n_p - 1\n\n def _interp_methods(self):\n \"\"\"\n Method-specific settings for interpolation and for testing.\n\n Returns\n -------\n list\n Valid interpolation name strings.\n dict\n Configuration object that stores the number of points required for each method.\n \"\"\"\n interpolator_configs = {\n \"slinear\": 2,\n \"cubic\": 4,\n \"quintic\": 6,\n }\n\n all_methods = list(interpolator_configs.keys())\n return all_methods, interpolator_configs\n\n def interpolate(self, xi):\n \"\"\"\n Interpolate at the sample coordinates.\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data.\n\n Returns\n -------\n ndarray\n Value of interpolant at all sample points.\n \"\"\"\n super(ScipyGridInterp, self).interpolate(xi)\n\n result = self._evaluate_splines(self.values[:].T, xi, self._ki)\n\n return result\n\n def _evaluate_splines(self, data_values, xi, ki, compute_gradients=True):\n \"\"\"\n Perform interpolation using the scipy interpolator.\n\n Parameters\n ----------\n data_values : array_like\n The data on the regular grid in n dimensions.\n xi : ndarray\n The coordinates to sample the gridded data at\n ki : list\n List of spline interpolation orders.\n compute_gradients : bool, optional\n If a spline interpolation method is chosen, this determines whether gradient\n calculations should be made and cached. Default is True.\n\n Returns\n -------\n array_like\n Value of interpolant at all sample points.\n \"\"\"\n # for spline based methods\n\n # requires floating point input\n xi = xi.astype(np.float)\n\n # ensure xi is 2D list of points to evaluate\n if xi.ndim == 1:\n xi = xi.reshape((1, xi.size))\n m, n = xi.shape\n\n # create container arrays for output and gradients\n result = np.empty(m)\n if compute_gradients:\n all_gradients = np.empty_like(xi)\n\n # Non-stationary procedure: difficult to vectorize this part entirely\n # into numpy-level operations. Unfortunately this requires explicit\n # looping over each point in xi.\n\n # can at least vectorize the first pass across all points in the\n # last variable of xi. This provides one dimension of the entire\n # gradient output array.\n i = n - 1\n first_values, first_derivs = self._do_spline_fit(self.grid[i],\n data_values,\n xi[:, i],\n ki[i],\n compute_gradients)\n\n # the rest of the dimensions have to be on a per point-in-xi basis\n for j, x in enumerate(xi):\n gradient = np.empty_like(x)\n values = data_values[:]\n\n # Main process: Apply 1D interpolate in each dimension\n # sequentially, starting with the last dimension. These are then\n # \"folded\" into the next dimension in-place.\n for i in range(n - 1, 0, -1):\n if i == n - 1:\n values = first_values[j]\n if compute_gradients:\n local_derivs = first_derivs[j]\n else:\n # Interpolate and collect gradients for each 1D in this\n # last dimensions. This collapses each 1D sequence into a\n # scalar.\n values, local_derivs = self._do_spline_fit(self.grid[i],\n values,\n x[i],\n ki[i],\n compute_gradients)\n\n # Chain rule: to compute gradients of the output w.r.t. 
xi\n # across the dimensions, apply interpolation to the collected\n # gradients. This is equivalent to multiplication by\n # dResults/dValues at each level.\n if compute_gradients:\n gradient[i] = self._evaluate_splines(local_derivs,\n x[: i],\n ki,\n compute_gradients=False)\n\n # All values have been folded down to a single dimensional array\n # compute the final interpolated results, and gradient w.r.t. the\n # first dimension\n output_value, gradient[0] = self._do_spline_fit(self.grid[0],\n values,\n x[0],\n ki[0],\n compute_gradients)\n\n if compute_gradients:\n all_gradients[j] = gradient\n result[j] = output_value\n\n # Cache the computed gradients for return by the gradient method\n if compute_gradients:\n self._all_gradients = all_gradients\n # indicate what order was used to compute these\n\n return result\n\n def _do_spline_fit(self, x, y, pt, k, compute_gradients):\n \"\"\"\n Do a single interpolant call, and compute the gradient if needed.\n\n Parameters\n ----------\n x : array_like, shape (n,)\n Abscissas.\n y : array_like, shape (n, ...)\n Ordinates.\n pt : array_like\n Points to evaluate the spline at.\n k : float\n Spline interpolation order.\n compute_gradients : bool\n If a spline interpolation method is chosen, this determines whether gradient\n calculations should be made and cached.\n\n Returns\n -------\n array_like\n Value of interpolant at point of interest.\n None or array_like, optional\n Value of gradient of interpolant at point of interest.\n \"\"\"\n local_interp = _make_interp_spline(x, y, k=k, axis=0)\n values = local_interp(pt)\n local_derivs = None\n if compute_gradients:\n local_derivs = local_interp(pt, 1)\n return values, local_derivs\n\n def training_gradients(self, pt):\n \"\"\"\n Compute the training gradient for the vector of training points.\n\n Parameters\n ----------\n pt : ndarray\n Training point values.\n\n Returns\n -------\n ndarray\n Gradient of output with respect to training point values.\n \"\"\"\n for i, axis in enumerate(self.grid):\n e_i = np.eye(axis.size)\n interp = _make_interp_spline(axis, e_i, k=self._ki[i], axis=0)\n if i == 0:\n val = interp(pt[i])\n else:\n val = np.outer(val, interp(pt[i]))\n\n return val\n", "from six import iteritems, PY2, PY3\n\nimport sqlite3\nimport numpy as np\nimport json\n\nfrom contextlib import contextmanager\n\nfrom openmdao.utils.record_util import format_iteration_coordinate, deserialize\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.recorders.sqlite_recorder import blob_to_array, format_version\n\nif PY2:\n import cPickle as pickle\nelse:\n import pickle\n\n\n@contextmanager\ndef database_cursor(filename):\n \"\"\"\n Context manager managing a cursor for the SQLite database with the given file name.\n \"\"\"\n con = sqlite3.connect(filename)\n cur = con.cursor()\n\n yield cur\n\n con.close()\n\n\ndef get_format_version_abs2meta(db_cur):\n \"\"\"\n Return the format version and abs2meta dict from metadata table in the case recorder file.\n \"\"\"\n db_cur.execute(\"SELECT format_version, abs2meta FROM metadata\")\n row = db_cur.fetchone()\n\n f_version = row[0]\n\n # Need to also get abs2meta so that we can pass it to deserialize\n if f_version >= 3:\n abs2meta = json.loads(row[1])\n elif f_version in (1, 2):\n if PY2:\n abs2meta = pickle.loads(str(row[1])) if row[1] is not None else None\n\n if PY3:\n try:\n abs2meta = pickle.loads(row[1]) if row[1] is not None else None\n except TypeError:\n # Reading in a python 2 pickle recorded pre-OpenMDAO 2.4.\n abs2meta = 
pickle.loads(row[1].encode()) if row[1] is not None else None\n\n return f_version, abs2meta\n\ndef assertProblemDataRecorded(test, expected, tolerance):\n \"\"\"\n Expected can be from multiple cases.\n \"\"\"\n with database_cursor(test.filename) as db_cur:\n f_version, abs2meta = get_format_version_abs2meta(db_cur)\n\n # iterate through the cases\n for case, (t0, t1), outputs_expected in expected:\n # from the database, get the actual data recorded\n db_cur.execute(\"SELECT * FROM problem_cases WHERE case_name=:case_name\",\n {\"case_name\": case})\n row_actual = db_cur.fetchone()\n\n test.assertTrue(row_actual, 'Problem table does not contain the requested '\n 'case name: \"{}\"'.format(case))\n\n counter, global_counter, case_name, timestamp, success, msg, outputs_text = row_actual\n\n if f_version >= 3:\n outputs_actual = deserialize(outputs_text, abs2meta)\n elif f_version in (1, 2):\n outputs_actual = blob_to_array(outputs_text)\n\n test.assertEqual(success, 1)\n test.assertEqual(msg, '')\n\n for vartype, actual, expected in (\n ('outputs', outputs_actual, outputs_expected),\n ):\n\n if expected is None:\n if f_version >= 3:\n test.assertIsNone(actual)\n if f_version in (1, 2):\n test.assertEqual(actual, np.array(None, dtype=object))\n else:\n actual = actual[0]\n # Check to see if the number of values in actual and expected match\n test.assertEqual(len(actual), len(expected))\n for key, value in iteritems(expected):\n # Check to see if the keys in the actual and expected match\n test.assertTrue(key in actual.dtype.names,\n '{} variable not found in actual data'\n ' from recorder'.format(key))\n # Check to see if the values in actual and expected match\n assert_rel_error(test, actual[key], expected[key], tolerance)\n\n\ndef assertDriverIterDataRecorded(test, expected, tolerance, prefix=None):\n \"\"\"\n Expected can be from multiple cases.\n \"\"\"\n with database_cursor(test.filename) as db_cur:\n f_version, abs2meta = get_format_version_abs2meta(db_cur)\n\n # iterate through the cases\n for coord, (t0, t1), outputs_expected, inputs_expected in expected:\n iter_coord = format_iteration_coordinate(coord, prefix=prefix)\n # from the database, get the actual data recorded\n db_cur.execute(\"SELECT * FROM driver_iterations WHERE \"\n \"iteration_coordinate=:iteration_coordinate\",\n {\"iteration_coordinate\": iter_coord})\n row_actual = db_cur.fetchone()\n\n test.assertTrue(row_actual,\n 'Driver iterations table does not contain the requested '\n 'iteration coordinate: \"{}\"'.format(iter_coord))\n\n counter, global_counter, iteration_coordinate, timestamp, success, msg,\\\n inputs_text, outputs_text = row_actual\n\n if f_version >= 3:\n inputs_actual = deserialize(inputs_text, abs2meta)\n outputs_actual = deserialize(outputs_text, abs2meta)\n elif f_version in (1, 2):\n inputs_actual = blob_to_array(inputs_text)\n outputs_actual = blob_to_array(outputs_text)\n\n # Does the timestamp make sense?\n test.assertTrue(t0 <= timestamp and timestamp <= t1)\n\n test.assertEqual(success, 1)\n test.assertEqual(msg, '')\n\n for vartype, actual, expected in (\n ('outputs', outputs_actual, outputs_expected),\n ('inputs', inputs_actual, inputs_expected)\n ):\n\n if expected is None:\n if f_version >= 3:\n test.assertIsNone(actual)\n if f_version in (1, 2):\n test.assertEqual(actual, np.array(None, dtype=object))\n else:\n actual = actual[0]\n # Check to see if the number of values in actual and expected match\n test.assertEqual(len(actual), len(expected))\n for key, value in iteritems(expected):\n 
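# 'actual' is a row of a structured numpy array, so recorded variable names appear in actual.dtype.names\n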
# Check to see if the keys in the actual and expected match\n test.assertTrue(key in actual.dtype.names,\n '{} variable not found in actual data'\n ' from recorder'.format(key))\n # Check to see if the values in actual and expected match\n assert_rel_error(test, actual[key], expected[key], tolerance)\n\n\ndef assertDriverDerivDataRecorded(test, expected, tolerance, prefix=None):\n \"\"\"\n Expected can be from multiple cases.\n \"\"\"\n with database_cursor(test.filename) as db_cur:\n\n # iterate through the cases\n for coord, (t0, t1), totals_expected in expected:\n\n iter_coord = format_iteration_coordinate(coord, prefix=prefix)\n\n # from the database, get the actual data recorded\n db_cur.execute(\"SELECT * FROM driver_derivatives WHERE \"\n \"iteration_coordinate=:iteration_coordinate\",\n {\"iteration_coordinate\": iter_coord})\n row_actual = db_cur.fetchone()\n\n db_cur.execute(\"SELECT abs2meta FROM metadata\")\n row_abs2meta = db_cur.fetchone()\n\n test.assertTrue(row_actual,\n 'Driver iterations table does not contain the requested '\n 'iteration coordinate: \"{}\"'.format(iter_coord))\n\n counter, global_counter, iteration_coordinate, timestamp, success, msg,\\\n totals_blob = row_actual\n abs2meta = json.loads(row_abs2meta[0]) if row_abs2meta[0] is not None else None\n test.assertTrue(isinstance(abs2meta, dict))\n\n totals_actual = blob_to_array(totals_blob)\n\n # Does the timestamp make sense?\n test.assertTrue(t0 <= timestamp and timestamp <= t1)\n\n test.assertEqual(success, 1)\n test.assertEqual(msg, '')\n\n if totals_expected is None:\n test.assertEqual(totals_actual, np.array(None, dtype=object))\n else:\n actual = totals_actual[0]\n # Check to see if the number of values in actual and expected match\n test.assertEqual(len(actual), len(totals_expected))\n for key, value in iteritems(totals_expected):\n # Check to see if the keys in the actual and expected match\n test.assertTrue(key in actual.dtype.names,\n '{} variable not found in actual data'\n ' from recorder'.format(key))\n # Check to see if the values in actual and expected match\n assert_rel_error(test, actual[key], totals_expected[key], tolerance)\n\n\ndef assertSystemIterDataRecorded(test, expected, tolerance, prefix=None):\n \"\"\"\n Expected can be from multiple cases.\n \"\"\"\n with database_cursor(test.filename) as db_cur:\n f_version, abs2meta = get_format_version_abs2meta(db_cur)\n\n # iterate through the cases\n for coord, (t0, t1), inputs_expected, outputs_expected, residuals_expected in expected:\n iter_coord = format_iteration_coordinate(coord, prefix=prefix)\n\n # from the database, get the actual data recorded\n db_cur.execute(\"SELECT * FROM system_iterations WHERE \"\n \"iteration_coordinate=:iteration_coordinate\",\n {\"iteration_coordinate\": iter_coord})\n row_actual = db_cur.fetchone()\n test.assertTrue(row_actual, 'System iterations table does not contain the requested '\n 'iteration coordinate: \"{}\"'.format(iter_coord))\n\n counter, global_counter, iteration_coordinate, timestamp, success, msg, inputs_text, \\\n outputs_text, residuals_text = row_actual\n\n if f_version >= 3:\n inputs_actual = deserialize(inputs_text, abs2meta)\n outputs_actual = deserialize(outputs_text, abs2meta)\n residuals_actual = deserialize(residuals_text, abs2meta)\n elif f_version in (1, 2):\n inputs_actual = blob_to_array(inputs_text)\n outputs_actual = blob_to_array(outputs_text)\n residuals_actual = blob_to_array(residuals_text)\n\n # Does the timestamp make sense?\n test.assertTrue(t0 <= timestamp and timestamp 
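# i.e. the recorded time must fall between the model start (t0) and stop (t1) times\n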
<= t1)\n\n test.assertEqual(success, 1)\n test.assertEqual(msg, '')\n\n for vartype, actual, expected in (\n ('inputs', inputs_actual, inputs_expected),\n ('outputs', outputs_actual, outputs_expected),\n ('residuals', residuals_actual, residuals_expected),\n ):\n\n if expected is None:\n if f_version >= 3:\n test.assertIsNone(actual)\n if f_version in (1, 2):\n test.assertEqual(actual, np.array(None, dtype=object))\n else:\n # Check to see if the number of values in actual and expected match\n test.assertEqual(len(actual[0]), len(expected))\n for key, value in iteritems(expected):\n # Check to see if the keys in the actual and expected match\n test.assertTrue(key in actual[0].dtype.names,\n '{} variable not found in actual data '\n 'from recorder'.format(key))\n # Check to see if the values in actual and expected match\n assert_rel_error(test, actual[0][key], expected[key], tolerance)\n\n\ndef assertSolverIterDataRecorded(test, expected, tolerance, prefix=None):\n \"\"\"\n Expected can be from multiple cases.\n \"\"\"\n with database_cursor(test.filename) as db_cur:\n f_version, abs2meta = get_format_version_abs2meta(db_cur)\n\n # iterate through the cases\n for coord, (t0, t1), expected_abs_error, expected_rel_error, expected_output, \\\n expected_solver_residuals in expected:\n\n iter_coord = format_iteration_coordinate(coord, prefix=prefix)\n\n # from the database, get the actual data recorded\n db_cur.execute(\"SELECT * FROM solver_iterations \"\n \"WHERE iteration_coordinate=:iteration_coordinate\",\n {\"iteration_coordinate\": iter_coord})\n row_actual = db_cur.fetchone()\n test.assertTrue(row_actual, 'Solver iterations table does not contain the requested '\n 'iteration coordinate: \"{}\"'.format(iter_coord))\n\n counter, global_counter, iteration_coordinate, timestamp, success, msg, \\\n abs_err, rel_err, input_blob, output_text, residuals_text = row_actual\n\n if f_version >= 3:\n output_actual = deserialize(output_text, abs2meta)\n residuals_actual = deserialize(residuals_text, abs2meta)\n elif f_version in (1, 2):\n output_actual = blob_to_array(output_text)\n residuals_actual = blob_to_array(residuals_text)\n\n # Does the timestamp make sense?\n test.assertTrue(t0 <= timestamp and timestamp <= t1,\n 'timestamp should be between when the model started and stopped')\n\n test.assertEqual(success, 1)\n test.assertEqual(msg, '')\n if expected_abs_error:\n test.assertTrue(abs_err, 'Expected absolute error but none recorded')\n assert_rel_error(test, abs_err, expected_abs_error, tolerance)\n if expected_rel_error:\n test.assertTrue(rel_err, 'Expected relative error but none recorded')\n assert_rel_error(test, rel_err, expected_rel_error, tolerance)\n\n for vartype, actual, expected in (\n ('outputs', output_actual, expected_output),\n ('residuals', residuals_actual, expected_solver_residuals),\n ):\n\n if expected is None:\n if f_version >= 3:\n test.assertIsNone(actual)\n if f_version in (1, 2):\n test.assertEqual(actual, np.array(None, dtype=object))\n else:\n # Check to see if the number of values in actual and expected match\n test.assertEqual(len(actual[0]), len(expected))\n for key, value in iteritems(expected):\n # Check to see if the keys in the actual and expected match\n test.assertTrue(key in actual[0].dtype.names,\n '{} variable not found in actual data '\n 'from recorder'.format(key))\n # Check to see if the values in actual and expected match\n assert_rel_error(test, actual[0][key], expected[key], tolerance)\n\n\ndef assertMetadataRecorded(test, expected_prom2abs, 
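# expected promoted-to-absolute and absolute-to-promoted name maps\n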
expected_abs2prom):\n\n with database_cursor(test.filename) as db_cur:\n\n db_cur.execute(\"SELECT format_version, prom2abs, abs2prom FROM metadata\")\n row = db_cur.fetchone()\n\n format_version_actual = row[0]\n format_version_expected = format_version\n\n prom2abs = json.loads(str(row[1]))\n abs2prom = json.loads(str(row[2]))\n\n if prom2abs is None:\n test.assertIsNone(expected_prom2abs)\n else:\n for io in ['input', 'output']:\n for var in prom2abs[io]:\n test.assertEqual(prom2abs[io][var].sort(), expected_prom2abs[io][var].sort())\n if abs2prom is None:\n test.assertIsNone(expected_abs2prom)\n else:\n for io in ['input', 'output']:\n for var in abs2prom[io]:\n test.assertEqual(abs2prom[io][var], expected_abs2prom[io][var])\n\n # this always gets recorded\n test.assertEqual(format_version_actual, format_version_expected)\n\n\ndef assertViewerDataRecorded(test, expected):\n\n with database_cursor(test.filename) as db_cur:\n db_cur.execute(\"SELECT format_version FROM metadata\")\n f_version = db_cur.fetchone()[0]\n test.assertTrue(isinstance(f_version, int))\n\n db_cur.execute(\"SELECT model_viewer_data FROM driver_metadata\")\n row = db_cur.fetchone()\n\n if expected is None:\n test.assertIsNone(row)\n return\n\n model_viewer_data = json.loads(row[0])\n\n test.assertTrue(isinstance(model_viewer_data, dict))\n\n # primary keys\n test.assertEqual(set(model_viewer_data.keys()), {\n 'tree', 'sys_pathnames_list', 'connections_list', 'abs2prom',\n 'driver', 'design_vars', 'responses', 'declare_partials_list'\n })\n\n # system pathnames\n test.assertTrue(isinstance(model_viewer_data['sys_pathnames_list'], list))\n\n # connections\n test.assertTrue(isinstance(model_viewer_data['connections_list'], list))\n\n test.assertEqual(expected['connections_list_length'],\n len(model_viewer_data['connections_list']))\n\n cl = model_viewer_data['connections_list']\n for c in cl:\n test.assertTrue(set(c.keys()).issubset(set(['src', 'tgt', 'cycle_arrows'])))\n\n # model tree\n tr = model_viewer_data['tree']\n test.assertEqual(expected['tree_length'], len(tr))\n\n test.assertEqual({'name', 'type', 'subsystem_type', 'children', 'linear_solver',\n 'nonlinear_solver', 'is_parallel', 'component_type', 'class',\n 'expressions'},\n set(tr.keys()))\n test.assertEqual(expected['tree_children_length'],\n len(model_viewer_data['tree']['children']))\n\n # abs2prom map\n abs2prom = model_viewer_data['abs2prom']\n for io in ['input', 'output']:\n for var in expected['abs2prom'][io]:\n test.assertEqual(abs2prom[io][var], expected['abs2prom'][io][var])\n\n\ndef assertSystemMetadataIdsRecorded(test, ids):\n\n with database_cursor(test.filename) as cur:\n\n for id in ids:\n cur.execute(\"SELECT * FROM system_metadata WHERE id=:id\", {\"id\": id})\n row_actual = cur.fetchone()\n test.assertTrue(row_actual,\n 'System metadata table does not contain the '\n 'requested id: \"{}\"'.format(id))\n\n\ndef assertSystemIterCoordsRecorded(test, iteration_coordinates):\n\n with database_cursor(test.filename) as cur:\n\n for iteration_coordinate in iteration_coordinates:\n cur.execute(\"SELECT * FROM system_iterations WHERE \"\n \"iteration_coordinate=:iteration_coordinate\",\n {\"iteration_coordinate\": iteration_coordinate})\n row_actual = cur.fetchone()\n test.assertTrue(row_actual,\n 'System iterations table does not contain the '\n 'requested iteration coordinate: \"{}\"'.\n format(iteration_coordinate))\n", "\"\"\"\nTest DOE Driver and Generators.\n\"\"\"\nfrom __future__ import print_function, division\n\nimport 
unittest\n\nimport os\nimport shutil\nimport tempfile\nimport csv\nimport json\n\nimport numpy as np\n\nimport openmdao.api as om\n\nfrom openmdao.test_suite.components.paraboloid import Paraboloid\nfrom openmdao.test_suite.groups.parallel_groups import FanInGrouped\n\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.utils.general_utils import run_driver, printoptions\n\nfrom openmdao.utils.mpi import MPI\n\n\nclass ParaboloidArray(om.ExplicitComponent):\n \"\"\"\n Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3.\n\n Where x and y are xy[0] and xy[1] repectively.\n \"\"\"\n\n def __init__(self):\n super(ParaboloidArray, self).__init__()\n\n self.add_input('xy', val=np.array([0., 0.]))\n self.add_output('f_xy', val=0.0)\n\n def compute(self, inputs, outputs):\n \"\"\"\n f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3\n \"\"\"\n x = inputs['xy'][0]\n y = inputs['xy'][1]\n outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0\n\n\nclass TestErrors(unittest.TestCase):\n\n def test_generator_check(self):\n prob = om.Problem()\n\n with self.assertRaises(TypeError) as err:\n prob.driver = om.DOEDriver(om.FullFactorialGenerator)\n\n self.assertEqual(str(err.exception),\n \"DOEDriver requires an instance of DOEGenerator, \"\n \"but a class object was found: FullFactorialGenerator\")\n\n with self.assertRaises(TypeError) as err:\n prob.driver = om.DOEDriver(om.Problem())\n\n self.assertEqual(str(err.exception),\n \"DOEDriver requires an instance of DOEGenerator, \"\n \"but an instance of Problem was found.\")\n\n def test_lhc_criterion(self):\n with self.assertRaises(ValueError) as err:\n om.LatinHypercubeGenerator(criterion='foo')\n\n self.assertEqual(str(err.exception),\n \"Invalid criterion 'foo' specified for LatinHypercubeGenerator. 
\"\n \"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', \"\n \"'cm', 'correlation', 'corr', None].\")\n\n\nclass TestDOEDriver(unittest.TestCase):\n\n def setUp(self):\n self.startdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp(prefix='TestDOEDriver-')\n os.chdir(self.tempdir)\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n shutil.rmtree(self.tempdir)\n except OSError:\n pass\n\n def test_no_generator(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n model.add_design_var('x', lower=-10, upper=10)\n model.add_design_var('y', lower=-10, upper=10)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver()\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 0)\n\n def test_list(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n # create a list of DOE cases\n case_gen = om.FullFactorialGenerator(levels=3)\n cases = list(case_gen(model.get_design_vars(recurse=True)))\n\n # create DOEDriver using provided list of cases\n prob.driver = om.DOEDriver(cases)\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 9)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['x'], expected[n]['x'])\n self.assertEqual(outputs['y'], expected[n]['y'])\n self.assertEqual(outputs['f_xy'], expected[n]['f_xy'])\n\n def test_list_errors(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n # data does not contain a list\n cases = {'desvar': 1.0}\n\n with self.assertRaises(RuntimeError) as err:\n prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))\n self.assertEqual(str(err.exception), \"Invalid 
DOE case data, \"\n \"expected a list but got a dict.\")\n\n # data contains a list of non-list\n cases = [{'desvar': 1.0}]\n prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))\n\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case found, \"\n \"expecting a list of name/value pairs:\\n{'desvar': 1.0}\")\n\n # data contains a list of list, but one has the wrong length\n cases = [\n [['p1.x', 0.], ['p2.y', 0.]],\n [['p1.x', 1.], ['p2.y', 1., 'foo']]\n ]\n\n prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))\n\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case found, \"\n \"expecting a list of name/value pairs:\\n\"\n \"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]\")\n\n # data contains a list of list, but one case has an invalid design var\n cases = [\n [['p1.x', 0.], ['p2.y', 0.]],\n [['p1.x', 1.], ['p2.z', 1.]]\n ]\n\n prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))\n\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case found, \"\n \"'p2.z' is not a valid design variable:\\n\"\n \"[['p1.x', 1.0], ['p2.z', 1.0]]\")\n\n # data contains a list of list, but one case has multiple invalid design vars\n cases = [\n [['p1.x', 0.], ['p2.y', 0.]],\n [['p1.y', 1.], ['p2.z', 1.]]\n ]\n\n prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))\n\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case found, \"\n \"['p1.y', 'p2.z'] are not valid design variables:\\n\"\n \"[['p1.y', 1.0], ['p2.z', 1.0]]\")\n\n def test_csv(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n # create a list of DOE cases\n cases = []\n case_gen = om.FullFactorialGenerator(levels=3)\n for case in case_gen(model.get_design_vars(recurse=True)):\n cases.append([(var, val) for (var, val) in case])\n\n # generate CSV file with cases\n header = [var for (var, val) in cases[0]]\n with open('cases.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for case in cases:\n writer.writerow([val for (var, val) in case])\n\n # create DOEDriver using generated CSV file\n prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n cr = 
om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 9)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['x'], expected[n]['x'])\n self.assertEqual(outputs['y'], expected[n]['y'])\n self.assertEqual(outputs['f_xy'], expected[n]['f_xy'])\n\n def test_csv_array(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))\n model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))\n model.add_subsystem('comp1', Paraboloid())\n model.add_subsystem('comp2', Paraboloid())\n\n model.connect('p1.x', 'comp1.x', src_indices=[0])\n model.connect('p2.y', 'comp1.y', src_indices=[0])\n\n model.connect('p1.x', 'comp2.x', src_indices=[1])\n model.connect('p2.y', 'comp2.y', src_indices=[1])\n\n model.add_design_var('p1.x', lower=0.0, upper=1.0)\n model.add_design_var('p2.y', lower=0.0, upper=1.0)\n\n prob.setup()\n\n # create a list of DOE cases\n cases = []\n case_gen = om.FullFactorialGenerator(levels=2)\n for case in case_gen(model.get_design_vars(recurse=True)):\n cases.append([(var, val) for (var, val) in case])\n\n # generate CSV file with cases\n header = [var for (var, val) in cases[0]]\n with open('cases.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for case in cases:\n writer.writerow([val for (var, val) in case])\n\n # create DOEDriver using generated CSV file\n prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},\n 1: {'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},\n 2: {'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},\n 3: {'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},\n 4: {'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},\n 5: {'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},\n 6: {'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},\n 7: {'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},\n 8: {'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},\n 9: {'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},\n 10: {'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},\n 11: {'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},\n 12: {'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},\n 13: {'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},\n 14: {'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},\n 15: {'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 16)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['p1.x'][0], expected[n]['p1.x'][0])\n self.assertEqual(outputs['p2.y'][0], expected[n]['p2.y'][0])\n self.assertEqual(outputs['p1.x'][1], expected[n]['p1.x'][1])\n self.assertEqual(outputs['p2.y'][1], expected[n]['p2.y'][1])\n\n def test_csv_errors(self):\n # test invalid file name\n with self.assertRaises(RuntimeError) as err:\n om.CSVGenerator(1.23)\n self.assertEqual(str(err.exception),\n \"'1.23' is not a valid file name.\")\n\n # test file not found\n with self.assertRaises(RuntimeError) as err:\n om.CSVGenerator('nocases.csv')\n self.assertEqual(str(err.exception),\n \"File not found: nocases.csv\")\n\n # create problem and a list of DOE cases\n prob = om.Problem()\n model = prob.model\n\n 
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n cases = []\n case_gen = om.FullFactorialGenerator(levels=2)\n for case in case_gen(model.get_design_vars(recurse=True)):\n cases.append([(var, val) for (var, val) in case])\n\n # test CSV file with an invalid design var\n header = [var for (var, val) in cases[0]]\n header[-1] = 'foobar'\n with open('cases.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for case in cases:\n writer.writerow([val for (var, val) in case])\n\n prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case file, \"\n \"'foobar' is not a valid design variable.\")\n\n # test CSV file with invalid design vars\n header = [var+'_bad' for (var, val) in cases[0]]\n with open('cases.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for case in cases:\n writer.writerow([val for (var, val) in case])\n\n with self.assertRaises(RuntimeError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception), \"Invalid DOE case file, \"\n \"%s are not valid design variables.\" %\n str([var for var in header]))\n\n # test CSV file with invalid values\n header = [var for (var, val) in cases[0]]\n with open('cases.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for case in cases:\n writer.writerow([np.ones((2,2))*val for (var, val) in case])\n\n from distutils.version import LooseVersion\n if LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n opts = {'legacy': '1.13'}\n else:\n opts = {}\n\n with printoptions(**opts):\n with self.assertRaises(ValueError) as err:\n prob.run_driver()\n self.assertEqual(str(err.exception),\n \"Error assigning p1.x = [ 0. 0. 0. 
0.]: \"\n \"could not broadcast input array from shape (4) into shape (1)\")\n\n def test_uniform(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n model.add_design_var('x', lower=-10, upper=10)\n model.add_design_var('y', lower=-10, upper=10)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5, seed=0))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n # all values should be between -10 and 10, check expected values for seed = 0\n expected = {\n 0: {'x': np.array([ 0.97627008]), 'y': np.array([ 4.30378733])},\n 1: {'x': np.array([ 2.05526752]), 'y': np.array([ 0.89766366])},\n 2: {'x': np.array([-1.52690401]), 'y': np.array([ 2.91788226])},\n 3: {'x': np.array([-1.24825577]), 'y': np.array([ 7.83546002])},\n 4: {'x': np.array([ 9.27325521]), 'y': np.array([-2.33116962])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 5)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n assert_rel_error(self, outputs['x'], expected[n]['x'], 1e-4)\n assert_rel_error(self, outputs['y'], expected[n]['y'], 1e-4)\n\n def test_full_factorial(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=3))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 9)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['x'], expected[n]['x'])\n self.assertEqual(outputs['y'], expected[n]['y'])\n self.assertEqual(outputs['f_xy'], expected[n]['f_xy'])\n\n def test_full_factorial_array(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xy', np.array([0., 0.])), promotes=['*'])\n model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])\n\n model.add_design_var('xy', lower=np.array([-50., -50.]), upper=np.array([50., 50.]))\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n 
prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'xy': np.array([-50., -50.])},\n 1: {'xy': np.array([ 0., -50.])},\n 2: {'xy': np.array([ 50., -50.])},\n 3: {'xy': np.array([-50., 0.])},\n 4: {'xy': np.array([ 0., 0.])},\n 5: {'xy': np.array([ 50., 0.])},\n 6: {'xy': np.array([-50., 50.])},\n 7: {'xy': np.array([ 0., 50.])},\n 8: {'xy': np.array([ 50., 50.])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 9)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['xy'][0], expected[n]['xy'][0])\n self.assertEqual(outputs['xy'][1], expected[n]['xy'][1])\n\n def test_plackett_burman(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n 2: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 3: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 4)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['x'], expected[n]['x'])\n self.assertEqual(outputs['y'], expected[n]['y'])\n self.assertEqual(outputs['f_xy'], expected[n]['f_xy'])\n\n def test_box_behnken(self):\n upper = 10.\n center = 1\n\n prob = om.Problem()\n model = prob.model\n\n indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])\n indep.add_output('x', 0.0)\n indep.add_output('y', 0.0)\n indep.add_output('z', 0.0)\n\n model.add_subsystem('comp', om.ExecComp('a = x**2 + y - z'), promotes=['*'])\n\n model.add_design_var('x', lower=0., upper=upper)\n model.add_design_var('y', lower=0., upper=upper)\n model.add_design_var('z', lower=0., upper=upper)\n\n model.add_objective('a')\n\n prob.driver = om.DOEDriver(om.BoxBehnkenGenerator(center=center))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n # The Box-Behnken design for 3 factors involves three blocks, in each of\n # which 2 factors are varied thru the 4 possible combinations of high & low.\n # It also includes centre points (all factors at their central values).\n # ref: https://en.wikipedia.org/wiki/Box-Behnken_design\n self.assertEqual(len(cases), (3*4)+center)\n\n expected = {\n 0: {'x': np.array([ 0.]), 'y': np.array([ 0.]), 'z': np.array([ 5.])},\n 1: {'x': np.array([10.]), 'y': np.array([ 0.]), 'z': np.array([ 5.])},\n 2: {'x': np.array([ 0.]), 'y': np.array([10.]), 'z': np.array([ 5.])},\n 3: {'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([ 5.])},\n\n 4: {'x': np.array([ 0.]), 'y': np.array([ 5.]), 'z': np.array([ 0.])},\n 5: {'x': 
np.array([10.]), 'y': np.array([ 5.]), 'z': np.array([ 0.])},\n 6: {'x': np.array([ 0.]), 'y': np.array([ 5.]), 'z': np.array([10.])},\n 7: {'x': np.array([10.]), 'y': np.array([ 5.]), 'z': np.array([10.])},\n\n 8: {'x': np.array([ 5.]), 'y': np.array([ 0.]), 'z': np.array([ 0.])},\n 9: {'x': np.array([ 5.]), 'y': np.array([10.]), 'z': np.array([ 0.])},\n 10: {'x': np.array([ 5.]), 'y': np.array([ 0.]), 'z': np.array([10.])},\n 11: {'x': np.array([ 5.]), 'y': np.array([10.]), 'z': np.array([10.])},\n\n 12: {'x': np.array([ 5.]), 'y': np.array([ 5.]), 'z': np.array([ 5.])},\n }\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n self.assertEqual(outputs['x'], expected[n]['x'])\n self.assertEqual(outputs['y'], expected[n]['y'])\n self.assertEqual(outputs['z'], expected[n]['z'])\n\n def test_latin_hypercube(self):\n samples = 4\n\n bounds = np.array([\n [-1, -10], # lower bounds for x and y\n [ 1, 10] # upper bounds for x and y\n ])\n xlb, xub = bounds[0][0], bounds[1][0]\n ylb, yub = bounds[0][1], bounds[1][1]\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=xlb, upper=xub)\n model.add_design_var('y', lower=ylb, upper=yub)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver()\n prob.driver.options['generator'] = om.LatinHypercubeGenerator(samples=4, seed=0)\n\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n # the sample space for each variable should be divided into equal\n # size buckets and each variable should have a value in each bucket\n all_buckets = set(range(samples))\n\n xlb, xub = bounds[0][0], bounds[1][0]\n x_offset = 0 - xlb\n x_bucket_size = xub - xlb\n x_buckets_filled = set()\n\n ylb, yub = bounds[0][1], bounds[1][1]\n y_offset = 0 - ylb\n y_bucket_size = yub - ylb\n y_buckets_filled = set()\n\n # expected values for seed = 0\n expected = {\n 0: {'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},\n 1: {'x': np.array([ 0.2118274]), 'y': np.array([ 9.458865])},\n 2: {'x': np.array([ 0.71879361]), 'y': np.array([ 3.22947057])},\n 3: {'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 4)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n x = outputs['x']\n y = outputs['y']\n\n bucket = int((x+x_offset)/(x_bucket_size/samples))\n x_buckets_filled.add(bucket)\n\n bucket = int((y+y_offset)/(y_bucket_size/samples))\n y_buckets_filled.add(bucket)\n\n assert_rel_error(self, x, expected[n]['x'], 1e-4)\n assert_rel_error(self, y, expected[n]['y'], 1e-4)\n\n self.assertEqual(x_buckets_filled, all_buckets)\n self.assertEqual(y_buckets_filled, all_buckets)\n\n def test_latin_hypercube_array(self):\n samples = 4\n\n bounds = np.array([\n [-10, -50], # lower bounds for x and y\n [ 10, 50] # upper bounds for x and y\n ])\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xy', np.array([50., 50.])), promotes=['*'])\n model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])\n\n model.add_design_var('xy', lower=bounds[0], upper=bounds[1])\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=4, seed=0))\n 
prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n # the sample space for each variable should be divided into equal\n # size buckets and each variable should have a value in each bucket\n all_buckets = set(range(samples))\n\n xlb, xub = bounds[0][0], bounds[1][0]\n x_offset = 0 - xlb\n x_bucket_size = xub - xlb\n x_buckets_filled = set()\n\n ylb, yub = bounds[0][1], bounds[1][1]\n y_offset = 0 - ylb\n y_bucket_size = yub - ylb\n y_buckets_filled = set()\n\n # expected values for seed = 0\n expected = {\n 0: {'xy': np.array([-1.98618312, -32.12026584])},\n 1: {'xy': np.array([ 2.118274, 47.29432502])},\n 2: {'xy': np.array([ 7.18793606, 16.14735283])},\n 3: {'xy': np.array([-7.25593248, -11.37792043])},\n }\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 4)\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n x = outputs['xy'][0]\n y = outputs['xy'][1]\n\n bucket = int((x+x_offset)/(x_bucket_size/samples))\n x_buckets_filled.add(bucket)\n\n bucket = int((y+y_offset)/(y_bucket_size/samples))\n y_buckets_filled.add(bucket)\n\n assert_rel_error(self, x, expected[n]['xy'][0], 1e-4)\n assert_rel_error(self, y, expected[n]['xy'][1], 1e-4)\n\n self.assertEqual(x_buckets_filled, all_buckets)\n self.assertEqual(y_buckets_filled, all_buckets)\n\n def test_latin_hypercube_center(self):\n samples = 4\n upper = 10.\n\n prob = om.Problem()\n model = prob.model\n\n indep = model.add_subsystem('indep', om.IndepVarComp())\n indep.add_output('x', 0.0)\n indep.add_output('y', 0.0)\n\n model.add_subsystem('comp', Paraboloid())\n\n model.connect('indep.x', 'comp.x')\n model.connect('indep.y', 'comp.y')\n\n model.add_design_var('indep.x', lower=0., upper=upper)\n model.add_design_var('indep.y', lower=0., upper=upper)\n\n model.add_objective('comp.f_xy')\n\n prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=samples, criterion='c'))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), samples)\n\n # the sample space for each variable (0 to upper) should be divided into\n # equal size buckets and each variable should have a value in each bucket\n bucket_size = upper/samples\n all_buckets = set(range(samples))\n\n x_buckets_filled = set()\n y_buckets_filled = set()\n\n # with criterion of 'center', each value should be in the center of it's bucket\n valid_values = [round(bucket_size*(bucket + 1/2), 3) for bucket in all_buckets]\n\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n x = float(outputs['indep.x'])\n y = float(outputs['indep.y'])\n\n x_buckets_filled.add(int(x/bucket_size))\n y_buckets_filled.add(int(y/bucket_size))\n\n self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))\n self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))\n\n self.assertEqual(x_buckets_filled, all_buckets)\n self.assertEqual(y_buckets_filled, all_buckets)\n\n def test_record_bug(self):\n # There was a bug that caused values to be recorded in driver_scaled form.\n\n prob = om.Problem()\n model = prob.model\n\n ivc = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n ivc.add_output('x', val=1.)\n\n model.add_subsystem('obj_comp', om.ExecComp('y=2*x'), promotes=['*'])\n model.add_subsystem('con_comp', om.ExecComp('z=3*x'), 
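# constraint component; recorded outputs are checked below in unscaled form\n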
promotes=['*'])\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n prob.driver.recording_options['includes'] = ['*']\n\n model.add_design_var('x', lower=0., upper=10., ref=3.0)\n model.add_constraint('z', lower=2.0, scaler=13.0)\n model.add_objective('y', scaler=-1)\n\n prob.setup(check=True)\n\n prob.run_driver()\n\n cr = om.CaseReader(\"cases.sql\")\n final_case = cr.list_cases('driver')[-1]\n outputs = cr.get_case(final_case).outputs\n\n assert_rel_error(self, outputs['x'], 10.0, 1e-7)\n assert_rel_error(self, outputs['y'], 20.0, 1e-7)\n assert_rel_error(self, outputs['z'], 30.0, 1e-7)\n\n\[email protected](om.PETScVector, \"PETSc is required.\")\nclass TestParallelDOE(unittest.TestCase):\n\n N_PROCS = 4\n\n def setUp(self):\n self.startdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp(prefix='TestDOEDriver-')\n os.chdir(self.tempdir)\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n shutil.rmtree(self.tempdir)\n except OSError:\n pass\n\n def test_indivisible_error(self):\n prob = om.Problem()\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 3\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup()\n\n self.assertEqual(str(context.exception),\n \"The total number of processors is not evenly divisible by the \"\n \"specified number of processors per model.\\n Provide a number of \"\n \"processors that is a multiple of 3, or specify a number \"\n \"of processors per model that divides into 4.\")\n\n def test_minprocs_error(self):\n prob = om.Problem(FanInGrouped())\n\n # require 2 procs for the ParallelGroup\n prob.model._proc_info['sub'] = (2, None, 1.0)\n\n # run cases on all procs\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 1\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup()\n\n self.assertEqual(str(context.exception),\n \"FanInGrouped (<model>): MPI process allocation failed: can't meet min_procs \"\n \"required for the following subsystems: ['sub']\")\n\n def test_full_factorial(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3), procs_per_model=1,\n run_parallel=True)\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n\n failed, output = run_driver(prob)\n self.assertFalse(failed)\n\n prob.cleanup()\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': 
np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n size = prob.comm.size\n rank = prob.comm.rank\n\n # cases will be split across files for each proc\n filename = \"cases.sql_%d\" % rank\n\n expect_msg = \"Cases from rank %d are being written to %s.\" % (rank, filename)\n self.assertTrue(expect_msg in output)\n\n cr = om.CaseReader(filename)\n cases = cr.list_cases('driver')\n\n # cases recorded on this proc\n num_cases = len(cases)\n self.assertEqual(num_cases, len(expected)//size+(rank<len(expected)%size))\n\n for n in range(num_cases):\n outputs = cr.get_case(cases[n]).outputs\n idx = n * size + rank # index of expected case\n\n self.assertEqual(outputs['x'], expected[idx]['x'])\n self.assertEqual(outputs['y'], expected[idx]['y'])\n self.assertEqual(outputs['f_xy'], expected[idx]['f_xy'])\n\n # total number of cases recorded across all procs\n num_cases = prob.comm.allgather(num_cases)\n self.assertEqual(sum(num_cases), len(expected))\n\n def test_fan_in_grouped(self):\n # run 2 cases at a time, each using 2 of our 4 procs\n doe_parallel = 2\n\n prob = om.Problem(FanInGrouped())\n model = prob.model\n\n model.add_design_var('iv.x1', lower=0.0, upper=1.0)\n model.add_design_var('iv.x2', lower=0.0, upper=1.0)\n\n model.add_objective('c3.y')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = doe_parallel\n\n prob.setup()\n\n failed, output = run_driver(prob)\n self.assertFalse(failed)\n\n prob.cleanup()\n\n expected = {\n 0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([ 0.0])},\n 1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.0])},\n 2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.0])},\n\n 3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.5])},\n 4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.5])},\n 5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.5])},\n\n 6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.0])},\n 7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.0])},\n 8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.0])},\n }\n\n rank = prob.comm.rank\n size = prob.comm.size // doe_parallel\n\n num_cases = 0\n\n # cases will be split across files for each proc up to the number requested\n if rank < doe_parallel:\n filename = \"cases.sql_%d\" % rank\n\n expect_msg = \"Cases from rank %d are being written to %s.\" % (rank, filename)\n self.assertTrue(expect_msg in output)\n\n cr = om.CaseReader(filename)\n cases = cr.list_cases('driver')\n\n # cases recorded on this proc\n num_cases = len(cases)\n self.assertEqual(num_cases, len(expected)//size+(rank<len(expected)%size))\n\n for n in range(num_cases):\n idx = n * size + rank # index of expected case\n\n outputs = cr.get_case(cases[n]).outputs\n\n self.assertEqual(outputs['iv.x1'], expected[idx]['iv.x1'])\n self.assertEqual(outputs['iv.x2'], expected[idx]['iv.x2'])\n self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])\n else:\n self.assertFalse(\"Cases from rank %d are being written\" % rank in output)\n\n # total number of cases recorded across all requested procs\n num_cases = prob.comm.allgather(num_cases)\n self.assertEqual(sum(num_cases), len(expected))\n\n def 
test_fan_in_grouped_serial(self):\n # run cases on all procs (parallel model will run on single proc)\n doe_parallel = 1\n\n prob = om.Problem(FanInGrouped())\n model = prob.model\n\n model.add_design_var('iv.x1', lower=0.0, upper=1.0)\n model.add_design_var('iv.x2', lower=0.0, upper=1.0)\n\n model.add_objective('c3.y')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = doe_parallel\n\n prob.setup()\n\n failed, output = run_driver(prob)\n self.assertFalse(failed)\n\n prob.cleanup()\n\n expected = {\n 0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([ 0.0])},\n 1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.0])},\n 2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.0])},\n\n 3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.5])},\n 4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.5])},\n 5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.5])},\n\n 6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.0])},\n 7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.0])},\n 8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.0])},\n }\n\n rank = prob.comm.rank\n size = prob.comm.size // doe_parallel\n\n num_cases = 0\n\n # cases will be split across files for each proc up to the number requested\n filename = \"cases.sql_%d\" % rank\n\n expect_msg = \"Cases from rank %d are being written to %s.\" % (rank, filename)\n self.assertTrue(expect_msg in output)\n\n cr = om.CaseReader(filename)\n cases = cr.list_cases('driver')\n\n # cases recorded on this proc\n num_cases = len(cases)\n self.assertEqual(num_cases, len(expected)//size+(rank<len(expected)%size))\n\n for n in range(num_cases):\n idx = n * size + rank # index of expected case\n\n outputs = cr.get_case(cases[n]).outputs\n\n self.assertEqual(outputs['iv.x1'], expected[idx]['iv.x1'])\n self.assertEqual(outputs['iv.x2'], expected[idx]['iv.x2'])\n self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])\n\n # total number of cases recorded across all requested procs\n num_cases = prob.comm.allgather(num_cases)\n self.assertEqual(sum(num_cases), len(expected))\n\n\nclass TestDOEDriverFeature(unittest.TestCase):\n\n def setUp(self):\n import json\n import os\n import tempfile\n import numpy as np\n\n self.startdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp(prefix='TestDOEDriverFeature-')\n os.chdir(self.tempdir)\n\n self.expected_csv = '\\n'.join([\n \" x , y\",\n \"0.0, 0.0\",\n \"0.5, 0.0\",\n \"1.0, 0.0\",\n \"0.0, 0.5\",\n \"0.5, 0.5\",\n \"1.0, 0.5\",\n \"0.0, 1.0\",\n \"0.5, 1.0\",\n \"1.0, 1.0\",\n ])\n\n with open('cases.csv', 'w') as f:\n f.write(self.expected_csv)\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 
'f_xy': np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n values = []\n cases = []\n\n for idx in range(len(expected)):\n case = expected[idx]\n values.append((case['x'], case['y'], case['f_xy']))\n # converting ndarray to list enables JSON serialization\n cases.append((('x', list(case['x'])), ('y', list(case['y']))))\n\n self.expected_text = \"\\n\".join([\n \"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values\n ])\n\n self.expected_json = json.dumps(cases).replace(']]],', ']]],\\n')\n with open('cases.json', 'w') as f:\n f.write(self.expected_json)\n\n def tearDown(self):\n import os\n import shutil\n\n os.chdir(self.startdir)\n try:\n shutil.rmtree(self.tempdir)\n except OSError:\n pass\n\n def test_uniform(self):\n import openmdao.api as om\n from openmdao.test_suite.components.paraboloid import Paraboloid\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])\n model.add_subsystem('comp', Paraboloid(), promotes=['*'])\n\n model.add_design_var('x', lower=-10, upper=10)\n model.add_design_var('y', lower=-10, upper=10)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n self.assertEqual(len(cases), 5)\n\n values = []\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n values.append((outputs['x'], outputs['y'], outputs['f_xy']))\n\n print(\"\\n\".join([\"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values]))\n\n def test_csv(self):\n import openmdao.api as om\n from openmdao.test_suite.components.paraboloid import Paraboloid\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n # this file contains design variable inputs in CSV format\n with open('cases.csv', 'r') as f:\n self.assertEqual(f.read(), self.expected_csv)\n\n # run problem with DOEDriver using the CSV file\n prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n values = []\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n values.append((outputs['x'], outputs['y'], outputs['f_xy']))\n\n self.assertEqual(\"\\n\".join([\"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values]),\n self.expected_text)\n\n def test_list(self):\n import openmdao.api as om\n from openmdao.test_suite.components.paraboloid import Paraboloid\n\n import json\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, 
upper=1.0)\n model.add_objective('f_xy')\n\n prob.setup()\n\n # load design variable inputs from JSON file and decode into list\n with open('cases.json', 'r') as f:\n json_data = f.read()\n\n self.assertEqual(json_data, self.expected_json)\n\n case_list = json.loads(json_data)\n\n self.assertEqual(case_list, json.loads(json_data))\n\n # create DOEDriver using provided list of cases\n prob.driver = om.DOEDriver(case_list)\n\n # a ListGenerator was created\n self.assertEqual(type(prob.driver.options['generator']), om.ListGenerator)\n\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.run_driver()\n prob.cleanup()\n\n cr = om.CaseReader(\"cases.sql\")\n cases = cr.list_cases('driver')\n\n values = []\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n values.append((outputs['x'], outputs['y'], outputs['f_xy']))\n\n self.assertEqual(\"\\n\".join([\"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values]),\n self.expected_text)\n\n\[email protected](om.PETScVector, \"PETSc is required.\")\nclass TestParallelDOEFeature(unittest.TestCase):\n\n N_PROCS = 2\n\n def setUp(self):\n import os\n import tempfile\n import numpy as np\n\n from mpi4py import MPI\n rank = MPI.COMM_WORLD.rank\n\n expected = {\n 0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},\n 1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},\n 2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},\n\n 3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},\n 4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},\n 5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},\n\n 6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},\n 7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},\n 8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},\n }\n\n # expect odd cases on rank 0 and even cases on rank 1\n values = []\n for idx in range(len(expected)):\n if idx % 2 == rank:\n case = expected[idx]\n values.append((case['x'], case['y'], case['f_xy']))\n\n self.expect_text = \"\\n\"+\"\\n\".join([\n \"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values\n ])\n\n # run in temp dir\n self.startdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp(prefix='TestParallelDOEFeature-')\n os.chdir(self.tempdir)\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n shutil.rmtree(self.tempdir)\n except OSError:\n pass\n\n def test_full_factorial(self):\n import openmdao.api as om\n from openmdao.test_suite.components.paraboloid import Paraboloid\n\n from mpi4py import MPI\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])\n model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])\n model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])\n\n model.add_design_var('x', lower=0.0, upper=1.0)\n model.add_design_var('y', lower=0.0, upper=1.0)\n model.add_objective('f_xy')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 1\n\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n self.assertEqual(MPI.COMM_WORLD.size, 2)\n\n # check recorded cases from each case file\n rank = MPI.COMM_WORLD.rank\n filename = \"cases.sql_%d\" % rank\n 
self.assertEqual(filename, \"cases.sql_%d\" % rank)\n\n cr = om.CaseReader(filename)\n cases = cr.list_cases('driver')\n self.assertEqual(len(cases), 5 if rank == 0 else 4)\n\n values = []\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n values.append((outputs['x'], outputs['y'], outputs['f_xy']))\n\n self.assertEqual(\"\\n\"+\"\\n\".join([\"x: %5.2f, y: %5.2f, f_xy: %6.2f\" % (x, y, f_xy) for x, y, f_xy in values]),\n self.expect_text)\n\n\[email protected](om.PETScVector, \"PETSc is required.\")\nclass TestParallelDOEFeature2(unittest.TestCase):\n\n N_PROCS = 4\n\n def setUp(self):\n import os\n import shutil\n import tempfile\n\n from mpi4py import MPI\n rank = MPI.COMM_WORLD.rank\n\n expected = {\n 0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([ 0.00])},\n 1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.00])},\n 2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.00])},\n\n 3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.50])},\n 4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.50])},\n 5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.50])},\n\n 6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.00])},\n 7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.00])},\n 8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.00])},\n }\n\n # expect odd cases on rank 0 and even cases on rank 1\n values = []\n for idx in range(len(expected)):\n if idx % 2 == rank:\n case = expected[idx]\n values.append((case['iv.x1'], case['iv.x2'], case['c3.y']))\n\n self.expect_text = \"\\n\"+\"\\n\".join([\n \"iv.x1: %5.2f, iv.x2: %5.2f, c3.y: %6.2f\" % (x1, x2, y) for x1, x2, y in values\n ])\n\n # run in temp dir\n self.startdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp(prefix='TestParallelDOEFeature2-')\n os.chdir(self.tempdir)\n\n def tearDown(self):\n os.chdir(self.startdir)\n try:\n shutil.rmtree(self.tempdir)\n except OSError:\n pass\n\n def test_fan_in_grouped(self):\n import openmdao.api as om\n from openmdao.test_suite.groups.parallel_groups import FanInGrouped\n\n from mpi4py import MPI\n\n prob = om.Problem(FanInGrouped())\n model = prob.model\n\n model.add_design_var('iv.x1', lower=0.0, upper=1.0)\n model.add_design_var('iv.x2', lower=0.0, upper=1.0)\n\n model.add_objective('c3.y')\n\n prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))\n prob.driver.add_recorder(om.SqliteRecorder(\"cases.sql\"))\n prob.driver.options['run_parallel'] = True\n\n # run 2 cases at a time, each using 2 of our 4 procs\n doe_parallel = prob.driver.options['procs_per_model'] = 2\n\n prob.setup()\n prob.run_driver()\n prob.cleanup()\n\n rank = MPI.COMM_WORLD.rank\n\n # check recorded cases from each case file\n if rank < doe_parallel:\n filename = \"cases.sql_%d\" % rank\n\n cr = om.CaseReader(filename)\n cases = cr.list_cases('driver')\n\n values = []\n for n in range(len(cases)):\n outputs = cr.get_case(cases[n]).outputs\n values.append((outputs['iv.x1'], outputs['iv.x2'], outputs['c3.y']))\n\n self.assertEqual(\"\\n\"+\"\\n\".join([\"iv.x1: %5.2f, iv.x2: %5.2f, c3.y: %6.2f\" % (x1, x2, y) for x1, x2, y in values]),\n self.expect_text)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "from __future__ import print_function, division, absolute_import\n\nimport unittest\n\nimport numpy as np\n\nimport openmdao.api as om\n\n\nclass 
TestVectorMagnitudeCompNx3(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a'])\n\n self.p.model.add_subsystem(name='vec_mag_comp',\n subsys=om.VectorMagnitudeComp(vec_size=self.nn))\n\n self.p.model.connect('a', 'vec_mag_comp.a')\n\n self.p.setup()\n\n self.p['a'] = 1.0 + np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n\n for i in range(self.nn):\n a_i = self.p['a'][i, :]\n mag_i = self.p['vec_mag_comp.a_mag'][i]\n expected_i = np.sqrt(np.dot(a_i, a_i))\n\n np.testing.assert_almost_equal(mag_i, expected_i)\n\n def test_partials(self):\n np.set_printoptions(linewidth=1024)\n cpd = self.p.check_partials(compact_print=False, method='fd', step=1.0E-9)\n\n for comp in cpd:\n for (var, wrt) in cpd[comp]:\n np.testing.assert_almost_equal(actual=cpd[comp][var, wrt]['J_fwd'],\n desired=cpd[comp][var, wrt]['J_fd'],\n decimal=5)\n\n\nclass TestVectorMagnitudeCompNx4(unittest.TestCase):\n def setUp(self):\n self.nn = 100\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 4))\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a'])\n\n self.p.model.add_subsystem(name='vec_mag_comp',\n subsys=om.VectorMagnitudeComp(vec_size=self.nn, length=4))\n\n self.p.model.connect('a', 'vec_mag_comp.a')\n\n self.p.setup()\n\n self.p['a'] = 1.0 + np.random.rand(self.nn, 4)\n\n self.p.run_model()\n\n def test_results(self):\n\n for i in range(self.nn):\n a_i = self.p['a'][i, :]\n mag_i = self.p['vec_mag_comp.a_mag'][i]\n expected_i = np.sqrt(np.dot(a_i, a_i))\n\n np.testing.assert_almost_equal(mag_i, expected_i)\n\n def test_partials(self):\n np.set_printoptions(linewidth=1024)\n cpd = self.p.check_partials(compact_print=False, method='fd', step=1.0E-9)\n\n for comp in cpd:\n for (var, wrt) in cpd[comp]:\n np.testing.assert_almost_equal(actual=cpd[comp][var, wrt]['J_fwd'],\n desired=cpd[comp][var, wrt]['J_fd'],\n decimal=6)\n\n\nclass TestUnits(unittest.TestCase):\n\n def setUp(self):\n self.nn = 5\n\n self.p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='a', shape=(self.nn, 3), units='m')\n\n self.p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['a'])\n\n self.p.model.add_subsystem(name='vec_mag_comp',\n subsys=om.VectorMagnitudeComp(vec_size=self.nn, units='m'))\n\n self.p.model.connect('a', 'vec_mag_comp.a')\n\n self.p.setup()\n\n self.p['a'] = 1.0 + np.random.rand(self.nn, 3)\n\n self.p.run_model()\n\n def test_results(self):\n\n for i in range(self.nn):\n a_i = self.p['a'][i, :]\n c_i = self.p.get_val('vec_mag_comp.a_mag', units='ft')[i]\n expected_i = np.sqrt(np.dot(a_i, a_i)) / 0.3048\n\n np.testing.assert_almost_equal(c_i, expected_i)\n\n def test_partials(self):\n np.set_printoptions(linewidth=1024)\n cpd = self.p.check_partials(compact_print=True)\n\n for comp in cpd:\n for (var, wrt) in cpd[comp]:\n np.testing.assert_almost_equal(actual=cpd[comp][var, wrt]['J_fwd'],\n desired=cpd[comp][var, wrt]['J_fd'],\n decimal=6)\n\n\nclass TestFeature(unittest.TestCase):\n\n def test(self):\n \"\"\"\n A simple example to compute the magnitude of 3-vectors at at 100 points simultaneously.\n \"\"\"\n import numpy as np\n import openmdao.api as om\n from openmdao.utils.assert_utils import assert_rel_error\n\n n = 100\n\n p = om.Problem()\n\n ivc = om.IndepVarComp()\n ivc.add_output(name='pos', 
shape=(n, 3), units='m')\n\n p.model.add_subsystem(name='ivc',\n subsys=ivc,\n promotes_outputs=['pos'])\n\n dp_comp = om.VectorMagnitudeComp(vec_size=n, length=3, in_name='r', mag_name='r_mag',\n units='km')\n\n p.model.add_subsystem(name='vec_mag_comp', subsys=dp_comp)\n\n p.model.connect('pos', 'vec_mag_comp.r')\n\n p.setup()\n\n p['pos'] = 1.0 + np.random.rand(n, 3)\n\n p.run_model()\n\n # Verify the results against numpy.dot in a for loop.\n for i in range(n):\n a_i = p['pos'][i, :]\n expected_i = np.sqrt(np.dot(a_i, a_i)) / 1000.0\n assert_rel_error(self, p.get_val('vec_mag_comp.r_mag')[i], expected_i)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "\"\"\"\nOpenMDAO Wrapper for pyoptsparse.\n\npyoptsparse is based on pyOpt, which is an object-oriented framework for\nformulating and solving nonlinear constrained optimization problems, with\nadditional MPI capability.\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport json\nimport sys\nimport traceback\n\nfrom six import iteritems, itervalues, string_types, reraise\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\nfrom pyoptsparse import Optimization\n\nfrom openmdao.core.analysis_error import AnalysisError\nfrom openmdao.core.driver import Driver, RecordingDebugging\nimport openmdao.utils.coloring as coloring_mod\nfrom openmdao.utils.general_utils import warn_deprecation, simple_warning\nfrom openmdao.utils.class_util import weak_method_wrapper\nfrom openmdao.utils.mpi import FakeComm\n\n\n# names of optimizers that use gradients\ngrad_drivers = {'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG'}\n\n# names of optimizers that allow multiple objectives\nmulti_obj_drivers = {'NSGA2'}\n\n# All optimizers in pyoptsparse\noptlist = ['ALPSO', 'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'NSGA2', 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG', 'NOMAD']\n\n# All optimizers that require an initial run\nrun_required = ['NSGA2', 'ALPSO']\n\nCITATIONS = \"\"\"@article{Hwang_maud_2018\n author = {Hwang, John T. and Martins, Joaquim R.R.A.},\n title = \"{A Computational Architecture for Coupling Heterogeneous\n Numerical Models and Computing Coupled Derivatives}\",\n journal = \"{ACM Trans. Math. 
Softw.}\",\n volume = {44},\n number = {4},\n month = jun,\n year = {2018},\n pages = {37:1--37:39},\n articleno = {37},\n numpages = {39},\n doi = {10.1145/3182393},\n publisher = {ACM},\n}\n\"\"\"\n\n\nclass pyOptSparseDriver(Driver):\n \"\"\"\n Driver wrapper for pyoptsparse.\n\n Pyoptsparse is based on pyOpt, which\n is an object-oriented framework for formulating and solving nonlinear\n constrained optimization problems, with additional MPI capability.\n pypptsparse has interfaces to the following optimizers:\n ALPSO, CONMIN, FSQP, IPOPT, NLPQLP, NSGA2, PSQP, SLSQP,\n SNOPT, NLPY_AUGLAG, NOMAD.\n Note that some of these are not open source and therefore not included\n in the pyoptsparse source code.\n\n pyOptSparseDriver supports the following:\n equality_constraints\n\n inequality_constraints\n\n two_sided_constraints\n\n Attributes\n ----------\n fail : bool\n Flag that indicates failure of most recent optimization.\n hist_file : str or None\n File location for saving pyopt_sparse optimization history.\n Default is None for no output.\n hotstart_file : str\n Optional file to hot start the optimization.\n opt_settings : dict\n Dictionary for setting optimizer-specific options.\n pyopt_solution : Solution\n Pyopt_sparse solution object.\n _indep_list : list\n List of design variables.\n _quantities : list\n Contains the objectives plus nonlinear constraints.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize pyopt.\n\n Parameters\n ----------\n **kwargs : dict of keyword arguments\n Keyword arguments that will be mapped into the Driver options.\n \"\"\"\n super(pyOptSparseDriver, self).__init__(**kwargs)\n\n # What we support\n self.supports['inequality_constraints'] = True\n self.supports['equality_constraints'] = True\n self.supports['multiple_objectives'] = True\n self.supports['two_sided_constraints'] = True\n self.supports['linear_constraints'] = True\n self.supports['simultaneous_derivatives'] = True\n self.supports['total_jac_sparsity'] = True\n\n # What we don't support yet\n self.supports['active_set'] = False\n self.supports['integer_design_vars'] = False\n\n # The user places optimizer-specific settings in here.\n self.opt_settings = {}\n\n # The user can set a file name here to store history\n self.hist_file = None\n\n # The user can set a file here to hot start the optimization\n # with a history file\n self.hotstart_file = None\n\n # We save the pyopt_solution so that it can be queried later on.\n self.pyopt_solution = None\n\n self._indep_list = []\n self._quantities = []\n self.fail = False\n\n self.cite = CITATIONS\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n self.options.declare('optimizer', default='SLSQP', values=optlist,\n desc='Name of optimizers to use')\n self.options.declare('title', default='Optimization using pyOpt_sparse',\n desc='Title of this optimization run')\n self.options.declare('print_results', types=bool, default=True,\n desc='Print pyOpt results if True')\n self.options.declare('gradient method', default='openmdao',\n values={'openmdao', 'pyopt_fd', 'snopt_fd'},\n desc='Finite difference implementation to use')\n self.options.declare('dynamic_simul_derivs', default=False, types=bool,\n desc='Compute simultaneous derivative coloring dynamically '\n 'if True (deprecated)')\n\n def _setup_driver(self, problem):\n \"\"\"\n Prepare the driver for execution.\n\n This is the final thing to run during setup.\n\n Parameters\n ----------\n problem : <Problem>\n Pointer to 
the containing problem.\n \"\"\"\n super(pyOptSparseDriver, self)._setup_driver(problem)\n\n self.supports['gradients'] = self.options['optimizer'] in grad_drivers\n\n if len(self._objs) > 1 and self.options['optimizer'] not in multi_obj_drivers:\n raise RuntimeError('Multiple objectives have been added to pyOptSparseDriver'\n ' but the selected optimizer ({0}) does not support'\n ' multiple objectives.'.format(self.options['optimizer']))\n\n self._setup_tot_jac_sparsity()\n\n def run(self):\n \"\"\"\n Excute pyOptsparse.\n\n Note that pyOpt controls the execution, and the individual optimizers\n (e.g., SNOPT) control the iteration.\n\n Returns\n -------\n boolean\n Failure flag; True if failed to converge, False is successful.\n \"\"\"\n problem = self._problem()\n model = problem.model\n relevant = model._relevant\n self.pyopt_solution = None\n self._total_jac = None\n self.iter_count = 0\n fwd = problem._mode == 'fwd'\n optimizer = self.options['optimizer']\n self._quantities = []\n\n self._check_for_missing_objective()\n\n # Only need initial run if we have linear constraints or if we are using an optimizer that\n # doesn't perform one initially.\n con_meta = self._cons\n model_ran = False\n if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]):\n with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:\n # Initial Run\n model.run_solve_nonlinear()\n rec.abs = 0.0\n rec.rel = 0.0\n model_ran = True\n self.iter_count += 1\n\n # compute dynamic simul deriv coloring or just sparsity if option is set\n if coloring_mod._use_total_sparsity:\n coloring = None\n if self._coloring_info['coloring'] is None and self._coloring_info['dynamic']:\n coloring_mod.dynamic_total_coloring(self, run_model=not model_ran,\n fname=self._get_total_coloring_fname())\n coloring = self._coloring_info['coloring']\n self._setup_tot_jac_sparsity()\n elif self.options['dynamic_simul_derivs']:\n warn_deprecation(\"The 'dynamic_simul_derivs' option has been deprecated. Call \"\n \"the 'declare_coloring' function instead.\")\n coloring_mod.dynamic_total_coloring(self, run_model=not model_ran,\n fname=self._get_total_coloring_fname())\n coloring = self._coloring_info['coloring']\n\n self._setup_tot_jac_sparsity()\n\n if coloring is not None:\n # if the improvement wasn't large enough, don't use coloring\n pct = coloring._solves_info()[-1]\n info = self._coloring_info\n if info['min_improve_pct'] > pct:\n info['coloring'] = info['static'] = info['dynamic'] = None\n simple_warning(\"%s: Coloring was deactivated. 
Improvement of %.1f%% was less \"\n \"than min allowed (%.1f%%).\" % (self.msginfo, pct,\n info['min_improve_pct']))\n\n comm = None if isinstance(problem.comm, FakeComm) else problem.comm\n opt_prob = Optimization(self.options['title'], weak_method_wrapper(self, '_objfunc'),\n comm=comm)\n\n # Add all design variables\n param_meta = self._designvars\n self._indep_list = indep_list = list(param_meta)\n param_vals = self.get_design_var_values()\n\n for name, meta in iteritems(param_meta):\n opt_prob.addVarGroup(name, meta['size'], type='c',\n value=param_vals[name],\n lower=meta['lower'], upper=meta['upper'])\n\n opt_prob.finalizeDesignVariables()\n\n # Add all objectives\n objs = self.get_objective_values()\n for name in objs:\n opt_prob.addObj(name)\n self._quantities.append(name)\n\n # Calculate and save derivatives for any linear constraints.\n lcons = [key for (key, con) in iteritems(con_meta) if con['linear']]\n if len(lcons) > 0:\n _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict')\n # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will\n # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation\n # of linear constraints!\n to_remove = []\n for jacdct in itervalues(_lin_jacs):\n for n, subjac in iteritems(jacdct):\n if isinstance(subjac, np.ndarray):\n # we can safely use coo_matrix to automatically convert the ndarray\n # since our linear constraint jacs are constant, so zeros won't become\n # nonzero during the optimization.\n mat = coo_matrix(subjac)\n if mat.row.size > 0:\n # convert to 'coo' format here to avoid an emphatic warning\n # by pyoptsparse.\n jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}\n\n # Add all equality constraints\n for name, meta in iteritems(con_meta):\n if meta['equals'] is None:\n continue\n size = meta['size']\n lower = upper = meta['equals']\n if fwd:\n wrt = [v for v in indep_list if name in relevant[v]]\n else:\n rels = relevant[name]\n wrt = [v for v in indep_list if v in rels]\n\n if meta['linear']:\n jac = {w: _lin_jacs[name][w] for w in wrt}\n opt_prob.addConGroup(name, size, lower=lower, upper=upper,\n linear=True, wrt=wrt, jac=jac)\n else:\n if name in self._res_jacs:\n resjac = self._res_jacs[name]\n jac = {n: resjac[n] for n in wrt}\n else:\n jac = None\n opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac)\n self._quantities.append(name)\n\n # Add all inequality constraints\n for name, meta in iteritems(con_meta):\n if meta['equals'] is not None:\n continue\n size = meta['size']\n\n # Bounds - double sided is supported\n lower = meta['lower']\n upper = meta['upper']\n\n if fwd:\n wrt = [v for v in indep_list if name in relevant[v]]\n else:\n rels = relevant[name]\n wrt = [v for v in indep_list if v in rels]\n\n if meta['linear']:\n jac = {w: _lin_jacs[name][w] for w in wrt}\n opt_prob.addConGroup(name, size, upper=upper, lower=lower,\n linear=True, wrt=wrt, jac=jac)\n else:\n if name in self._res_jacs:\n resjac = self._res_jacs[name]\n jac = {n: resjac[n] for n in wrt}\n else:\n jac = None\n opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac)\n self._quantities.append(name)\n\n # Instantiate the requested optimizer\n try:\n _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n opt = getattr(_tmp, optimizer)()\n\n except Exception as err:\n # Change whatever pyopt gives us to an ImportError, give it a readable message,\n # but raise with the original traceback.\n 
msg = \"Optimizer %s is not available in this installation.\" % optimizer\n reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n # Set optimization options\n for option, value in self.opt_settings.items():\n opt.setOption(option, value)\n\n # Execute the optimization problem\n if self.options['gradient method'] == 'pyopt_fd':\n\n # Use pyOpt's internal finite difference\n # TODO: Need to get this from OpenMDAO\n # fd_step = problem.root.deriv_options['step_size']\n fd_step = 1e-6\n sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file,\n hotStart=self.hotstart_file)\n\n elif self.options['gradient method'] == 'snopt_fd':\n if self.options['optimizer'] == 'SNOPT':\n\n # Use SNOPT's internal finite difference\n # TODO: Need to get this from OpenMDAO\n # fd_step = problem.root.deriv_options['step_size']\n fd_step = 1e-6\n sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file,\n hotStart=self.hotstart_file)\n\n else:\n raise Exception(\"SNOPT's internal finite difference can only be used with SNOPT\")\n else:\n\n # Use OpenMDAO's differentiator for the gradient\n sol = opt(opt_prob, sens=weak_method_wrapper(self, '_gradfunc'),\n storeHistory=self.hist_file, hotStart=self.hotstart_file)\n\n # Print results\n if self.options['print_results']:\n print(sol)\n\n # Pull optimal parameters back into framework and re-run, so that\n # framework is left in the right final state\n dv_dict = sol.getDVs()\n for name in indep_list:\n self.set_design_var(name, dv_dict[name])\n\n with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:\n model.run_solve_nonlinear()\n rec.abs = 0.0\n rec.rel = 0.0\n self.iter_count += 1\n\n # Save the most recent solution.\n self.pyopt_solution = sol\n\n try:\n exit_status = sol.optInform['value']\n self.fail = False\n\n # These are various failed statuses.\n if exit_status > 2:\n self.fail = True\n\n except KeyError:\n # optimizers other than pySNOPT may not populate this dict\n pass\n\n return self.fail\n\n def _objfunc(self, dv_dict):\n \"\"\"\n Compute the objective function and constraints.\n\n This function is passed to pyOpt's Optimization object and is called\n from its optimizers.\n\n Parameters\n ----------\n dv_dict : dict\n Dictionary of design variable values.\n\n Returns\n -------\n func_dict : dict\n Dictionary of all functional variables evaluated at design point.\n\n fail : int\n 0 for successful function evaluation\n 1 for unsuccessful function evaluation\n \"\"\"\n model = self._problem().model\n fail = 0\n\n try:\n for name in self._indep_list:\n self.set_design_var(name, dv_dict[name])\n\n # print(\"Setting DV\")\n # print(dv_dict)\n\n # Execute the model\n with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:\n self.iter_count += 1\n try:\n model.run_solve_nonlinear()\n\n # Let the optimizer try to handle the error\n except AnalysisError:\n model._clear_iprint()\n fail = 1\n\n func_dict = self.get_objective_values()\n func_dict.update(self.get_constraint_values(lintype='nonlinear'))\n\n # Record after getting obj and constraint to assure they have\n # been gathered in MPI.\n rec.abs = 0.0\n rec.rel = 0.0\n\n except Exception as msg:\n tb = traceback.format_exc()\n\n # Exceptions seem to be swallowed by the C code, so this\n # should give the user more info than the dreaded \"segfault\"\n print(\"Exception: %s\" % str(msg))\n print(70 * \"=\", tb, 70 * \"=\")\n fail = 1\n func_dict = {}\n\n # print(\"Functions calculated\")\n # print(dv_dict)\n\n return func_dict, 
fail\n\n def _gradfunc(self, dv_dict, func_dict):\n \"\"\"\n Compute the gradient of the objective function and constraints.\n\n This function is passed to pyOpt's Optimization object and is called\n from its optimizers.\n\n Parameters\n ----------\n dv_dict : dict\n Dictionary of design variable values.\n func_dict : dict\n Dictionary of all functional variables evaluated at design point.\n\n Returns\n -------\n sens_dict : dict\n Dictionary of dictionaries for gradient of each dv/func pair\n\n fail : int\n 0 for successful function evaluation\n 1 for unsuccessful function evaluation\n \"\"\"\n prob = self._problem()\n fail = 0\n\n try:\n\n try:\n sens_dict = self._compute_totals(of=self._quantities,\n wrt=self._indep_list,\n return_format='dict')\n # Let the optimizer try to handle the error\n except AnalysisError:\n prob.model._clear_iprint()\n fail = 1\n\n # We need to cobble together a sens_dict of the correct size.\n # Best we can do is return zeros.\n\n sens_dict = OrderedDict()\n for okey, oval in iteritems(func_dict):\n sens_dict[okey] = OrderedDict()\n osize = len(oval)\n for ikey, ival in iteritems(dv_dict):\n isize = len(ival)\n sens_dict[okey][ikey] = np.zeros((osize, isize))\n else:\n # if we don't convert to 'coo' here, pyoptsparse will do a\n # conversion of our dense array into a fully dense 'coo', which is bad.\n # TODO: look into getting rid of all of these conversions!\n new_sens = OrderedDict()\n res_jacs = self._res_jacs\n for okey in func_dict:\n new_sens[okey] = newdv = OrderedDict()\n for ikey in dv_dict:\n if okey in res_jacs and ikey in res_jacs[okey]:\n arr = sens_dict[okey][ikey]\n coo = res_jacs[okey][ikey]\n row, col, data = coo['coo']\n coo['coo'][2] = arr[row, col].flatten()\n newdv[ikey] = coo\n elif okey in sens_dict:\n newdv[ikey] = sens_dict[okey][ikey]\n sens_dict = new_sens\n\n except Exception as msg:\n tb = traceback.format_exc()\n\n # Exceptions seem to be swallowed by the C code, so this\n # should give the user more info than the dreaded \"segfault\"\n print(\"Exception: %s\" % str(msg))\n print(70 * \"=\", tb, 70 * \"=\")\n sens_dict = {}\n\n # print(\"Derivatives calculated\")\n # print(dv_dict)\n # print(sens_dict)\n return sens_dict, fail\n\n def _get_name(self):\n \"\"\"\n Get name of current optimizer.\n\n Returns\n -------\n str\n The name of the current optimizer.\n \"\"\"\n return \"pyOptSparse_\" + self.options['optimizer']\n\n def _get_ordered_nl_responses(self):\n \"\"\"\n Return the names of nonlinear responses in the order used by the driver.\n\n Default order is objectives followed by nonlinear constraints. 
This is used for\n simultaneous derivative coloring and sparsity determination.\n\n Returns\n -------\n list of str\n The nonlinear response names in order.\n \"\"\"\n nl_order = list(self._objs)\n neq_order = []\n for n, meta in iteritems(self._cons):\n if 'linear' not in meta or not meta['linear']:\n if meta['equals'] is not None:\n nl_order.append(n)\n else:\n neq_order.append(n)\n\n nl_order.extend(neq_order)\n\n return nl_order\n\n def _setup_tot_jac_sparsity(self):\n \"\"\"\n Set up total jacobian subjac sparsity.\n \"\"\"\n total_sparsity = None\n coloring = self._get_static_coloring()\n if coloring is not None:\n total_sparsity = coloring.get_subjac_sparsity()\n if self._total_jac_sparsity is not None:\n raise RuntimeError(\"Total jac sparsity was set in both _total_coloring\"\n \" and _total_jac_sparsity.\")\n elif self._total_jac_sparsity is not None:\n if isinstance(self._total_jac_sparsity, string_types):\n with open(self._total_jac_sparsity, 'r') as f:\n self._total_jac_sparsity = json.load(f)\n total_sparsity = self._total_jac_sparsity\n\n if total_sparsity is None:\n return\n\n self._res_jacs = {}\n for res, resdict in iteritems(total_sparsity):\n if res in self._objs: # skip objectives\n continue\n self._res_jacs[res] = {}\n for dv, (rows, cols, shape) in iteritems(resdict):\n rows = np.array(rows, dtype=int)\n cols = np.array(cols, dtype=int)\n\n self._res_jacs[res][dv] = {\n 'coo': [rows, cols, np.zeros(rows.size)],\n 'shape': shape,\n }\n" ]
[ [ "scipy.interpolate._bsplines.make_interp_spline", "numpy.eye", "numpy.empty_like", "numpy.empty" ], [ "numpy.array" ], [ "numpy.array", "numpy.ones" ], [ "numpy.dot", "numpy.testing.assert_almost_equal", "numpy.set_printoptions", "numpy.random.rand" ], [ "scipy.sparse.coo_matrix", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
LauJames/QuestionMatching
[ "380e32ad4d884ecbf314fbb69b009f34a1f8f6a9" ]
[ "run_esim.py" ]
[ "#! /user/bin/evn python\n# -*- coding:utf8 -*-\n\n\"\"\"\n\n@Author : Lau James\n@Contact : [email protected]\n@Project : MVLSTM \n@File : run_esim.py\n@Time : 18-12-18 下午1:04\n@Software : PyCharm\n@Copyright: \"Copyright (c) 2018 Lau James. All Rights Reserved\"\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib as tc\nimport csv\nimport logging\nimport jieba\nfrom sklearn import metrics\nfrom models.esim import ESIM\nfrom data.dataloader import split_data, batch_iter_per_epoch, batch_iter_per_epoch_mask, get_q2q_label, load_pkl_set\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Question to Question matching for QA task using ESIM model')\n parser.add_argument('--prepare', action='store_true',\n help='create the directories, prepare the vocab and embeddings')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--evaluate', action='store_true',\n help='evaluate the model on dev set')\n parser.add_argument('--predict', action='store_true',\n help='predict the match result fot test set on trained model')\n parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--dev_sample_percentage', type=float, default=0.1,\n help='percentage of the training data to use for validation')\n train_settings.add_argument('--optim', default='adam', help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.1, help='optimizer type')\n train_settings.add_argument('--weight_dacay', type=float, default=0, help='weight decay')\n train_settings.add_argument('--dropout_keep_prob', type=float, default=0.5, help='dropout keep prob')\n train_settings.add_argument('--batch_size', type=int, default=64, help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=20, help='train epochs')\n train_settings.add_argument('--evaluate_every', type=int, default=100,\n help='evaluate model on dev set after this many training steps')\n train_settings.add_argument('--checkpoint_every', type=int, default=500,\n help='save model after this many training steps')\n train_settings.add_argument('--num_checkpoints', type=int, default=5,\n help='number of checkpoints to store')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--algo', choices=['ESIM'], default='ESIM',\n help='choose the algorithm to use')\n model_settings.add_argument('--embedding_dim', type=int, default=300,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size', type=int, default=128,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_q_len', type=int, default=30,\n help='max length of question')\n model_settings.add_argument('--num_classes', type=int, default=2,\n help='num of classes')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--merged_files',\n default='./data/q2q_pair_merged.txt',\n # default='./data/test.txt',\n help='list of files that contain the preprocessed data')\n path_settings.add_argument('--pkl_files',\n default='./data/split_data_aug_mask.pkl',\n # default='./data/test.txt',\n help='list of files that contain the preprocessed data')\n # path_settings.add_argument('--test_data_files',\n # default='./data/testset.txt')\n path_settings.add_argument('--tensorboard_dir', 
default='tensorboard_dir/ESIM',\n help='saving path of tensorboard')\n path_settings.add_argument('--save_dir', default='checkpoints/ESIM',\n help='save base dir')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n\n misc_setting = parser.add_argument_group('misc settings')\n misc_setting.add_argument('--allow_soft_placement', type=bool, default=True,\n help='allow device soft device placement')\n misc_setting.add_argument('--log_device_placement', type=bool, default=False,\n help='log placement of ops on devices')\n\n return parser.parse_args()\n\n\ndef get_time_dif(start_time):\n end_time = time.time()\n time_dif = end_time - start_time\n return datetime.timedelta(seconds=int(round(time_dif)))\n\n\n# def feed_data(q1_batch, q2_batch, y_batch, q1_mask_batch, q2_mask_batch, keep_prob, model):\n# feed_dict = {\n# model.input_q1: q1_batch,\n# model.input_q2: q2_batch,\n# model.input_y: y_batch,\n# model.q1_mask: q1_mask_batch,\n# model.q2_mask: q2_mask_batch,\n# model.dropout_keep_prob: keep_prob\n# }\n# return feed_dict\n\ndef feed_data(q1_batch, q2_batch, y_batch, keep_prob, model):\n feed_dict = {\n model.input_q1: q1_batch,\n model.input_q2: q2_batch,\n model.input_y: y_batch,\n model.dropout_keep_prob: keep_prob\n }\n return feed_dict\n\n\n# def evaluate(q1_dev, q2_dev, y_dev, q1_mask_dev, q2_mask_dev, sess, model):\n# \"\"\"\n# Evaluate model on a dev set\n# :param q1_dev:\n# :param q2_dev:\n# :param y_dev:\n# :param sess:\n# :return:\n# \"\"\"\n# data_len = len(y_dev)\n# batch_eval = batch_iter_per_epoch_mask(q1_dev, q2_dev, q1_mask_dev, q2_mask_dev, y_dev)\n# total_loss = 0.0\n# total_acc = 0.0\n# for q1_batch_eval, q2_batch_eval, q1_mask_batch_eval, q2_mask_batch_eval, y_batch_eval in batch_eval:\n# batch_len = len(y_batch_eval)\n# feed_dict = feed_data(q1_batch_eval, q2_batch_eval, y_batch_eval,\n# q1_mask_batch_eval, q2_mask_batch_eval,\n# keep_prob=1.0,\n# model=model)\n# loss, accuracy = sess.run([model.loss, model.accuracy], feed_dict)\n# total_loss += loss * batch_len\n# total_acc += accuracy * batch_len\n# return total_loss/data_len, total_acc/data_len\n\n\ndef evaluate(q1_dev, q2_dev, y_dev, sess, model):\n \"\"\"\n Evaluate model on a dev set\n :param q1_dev:\n :param q2_dev:\n :param y_dev:\n :param sess:\n :return:\n \"\"\"\n data_len = len(y_dev)\n batch_eval = batch_iter_per_epoch(q1_dev, q2_dev, y_dev)\n total_loss = 0.0\n total_acc = 0.0\n for q1_batch_eval, q2_batch_eval, y_batch_eval in batch_eval:\n batch_len = len(y_batch_eval)\n feed_dict = feed_data(q1_batch_eval, q2_batch_eval, y_batch_eval, keep_prob=1.0, model=model)\n loss, accuracy = sess.run([model.loss, model.accuracy], feed_dict)\n total_loss += loss * batch_len\n total_acc += accuracy * batch_len\n return total_loss/data_len, total_acc/data_len\n\n\ndef chinese_tokenizer(documents):\n \"\"\"\n 中文文本转换为词序列\n :param documents:\n :return:\n \"\"\"\n for document in documents:\n yield list(jieba.cut(document))\n\n\ndef prepare():\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n print('Vocab processing ...')\n q1, q2, y = get_q2q_label(args.merged_files)\n start_time = time.time()\n vocab_processor = tc.learn.preprocessing.VocabularyProcessor(max_document_length=args.max_q_len,\n min_frequency=5,\n tokenizer_fn=chinese_tokenizer)\n q1_pad = np.array(list(vocab_processor.fit_transform(q1)))\n q2_pad = np.array(list(vocab_processor.fit_transform(q2)))\n\n del q1, q1_pad, q2, q2_pad, y\n\n print('Vocab size: 
{}'.format(len(vocab_processor.vocabulary_)))\n vocab_processor.save(os.path.join(args.save_dir, \"vocab\"))\n\n # split\n # split_data(args.merged_files, os.path.join(args.save_dir, \"vocab\"), args.pkl_files, mask=True)\n # no mask\n split_data(args.merged_files, os.path.join(args.save_dir, \"vocab\"), args.pkl_files)\n\n time_dif = get_time_dif(start_time)\n print('Vocab processing time usage:', time_dif)\n\n # Vocab size: 25579\n # Vocabulary Size: 25579\n # Train / Dev / test split: 589870 / 73733 / 73733\n # Vocab processing time usage: 0:04: 21\n\n\ndef train():\n # Loading data\n print('Loading data ...')\n start_time = time.time()\n # [q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, q1_test, q2_test, y_test, vocab_size, q1_mask_train,\n # q2_mask_train, q1_mask_dev, q2_mask_dev, q1_mask_test, q2_mask_test] = load_pkl_set(args.pkl_files, mask=True)\n\n # del q1_test, q2_test, q1_mask_test, q2_mask_test, y_test\n\n q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, q1_test, q2_test, y_test, vocab_size = load_pkl_set(\n args.pkl_files)\n\n del q1_test, q2_test, y_test\n\n time_dif = get_time_dif(start_time)\n print('Time usage:', time_dif)\n\n print('Configuring TensorBoard and Saver ...')\n tensorboard_dir = args.tensorboard_dir\n if not os.path.exists(tensorboard_dir):\n os.makedirs(tensorboard_dir)\n\n # ESIM model init\n model = ESIM(\n sequence_length=args.max_q_len,\n num_classes=args.num_classes,\n embedding_dim=args.embedding_dim,\n vocab_size=vocab_size,\n max_length=args.max_q_len,\n hidden_dim=args.hidden_size,\n learning_rate=args.learning_rate,\n optimizer=args.optim\n )\n tf.summary.scalar('loss', model.loss)\n tf.summary.scalar('accuracy', model.accuracy)\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tensorboard_dir)\n\n # Configuring Saver\n saver = tf.train.Saver()\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # Create Session\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n writer.add_graph(session.graph)\n\n print('Training and Deviation ...')\n start_time = time.time()\n total_batch = 0\n best_acc_dev = 0.0\n last_improved = 0\n require_improvement = 30000 # Early stopping\n\n tag = False\n for epoch in range(args.epochs):\n print('Epoch:', epoch + 1)\n # batch_train = batch_iter_per_epoch_mask(q1_train, q2_train,\n # q1_mask_train, q2_mask_train,\n # y_train, args.batch_size)\n\n batch_train = batch_iter_per_epoch(q1_train, q2_train, y_train, args.batch_size)\n for q1_batch, q2_batch, y_batch in batch_train:\n feed_dict = feed_data(q1_batch, q2_batch, y_batch, args.dropout_keep_prob, model=model)\n if total_batch % args.checkpoint_every == 0:\n # write to tensorboard scalar\n summary = session.run(merged_summary, feed_dict)\n writer.add_summary(summary, total_batch)\n\n if total_batch % args.evaluate_every == 0:\n # print performance on train set and dev set\n feed_dict[model.dropout_keep_prob] = 1.0\n loss_train, acc_train = session.run([model.loss, model.accuracy], feed_dict=feed_dict)\n # loss_dev, acc_dev = evaluate(q1_dev, q2_dev, y_dev, q1_mask_dev, q2_mask_dev, session, model)\n loss_dev, acc_dev = evaluate(q1_dev, q2_dev, y_dev, session, model=model)\n\n if acc_dev > best_acc_dev:\n # save best result\n best_acc_dev = acc_dev\n last_improved = total_batch\n saver.save(sess=session, save_path=save_path)\n improved_str = '*'\n else:\n improved_str = ''\n\n time_dif = get_time_dif(start_time)\n print('Iter: {0:>6}, Train Loss: {1:6.2}, Train Acc: {2:7.2%}, Val 
loss:{3:6.2}, '\n 'Val acc:{4:7.2%}, Time:{5}{6}'\n .format(total_batch, loss_train, acc_train, loss_dev, acc_dev, time_dif, improved_str))\n\n session.run(model.optimizer, feed_dict)\n total_batch += 1\n\n if total_batch - last_improved > require_improvement:\n # having no improvement for a long time\n print('No optimization for a long time, auto-stopping ...')\n tag = True\n break\n if tag: # early stopping\n break\n\n\ndef predict():\n print('Loading test data ...')\n start_time = time.time()\n # [q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, q1_test, q2_test, y_test, vocab_size, q1_mask_train,\n # q2_mask_train, q1_mask_dev, q2_mask_dev, q1_mask_test, q2_mask_test] = load_pkl_set(args.pkl_files, mask=True)\n #\n # del q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, q1_mask_train, q2_mask_train, q1_mask_dev, q2_mask_dev\n\n q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, q1_test, q2_test, y_test, vocab_size = load_pkl_set(\n args.pkl_files)\n\n del q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev\n\n # ESIM model init\n model = ESIM(\n sequence_length=args.max_q_len,\n num_classes=args.num_classes,\n embedding_dim=args.embedding_dim,\n vocab_size=vocab_size,\n max_length=args.max_q_len,\n hidden_dim=args.hidden_size,\n learning_rate=args.learning_rate,\n optimizer=args.optim\n )\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(session, save_path=save_path)\n\n print('Testing ...')\n # loss_test, acc_test = evaluate(q1_test, q2_test, y_test, q1_mask_test, q2_mask_test, session, model)\n loss_test, acc_test = evaluate(q1_test, q2_test, y_test, session, model=model)\n print('Test loss:{0:6.2}, Test acc:{1:7.2%}'.format(loss_test, acc_test))\n\n # test_batches = batch_iter_per_epoch_mask(q1_test, q2_test, q1_mask_test, q2_mask_test, y_test, shuffle=False)\n test_batches = batch_iter_per_epoch(q1_test, q2_test, y_test, shuffle=False)\n all_predictions = []\n all_predict_prob = []\n count = 0\n # for q1_test_batch, q2_test_batch, q1_mask_batch, q2_mask_batch, y_test_batch in test_batches:\n # batch_predictions, batch_predict_probs = session.run([model.predict, model.probs],\n # feed_dict={\n # model.input_q1: q1_test_batch,\n # model.input_q2: q2_test_batch,\n # model.q1_mask: q1_mask_batch,\n # model.q2_mask: q2_mask_batch,\n # model.dropout_keep_prob: 1.0\n # })\n for q1_test_batch, q2_test_batch, y_test_batch in test_batches:\n batch_predictions, batch_predict_probs = session.run([model.y_pred, model.probs],\n feed_dict={\n model.input_q1: q1_test_batch,\n model.input_q2: q2_test_batch,\n model.dropout_keep_prob: 1.0\n })\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n if count == 0:\n all_predict_prob = batch_predict_probs\n else:\n all_predict_prob = np.concatenate([all_predict_prob, batch_predict_probs])\n count = 1\n y_test = [float(temp) for temp in y_test]\n\n # Evaluation indices\n print('Precision, Recall, F1-Score ...')\n print(metrics.classification_report(y_test, all_predictions,\n target_names=['not match', 'match']))\n\n # Confusion Matrix\n print('Confusion Matrix ...')\n print(metrics.confusion_matrix(y_test, all_predictions))\n\n # Write probability to csv\n # out_dir = os.path.join(args.save_dir, 'predict_prob_csv')\n # print('Saving evaluation to {0}'.format(out_dir))\n # with open(out_dir, 'w') as f:\n # csv.writer(f).writerows(all_predict_prob)\n\n time_dif = get_time_dif(start_time)\n print('Time usage:', time_dif)\n\n\nif __name__ == '__main__':\n args = 
parse_args()\n\n save_path = os.path.join(args.save_dir, 'best_validation')\n\n logger = logging.getLogger('q2q_matching_esim')\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n if args.log_path:\n file_handler = logging.FileHandler(args.log_path)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info('Runing with args: {}'.format(args))\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n # if args.prepare:\n # prepare()\n # if args.train:\n # train()\n # if args.evaluate:\n # predict()\n # prepare()\n train()\n # predict()\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.summary.scalar", "sklearn.metrics.classification_report", "sklearn.metrics.confusion_matrix", "numpy.concatenate", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
guiwitz/napari-animation
[ "a2d94656d9ae86b7018afaeede683e7d9929717b" ]
[ "napari_animation/interpolation.py" ]
[ "import numbers\nfrom enum import Enum\nfrom functools import partial\n\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom scipy.spatial.transform import Slerp\n\nfrom .utils import keys_to_list, nested_get, nested_set, quaternion2euler\n\n\ndef default(a, b, fraction):\n \"\"\"Default interpolation for the corresponding type;\n linear interpolation for numeric, step interpolation otherwise.\n\n Parameters\n ----------\n a :\n initial value\n b :\n final value\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n Interpolated value between a and b at fraction.\n \"\"\"\n if isinstance(a, numbers.Number) and isinstance(b, numbers.Number):\n return interpolate_num(a, b, fraction)\n\n elif isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):\n return interpolate_seq(a, b, fraction)\n\n else:\n return interpolate_bool(a, b, fraction)\n\n\ndef interpolate_seq(a, b, fraction):\n \"\"\"Interpolation of list or tuple.\n Parameters\n ----------\n a : list or tuple\n initial sequence\n b : list or tuple\n final sequence\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n : sequence of type a\n Interpolated sequence between a and b at fraction.\n \"\"\"\n return type(a)(default(v0, v1, fraction) for v0, v1 in zip(a, b))\n\n\ndef interpolate_num(a, b, fraction):\n \"\"\"Linear interpolation for numeric types.\n\n Parameters\n ----------\n a : numeric type\n initial value\n b : numeric type\n final value\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n : numeric type\n Interpolated value between a and b at fraction.\n \"\"\"\n return type(a)(a + (b - a) * fraction)\n\n\ndef interpolate_bool(a, b, fraction):\n \"\"\"Step interpolation.\n\n Parameters\n ----------\n a :\n initial value\n b :\n final value\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n a or b :\n Step interpolated value between a and b.\n \"\"\"\n if fraction < 0.5:\n return a\n else:\n return b\n\n\ndef interpolate_log(a, b, fraction):\n \"\"\"Log interpolation, for camera zoom mostly.\n\n Parameters\n ----------\n a : float\n initial value\n b : float\n final value\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n : float\n Log interpolated value between a and b at fraction.\n \"\"\"\n c = interpolate_num(np.log10(a), np.log10(b), fraction)\n return np.power(10, c)\n\n\ndef slerp(a, b, fraction):\n \"\"\"Compute Spherical linear interpolation from Euler angles,\n compatible with the napari view.\n\n Parameters\n ----------\n a : tuple\n initial tuple of Euler angles in degrees.\n b : tuple\n final tuple of Euler angles in degrees.\n fraction : float\n fraction to interpolate to between a and b.\n\n Returns\n ----------\n : tuple\n Interpolated Euler angles between a and b at fraction.\n \"\"\"\n key_rots = R.from_euler(\"ZYX\", [a, b], degrees=True)\n slerped = Slerp([0, 1], key_rots)\n q = slerped(fraction).as_quat()\n return quaternion2euler(q, degrees=True)\n\n\nclass Interpolation(Enum):\n \"\"\"Interpolation: interpolation function to use for a transition.\n\n Selects a preset interpolation function\n * DEFAULT: linear interpolation between start and endpoint.\n * SLERP: spherical linear interpolation on Euler angles.\n * LOG: log interpolation between start and endpoint.\n\n \"\"\"\n\n DEFAULT = partial(default)\n LOG = partial(interpolate_log)\n SLERP = partial(slerp)\n\n def 
__call__(self, *args):\n return self.value(*args)\n\n\ndef interpolate_state(\n initial_state, final_state, fraction, state_interpolation_map={}\n):\n \"\"\"Interpolate a state between two states\n\n Parameters\n ----------\n initial_state : dict\n Description of initial viewer state.\n final_state : dict\n Description of final viewer state.\n fraction : float\n Interpolation fraction, must be between `0` and `1`.\n A value of `0` will return the initial state. A\n value of `1` will return the final state.\n state_interpolation_map : dict\n Dictionary relating state attributes to interpolation functions.\n\n Returns\n -------\n state : dict\n Description of viewer state.\n \"\"\"\n\n state = dict()\n separator = \".\"\n\n for keys in keys_to_list(initial_state):\n v0 = nested_get(initial_state, keys)\n v1 = nested_get(final_state, keys)\n\n property_string = separator.join(keys)\n\n if property_string in state_interpolation_map.keys():\n interpolation_func = state_interpolation_map[property_string]\n else:\n interpolation_func = Interpolation.DEFAULT\n\n nested_set(state, keys, interpolation_func(v0, v1, fraction))\n\n return state\n" ]
[ [ "scipy.spatial.transform.Rotation.from_euler", "numpy.log10", "scipy.spatial.transform.Slerp", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.5", "1.3", "1.2", "1.4" ], "tensorflow": [] } ]
gifford-lab/seqgra
[ "3c7547878ecda4c00572746b8a07e0d614c9dbef", "3c7547878ecda4c00572746b8a07e0d614c9dbef", "3c7547878ecda4c00572746b8a07e0d614c9dbef", "3c7547878ecda4c00572746b8a07e0d614c9dbef" ]
[ "docsrc/defs/md/PyTorch/o100-dna1000-conv100-gmp-fc100.py", "seqgra/evaluator/metricsevaluator.py", "docsrc/defs/md/PyTorch/o2-dna150-fc1000.py", "docsrc/defs/md/PyTorch/o87-dna1000-chromdragonn.py" ]
[ "import math\n\nimport torch\n\n\nclass TorchModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n INPUT_CHANNELS: int = 4\n CONV_NUM_FILTERS: int = 10\n CONV_FILTER_WIDTH: int = 11\n FC_NUM_UNITS: int = 100\n OUTPUT_UNITS: int = 100\n\n self.conv = torch.nn.Sequential(\n torch.nn.Conv1d(INPUT_CHANNELS,\n CONV_NUM_FILTERS,\n CONV_FILTER_WIDTH, 1,\n math.floor(CONV_FILTER_WIDTH / 2)),\n torch.nn.ReLU(),\n torch.nn.AdaptiveMaxPool1d(1)\n )\n\n self.fc = torch.nn.Sequential(\n torch.nn.Linear(CONV_NUM_FILTERS, FC_NUM_UNITS),\n torch.nn.ReLU(),\n torch.nn.Linear(FC_NUM_UNITS, OUTPUT_UNITS)\n )\n\n def forward(self, x):\n batch_size = x.size(0)\n x = self.conv(x)\n x = x.view(batch_size, -1)\n x = self.fc(x)\n return x\n", "\"\"\"\nMIT - CSAIL - Gifford Lab - seqgra\n\nmetrics evaluator: evaluates model using conventional performance metrics\n\ncalculates accuracy and loss for training, validation and test set\n\n@author: Konstantin Krismer\n\"\"\"\nfrom typing import Any, List\n\nimport pandas as pd\n\nimport seqgra.constants as c\nfrom seqgra.learner import Learner\nfrom seqgra.evaluator import Evaluator\n\n\nclass MetricsEvaluator(Evaluator):\n def __init__(self, learner: Learner, output_dir: str,\n silent: bool = False) -> None:\n super().__init__(c.EvaluatorID.METRICS, \"Accuracy and loss metrics\",\n learner, output_dir, silent=silent)\n\n def _evaluate_model(self, x: List[str], y: List[str],\n annotations: List[str]) -> Any:\n return self.learner.evaluate_model(x=x, y=y)\n\n def _save_results(self, results, set_name: str = \"test\",\n suppress_plots: bool = False) -> None:\n if results is None:\n df = pd.DataFrame([], columns=[\"set\", \"metric\", \"value\"])\n else:\n df = pd.DataFrame([[set_name, \"loss\", results[\"loss\"]],\n [set_name, \"accuracy\", results[\"accuracy\"]]],\n columns=[\"set\", \"metric\", \"value\"])\n\n df.to_csv(self.output_dir + set_name + \"-metrics.txt\", sep=\"\\t\",\n index=False)\n", "import torch\n\n\nclass TorchModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n INPUT_CHANNELS: int = 4\n INPUT_WIDTH: int = 150\n FC_NUM_UNITS: int = 1000\n OUTPUT_UNITS: int = 2\n\n self.model = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(INPUT_WIDTH * INPUT_CHANNELS, FC_NUM_UNITS),\n torch.nn.ReLU(),\n torch.nn.Linear(FC_NUM_UNITS, OUTPUT_UNITS)\n )\n\n def forward(self, x):\n x = self.model(x)\n return x\n", "# adapted from https://github.com/kundajelab/ChromDragoNN/blob/master/model_zoo/stage1/resnet.pychromdragonn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass L1Block(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(64, 64, (3, 1), stride=(1, 1), padding=(1, 0))\n self.bn1 = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(64, 64, (3, 1), stride=(1, 1), padding=(1, 0))\n self.bn2 = nn.BatchNorm2d(64)\n self.layer = nn.Sequential(self.conv1, self.bn1, nn.ReLU(\n inplace=True), self.conv2, self.bn2)\n\n def forward(self, x):\n out = self.layer(x)\n out += x\n out = F.relu(out)\n return out\n\n\nclass L2Block(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(128, 128, (7, 1), stride=(1, 1), padding=(3, 0))\n self.conv2 = nn.Conv2d(128, 128, (7, 1), stride=(1, 1), padding=(3, 0))\n self.bn1 = nn.BatchNorm2d(128)\n self.bn2 = nn.BatchNorm2d(128)\n self.layer = nn.Sequential(self.conv1, self.bn1, nn.ReLU(\n inplace=True), self.conv2, self.bn2)\n\n def forward(self, x):\n out = self.layer(x)\n out += x\n out = F.relu(out)\n return 
out\n\n\nclass L3Block(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(200, 200, (7, 1), stride=(1, 1), padding=(3, 0))\n self.conv2 = nn.Conv2d(200, 200, (3, 1), stride=(1, 1), padding=(1, 0))\n self.conv3 = nn.Conv2d(200, 200, (3, 1), stride=(1, 1), padding=(1, 0))\n\n self.bn1 = nn.BatchNorm2d(200)\n self.bn2 = nn.BatchNorm2d(200)\n self.bn3 = nn.BatchNorm2d(200)\n\n self.layer = nn.Sequential(self.conv1, self.bn1, nn.ReLU(inplace=True),\n self.conv2, self.bn2, nn.ReLU(inplace=True),\n self.conv3, self.bn3)\n\n def forward(self, x):\n out = self.layer(x)\n out += x\n out = F.relu(out)\n return out\n\n\nclass L4Block(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(200, 200, (7, 1), stride=(1, 1), padding=(3, 0))\n self.bn1 = nn.BatchNorm2d(200)\n self.conv2 = nn.Conv2d(200, 200, (7, 1), stride=(1, 1), padding=(3, 0))\n self.bn2 = nn.BatchNorm2d(200)\n self.layer = nn.Sequential(self.conv1, self.bn1, nn.ReLU(inplace=True),\n self.conv2, self.bn2)\n\n def forward(self, x):\n out = self.layer(x)\n out += x\n out = F.relu(out)\n return out\n\n\nclass TorchModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.dropout = 0.3\n self.num_cell_types = 87\n self.blocks = [2, 2, 2, 2]\n\n self.conv1 = nn.Conv2d(4, 48, (3, 1), stride=(1, 1), padding=(1, 0))\n self.bn1 = nn.BatchNorm2d(48)\n self.conv2 = nn.Conv2d(48, 64, (3, 1), stride=(1, 1), padding=(1, 0))\n self.bn2 = nn.BatchNorm2d(64)\n self.prelayer = nn.Sequential(self.conv1, self.bn1, nn.ReLU(inplace=True),\n self.conv2, self.bn2, nn.ReLU(inplace=True))\n\n self.layer1 = nn.Sequential(*[L1Block()\n for x in range(self.blocks[0])])\n self.layer2 = nn.Sequential(*[L2Block()\n for x in range(self.blocks[1])])\n self.layer3 = nn.Sequential(*[L3Block()\n for x in range(self.blocks[2])])\n self.layer4 = nn.Sequential(*[L4Block()\n for x in range(self.blocks[3])])\n\n self.c1to2 = nn.Conv2d(64, 128, (3, 1), stride=(1, 1), padding=(1, 0))\n self.b1to2 = nn.BatchNorm2d(128)\n self.l1tol2 = nn.Sequential(\n self.c1to2, self.b1to2, nn.ReLU(inplace=True))\n\n self.c2to3 = nn.Conv2d(128, 200, (1, 1), padding=(3, 0))\n self.b2to3 = nn.BatchNorm2d(200)\n self.l2tol3 = nn.Sequential(\n self.c2to3, self.b2to3, nn.ReLU(inplace=True))\n\n self.maxpool1 = nn.MaxPool2d((3, 1))\n self.maxpool2 = nn.MaxPool2d((4, 1))\n self.maxpool3 = nn.MaxPool2d((4, 1))\n self.fc1 = nn.Linear(4200, 1000)\n self.bn4 = nn.BatchNorm1d(1000)\n self.fc2 = nn.Linear(1000, 1000)\n self.bn5 = nn.BatchNorm1d(1000)\n self.fc3 = nn.Linear(1000, self.num_cell_types)\n self.flayer = self.final_layer()\n\n def final_layer(self):\n self.conv3 = nn.Conv2d(200, 200, (7, 1), stride=(1, 1), padding=(4, 0))\n self.bn3 = nn.BatchNorm2d(200)\n return nn.Sequential(self.conv3, self.bn3, nn.ReLU(inplace=True))\n\n def forward(self, s):\n s = s.permute(0, 2, 1).contiguous() # batch_size x 4 x 1000\n s = s.view(-1, 4, 1000, 1) # batch_size x 4 x 1000 x 1 [4 channels]\n\n out = self.prelayer(s)\n out = self.layer1(out)\n out = self.layer2(self.l1tol2(out))\n out = self.maxpool1(out)\n out = self.layer3(self.l2tol3(out))\n out = self.maxpool2(out)\n out = self.layer4(out)\n out = self.flayer(out)\n out = self.maxpool3(out)\n out = out.view(-1, 4200)\n conv_out = out\n out = F.dropout(F.relu(self.bn4(self.fc1(out))), p=self.dropout,\n training=self.training) # batch_size x 1000\n out = F.dropout(F.relu(self.bn5(self.fc2(out))), p=self.dropout,\n training=self.training) # batch_size x 1000\n out = self.fc3(out)\n return 
out\n" ]
[ [ "torch.nn.Linear", "torch.nn.AdaptiveMaxPool1d", "torch.nn.ReLU" ], [ "pandas.DataFrame" ], [ "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.Flatten" ], [ "torch.nn.BatchNorm1d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arthur-qiu/BasicSR
[ "2e5f131edfc2adf912a1ed3b8c818a63d590a282" ]
[ "codes/data/util.py" ]
[ "import os\nimport math\nimport pickle\nimport random\nimport numpy as np\nimport lmdb\nimport torch\nimport cv2\n\nIMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']\n\n####################\n# Files & IO\n####################\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef _get_paths_from_images(path):\n assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)\n images = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n img_path = os.path.join(dirpath, fname)\n images.append(img_path)\n assert images, '{:s} has no valid image file'.format(path)\n return images\n\n\ndef _get_paths_from_lmdb(dataroot):\n env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)\n keys_cache_file = os.path.join(dataroot, '_keys_cache.p')\n if os.path.isfile(keys_cache_file):\n print('read lmdb keys from cache: {}'.format(keys_cache_file))\n keys = pickle.load(open(keys_cache_file, \"rb\"))\n else:\n with env.begin(write=False) as txn:\n print('creating lmdb keys cache: {}'.format(keys_cache_file))\n keys = [key.decode('ascii') for key, _ in txn.cursor()]\n pickle.dump(keys, open(keys_cache_file, 'wb'))\n paths = sorted([key for key in keys if not key.endswith('.meta')])\n return env, paths\n\n\ndef get_image_paths(data_type, dataroot):\n env, paths = None, None\n if dataroot is not None:\n if data_type == 'lmdb':\n env, paths = _get_paths_from_lmdb(dataroot)\n elif data_type == 'img':\n paths = sorted(_get_paths_from_images(dataroot))\n else:\n raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))\n return env, paths\n\n\ndef _read_lmdb_img(env, path):\n with env.begin(write=False) as txn:\n buf = txn.get(path.encode('ascii'))\n buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')\n img_flat = np.frombuffer(buf, dtype=np.uint8)\n H, W, C = [int(s) for s in buf_meta.split(',')]\n img = img_flat.reshape(H, W, C)\n return img\n\n\ndef read_img(env, path):\n # read image by cv2 or from lmdb\n # return: Numpy float32, HWC, BGR, [0,1]\n if env is None: # img\n img = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n else:\n img = _read_lmdb_img(env, path)\n img = img.astype(np.float32) / 255.\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n # some images have 4 channels\n if img.shape[2] > 3:\n img = img[:, :, :3]\n return img\n\n\n####################\n# image processing\n# process on numpy image\n####################\n\n\ndef augment(img_list, hflip=True, rot=True):\n # horizontal flip OR rotate\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 0.5\n\n def _augment(img):\n if hflip: img = img[:, ::-1, :]\n if vflip: img = img[::-1, :, :]\n if rot90: img = img.transpose(1, 0, 2)\n return img\n\n return [_augment(img) for img in img_list]\n\n\ndef channel_convert(in_c, tar_type, img_list):\n # conversion among BGR, gray and y\n if in_c == 3 and tar_type == 'gray': # BGR to gray\n gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]\n return [np.expand_dims(img, axis=2) for img in gray_list]\n elif in_c == 3 and tar_type == 'y': # BGR to y\n y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]\n return [np.expand_dims(img, axis=2) for img in y_list]\n elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR\n return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for 
img in img_list]\n else:\n return img_list\n\n\ndef rgb2ycbcr(img, only_y=True):\n '''same as matlab rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],\n [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef bgr2ycbcr(img, only_y=True):\n '''bgr version of rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],\n [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef ycbcr2rgb(img):\n '''same as matlab ycbcr2rgb\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],\n [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef modcrop(img_in, scale):\n # img_in: Numpy, HWC or HW\n img = np.copy(img_in)\n if img.ndim == 2:\n H, W = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r]\n elif img.ndim == 3:\n H, W, C = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r, :]\n else:\n raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))\n return img\n\n\n####################\n# Functions\n####################\n\n\n# matlab 'imresize' function, now only support 'bicubic'\ndef cubic(x):\n absx = torch.abs(x)\n absx2 = absx**2\n absx3 = absx**3\n return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \\\n (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))\n\n\ndef calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):\n if (scale < 1) and (antialiasing):\n # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width\n kernel_width = kernel_width / scale\n\n # Output-space coordinates\n x = torch.linspace(1, out_length, out_length)\n\n # Input-space coordinates. Calculate the inverse mapping such that 0.5\n # in output space maps to 0.5 in input space, and 0.5+scale in output\n # space maps to 1.5 in input space.\n u = x / scale + 0.5 * (1 - 1 / scale)\n\n # What is the left-most pixel that can be involved in the computation?\n left = torch.floor(u - kernel_width / 2)\n\n # What is the maximum number of pixels that can be involved in the\n # computation? 
Note: it's OK to use an extra pixel here; if the\n # corresponding weights are all zero, it will be eliminated at the end\n # of this function.\n P = math.ceil(kernel_width) + 2\n\n # The indices of the input pixels involved in computing the k-th output\n # pixel are in row k of the indices matrix.\n indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(\n 1, P).expand(out_length, P)\n\n # The weights used to compute the k-th output pixel are in row k of the\n # weights matrix.\n distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices\n # apply cubic kernel\n if (scale < 1) and (antialiasing):\n weights = scale * cubic(distance_to_center * scale)\n else:\n weights = cubic(distance_to_center)\n # Normalize the weights matrix so that each row sums to 1.\n weights_sum = torch.sum(weights, 1).view(out_length, 1)\n weights = weights / weights_sum.expand(out_length, P)\n\n # If a column in weights is all zero, get rid of it. only consider the first and last column.\n weights_zero_tmp = torch.sum((weights == 0), 0)\n if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):\n indices = indices.narrow(1, 1, P - 2)\n weights = weights.narrow(1, 1, P - 2)\n if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):\n indices = indices.narrow(1, 0, P - 2)\n weights = weights.narrow(1, 0, P - 2)\n weights = weights.contiguous()\n indices = indices.contiguous()\n sym_len_s = -indices.min() + 1\n sym_len_e = indices.max() - in_length\n indices = indices + sym_len_s - 1\n return weights, indices, int(sym_len_s), int(sym_len_e)\n\n\ndef imresize(img, scale, antialiasing=True):\n # Now the scale should be the same for H and W\n # input: img: CHW RGB [0,1]\n # output: CHW RGB [0,1] w/o round\n\n in_C, in_H, in_W = img.size()\n out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)\n kernel_width = 4\n kernel = 'cubic'\n\n # Return the desired dimension order for performing the resize. 
The\n # strategy is to perform the resize first along the dimension with the\n # smallest scale factor.\n # Now we do not support this.\n\n # get weights and indices\n weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(\n in_H, out_H, scale, kernel, kernel_width, antialiasing)\n weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(\n in_W, out_W, scale, kernel, kernel_width, antialiasing)\n # process H dimension\n # symmetric copying\n img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)\n img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)\n\n sym_patch = img[:, :sym_len_Hs, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)\n\n sym_patch = img[:, -sym_len_He:, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)\n\n out_1 = torch.FloatTensor(in_C, out_H, in_W)\n kernel_width = weights_H.size(1)\n for i in range(out_H):\n idx = int(indices_H[i][0])\n out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])\n out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])\n out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])\n\n # process W dimension\n # symmetric copying\n out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)\n out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)\n\n sym_patch = out_1[:, :, :sym_len_Ws]\n inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(2, inv_idx)\n out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)\n\n sym_patch = out_1[:, :, -sym_len_We:]\n inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(2, inv_idx)\n out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)\n\n out_2 = torch.FloatTensor(in_C, out_H, out_W)\n kernel_width = weights_W.size(1)\n for i in range(out_W):\n idx = int(indices_W[i][0])\n out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])\n out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])\n out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])\n\n return out_2\n\n\ndef imresize_np(img, scale, antialiasing=True):\n # Now the scale should be the same for H and W\n # input: img: Numpy, HWC BGR [0,1]\n # output: HWC BGR [0,1] w/o round\n img = torch.from_numpy(img)\n\n in_H, in_W, in_C = img.size()\n out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)\n kernel_width = 4\n kernel = 'cubic'\n\n # Return the desired dimension order for performing the resize. 
The\n # strategy is to perform the resize first along the dimension with the\n # smallest scale factor.\n # Now we do not support this.\n\n # get weights and indices\n weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(\n in_H, out_H, scale, kernel, kernel_width, antialiasing)\n weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(\n in_W, out_W, scale, kernel, kernel_width, antialiasing)\n # process H dimension\n # symmetric copying\n img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)\n img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)\n\n sym_patch = img[:sym_len_Hs, :, :]\n inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(0, inv_idx)\n img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)\n\n sym_patch = img[-sym_len_He:, :, :]\n inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(0, inv_idx)\n img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)\n\n out_1 = torch.FloatTensor(out_H, in_W, in_C)\n kernel_width = weights_H.size(1)\n for i in range(out_H):\n idx = int(indices_H[i][0])\n out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])\n out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])\n out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])\n\n # process W dimension\n # symmetric copying\n out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)\n out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)\n\n sym_patch = out_1[:, :sym_len_Ws, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)\n\n sym_patch = out_1[:, -sym_len_We:, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)\n\n out_2 = torch.FloatTensor(out_H, out_W, in_C)\n kernel_width = weights_W.size(1)\n for i in range(out_W):\n idx = int(indices_W[i][0])\n out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])\n out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])\n out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])\n\n return out_2.numpy()\n\n\nif __name__ == '__main__':\n # test imresize function\n # read images\n img = cv2.imread('test.png')\n img = img * 1.0 / 255\n img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()\n # imresize\n scale = 1 / 4\n import time\n total_time = 0\n for i in range(10):\n start_time = time.time()\n rlt = imresize(img, scale, antialiasing=True)\n use_time = time.time() - start_time\n total_time += use_time\n print('average time: {}'.format(total_time / 10))\n\n import torchvision.utils\n torchvision.utils.save_image(\n (rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)\n" ]
[ [ "torch.abs", "torch.linspace", "numpy.expand_dims", "numpy.dot", "torch.floor", "torch.sum", "torch.from_numpy", "numpy.matmul", "numpy.frombuffer", "numpy.copy", "torch.FloatTensor", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jataware/flee
[ "67c00c4572e71dd2bbfb390d7d7ede13ffb9594e" ]
[ "flee/postprocessing/analyze_graph.py" ]
[ "import matplotlib.pyplot as plt\nimport networkx as nx\n\n\ndef print_graph(vertices, edges, print_dist: bool = False) -> None:\n \"\"\"\n Summary\n\n Args:\n vertices (TYPE): Description\n edges (TYPE): Description\n print_dist (bool, optional): Description\n \"\"\"\n for v in vertices:\n print(\"Vertex name: \", v)\n if not print_dist:\n for e in edges:\n if e[0] == v:\n print(\"-> \", e[1])\n else:\n for e in edges:\n if e[0] == v:\n print(\"-> \", e[1], e[2])\n\n\ndef print_graph_nx(vertices, edges, print_dist=False):\n \"\"\"\n Summary\n\n Args:\n vertices (TYPE): Description\n edges (TYPE): Description\n print_dist (bool, optional): Description\n \"\"\"\n\n G = nx.DiGraph()\n # labels = []\n\n for v in vertices:\n G.add_node(v)\n\n for _ in vertices:\n for e in edges:\n G.add_edge(e[0], e[1], weight=int(e[2]))\n # labels += [(e[0], e[1]), e[2]]\n\n print(\"Nodes of graph: \")\n print(G.nodes())\n print(\"Edges of graph: \")\n print(G.edges())\n\n nx.draw(G, with_labels=True, node_color=\"y\")\n # nx.draw_networkx_edge_labels(G,labels)\n plt.savefig(\"simulation_graph.png\") # save as png\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RandolphVI/Text-Pairs-Relation-Classification
[ "25a746ac9e72efdc79c9d90af9769e02587cf650" ]
[ "RCNN/train_rcnn.py" ]
[ "# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport os\nimport sys\nimport time\nimport logging\n\nsys.path.append('../')\nlogging.getLogger('tensorflow').disabled = True\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorboard.plugins import projector\nfrom text_rcnn import TextRCNN\nfrom utils import checkmate as cm\nfrom utils import data_helpers as dh\nfrom utils import param_parser as parser\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\n\nargs = parser.parameter_parser()\nOPTION = dh._option(pattern=0)\nlogger = dh.logger_fn(\"tflog\", \"logs/{0}-{1}.log\".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))\n\n\ndef create_input_data(data: dict):\n return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])\n\n\ndef train_rcnn():\n \"\"\"Training RCNN model.\"\"\"\n # Print parameters used for the model\n dh.tab_printer(args, logger)\n\n # Load word2vec model\n word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)\n\n # Load sentences, labels, and training parameters\n logger.info(\"Loading data...\")\n logger.info(\"Data processing...\")\n train_data = dh.load_data_and_labels(args, args.train_file, word2idx)\n val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)\n\n # Build a graph and rcnn object\n with tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=args.allow_soft_placement,\n log_device_placement=args.log_device_placement)\n session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n rcnn = TextRCNN(\n sequence_length=args.pad_seq_len,\n vocab_size=len(word2idx),\n embedding_type=args.embedding_type,\n embedding_size=args.embedding_dim,\n lstm_hidden_size=args.lstm_dim,\n filter_sizes=args.filter_sizes,\n num_filters=args.num_filters,\n fc_hidden_size=args.fc_dim,\n num_classes=args.num_classes,\n l2_reg_lambda=args.l2_lambda,\n pretrained_embedding=embedding_matrix)\n\n # Define training procedure\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,\n global_step=rcnn.global_step,\n decay_steps=args.decay_steps,\n decay_rate=args.decay_rate,\n staircase=True)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n grads, vars = zip(*optimizer.compute_gradients(rcnn.loss))\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)\n train_op = optimizer.apply_gradients(zip(grads, vars), global_step=rcnn.global_step, name=\"train_op\")\n\n # Keep track of gradient values and sparsity (optional)\n grad_summaries = []\n for g, v in zip(grads, vars):\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{0}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{0}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n out_dir = dh.get_out_dir(OPTION, logger)\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"bestcheckpoints\"))\n\n # Summaries for loss\n loss_summary = tf.summary.scalar(\"loss\", rcnn.loss)\n\n # Train summaries\n train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])\n train_summary_dir 
= os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Validation summaries\n validation_summary_op = tf.summary.merge([loss_summary])\n validation_summary_dir = os.path.join(out_dir, \"summaries\", \"validation\")\n validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)\n\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)\n best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)\n\n if OPTION == 'R':\n # Load rcnn model\n logger.info(\"Loading model...\")\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)\n logger.info(checkpoint_file)\n\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{0}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n if OPTION == 'T':\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n # Embedding visualization config\n config = projector.ProjectorConfig()\n embedding_conf = config.embeddings.add()\n embedding_conf.tensor_name = \"embedding\"\n embedding_conf.metadata_path = args.metadata_file\n\n projector.visualize_embeddings(train_summary_writer, config)\n projector.visualize_embeddings(validation_summary_writer, config)\n\n # Save the embedding visualization\n saver.save(sess, os.path.join(out_dir, \"embedding\", \"embedding.ckpt\"))\n\n current_step = sess.run(rcnn.global_step)\n\n def train_step(batch_data):\n \"\"\"A single training step.\"\"\"\n x_f, x_b, y_onehot = zip(*batch_data)\n\n feed_dict = {\n rcnn.input_x_front: x_f,\n rcnn.input_x_behind: x_b,\n rcnn.input_y: y_onehot,\n rcnn.dropout_keep_prob: args.dropout_rate,\n rcnn.is_training: True\n }\n _, step, summaries, loss = sess.run(\n [train_op, rcnn.global_step, train_summary_op, rcnn.loss], feed_dict)\n logger.info(\"step {0}: loss {1:g}\".format(step, loss))\n train_summary_writer.add_summary(summaries, step)\n\n def validation_step(val_loader, writer=None):\n \"\"\"Evaluates model on a validation set.\"\"\"\n batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)\n\n eval_counter, eval_loss = 0, 0.0\n true_labels = []\n predicted_scores = []\n predicted_labels = []\n\n for batch_validation in batches_validation:\n x_f, x_b, y_onehot = zip(*batch_validation)\n feed_dict = {\n rcnn.input_x_front: x_f,\n rcnn.input_x_behind: x_b,\n rcnn.input_y: y_onehot,\n rcnn.dropout_keep_prob: 1.0,\n rcnn.is_training: False\n }\n step, summaries, predictions, cur_loss = sess.run(\n [rcnn.global_step, validation_summary_op, rcnn.topKPreds, rcnn.loss], feed_dict)\n\n # Prepare for calculating metrics\n for i in y_onehot:\n true_labels.append(np.argmax(i))\n for j in predictions[0]:\n predicted_scores.append(j[0])\n for k in predictions[1]:\n predicted_labels.append(k[0])\n\n eval_loss = eval_loss + cur_loss\n eval_counter = eval_counter + 1\n\n if writer:\n writer.add_summary(summaries, step)\n\n eval_loss = float(eval_loss / eval_counter)\n\n # Calculate Precision & Recall & F1\n eval_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))\n eval_pre = precision_score(y_true=np.array(true_labels),\n y_pred=np.array(predicted_labels), average='micro')\n eval_rec = recall_score(y_true=np.array(true_labels),\n y_pred=np.array(predicted_labels), average='micro')\n eval_F1 = 
f1_score(y_true=np.array(true_labels),\n y_pred=np.array(predicted_labels), average='micro')\n\n # Calculate the average AUC\n eval_auc = roc_auc_score(y_true=np.array(true_labels),\n y_score=np.array(predicted_scores), average='micro')\n\n return eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc\n\n # Generate batches\n batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)\n num_batches_per_epoch = int((len(train_data['f_pad_seqs']) - 1) / args.batch_size) + 1\n\n # Training loop. For each batch...\n for batch_train in batches_train:\n train_step(batch_train)\n current_step = tf.train.global_step(sess, rcnn.global_step)\n\n if current_step % args.evaluate_steps == 0:\n logger.info(\"\\nEvaluation:\")\n eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc = \\\n validation_step(val_data, writer=validation_summary_writer)\n logger.info(\"All Validation set: Loss {0:g} | Acc {1:g} | Precision {2:g} | \"\n \"Recall {3:g} | F1 {4:g} | AUC {5:g}\"\n .format(eval_loss, eval_acc, eval_pre, eval_rec, eval_F1, eval_auc))\n best_saver.handle(eval_acc, sess, current_step)\n if current_step % args.checkpoint_steps == 0:\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n logger.info(\"Saved model checkpoint to {0}\\n\".format(path))\n if current_step % num_batches_per_epoch == 0:\n current_epoch = current_step // num_batches_per_epoch\n logger.info(\"Epoch {0} has finished!\".format(current_epoch))\n\n logger.info(\"All Done.\")\n\n\nif __name__ == '__main__':\n train_rcnn()\n" ]
[ [ "tensorflow.train.global_step", "tensorflow.Graph", "tensorflow.summary.FileWriter", "tensorflow.train.latest_checkpoint", "tensorflow.local_variables_initializer", "tensorflow.get_collection", "tensorflow.global_variables", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.clip_by_global_norm", "tensorflow.nn.zero_fraction", "numpy.argmax", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "numpy.array", "tensorflow.summary.scalar", "tensorflow.summary.merge" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
himanshukgp/Lyssandra
[ "994da67a8bf4a63561cf4d6c58c7e58722e192f7", "994da67a8bf4a63561cf4d6c58c7e58722e192f7" ]
[ "lyssa/feature_extract/pooling.py", "lyssa/feature_extract/preproc.py" ]
[ "import numpy as np\n\n\nclass sc_max_pooling:\n \"\"\"applies max pooling on the absolute values of the sparse codes in Z\"\"\"\n def __call__(self, Z):\n return np.max(np.abs(Z), axis=1)\n\n\nclass max_pooling:\n \"\"\"applies max pooling on the the sparse codes in Z\"\"\"\n def __call__(self, Z):\n return np.max(Z, axis=1)\n\n\nclass sum_pooling:\n \"\"\"applies sum pooling on the sparse codes in Z\"\"\"\n def __call__(self, Z):\n return np.sum(Z, axis=1)\n\n\nclass average_pooling:\n \"\"\"applies average pooling on the sparse codes in Z\"\"\"\n def __call__(self, Z):\n n_descriptors = Z.shape[1]\n return np.sum(Z, axis=1) / float(n_descriptors)\n", "from __future__ import division\n\nimport numpy as np\nfrom scipy.linalg import eigh\nfrom lyssa.utils.math import normalize, norm_cols\n\n\nclass l2_normalizer():\n def __call__(self, Z):\n if Z.ndim == 1:\n return normalize(Z)\n elif Z.ndim == 2:\n # e.g a 2D patch in a feature_map\n shape = Z.shape\n return normalize(Z.flatten()).reshape(shape)\n\n\ndef zca_transform(X, bias=.1):\n # each datapoint is a row of X\n n_samples, n_features = X.shape\n # subtracts the mean for each feature vector\n mean_ = np.mean(X, axis=0)\n X -= mean_\n eigs, eigv = eigh(np.dot(X.T, X) / n_samples + \\\n bias * np.identity(n_features))\n components = np.dot(eigv * np.sqrt(1.0 / eigs), eigv.T)\n components_ = components\n # Order the explained variance from greatest to least\n X_transformed = np.dot(X, components_)\n return X_transformed\n\n\ndef local_contrast_normalization(X):\n \"\"\"apply local constrast normalization to the datapoints in X\"\"\"\n X = X.T\n X = X.reshape((X.shape[0], -1))\n X -= X.mean(axis=1)[:, None]\n X_std = X.std(axis=1)\n # This trick is credited to N. Pinto\n min_divisor = (2 * X_std.min() + X_std.mean()) / 3\n X /= np.maximum(min_divisor, X_std).reshape(\n (X.shape[0], 1))\n return X.T\n\n\nclass preproc():\n def __init__(self, name):\n self.name = name\n\n def __call__(self, X):\n\n # assumes that each datapoint is a\n # column of the 2D matrix X\n\n if self.name == 'global_centering':\n # remove the mean of each feature vector\n X = X - X.mean(axis=1)[:, np.newaxis]\n elif self.name == 'global_standarization':\n # remove the mean and\n # divide by the variance of each feature vector\n X = X - X.mean(axis=1)[:, np.newaxis]\n X = X / X.std(axis=1)[:, np.newaxis]\n elif self.name == 'local_centering':\n # remove the mean of each datapoint\n X = X - X.mean(axis=0)[np.newaxis, :]\n elif self.name == 'contrast_normalization':\n # remove the mean of each datapoint\n # and divide by its norm\n X = X - X.mean(axis=0)[np.newaxis, :]\n X = norm_cols(X)\n elif self.name == 'normalization':\n return norm_cols(X)\n elif self.name == 'scaling':\n # scale each pixel in an image\n # to lie in[0,1]\n X = X / 255.\n elif self.name == 'whitening':\n X = zca_transform(X.T).T\n\n return X\n" ]
[ [ "numpy.max", "numpy.sum", "numpy.abs" ], [ "numpy.dot", "numpy.maximum", "numpy.sqrt", "numpy.identity", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elyase/polyaxon
[ "1c19f059a010a6889e2b7ea340715b2bcfa382a0" ]
[ "polyaxon/hpsearch/search_managers/utils.py" ]
[ "import copy\nimport numpy as np\nimport uuid\n\nfrom functools import reduce\nfrom operator import mul\n\n\nclass Suggestion(object):\n \"\"\"A structure that defines an experiment hyperparam suggestion.\"\"\"\n\n def __init__(self, params):\n self.params = params\n\n def __eq__(self, other):\n if self.params.keys() != other.params.keys():\n return False\n\n for key, value in self.params.items():\n if value != other.params[key]:\n return False\n\n return True\n\n def __repr__(self):\n return ','.join([\n '{}:{}'.format(key, val)\n for (key, val) in sorted(self.params.items())])\n\n def __hash__(self):\n return hash(self.__repr__())\n\n def uuid(self):\n return uuid.uuid5(uuid.NAMESPACE_DNS, self.__repr__())\n\n\ndef get_random_generator(seed=None):\n return np.random.RandomState(seed) if seed else np.random\n\n\ndef get_random_suggestions(matrix, n_suggestions, suggestion_params=None, seed=None):\n if not n_suggestions:\n raise ValueError('This search algorithm requires `n_experiments`.')\n suggestions = []\n suggestion_params = suggestion_params or {}\n rand_generator = get_random_generator(seed=seed)\n # Validate number of suggestions and total space\n all_discrete = True\n for v in matrix.values():\n if v.is_continuous:\n all_discrete = False\n break\n if all_discrete:\n space = reduce(mul, [len(v.to_numpy()) for v in matrix.values()])\n n_suggestions = n_suggestions if n_suggestions <= space else space\n\n while n_suggestions > 0:\n params = copy.deepcopy(suggestion_params)\n params.update({k: v.sample(rand_generator=rand_generator) for k, v in matrix.items()})\n suggestion = Suggestion(params=params)\n if suggestion not in suggestions:\n suggestions.append(suggestion)\n n_suggestions -= 1\n return [suggestion.params for suggestion in suggestions]\n" ]
[ [ "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eyler94/ee674AirplaneSim
[ "3ba2c6e685c2688a7f372475a7cd1f55f583d10e", "3ba2c6e685c2688a7f372475a7cd1f55f583d10e", "3ba2c6e685c2688a7f372475a7cd1f55f583d10e" ]
[ "Submarine/chap7/mavsim_chap7.py", "Submarine/chap8/mavsim_chap8.py", "Airplane/chap10/autopilot.py" ]
[ "\"\"\"\nmavsim_python\n - Chapter 7 assignment for Beard & McLain, PUP, 2012\n - Last Update:\n 2/16/2019 - RWB\n\"\"\"\nimport sys\nsys.path.append('..')\nimport numpy as np\nimport parameters.simulation_parameters as SIM\n\nfrom chap2.mav_viewer import mav_viewer\nfrom chap3.data_viewer import data_viewer\nfrom chap4.wind_simulation import wind_simulation\nfrom chap6.autopilot import autopilot\nfrom chap7.mav_dynamics import mav_dynamics\nfrom tools.signals import signals\n\n# initialize the visualization\nmav_view = mav_viewer() # initialize the mav viewer\ndata_view = data_viewer() # initialize view of data plots\nVIDEO = False # True==write video, False==don't write video\nif VIDEO == True:\n from chap2.video_writer import video_writer\n video = video_writer(video_name=\"chap7_video.avi\",\n bounding_box=(0, 0, 1000, 1000),\n output_rate=SIM.ts_video)\n\n# initialize elements of the architecture\nwind = wind_simulation(SIM.ts_simulation)\nmav = mav_dynamics(SIM.ts_simulation)\nctrl = autopilot(SIM.ts_simulation)\n\n# autopilot commands\nfrom message_types.msg_autopilot import msg_autopilot\ncommands = msg_autopilot()\nVa_command = signals(dc_offset=25.0, amplitude=3.0, start_time=2.0, frequency = 0.01)\nh_command = signals(dc_offset=100.0, amplitude=10.0, start_time=0.0, frequency = 0.02)\nchi_command = signals(dc_offset=np.radians(180), amplitude=np.radians(45), start_time=5.0, frequency = 0.015)\n\n# initialize the simulation time\nsim_time = SIM.start_time\n\n# main simulation loop\nprint(\"Press Command-Q to exit...\")\nwhile sim_time < SIM.end_time:\n\n #-------autopilot commands-------------\n commands.airspeed_command = Va_command.square(sim_time)\n commands.course_command = chi_command.square(sim_time)\n commands.altitude_command = h_command.square(sim_time)\n\n #-------controller-------------\n estimated_state = mav.msg_true_state # uses true states in the control\n delta, commanded_state = ctrl.update(commands, estimated_state)\n\n #-------physical system-------------\n current_wind = wind.sensors() # get the new wind vector\n mav.update_state(delta, current_wind) # propagate the MAV dynamics\n mav.update_sensors() # update the sensors\n\n #-------update viewer-------------\n mav_view.update(mav.msg_true_state) # plot body of MAV\n data_view.update(mav.msg_true_state, # true states\n estimated_state, # estimated states\n commanded_state, # commanded states\n SIM.ts_simulation)\n if VIDEO == True: video.update(sim_time)\n\n #-------increment time-------------\n sim_time += SIM.ts_simulation\n\nif VIDEO == True: video.close()\n\n\n\n\n", "\"\"\"\nmavsim_python\n - Chapter 8 assignment for Beard & McLain, PUP, 2012\n - Last Update:\n 2/21/2019 - RWB\n\"\"\"\nimport sys\nsys.path.append('..')\nimport numpy as np\nimport parameters.simulation_parameters as SIM\n\nfrom chap2.mav_viewer import mav_viewer\nfrom chap3.data_viewer import data_viewer\nfrom chap4.wind_simulation import wind_simulation\nfrom chap6.autopilot import autopilot\nfrom chap7.mav_dynamics import mav_dynamics\nfrom chap8.observer import observer\nfrom tools.signals import signals\n\n# initialize the visualization\nmav_view = mav_viewer() # initialize the mav viewer\ndata_view = data_viewer() # initialize view of data plots\nVIDEO = False # True==write video, False==don't write video\nif VIDEO == True:\n from chap2.video_writer import video_writer\n video = video_writer(video_name=\"chap8_video.avi\",\n bounding_box=(0, 0, 1000, 1000),\n output_rate=SIM.ts_video)\n\n# initialize elements of the 
architecture\nwind = wind_simulation(SIM.ts_simulation)\nmav = mav_dynamics(SIM.ts_simulation)\nctrl = autopilot(SIM.ts_simulation)\nobsv = observer(SIM.ts_simulation)\n\n# autopilot commands\nfrom message_types.msg_autopilot import msg_autopilot\ncommands = msg_autopilot()\nVa_command = signals(dc_offset=25.0, amplitude=3.0, start_time=2.0, frequency = 0.01)\nh_command = signals(dc_offset=100.0, amplitude=15.0, start_time=0.0, frequency = 0.02)\nchi_command = signals(dc_offset=np.radians(180), amplitude=np.radians(45), start_time=5.0, frequency = 0.015)\n\n# initialize the simulation time\nsim_time = SIM.start_time\n\n# main simulation loop\nprint(\"Press Command-Q to exit...\")\n\nfrom message_types.msg_state import msg_state\ntemp = msg_state()\n\nwhile sim_time < SIM.end_time:\n\n #-------autopilot commands-------------\n commands.airspeed_command = Va_command.square(sim_time)\n commands.course_command = chi_command.square(sim_time)\n commands.altitude_command = h_command.square(sim_time)\n\n #-------controller-------------\n measurements = mav.sensors # get sensor measurements\n estimated_state = obsv.update(measurements) # estimate states from measurements\n\n temp = mav.msg_true_state\n temp.p = estimated_state.p\n temp.q = estimated_state.q\n temp.r = estimated_state.r\n temp.h = estimated_state.h\n temp.Va = estimated_state.Va\n temp.phi = estimated_state.phi\n temp.theta = estimated_state.theta\n temp.pn = estimated_state.pn\n temp.pe = estimated_state.pe\n temp.Vg = estimated_state.Vg\n temp.chi = estimated_state.chi\n temp.wn = estimated_state.wn\n temp.we = estimated_state.we\n temp.psi = estimated_state.psi\n\n # delta, commanded_state = ctrl.update(commands, mav.msg_true_state)\n # delta, commanded_state = ctrl.update(commands, estimated_state)\n delta, commanded_state = ctrl.update(commands, temp)\n\n #-------physical system-------------\n current_wind = wind.update() # get the new wind vector\n mav.update_state(delta, current_wind) # propagate the MAV dynamics\n mav.update_sensors()\n\n #-------update viewer-------------\n mav_view.update(mav.msg_true_state) # plot body of MAV\n data_view.update(mav.msg_true_state, # true states\n estimated_state, # estimated states\n commanded_state, # commanded states\n SIM.ts_simulation)\n if VIDEO == True: video.update(sim_time)\n\n #-------increment time-------------\n sim_time += SIM.ts_simulation\n\nif VIDEO == True: video.close()\n\n\n\n\n", "\"\"\"\nautopilot block for mavsim_python\n - Beard & McLain, PUP, 2012\n - Last Update:\n 2/6/2019 - RWB\n\"\"\"\nimport sys\nimport numpy as np\nsys.path.append('..')\nimport parameters.control_parameters as AP\nfrom chap6.pid_controlBrendon import pid_control#, pi_control, pd_control_with_rate\nfrom message_types.msg_state import msg_state\nfrom tools.tools import Euler2Quaternion, Quaternion2Euler\nfrom control import matlab\n\n\nclass autopilot:\n def __init__(self, ts_control):\n # instantiate lateral controllers\n self.roll_from_aileron = pid_control( #pd_control_with_rate(\n kp=AP.roll_kp,\n kd=AP.roll_kd,\n Ts=ts_control,\n limit=np.radians(45))\n self.course_from_roll = pid_control( #pi_control(\n kp=AP.course_kp,\n ki=AP.course_ki,\n Ts=ts_control,\n limit=np.radians(30))\n self.sideslip_from_rudder = pid_control( #pi_control(\n kp=AP.sideslip_kp,\n ki=AP.sideslip_ki,\n Ts=ts_control,\n limit=np.radians(45))\n self.yaw_damper = matlab.tf([0.5, 0.],[1.0, ],ts_control)\n #\n # num=np.array([[AP.yaw_damper_kp, 0]]),\n # den=np.array([[1, 1/AP.yaw_damper_tau_r]]),\n # Ts=ts_control)\n\n 
# instantiate lateral controllers\n self.pitch_from_elevator = pid_control( #pd_control_with_rate(\n kp=AP.pitch_kp,\n kd=AP.pitch_kd,\n limit=np.radians(45))\n self.altitude_from_pitch = pid_control( #pi_control(\n kp=AP.altitude_kp,\n ki=AP.altitude_ki,\n Ts=ts_control,\n limit=np.radians(30))\n self.airspeed_from_throttle = pid_control( #pi_control(\n kp=AP.airspeed_throttle_kp,\n ki=AP.airspeed_throttle_ki,\n Ts=ts_control,\n limit=1.5,\n throttle_flag=True)\n self.commanded_state = msg_state()\n\n def update(self, cmd, state):\n\n # lateral autopilot\n\n phi_c = cmd.phi_feedforward + self.course_from_roll.update(cmd.course_command,state.chi,reset_flag=True) #cmd.course_command\n # delta_a = -8.13462186e-09 # Trim state\n delta_a = self.roll_from_aileron.update_with_rate(phi_c, state.phi, state.p) # Controller based on chi command#\n # delta_r = -1.21428507e-08\n delta_r = self.sideslip_from_rudder.update(0,state.beta)\n\n # longitudinal autopilot\n h_c = cmd.altitude_command\n theta_c = np.pi/16\n theta_c = self.altitude_from_pitch.update(h_c, state.h)\n # delta_e = -1.24785989e-01\n delta_e = self.pitch_from_elevator.update_with_rate(theta_c, state.theta, state.q)\n # delta_t = 3.14346798e-01 # Trim state\n delta_t = self.airspeed_from_throttle.update(cmd.airspeed_command, state.Va)\n\n # construct output and commanded states\n delta = np.array([[delta_e], [delta_t], [delta_a], [delta_r]])\n self.commanded_state.h = cmd.altitude_command\n self.commanded_state.Va = cmd.airspeed_command\n self.commanded_state.phi = phi_c\n self.commanded_state.theta = theta_c\n self.commanded_state.chi = cmd.course_command\n return delta, self.commanded_state\n\n def saturate(self, input, low_limit, up_limit):\n if input <= low_limit:\n output = low_limit\n elif input >= up_limit:\n output = up_limit\n else:\n output = input\n return output\n" ]
[ [ "numpy.radians" ], [ "numpy.radians" ], [ "numpy.array", "numpy.radians" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ljhOfGithub/pytorch
[ "c568f7b16f2a98d72ff5b7c6c6161b67b2c27514", "c568f7b16f2a98d72ff5b7c6c6161b67b2c27514", "e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25" ]
[ "torch/nn/functional.py", "torch/nn/modules/container.py", "torch/autograd/forward_ad.py" ]
[ "r\"\"\"Functional interface\"\"\"\nfrom typing import Callable, List, Optional, Tuple\nimport math\nimport warnings\n\nimport torch\nfrom torch import _VF\nfrom torch._C import _infer_size, _add_docstr\nfrom torch._torch_docs import reproducibility_notes, tf32_notes\n# A workaround to support both TorchScript and MyPy:\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from torch.types import _dtype as DType\nelse:\n # The JIT doesn't understand Union, nor torch.dtype here\n DType = int\n\nfrom .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3\nfrom ..overrides import (\n has_torch_function, has_torch_function_unary, has_torch_function_variadic,\n handle_torch_function)\nfrom . import _reduction as _Reduction\nfrom . import grad # noqa: F401\nfrom .modules import utils\nfrom .modules.utils import _single, _pair, _triple, _list_with_default\n\n\nTensor = torch.Tensor\n\nconv1d = _add_docstr(\n torch.conv1d,\n r\"\"\"\nconv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\nApplies a 1D convolution over an input signal composed of several input\nplanes.\n\n{tf32_note}\n\nSee :class:`~torch.nn.Conv1d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iW)`\n weight: filters of shape :math:`(\\text{out\\_channels} , \\frac{\\text{in\\_channels}}{\\text{groups}} , kW)`\n bias: optional bias of shape :math:`(\\text{out\\_channels})`. Default: ``None``\n stride: the stride of the convolving kernel. Can be a single number or\n a one-element tuple `(sW,)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},\n single number or a one-element tuple `(padW,)`. Default: 0\n ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n the input so the output has the same shape as the input. However, this mode\n doesn't support any stride values other than 1.\n\n .. warning::\n For ``padding='same'``, if the ``weight`` is even-length and\n ``dilation`` is odd in any dimension, a full :func:`pad` operation\n may be needed internally. Lowering performance.\n dilation: the spacing between kernel elements. Can be a single number or\n a one-element tuple `(dW,)`. Default: 1\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by\n the number of groups. Default: 1\n\nExamples::\n\n >>> inputs = torch.randn(33, 16, 30)\n >>> filters = torch.randn(20, 16, 5)\n >>> F.conv1d(inputs, filters)\n\"\"\",\n)\n\nconv2d = _add_docstr(\n torch.conv2d,\n r\"\"\"\nconv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\nApplies a 2D convolution over an input image composed of several input\nplanes.\n\n{tf32_note}\n\nSee :class:`~torch.nn.Conv2d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iH , iW)`\n weight: filters of shape :math:`(\\text{out\\_channels} , \\frac{\\text{in\\_channels}}{\\text{groups}} , kH , kW)`\n bias: optional bias tensor of shape :math:`(\\text{out\\_channels})`. Default: ``None``\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sH, sW)`. 
Default: 1\n padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},\n single number or a tuple `(padH, padW)`. Default: 0\n ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n the input so the output has the same shape as the input. However, this mode\n doesn't support any stride values other than 1.\n\n .. warning::\n For ``padding='same'``, if the ``weight`` is even-length and\n ``dilation`` is odd in any dimension, a full :func:`pad` operation\n may be needed internally. Lowering performance.\n\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by the\n number of groups. Default: 1\n\nExamples::\n\n >>> # With square kernels and equal stride\n >>> filters = torch.randn(8, 4, 3, 3)\n >>> inputs = torch.randn(1, 4, 5, 5)\n >>> F.conv2d(inputs, filters, padding=1)\n\"\"\",\n) # noqa: E501\n\nconv3d = _add_docstr(\n torch.conv3d,\n r\"\"\"\nconv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\nApplies a 3D convolution over an input image composed of several input\nplanes.\n\n{tf32_note}\n\nSee :class:`~torch.nn.Conv3d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iT , iH , iW)`\n weight: filters of shape :math:`(\\text{out\\_channels} , \\frac{\\text{in\\_channels}}{\\text{groups}} , kT , kH , kW)`\n bias: optional bias tensor of shape :math:`(\\text{out\\_channels})`. Default: None\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sT, sH, sW)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},\n single number or a tuple `(padT, padH, padW)`. Default: 0\n ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n the input so the output has the same shape as the input. However, this mode\n doesn't support any stride values other than 1.\n\n .. warning::\n For ``padding='same'``, if the ``weight`` is even-length and\n ``dilation`` is odd in any dimension, a full :func:`pad` operation\n may be needed internally. Lowering performance.\n\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dT, dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by\n the number of groups. 
Default: 1\n\nExamples::\n\n >>> filters = torch.randn(33, 16, 3, 3, 3)\n >>> inputs = torch.randn(20, 16, 50, 10, 20)\n >>> F.conv3d(inputs, filters)\n\"\"\",\n) # noqa: E501\n\nconv_transpose1d = _add_docstr(\n torch.conv_transpose1d,\n r\"\"\"\nconv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\nApplies a 1D transposed convolution operator over an input signal\ncomposed of several input planes, sometimes also called \"deconvolution\".\n\n{tf32_note}\n\nSee :class:`~torch.nn.ConvTranspose1d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iW)`\n weight: filters of shape :math:`(\\text{in\\_channels} , \\frac{\\text{out\\_channels}}{\\text{groups}} , kW)`\n bias: optional bias of shape :math:`(\\text{out\\_channels})`. Default: None\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple ``(sW,)``. Default: 1\n padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both\n sides of each dimension in the input. Can be a single number or a tuple\n ``(padW,)``. Default: 0\n output_padding: additional size added to one side of each dimension in the\n output shape. Can be a single number or a tuple ``(out_padW)``. Default: 0\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple ``(dW,)``. Default: 1\n\nExamples::\n\n >>> inputs = torch.randn(20, 16, 50)\n >>> weights = torch.randn(16, 33, 5)\n >>> F.conv_transpose1d(inputs, weights)\n\"\"\",\n)\n\nconv_transpose2d = _add_docstr(\n torch.conv_transpose2d,\n r\"\"\"\nconv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\nApplies a 2D transposed convolution operator over an input image\ncomposed of several input planes, sometimes also called \"deconvolution\".\n\n{tf32_note}\n\nSee :class:`~torch.nn.ConvTranspose2d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iH , iW)`\n weight: filters of shape :math:`(\\text{in\\_channels} , \\frac{\\text{out\\_channels}}{\\text{groups}} , kH , kW)`\n bias: optional bias of shape :math:`(\\text{out\\_channels})`. Default: None\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple ``(sH, sW)``. Default: 1\n padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both\n sides of each dimension in the input. Can be a single number or a tuple\n ``(padH, padW)``. Default: 0\n output_padding: additional size added to one side of each dimension in the\n output shape. Can be a single number or a tuple ``(out_padH, out_padW)``.\n Default: 0\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple ``(dH, dW)``. 
Default: 1\n\nExamples::\n\n >>> # With square kernels and equal stride\n >>> inputs = torch.randn(1, 4, 5, 5)\n >>> weights = torch.randn(4, 8, 3, 3)\n >>> F.conv_transpose2d(inputs, weights, padding=1)\n\"\"\",\n) # noqa: E501\n\nconv_transpose3d = _add_docstr(\n torch.conv_transpose3d,\n r\"\"\"\nconv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\nApplies a 3D transposed convolution operator over an input image\ncomposed of several input planes, sometimes also called \"deconvolution\"\n\n{tf32_note}\n\nSee :class:`~torch.nn.ConvTranspose3d` for details and output shape.\n\nNote:\n {cudnn_reproducibility_note}\n\"\"\".format(\n **reproducibility_notes, **tf32_notes\n )\n + r\"\"\"\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iT , iH , iW)`\n weight: filters of shape :math:`(\\text{in\\_channels} , \\frac{\\text{out\\_channels}}{\\text{groups}} , kT , kH , kW)`\n bias: optional bias of shape :math:`(\\text{out\\_channels})`. Default: None\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple ``(sT, sH, sW)``. Default: 1\n padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both\n sides of each dimension in the input. Can be a single number or a tuple\n ``(padT, padH, padW)``. Default: 0\n output_padding: additional size added to one side of each dimension in the\n output shape. Can be a single number or a tuple\n ``(out_padT, out_padH, out_padW)``. Default: 0\n groups: split input into groups, :math:`\\text{in\\_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dT, dH, dW)`. Default: 1\n\nExamples::\n\n >>> inputs = torch.randn(20, 16, 50, 10, 20)\n >>> weights = torch.randn(16, 33, 3, 3, 3)\n >>> F.conv_transpose3d(inputs, weights)\n\"\"\",\n) # noqa: E501\n\nconv_tbc = _add_docstr(\n torch.conv_tbc,\n r\"\"\"\nApplies a 1-dimensional sequence convolution over an input sequence.\nInput and output dimensions are (Time, Batch, Channels) - hence TBC.\n\nArgs:\n input: input tensor of shape :math:`(\\text{sequence length} \\times batch \\times \\text{in\\_channels})`\n weight: filter of shape (:math:`\\text{kernel width} \\times \\text{in\\_channels} \\times \\text{out\\_channels}`)\n bias: bias of shape (:math:`\\text{out\\_channels}`)\n pad: number of timesteps to pad. Default: 0\n\"\"\",\n)\n\n\n# Pooling\navg_pool1d = _add_docstr(\n torch.avg_pool1d,\n r\"\"\"\navg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor\n\nApplies a 1D average pooling over an input signal composed of several\ninput planes.\n\nSee :class:`~torch.nn.AvgPool1d` for details and output shape.\n\nArgs:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iW)`\n kernel_size: the size of the window. Can be a single number or a\n tuple `(kW,)`\n stride: the stride of the window. Can be a single number or a tuple\n `(sW,)`. Default: :attr:`kernel_size`\n padding: implicit zero paddings on both sides of the input. Can be a\n single number or a tuple `(padW,)`. Default: 0\n ceil_mode: when True, will use `ceil` instead of `floor` to compute the\n output shape. Default: ``False``\n count_include_pad: when True, will include the zero-padding in the\n averaging calculation. 
Default: ``True``\n\nExamples::\n\n >>> # pool of square window of size=3, stride=2\n >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)\n >>> F.avg_pool1d(input, kernel_size=3, stride=2)\n tensor([[[ 2., 4., 6.]]])\n\n\"\"\",\n)\n\n\navg_pool2d = _add_docstr(\n torch._C._nn.avg_pool2d,\n r\"\"\"\navg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor\n\nApplies 2D average-pooling operation in :math:`kH \\times kW` regions by step size\n:math:`sH \\times sW` steps. The number of output features is equal to the number of\ninput planes.\n\nSee :class:`~torch.nn.AvgPool2d` for details and output shape.\n\nArgs:\n input: input tensor :math:`(\\text{minibatch} , \\text{in\\_channels} , iH , iW)`\n kernel_size: size of the pooling region. Can be a single number or a\n tuple `(kH, kW)`\n stride: stride of the pooling operation. Can be a single number or a\n tuple `(sH, sW)`. Default: :attr:`kernel_size`\n padding: implicit zero paddings on both sides of the input. Can be a\n single number or a tuple `(padH, padW)`. Default: 0\n ceil_mode: when True, will use `ceil` instead of `floor` in the formula\n to compute the output shape. Default: ``False``\n count_include_pad: when True, will include the zero-padding in the\n averaging calculation. Default: ``True``\n divisor_override: if specified, it will be used as divisor, otherwise\n size of the pooling region will be used. Default: None\n\"\"\",\n)\n\navg_pool3d = _add_docstr(\n torch._C._nn.avg_pool3d,\n r\"\"\"\navg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor\n\nApplies 3D average-pooling operation in :math:`kT \\times kH \\times kW` regions by step\nsize :math:`sT \\times sH \\times sW` steps. The number of output features is equal to\n:math:`\\lfloor\\frac{\\text{input planes}}{sT}\\rfloor`.\n\nSee :class:`~torch.nn.AvgPool3d` for details and output shape.\n\nArgs:\n input: input tensor :math:`(\\text{minibatch} , \\text{in\\_channels} , iT \\times iH , iW)`\n kernel_size: size of the pooling region. Can be a single number or a\n tuple `(kT, kH, kW)`\n stride: stride of the pooling operation. Can be a single number or a\n tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`\n padding: implicit zero paddings on both sides of the input. Can be a\n single number or a tuple `(padT, padH, padW)`, Default: 0\n ceil_mode: when True, will use `ceil` instead of `floor` in the formula\n to compute the output shape\n count_include_pad: when True, will include the zero-padding in the\n averaging calculation\n divisor_override: if specified, it will be used as divisor, otherwise\n size of the pooling region will be used. 
Default: None\n\"\"\",\n)\n\n\ndef fractional_max_pool2d_with_indices(\n input: Tensor, kernel_size: BroadcastingList2[int],\n output_size: Optional[BroadcastingList2[int]] = None,\n output_ratio: Optional[BroadcastingList2[float]] = None,\n return_indices: bool = False,\n _random_samples: Optional[Tensor] = None\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Applies 2D fractional max pooling over an input signal composed of several input planes.\n\n Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham\n\n The max-pooling operation is applied in :math:`kH \\times kW` regions by a stochastic\n step size determined by the target output size.\n The number of output features is equal to the number of input planes.\n\n Args:\n kernel_size: the size of the window to take a max over.\n Can be a single number :math:`k` (for a square kernel of :math:`k \\times k`)\n or a tuple `(kH, kW)`\n output_size: the target output size of the image of the form :math:`oH \\times oW`.\n Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \\times oH`\n output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.\n This has to be a number or tuple in the range (0, 1)\n return_indices: if ``True``, will return the indices along with the outputs.\n Useful to pass to :func:`~torch.nn.functional.max_unpool2d`.\n\n Examples::\n >>> input = torch.randn(20, 16, 50, 32)\n >>> # pool of square window of size=3, and target output size 13x12\n >>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))\n >>> # pool of square window and target output size being half of input image size\n >>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))\n\n .. _Fractional MaxPooling:\n http://arxiv.org/abs/1412.6071\n \"\"\"\n if has_torch_function_variadic(input, _random_samples):\n return handle_torch_function(\n fractional_max_pool2d_with_indices,\n (input, _random_samples),\n input,\n kernel_size,\n output_size=output_size,\n output_ratio=output_ratio,\n return_indices=return_indices,\n _random_samples=_random_samples,\n )\n if output_size is None and output_ratio is None:\n raise ValueError(\"fractional_max_pool2d requires specifying either \" \"an output_size or an output_ratio\")\n if output_size is None:\n assert output_ratio is not None\n _output_ratio = _pair(output_ratio)\n output_size = [int(input.size(-2) * _output_ratio[0]), int(input.size(-1) * _output_ratio[1])]\n\n if _random_samples is None:\n n_batch = 1 if input.dim() == 3 else input.size(0)\n _random_samples = torch.rand(n_batch, input.size(-3), 2, dtype=input.dtype, device=input.device)\n return torch._C._nn.fractional_max_pool2d(input, kernel_size, output_size, _random_samples)\n\n\ndef _fractional_max_pool2d(\n input: Tensor, kernel_size: BroadcastingList2[int],\n output_size: Optional[BroadcastingList2[int]] = None,\n output_ratio: Optional[BroadcastingList2[float]] = None,\n return_indices: bool = False,\n _random_samples: Optional[Tensor] = None\n) -> Tensor:\n if has_torch_function_variadic(input, _random_samples):\n return handle_torch_function(\n fractional_max_pool2d,\n (input, _random_samples),\n input,\n kernel_size,\n output_size=output_size,\n output_ratio=output_ratio,\n return_indices=return_indices,\n _random_samples=_random_samples,\n )\n return fractional_max_pool2d_with_indices(\n input, kernel_size, output_size, output_ratio, return_indices, _random_samples\n )[0]\n\n\nfractional_max_pool2d = boolean_dispatch(\n 
arg_name=\"return_indices\",\n arg_index=4,\n default=False,\n if_true=fractional_max_pool2d_with_indices,\n if_false=_fractional_max_pool2d,\n module_name=__name__,\n func_name=\"fractional_max_pool2d\",\n)\n\n\ndef fractional_max_pool3d_with_indices(\n input: Tensor, kernel_size: BroadcastingList3[int],\n output_size: Optional[BroadcastingList3[int]] = None,\n output_ratio: Optional[BroadcastingList3[float]] = None,\n return_indices: bool = False,\n _random_samples: Optional[Tensor] = None\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Applies 3D fractional max pooling over an input signal composed of several input planes.\n\n Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham\n\n The max-pooling operation is applied in :math:`kT \\times kH \\times kW` regions by a stochastic\n step size determined by the target output size.\n The number of output features is equal to the number of input planes.\n\n Args:\n kernel_size: the size of the window to take a max over.\n Can be a single number :math:`k` (for a square kernel of :math:`k \\times k \\times k`)\n or a tuple `(kT, kH, kW)`\n output_size: the target output size of the form :math:`oT \\times oH \\times oW`.\n Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output\n :math:`oH \\times oH \\times oH`\n output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.\n This has to be a number or tuple in the range (0, 1)\n return_indices: if ``True``, will return the indices along with the outputs.\n Useful to pass to :func:`~torch.nn.functional.max_unpool3d`.\n\n Shape:\n - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.\n - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where\n :math:`(T_{out}, H_{out}, W_{out})=\\text{output\\_size}` or\n :math:`(T_{out}, H_{out}, W_{out})=\\text{output\\_ratio} \\times (T_{in}, H_{in}, W_{in})`\n\n Examples::\n >>> input = torch.randn(20, 16, 50, 32, 16)\n >>> # pool of cubic window of size=3, and target output size 13x12x11\n >>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11))\n >>> # pool of cubic window and target output size being half of input size\n >>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5))\n\n .. 
_Fractional MaxPooling:\n http://arxiv.org/abs/1412.6071\n \"\"\"\n if has_torch_function_variadic(input, _random_samples):\n return handle_torch_function(\n fractional_max_pool3d_with_indices,\n (input, _random_samples),\n input,\n kernel_size,\n output_size=output_size,\n output_ratio=output_ratio,\n return_indices=return_indices,\n _random_samples=_random_samples,\n )\n if output_size is None and output_ratio is None:\n raise ValueError(\"fractional_max_pool3d requires specifying either \" \"an output_size or an output_ratio\")\n if output_size is None:\n assert output_ratio is not None\n _output_ratio = _triple(output_ratio)\n output_size = [\n int(input.size(-3) * _output_ratio[0]),\n int(input.size(-2) * _output_ratio[1]),\n int(input.size(-1) * _output_ratio[2]),\n ]\n\n if _random_samples is None:\n n_batch = 1 if input.dim() == 4 else input.size(0)\n _random_samples = torch.rand(n_batch, input.size(-4), 3, dtype=input.dtype, device=input.device)\n return torch._C._nn.fractional_max_pool3d(input, kernel_size, output_size, _random_samples)\n\n\ndef _fractional_max_pool3d(\n input: Tensor, kernel_size: BroadcastingList3[int],\n output_size: Optional[BroadcastingList3[int]] = None,\n output_ratio: Optional[BroadcastingList3[float]] = None,\n return_indices: bool = False,\n _random_samples: Optional[Tensor] = None\n) -> Tensor:\n if has_torch_function_variadic(input, _random_samples):\n return handle_torch_function(\n fractional_max_pool3d,\n (input, _random_samples),\n input,\n kernel_size,\n output_size=output_size,\n output_ratio=output_ratio,\n return_indices=return_indices,\n _random_samples=_random_samples,\n )\n return fractional_max_pool3d_with_indices(\n input, kernel_size, output_size, output_ratio, return_indices, _random_samples\n )[0]\n\n\nfractional_max_pool3d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=4,\n default=False,\n if_true=fractional_max_pool3d_with_indices,\n if_false=_fractional_max_pool3d,\n module_name=__name__,\n func_name=\"fractional_max_pool3d\",\n)\n\n\ndef max_pool1d_with_indices(\n input: Tensor, kernel_size: BroadcastingList1[int],\n stride: Optional[BroadcastingList1[int]] = None,\n padding: BroadcastingList1[int] = 0,\n dilation: BroadcastingList1[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)\n\n Applies a 1D max pooling over an input signal composed of several input\n planes.\n\n .. note::\n The order of :attr:`ceil_mode` and :attr:`return_indices` is different from\n what seen in :class:`~torch.nn.MaxPool1d`, and will change in a future release.\n\n See :class:`~torch.nn.MaxPool1d` for details.\n\n Args:\n input: input tensor of shape :math:`(\\text{minibatch} , \\text{in\\_channels} , iW)`, minibatch dim optional.\n kernel_size: the size of the window. Can be a single number or a\n tuple `(kW,)`\n stride: the stride of the window. Can be a single number or a tuple\n `(sW,)`. Default: :attr:`kernel_size`\n padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.\n dilation: The stride between elements within a sliding window, must be > 0.\n ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. 
This\n ensures that every element in the input tensor is covered by a sliding window.\n return_indices: If ``True``, will return the argmax along with the max values.\n Useful for :class:`torch.nn.functional.max_unpool1d` later\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool1d_with_indices,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch.max_pool1d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\ndef _max_pool1d(\n input: Tensor, kernel_size: BroadcastingList1[int],\n stride: Optional[BroadcastingList1[int]] = None,\n padding: BroadcastingList1[int] = 0,\n dilation: BroadcastingList1[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool1d,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\nmax_pool1d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=6,\n default=False,\n if_true=max_pool1d_with_indices,\n if_false=_max_pool1d,\n module_name=__name__,\n func_name=\"max_pool1d\",\n)\n\n\ndef max_pool2d_with_indices(\n input: Tensor, kernel_size: BroadcastingList2[int],\n stride: Optional[BroadcastingList2[int]] = None,\n padding: BroadcastingList2[int] = 0,\n dilation: BroadcastingList2[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)\n\n Applies a 2D max pooling over an input signal composed of several input\n planes.\n\n .. note::\n The order of :attr:`ceil_mode` and :attr:`return_indices` is different from\n what seen in :class:`~torch.nn.MaxPool2d`, and will change in a future release.\n\n See :class:`~torch.nn.MaxPool2d` for details.\n\n Args:\n input: input tensor :math:`(\\text{minibatch} , \\text{in\\_channels} , iH , iW)`, minibatch dim optional.\n kernel_size: size of the pooling region. Can be a single number or a\n tuple `(kH, kW)`\n stride: stride of the pooling operation. Can be a single number or a\n tuple `(sH, sW)`. Default: :attr:`kernel_size`\n padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.\n dilation: The stride between elements within a sliding window, must be > 0.\n ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. 
This\n ensures that every element in the input tensor is covered by a sliding window.\n return_indices: If ``True``, will return the argmax along with the max values.\n Useful for :class:`torch.nn.functional.max_unpool2d` later\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool2d_with_indices,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\ndef _max_pool2d(\n input: Tensor, kernel_size: BroadcastingList2[int],\n stride: Optional[BroadcastingList2[int]] = None,\n padding: BroadcastingList2[int] = 0,\n dilation: BroadcastingList2[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool2d,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\nmax_pool2d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=6,\n default=False,\n if_true=max_pool2d_with_indices,\n if_false=_max_pool2d,\n module_name=__name__,\n func_name=\"max_pool2d\",\n)\n\n\ndef max_pool3d_with_indices(\n input: Tensor, kernel_size: BroadcastingList3[int],\n stride: Optional[BroadcastingList3[int]] = None,\n padding: BroadcastingList3[int] = 0,\n dilation: BroadcastingList3[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)\n\n Applies a 3D max pooling over an input signal composed of several input\n planes.\n\n .. note::\n The order of :attr:`ceil_mode` and :attr:`return_indices` is different from\n what seen in :class:`~torch.nn.MaxPool3d`, and will change in a future release.\n\n See :class:`~torch.nn.MaxPool3d` for details.\n\n Args:\n input: input tensor :math:`(\\text{minibatch} , \\text{in\\_channels} , iD, iH , iW)`, minibatch dim optional.\n kernel_size: size of the pooling region. Can be a single number or a\n tuple `(kT, kH, kW)`\n stride: stride of the pooling operation. Can be a single number or a\n tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`\n padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.\n dilation: The stride between elements within a sliding window, must be > 0.\n ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. 
This\n ensures that every element in the input tensor is covered by a sliding window.\n return_indices: If ``True``, will return the argmax along with the max values.\n Useful for :class:`torch.nn.functional.max_unpool3d` later\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool3d_with_indices,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch._C._nn.max_pool3d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\ndef _max_pool3d(\n input: Tensor, kernel_size: BroadcastingList3[int],\n stride: Optional[BroadcastingList3[int]] = None,\n padding: BroadcastingList3[int] = 0,\n dilation: BroadcastingList3[int] = 1,\n ceil_mode: bool = False,\n return_indices: bool = False\n) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_pool3d,\n (input,),\n input,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n ceil_mode=ceil_mode,\n return_indices=return_indices,\n )\n if stride is None:\n stride = torch.jit.annotate(List[int], [])\n return torch.max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode)\n\n\nmax_pool3d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=6,\n default=False,\n if_true=max_pool3d_with_indices,\n if_false=_max_pool3d,\n module_name=__name__,\n func_name=\"max_pool3d\",\n)\n\n\ndef _unpool_output_size(\n input: Tensor, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]]\n) -> List[int]:\n input_size = input.size()\n default_size = torch.jit.annotate(List[int], [])\n for d in range(len(kernel_size)):\n default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d])\n if output_size is None:\n ret = default_size\n else:\n if len(output_size) == len(kernel_size) + 2:\n output_size = output_size[2:]\n if len(output_size) != len(kernel_size):\n raise ValueError(\n \"output_size should be a sequence containing \"\n \"{} or {} elements, but it has a length of '{}'\".format(\n len(kernel_size), len(kernel_size) + 2, len(output_size)\n )\n )\n for d in range(len(kernel_size)):\n min_size = default_size[d] - stride[d]\n max_size = default_size[d] + stride[d]\n if not (min_size < output_size[d] < max_size):\n raise ValueError(\n 'invalid output_size \"{}\" (dim {} must be between {} and {})'.format(\n output_size, d, min_size, max_size\n )\n )\n\n ret = output_size\n return ret\n\n\ndef max_unpool1d(\n input: Tensor, indices: Tensor,\n kernel_size: BroadcastingList1[int],\n stride: Optional[BroadcastingList1[int]] = None,\n padding: BroadcastingList1[int] = 0,\n output_size: Optional[BroadcastingList1[int]] = None\n) -> Tensor:\n r\"\"\"Computes a partial inverse of :class:`MaxPool1d`.\n\n See :class:`~torch.nn.MaxUnpool1d` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_unpool1d,\n (input,),\n input,\n indices,\n kernel_size,\n stride=stride,\n padding=padding,\n output_size=output_size,\n )\n kernel_size = _single(kernel_size)\n if stride is not None:\n _stride = _single(stride)\n else:\n _stride = kernel_size\n padding = _single(padding)\n output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n if isinstance(output_size, list):\n output_size = output_size + [1]\n else:\n output_size = output_size 
+ (1,)\n return torch._C._nn.max_unpool2d(input.unsqueeze(-1), indices.unsqueeze(-1), output_size).squeeze(-1)\n\n\ndef max_unpool2d(\n input: Tensor, indices: Tensor,\n kernel_size: BroadcastingList2[int],\n stride: Optional[BroadcastingList2[int]] = None,\n padding: BroadcastingList2[int] = 0,\n output_size: Optional[BroadcastingList2[int]] = None\n) -> Tensor:\n r\"\"\"Computes a partial inverse of :class:`MaxPool2d`.\n\n See :class:`~torch.nn.MaxUnpool2d` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_unpool2d,\n (input,),\n input,\n indices,\n kernel_size,\n stride=stride,\n padding=padding,\n output_size=output_size,\n )\n kernel_size = _pair(kernel_size)\n if stride is not None:\n _stride = _pair(stride)\n else:\n _stride = kernel_size\n padding = _pair(padding)\n output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n return torch._C._nn.max_unpool2d(input, indices, output_size)\n\n\ndef max_unpool3d(\n input: Tensor, indices: Tensor,\n kernel_size: BroadcastingList3[int],\n stride: Optional[BroadcastingList3[int]] = None,\n padding: BroadcastingList3[int] = 0,\n output_size: Optional[BroadcastingList3[int]] = None\n) -> Tensor:\n r\"\"\"Computes a partial inverse of :class:`MaxPool3d`.\n\n See :class:`~torch.nn.MaxUnpool3d` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n max_unpool3d,\n (input,),\n input,\n indices,\n kernel_size,\n stride=stride,\n padding=padding,\n output_size=output_size,\n )\n kernel_size = _triple(kernel_size)\n if stride is not None:\n _stride = _triple(stride)\n else:\n _stride = kernel_size\n padding = _triple(padding)\n output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding)\n\n\ndef lp_pool2d(\n input: Tensor, norm_type: float,\n kernel_size: int,\n stride: Optional[BroadcastingList2[int]] = None,\n ceil_mode: bool = False\n) -> Tensor:\n r\"\"\"Applies a 2D power-average pooling over an input signal composed of\n several input planes. If the sum of all inputs to the power of `p` is\n zero, the gradient is set to zero as well.\n\n See :class:`~torch.nn.LPPool2d` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n lp_pool2d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode\n )\n kw, kh = utils._pair(kernel_size)\n if stride is not None:\n out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n else:\n out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n\n return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)\n\n\ndef lp_pool1d(\n input: Tensor, norm_type: float,\n kernel_size: int,\n stride: Optional[BroadcastingList1[int]] = None,\n ceil_mode: bool = False\n) -> Tensor:\n r\"\"\"Applies a 1D power-average pooling over an input signal composed of\n several input planes. 
If the sum of all inputs to the power of `p` is\n zero, the gradient is set to zero as well.\n\n See :class:`~torch.nn.LPPool1d` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode\n )\n if stride is not None:\n out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)\n else:\n out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)\n\n return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)\n\n\ndef adaptive_max_pool1d_with_indices(\n input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Applies a 1D adaptive max pooling over an input signal composed of\n several input planes.\n\n See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.\n\n Args:\n output_size: the target output size (single integer)\n return_indices: whether to return pooling indices. Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices\n )\n return torch.adaptive_max_pool1d(input, output_size)\n\n\ndef _adaptive_max_pool1d(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool1d, (input,), input, output_size, return_indices=return_indices\n )\n return adaptive_max_pool1d_with_indices(input, output_size)[0]\n\n\nadaptive_max_pool1d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=2,\n default=False,\n if_true=adaptive_max_pool1d_with_indices,\n if_false=_adaptive_max_pool1d,\n module_name=__name__,\n func_name=\"adaptive_max_pool1d\",\n)\n\n\ndef adaptive_max_pool2d_with_indices(\n input: Tensor, output_size: BroadcastingList2[int],\n return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Applies a 2D adaptive max pooling over an input signal composed of\n several input planes.\n\n See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.\n\n Args:\n output_size: the target output size (single integer or\n double-integer tuple)\n return_indices: whether to return pooling indices. 
Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices\n )\n output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_max_pool2d(input, output_size)\n\n\ndef _adaptive_max_pool2d(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool = False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool2d, (input,), input, output_size, return_indices=return_indices\n )\n return adaptive_max_pool2d_with_indices(input, output_size)[0]\n\n\nadaptive_max_pool2d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=2,\n default=False,\n if_true=adaptive_max_pool2d_with_indices,\n if_false=_adaptive_max_pool2d,\n module_name=__name__,\n func_name=\"adaptive_max_pool2d\",\n)\n\n\ndef adaptive_max_pool3d_with_indices(\n input: Tensor, output_size: BroadcastingList3[int],\n return_indices: bool = False\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Applies a 3D adaptive max pooling over an input signal composed of\n several input planes.\n\n See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.\n\n Args:\n output_size: the target output size (single integer or\n triple-integer tuple)\n return_indices: whether to return pooling indices. Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices=return_indices\n )\n output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_max_pool3d(input, output_size)\n\n\ndef _adaptive_max_pool3d(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool = False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(\n adaptive_max_pool3d, (input,), input, output_size, return_indices=return_indices\n )\n return adaptive_max_pool3d_with_indices(input, output_size)[0]\n\n\nadaptive_max_pool3d = boolean_dispatch(\n arg_name=\"return_indices\",\n arg_index=2,\n default=False,\n if_true=adaptive_max_pool3d_with_indices,\n if_false=_adaptive_max_pool3d,\n module_name=__name__,\n func_name=\"adaptive_max_pool3d\",\n)\n\n\nadaptive_avg_pool1d = _add_docstr(\n torch.adaptive_avg_pool1d,\n r\"\"\"\nadaptive_avg_pool1d(input, output_size) -> Tensor\n\nApplies a 1D adaptive average pooling over an input signal composed of\nseveral input planes.\n\nSee :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.\n\nArgs:\n output_size: the target output size (single integer)\n\"\"\",\n)\n\n\ndef adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n r\"\"\"\n Applies a 2D adaptive average pooling over an input signal composed of\n several input planes.\n\n See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.\n\n Args:\n output_size: the target output size (single integer or\n double-integer tuple)\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size)\n _output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_avg_pool2d(input, _output_size)\n\n\ndef adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor:\n r\"\"\"\n Applies a 3D adaptive average pooling over an input signal composed of\n several input planes.\n\n See :class:`~torch.nn.AdaptiveAvgPool3d` for 
details and output shape.\n\n Args:\n output_size: the target output size (single integer or\n triple-integer tuple)\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size)\n _output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_avg_pool3d(input, _output_size)\n\n\n# Activation functions\ndef dropout(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:\n r\"\"\"\n During training, randomly zeroes some of the elements of the input\n tensor with probability :attr:`p` using samples from a Bernoulli\n distribution.\n\n See :class:`~torch.nn.Dropout` for details.\n\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n training: apply dropout if ``True``. Default: ``True``\n inplace: If set to ``True``, will do this operation in-place. Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(\"dropout probability has to be between 0 and 1, \" \"but got {}\".format(p))\n return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)\n\n\ndef alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:\n r\"\"\"Applies alpha dropout to the input.\n\n See :class:`~torch.nn.AlphaDropout` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(\"dropout probability has to be between 0 and 1, \" \"but got {}\".format(p))\n return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training)\n\n\ndef dropout2d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:\n r\"\"\"\n Randomly zero out entire channels (a channel is a 2D feature map,\n e.g., the :math:`j`-th channel of the :math:`i`-th sample in the\n batched input is a 2D tensor :math:`\text{input}[i, j]`) of the input tensor.\n Each channel will be zeroed out independently on every forward call with\n probability :attr:`p` using samples from a Bernoulli distribution.\n\n See :class:`~torch.nn.Dropout2d` for details.\n\n Args:\n p: probability of a channel to be zeroed. Default: 0.5\n training: apply dropout if ``True``. Default: ``True``\n inplace: If set to ``True``, will do this operation in-place. Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(\"dropout probability has to be between 0 and 1, \" \"but got {}\".format(p))\n inp_dim = input.dim()\n if inp_dim not in (3, 4):\n warn_msg = (f\"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated \"\n \"and will result in an error in a future release. To retain the behavior \"\n \"and silence this warning, please use dropout instead. Note that dropout2d \"\n \"exists to provide channel-wise dropout on inputs with 2 spatial dimensions, \"\n \"a channel dimension, and an optional batch dimension (i.e. 
3D or 4D inputs).\")\n warnings.warn(warn_msg)\n\n is_batched = inp_dim == 4\n if not is_batched:\n input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n\n result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n\n if not is_batched:\n result = result.squeeze_(0) if inplace else result.squeeze(0)\n\n return result\n\n\ndef dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:\n r\"\"\"\n Randomly zero out entire channels (a channel is a 3D feature map,\n e.g., the :math:`j`-th channel of the :math:`i`-th sample in the\n batched input is a 3D tensor :math:`\\text{input}[i, j]`) of the input tensor).\n Each channel will be zeroed out independently on every forward call with\n probability :attr:`p` using samples from a Bernoulli distribution.\n\n See :class:`~torch.nn.Dropout3d` for details.\n\n Args:\n p: probability of a channel to be zeroed. Default: 0.5\n training: apply dropout if is ``True``. Default: ``True``\n inplace: If set to ``True``, will do this operation in-place. Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace)\n if p < 0.0 or p > 1.0:\n raise ValueError(\"dropout probability has to be between 0 and 1, \" \"but got {}\".format(p))\n inp_dim = input.dim()\n if inp_dim not in (4, 5):\n warn_msg = (f\"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated \"\n \"and will result in an error in a future release. To retain the behavior \"\n \"and silence this warning, please use dropout instead. Note that dropout3d \"\n \"exists to provide channel-wise dropout on inputs with 3 spatial dimensions, \"\n \"a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).\")\n warnings.warn(warn_msg)\n\n is_batched = inp_dim == 5\n if not is_batched:\n input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)\n\n result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n\n if not is_batched:\n result = result.squeeze_(0) if inplace else result.squeeze(0)\n return result\n\n\ndef feature_alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:\n r\"\"\"\n Randomly masks out entire channels (a channel is a feature map,\n e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input\n is a tensor :math:`\\text{input}[i, j]`) of the input tensor). Instead of\n setting activations to zero, as in regular Dropout, the activations are set\n to the negative saturation value of the SELU activation function.\n\n Each element will be masked independently on every forward call with\n probability :attr:`p` using samples from a Bernoulli distribution.\n The elements to be masked are randomized on every forward call, and scaled\n and shifted to maintain zero mean and unit variance.\n\n See :class:`~torch.nn.FeatureAlphaDropout` for details.\n\n Args:\n p: dropout probability of a channel to be zeroed. Default: 0.5\n training: apply dropout if is ``True``. Default: ``True``\n inplace: If set to ``True``, will do this operation in-place. 
Default: ``False``\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace\n )\n if p < 0.0 or p > 1.0:\n raise ValueError(\"dropout probability has to be between 0 and 1, \" \"but got {}\".format(p))\n return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training)\n\n\ndef _threshold(input: Tensor, threshold: float, value: float, inplace: bool = False) -> Tensor:\n r\"\"\"Thresholds each element of the input Tensor.\n\n See :class:`~torch.nn.Threshold` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace)\n if inplace:\n result = _VF.threshold_(input, threshold, value)\n else:\n result = _VF.threshold(input, threshold, value)\n return result\n\n\n# We define this function as _threshold because it takes an argument\n# named threshold, which clobbers the recursive reference to the\n# function needed for __torch_function__ support\nthreshold = _threshold\n\nthreshold_ = _add_docstr(\n _VF.threshold_,\n r\"\"\"\nthreshold_(input, threshold, value) -> Tensor\n\nIn-place version of :func:`~threshold`.\n\"\"\",\n)\n\n\ndef relu(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"relu(input, inplace=False) -> Tensor\n\n Applies the rectified linear unit function element-wise. See\n :class:`~torch.nn.ReLU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(relu, (input,), input, inplace=inplace)\n if inplace:\n result = torch.relu_(input)\n else:\n result = torch.relu(input)\n return result\n\n\nrelu_ = _add_docstr(\n torch.relu_,\n r\"\"\"\nrelu_(input) -> Tensor\n\nIn-place version of :func:`~relu`.\n\"\"\",\n)\n\n\ndef glu(input: Tensor, dim: int = -1) -> Tensor:\n r\"\"\"\n glu(input, dim=-1) -> Tensor\n\n The gated linear unit. Computes:\n\n .. math ::\n \\text{GLU}(a, b) = a \\otimes \\sigma(b)\n\n where `input` is split in half along `dim` to form `a` and `b`, :math:`\\sigma`\n is the sigmoid function and :math:`\\otimes` is the element-wise product between matrices.\n\n See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.\n\n Args:\n input (Tensor): input tensor\n dim (int): dimension on which to split the input. Default: -1\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(glu, (input,), input, dim=dim)\n if input.dim() == 0:\n raise RuntimeError(\"glu does not support scalars because halving size must be even\")\n return torch._C._nn.glu(input, dim)\n\n\ndef hardtanh(input: Tensor, min_val: float = -1.0, max_val: float = 1.0, inplace: bool = False) -> Tensor:\n r\"\"\"\n hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor\n\n Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more\n details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace)\n if inplace:\n result = torch._C._nn.hardtanh_(input, min_val, max_val)\n else:\n result = torch._C._nn.hardtanh(input, min_val, max_val)\n return result\n\n\nhardtanh_ = _add_docstr(\n torch._C._nn.hardtanh_,\n r\"\"\"\nhardtanh_(input, min_val=-1., max_val=1.) 
-> Tensor\n\nIn-place version of :func:`~hardtanh`.\n\"\"\",\n)\n\n\ndef relu6(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"relu6(input, inplace=False) -> Tensor\n\n Applies the element-wise function :math:`\\text{ReLU6}(x) = \\min(\\max(0,x), 6)`.\n\n See :class:`~torch.nn.ReLU6` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(relu6, (input,), input, inplace=inplace)\n if inplace:\n result = torch._C._nn.relu6_(input)\n else:\n result = torch._C._nn.relu6(input)\n return result\n\n\ndef elu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:\n r\"\"\"Applies element-wise,\n :math:`\\text{ELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x) - 1))`.\n\n See :class:`~torch.nn.ELU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace)\n if inplace:\n result = torch._C._nn.elu_(input, alpha)\n else:\n result = torch._C._nn.elu(input, alpha)\n return result\n\n\nelu_ = _add_docstr(\n torch._C._nn.elu_,\n r\"\"\"\nelu_(input, alpha=1.) -> Tensor\n\nIn-place version of :func:`~elu`.\n\"\"\",\n)\n\n\ndef selu(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"selu(input, inplace=False) -> Tensor\n\n Applies element-wise,\n :math:`\\text{SELU}(x) = scale * (\\max(0,x) + \\min(0, \\alpha * (\\exp(x) - 1)))`,\n with :math:`\\alpha=1.6732632423543772848170429916717` and\n :math:`scale=1.0507009873554804934193349852946`.\n\n See :class:`~torch.nn.SELU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(selu, (input,), input, inplace=inplace)\n if inplace:\n result = torch.selu_(input)\n else:\n result = torch.selu(input)\n return result\n\n\nselu_ = _add_docstr(\n torch.selu_,\n r\"\"\"\nselu_(input) -> Tensor\n\nIn-place version of :func:`~selu`.\n\"\"\",\n)\n\n\ndef celu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:\n r\"\"\"celu(input, alpha=1., inplace=False) -> Tensor\n\n Applies element-wise,\n :math:`\\text{CELU}(x) = \\max(0,x) + \\min(0, \\alpha * (\\exp(x/\\alpha) - 1))`.\n\n See :class:`~torch.nn.CELU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace)\n if inplace:\n result = torch.celu_(input, alpha)\n else:\n result = torch.celu(input, alpha)\n return result\n\n\ncelu_ = _add_docstr(\n torch.celu_,\n r\"\"\"\ncelu_(input, alpha=1.) 
-> Tensor\n\nIn-place version of :func:`~celu`.\n\"\"\",\n)\n\n\ndef leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False) -> Tensor:\n r\"\"\"\n leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor\n\n Applies element-wise,\n :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`\n\n See :class:`~torch.nn.LeakyReLU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace)\n if inplace:\n result = torch._C._nn.leaky_relu_(input, negative_slope)\n else:\n result = torch._C._nn.leaky_relu(input, negative_slope)\n return result\n\n\nleaky_relu_ = _add_docstr(\n torch._C._nn.leaky_relu_,\n r\"\"\"\nleaky_relu_(input, negative_slope=0.01) -> Tensor\n\nIn-place version of :func:`~leaky_relu`.\n\"\"\",\n)\n\n\nprelu = _add_docstr(\n torch.prelu,\n r\"\"\"prelu(input, weight) -> Tensor\n\nApplies element-wise the function\n:math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a\nlearnable parameter.\n\nSee :class:`~torch.nn.PReLU` for more details.\n\"\"\")\n\n\ndef rrelu(\n input: Tensor, lower: float = 1.0 / 8, upper: float = 1.0 / 3, training: bool = False, inplace: bool = False\n) -> Tensor:\n r\"\"\"rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor\n\n Randomized leaky ReLU.\n\n See :class:`~torch.nn.RReLU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n rrelu, (input,), input, lower=lower, upper=upper, training=training, inplace=inplace\n )\n if inplace:\n result = torch.rrelu_(input, lower, upper, training)\n else:\n result = torch.rrelu(input, lower, upper, training)\n return result\n\n\nrrelu_ = _add_docstr(\n torch.rrelu_,\n r\"\"\"\nrrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor\n\nIn-place version of :func:`~rrelu`.\n\"\"\",\n)\n\nlogsigmoid = _add_docstr(\n torch._C._nn.log_sigmoid,\n r\"\"\"\nlogsigmoid(input) -> Tensor\n\nApplies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`\n\nSee :class:`~torch.nn.LogSigmoid` for more details.\n\"\"\",\n)\n\ngelu = _add_docstr(\n torch._C._nn.gelu,\n r\"\"\"\ngelu(input, approximate = 'none') -> Tensor\n\nWhen the approximate argument is 'none', it applies element-wise the function\n:math:`\text{GELU}(x) = x * \Phi(x)`\n\nwhere :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.\n\nWhen the approximate argument is 'tanh', Gelu is estimated with:\n\n.. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))\n\nSee `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.\n\"\"\")\n\nhardshrink = _add_docstr(\n torch.hardshrink,\n r\"\"\"\nhardshrink(input, lambd=0.5) -> Tensor\n\nApplies the hard shrinkage function element-wise\n\nSee :class:`~torch.nn.Hardshrink` for more details.\n\"\"\")\n\n\ndef tanhshrink(input):\n r\"\"\"tanhshrink(input) -> Tensor\n\n Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`\n\n See :class:`~torch.nn.Tanhshrink` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(tanhshrink, (input,), input)\n return input - input.tanh()\n\n\ndef softsign(input):\n r\"\"\"softsign(input) -> Tensor\n\n Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`\n\n See :class:`~torch.nn.Softsign` for more 
details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(softsign, (input,), input)\n return input / (input.abs() + 1)\n\n\nsoftplus = _add_docstr(\n torch._C._nn.softplus,\n r\"\"\"\nsoftplus(input, beta=1, threshold=20) -> Tensor\n\nApplies element-wise, the function :math:`\\text{Softplus}(x) = \\frac{1}{\\beta} * \\log(1 + \\exp(\\beta * x))`.\n\nFor numerical stability the implementation reverts to the linear function\nwhen :math:`input \\times \\beta > threshold`.\n\nSee :class:`~torch.nn.Softplus` for more details.\n\"\"\",\n)\n\n\ndef _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:\n warnings.warn(\n \"Implicit dimension choice for {} has been deprecated. \"\n \"Change the call to include dim=X as an argument.\".format(name),\n stacklevel=stacklevel,\n )\n if ndim == 0 or ndim == 1 or ndim == 3:\n ret = 0\n else:\n ret = 1\n return ret\n\n\ndef softmin(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:\n r\"\"\"Applies a softmin function.\n\n Note that :math:`\\text{Softmin}(x) = \\text{Softmax}(-x)`. See softmax definition for mathematical formula.\n\n See :class:`~torch.nn.Softmin` for more details.\n\n Args:\n input (Tensor): input\n dim (int): A dimension along which softmin will be computed (so every slice\n along dim will sum to 1).\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n If specified, the input tensor is casted to :attr:`dtype` before the operation\n is performed. This is useful for preventing data type overflows. Default: None.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n if dim is None:\n dim = _get_softmax_dim(\"softmin\", input.dim(), _stacklevel)\n if dtype is None:\n ret = (-input).softmax(dim)\n else:\n ret = (-input).softmax(dim, dtype=dtype)\n return ret\n\n\ndef softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:\n r\"\"\"Applies a softmax function.\n\n Softmax is defined as:\n\n :math:`\\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}`\n\n It is applied to all slices along dim, and will re-scale them so that the elements\n lie in the range `[0, 1]` and sum to 1.\n\n See :class:`~torch.nn.Softmax` for more details.\n\n Args:\n input (Tensor): input\n dim (int): A dimension along which softmax will be computed.\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n If specified, the input tensor is casted to :attr:`dtype` before the operation\n is performed. This is useful for preventing data type overflows. Default: None.\n\n .. 
note::\n This function doesn't work directly with NLLLoss,\n which expects the Log to be computed between the Softmax and itself.\n Use log_softmax instead (it's faster and has better numerical properties).\n\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n if dim is None:\n dim = _get_softmax_dim(\"softmax\", input.dim(), _stacklevel)\n if dtype is None:\n ret = input.softmax(dim)\n else:\n ret = input.softmax(dim, dtype=dtype)\n return ret\n\n\ndef gumbel_softmax(logits: Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10, dim: int = -1) -> Tensor:\n r\"\"\"\n Samples from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretizes.\n\n Args:\n logits: `[..., num_features]` unnormalized log probabilities\n tau: non-negative scalar temperature\n hard: if ``True``, the returned samples will be discretized as one-hot vectors,\n but will be differentiated as if it is the soft sample in autograd\n dim (int): A dimension along which softmax will be computed. Default: -1.\n\n Returns:\n Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.\n If ``hard=True``, the returned samples will be one-hot, otherwise they will\n be probability distributions that sum to 1 across `dim`.\n\n .. note::\n This function is here for legacy reasons, may be removed from nn.Functional in the future.\n\n .. note::\n The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`\n\n It achieves two things:\n - makes the output value exactly one-hot\n (since we add then subtract y_soft value)\n - makes the gradient equal to y_soft gradient\n (since we strip all other gradients)\n\n Examples::\n >>> logits = torch.randn(20, 32)\n >>> # Sample soft categorical using reparametrization trick:\n >>> F.gumbel_softmax(logits, tau=1, hard=False)\n >>> # Sample hard categorical using \"Straight-through\" trick:\n >>> F.gumbel_softmax(logits, tau=1, hard=True)\n\n .. _Link 1:\n https://arxiv.org/abs/1611.00712\n .. _Link 2:\n https://arxiv.org/abs/1611.01144\n \"\"\"\n if has_torch_function_unary(logits):\n return handle_torch_function(gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim)\n if eps != 1e-10:\n warnings.warn(\"`eps` parameter is deprecated and has no effect.\")\n\n gumbels = (\n -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()\n ) # ~Gumbel(0,1)\n gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)\n y_soft = gumbels.softmax(dim)\n\n if hard:\n # Straight through.\n index = y_soft.max(dim, keepdim=True)[1]\n y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)\n ret = y_hard - y_soft.detach() + y_soft\n else:\n # Reparametrization trick.\n ret = y_soft\n return ret\n\n\ndef log_softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:\n r\"\"\"Applies a softmax followed by a logarithm.\n\n While mathematically equivalent to log(softmax(x)), doing these two\n operations separately is slower and numerically unstable. 
This function\n uses an alternative formulation to compute the output and gradient correctly.\n\n See :class:`~torch.nn.LogSoftmax` for more details.\n\n Args:\n input (Tensor): input\n dim (int): A dimension along which log_softmax will be computed.\n dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.\n If specified, the input tensor is cast to :attr:`dtype` before the operation\n is performed. This is useful for preventing data type overflows. Default: None.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n if dim is None:\n dim = _get_softmax_dim(\"log_softmax\", input.dim(), _stacklevel)\n if dtype is None:\n ret = input.log_softmax(dim)\n else:\n ret = input.log_softmax(dim, dtype=dtype)\n return ret\n\n\nsoftshrink = _add_docstr(\n torch._C._nn.softshrink,\n r\"\"\"\nsoftshrink(input, lambd=0.5) -> Tensor\n\nApplies the soft shrinkage function elementwise\n\nSee :class:`~torch.nn.Softshrink` for more details.\n\"\"\",\n)\n\n\ndef tanh(input):\n r\"\"\"tanh(input) -> Tensor\n\n Applies element-wise,\n :math:`\\text{Tanh}(x) = \\tanh(x) = \\frac{\\exp(x) - \\exp(-x)}{\\exp(x) + \\exp(-x)}`\n\n See :class:`~torch.nn.Tanh` for more details.\n \"\"\"\n warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n return input.tanh()\n\n\ndef sigmoid(input):\n r\"\"\"sigmoid(input) -> Tensor\n\n Applies the element-wise function :math:`\\text{Sigmoid}(x) = \\frac{1}{1 + \\exp(-x)}`\n\n See :class:`~torch.nn.Sigmoid` for more details.\n \"\"\"\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n return input.sigmoid()\n\n\ndef hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"Applies the element-wise function\n\n .. math::\n \\text{Hardsigmoid}(x) = \\begin{cases}\n 0 & \\text{if~} x \\le -3, \\\\\n 1 & \\text{if~} x \\ge +3, \\\\\n x / 6 + 1 / 2 & \\text{otherwise}\n \\end{cases}\n\n Args:\n inplace: If set to ``True``, will do this operation in-place. 
Default: ``False``\n\n See :class:`~torch.nn.Hardsigmoid` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace)\n if inplace:\n return torch._C._nn.hardsigmoid_(input)\n return torch._C._nn.hardsigmoid(input)\n\n\nlinear = _add_docstr(\n torch._C._nn.linear,\n r\"\"\"\nlinear(input, weight, bias=None) -> Tensor\n\nApplies a linear transformation to the incoming data: :math:`y = xA^T + b`.\n\nThis operator supports :ref:`TensorFloat32<tf32_on_ampere>`.\n\nShape:\n\n - Input: :math:`(*, in\\_features)` where `*` means any number of\n additional dimensions, including none\n - Weight: :math:`(out\\_features, in\\_features)` or :math:`(in\\_features)`\n - Bias: :math:`(out\\_features)` or :math:`()`\n - Output: :math:`(*, out\\_features)` or :math:`(*)`, based on the shape of the weight\n\"\"\")\n\n\nbilinear = _add_docstr(\n torch.bilinear,\n r\"\"\"\nbilinear(input1, input2, weight, bias=None) -> Tensor\n\nApplies a bilinear transformation to the incoming data:\n:math:`y = x_1^T A x_2 + b`\n\nShape:\n\n - input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\\text{in1\\_features}`\n and :math:`*` means any number of additional dimensions.\n All but the last dimension of the inputs should be the same.\n - input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\\text{in2\\_features}`\n - weight: :math:`(\\text{out\\_features}, \\text{in1\\_features},\n \\text{in2\\_features})`\n - bias: :math:`(\\text{out\\_features})`\n - output: :math:`(N, *, H_{out})` where :math:`H_{out}=\\text{out\\_features}`\n and all but the last dimension are the same shape as the input.\n\"\"\")\n\n\ndef silu(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"Applies the Sigmoid Linear Unit (SiLU) function, element-wise.\n The SiLU function is also known as the swish function.\n\n .. math::\n \\text{silu}(x) = x * \\sigma(x), \\text{where } \\sigma(x) \\text{ is the logistic sigmoid.}\n\n .. note::\n See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_\n where the SiLU (Sigmoid Linear Unit) was originally coined, and see\n `Sigmoid-Weighted Linear Units for Neural Network Function Approximation\n in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:\n a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_\n where the SiLU was experimented with later.\n\n See :class:`~torch.nn.SiLU` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(silu, (input,), input, inplace=inplace)\n if inplace:\n return torch._C._nn.silu_(input)\n return torch._C._nn.silu(input)\n\n\ndef mish(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"Applies the Mish function, element-wise.\n Mish: A Self Regularized Non-Monotonic Neural Activation Function.\n\n .. math::\n \\text{Mish}(x) = x * \\text{Tanh}(\\text{Softplus}(x))\n\n .. note::\n See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_\n\n See :class:`~torch.nn.Mish` for more details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(mish, (input,), input, inplace=inplace)\n if inplace:\n return torch._C._nn.mish_(input)\n return torch._C._nn.mish(input)\n\n\ndef hardswish(input: Tensor, inplace: bool = False) -> Tensor:\n r\"\"\"Applies the hardswish function, element-wise, as described in the paper:\n\n `Searching for MobileNetV3`_.\n\n .. 
math::\n \\text{Hardswish}(x) = \\begin{cases}\n 0 & \\text{if~} x \\le -3, \\\\\n x & \\text{if~} x \\ge +3, \\\\\n x \\cdot (x + 3) /6 & \\text{otherwise}\n \\end{cases}\n\n See :class:`~torch.nn.Hardswish` for more details.\n\n .. _`Searching for MobileNetV3`:\n https://arxiv.org/abs/1905.02244\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(hardswish, (input,), input, inplace=inplace)\n if inplace:\n return torch._C._nn.hardswish_(input)\n return torch._C._nn.hardswish(input)\n\n\ndef _no_grad_embedding_renorm_(weight: Tensor, input: Tensor, max_norm: float, norm_type: float) -> Tensor:\n with torch.no_grad():\n torch.embedding_renorm_(weight, input, max_norm, norm_type)\n\n\ndef embedding(\n input: Tensor,\n weight: Tensor,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n) -> Tensor:\n r\"\"\"A simple lookup table that looks up embeddings in a fixed dictionary and size.\n\n This module is often used to retrieve word embeddings using indices.\n The input to the module is a list of indices, and the embedding matrix,\n and the output is the corresponding word embeddings.\n\n See :class:`torch.nn.Embedding` for more details.\n\n Args:\n input (LongTensor): Tensor containing indices into the embedding matrix\n weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,\n and number of columns equal to the embedding size\n padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;\n therefore, the embedding vector at :attr:`padding_idx` is not updated during training,\n i.e. it remains as a fixed \"pad\".\n max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`\n is renormalized to have norm :attr:`max_norm`.\n Note: this will modify :attr:`weight` in-place.\n norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.\n scale_grad_by_freq (boolean, optional): If given, this will scale gradients by the inverse of frequency of\n the words in the mini-batch. Default ``False``.\n sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. 
See Notes under\n :class:`torch.nn.Embedding` for more details regarding sparse gradients.\n\n Shape:\n - Input: LongTensor of arbitrary shape containing the indices to extract\n - Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`,\n where V = maximum index + 1 and embedding_dim = the embedding size\n - Output: `(*, embedding_dim)`, where `*` is the input shape\n\n Examples::\n\n >>> # a batch of 2 samples of 4 indices each\n >>> input = torch.tensor([[1,2,4,5],[4,3,2,9]])\n >>> # an embedding matrix containing 10 tensors of size 3\n >>> embedding_matrix = torch.rand(10, 3)\n >>> F.embedding(input, embedding_matrix)\n tensor([[[ 0.8490, 0.9625, 0.6753],\n [ 0.9666, 0.7761, 0.6108],\n [ 0.6246, 0.9751, 0.3618],\n [ 0.4161, 0.2419, 0.7383]],\n\n [[ 0.6246, 0.9751, 0.3618],\n [ 0.0237, 0.7794, 0.0528],\n [ 0.9666, 0.7761, 0.6108],\n [ 0.3385, 0.8612, 0.1867]]])\n\n >>> # example with padding_idx\n >>> weights = torch.rand(10, 3)\n >>> weights[0, :].zero_()\n >>> embedding_matrix = weights\n >>> input = torch.tensor([[0,2,0,5]])\n >>> F.embedding(input, embedding_matrix, padding_idx=0)\n tensor([[[ 0.0000, 0.0000, 0.0000],\n [ 0.5609, 0.5384, 0.8720],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.6262, 0.2438, 0.7471]]])\n \"\"\"\n\n if has_torch_function_variadic(input, weight):\n return handle_torch_function(\n embedding,\n (input, weight),\n input,\n weight,\n padding_idx=padding_idx,\n max_norm=max_norm,\n norm_type=norm_type,\n scale_grad_by_freq=scale_grad_by_freq,\n sparse=sparse,\n )\n if padding_idx is not None:\n if padding_idx > 0:\n assert padding_idx < weight.size(0), \"Padding_idx must be within num_embeddings\"\n elif padding_idx < 0:\n assert padding_idx >= -weight.size(0), \"Padding_idx must be within num_embeddings\"\n padding_idx = weight.size(0) + padding_idx\n else:\n padding_idx = -1\n if max_norm is not None:\n # Note [embedding_renorm contiguous]\n # `embedding_renorm_` will call .contiguous() on input anyways, so we\n # call it here and take advantage of the improved locality in the\n # `embedding` call below too.\n input = input.contiguous()\n # Note [embedding_renorm set_grad_enabled]\n # XXX: equivalent to\n # with torch.no_grad():\n # torch.embedding_renorm_\n # remove once script supports set_grad_enabled\n _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)\n return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)\n\n\ndef embedding_bag(\n input: Tensor,\n weight: Tensor,\n offsets: Optional[Tensor] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2,\n scale_grad_by_freq: bool = False,\n mode: str = \"mean\",\n sparse: bool = False,\n per_sample_weights: Optional[Tensor] = None,\n include_last_offset: bool = False,\n padding_idx: Optional[int] = None,\n) -> Tensor:\n r\"\"\"Computes sums, means or maxes of `bags` of embeddings, without instantiating the\n intermediate embeddings.\n\n See :class:`torch.nn.EmbeddingBag` for more details.\n\n Note:\n {backward_reproducibility_note}\n\n Args:\n input (LongTensor): Tensor containing bags of indices into the embedding matrix\n weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,\n and number of columns equal to the embedding size\n offsets (LongTensor, optional): Only used when :attr:`input` is 1D. 
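# Illustrative user-code sketch of the in-place note above (assumes a standard
# torch install): when max_norm is given, the rows of `weight` selected by the
# lookup are renormalized in place so their norm does not exceed max_norm.
import torch
import torch.nn.functional as F

weight = 10 * torch.randn(10, 3)            # deliberately large row norms
idx = torch.tensor([1, 4])
_ = F.embedding(idx, weight, max_norm=1.0)
assert weight[1].norm() <= 1.0 + 1e-4
assert weight[4].norm() <= 1.0 + 1e-4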
:attr:`offsets` determines\n the starting index position of each bag (sequence) in :attr:`input`.\n max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`\n is renormalized to have norm :attr:`max_norm`.\n Note: this will modify :attr:`weight` in-place.\n norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option.\n Default ``2``.\n scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of\n the words in the mini-batch. Default ``False``.\n Note: this option is not supported when ``mode=\"max\"``.\n mode (string, optional): ``\"sum\"``, ``\"mean\"`` or ``\"max\"``. Specifies the way to reduce the bag.\n Default: ``\"mean\"``\n sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under\n :class:`torch.nn.Embedding` for more details regarding sparse gradients.\n Note: this option is not supported when ``mode=\"max\"``.\n per_sample_weights (Tensor, optional): a tensor of float / double weights, or None\n to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights`\n must have exactly the same shape as input and is treated as having the same\n :attr:`offsets`, if those are not None.\n\n include_last_offset (bool, optional): if ``True``, the size of offsets is equal to the number of bags + 1.\n The last element is the size of the input, or the ending index position of the last bag (sequence).\n\n padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the\n gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated\n during training, i.e. it remains as a fixed \"pad\". Note that the embedding\n vector at :attr:`padding_idx` is excluded from the reduction.\n\n Shape:\n - :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional)\n\n - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)\n each of fixed length ``N``, and this will return ``B`` values aggregated in a way\n depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.\n\n - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of\n multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing\n the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets`\n of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags.\n Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.\n\n - :attr:`weight` (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`\n\n - :attr:`per_sample_weights` (Tensor, optional). 
Has the same shape as :attr:`input`.\n\n - :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)`\n\n Examples::\n\n >>> # an Embedding module containing 10 tensors of size 3\n >>> embedding_matrix = torch.rand(10, 3)\n >>> # a batch of 2 samples of 4 indices each\n >>> input = torch.tensor([1,2,4,5,4,3,2,9])\n >>> offsets = torch.tensor([0,4])\n >>> F.embedding_bag(input, embedding_matrix, offsets)\n tensor([[ 0.3397, 0.3552, 0.5545],\n [ 0.5893, 0.4386, 0.5882]])\n\n >>> # example with padding_idx\n >>> embedding_matrix = torch.rand(10, 3)\n >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9])\n >>> offsets = torch.tensor([0,4])\n >>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum')\n tensor([[ 0.0000, 0.0000, 0.0000],\n [-0.7082, 3.2145, -2.6251]])\n \"\"\"\n if has_torch_function_variadic(input, weight, offsets, per_sample_weights):\n return handle_torch_function(\n embedding_bag,\n (input, weight, offsets, per_sample_weights),\n input,\n weight,\n offsets=offsets,\n max_norm=max_norm,\n norm_type=norm_type,\n scale_grad_by_freq=scale_grad_by_freq,\n mode=mode,\n sparse=sparse,\n per_sample_weights=per_sample_weights,\n include_last_offset=include_last_offset,\n padding_idx=padding_idx,\n )\n # Check for backward compatibility.\n # Used to be embedding_bag(weight, input, ...)\n # Now is embedding_bag(input, weight, ...)\n if weight.dtype == torch.long and input.is_floating_point():\n warnings.warn(\n \"Argument order of nn.functional.embedding_bag was changed. \"\n \"Usage `embedding_bag(weight, input, ...)` is deprecated, \"\n \"and should now be `embedding_bag(input, weight, ...)`.\"\n )\n weight, input = input, weight\n\n if per_sample_weights is not None and input.size() != per_sample_weights.size():\n raise ValueError(\n \"embedding_bag: If per_sample_weights ({}) is not None, \"\n \"then it must have the same shape as the input ({})\".format(per_sample_weights.shape, input.shape)\n )\n\n if input.dim() == 2:\n if offsets is not None:\n type_str = \"<unknown>\"\n # TODO: Remove this once script supports type() calls\n if not torch.jit.is_scripting():\n type_str = str(type(offsets))\n raise ValueError(\n \"if input is 2D, then offsets has to be None\"\n \", as input is treated is a mini-batch of\"\n \" fixed length sequences. 
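# Illustrative user-code sketch (assumes a standard torch install): a 2D
# `input` is treated as B fixed-length bags, which matches flattening it to 1D
# and passing explicit `offsets`, as the shape notes above describe.
import torch
import torch.nn.functional as F

emb = torch.rand(10, 3)
bags = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])        # (B=2, N=4)
out_2d = F.embedding_bag(bags, emb, mode="mean")
out_1d = F.embedding_bag(bags.reshape(-1), emb, torch.tensor([0, 4]), mode="mean")
assert torch.allclose(out_2d, out_1d, atol=1e-6)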
However, found \"\n \"offsets of type {}\".format(type_str)\n )\n offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device)\n\n input = input.reshape(-1)\n if per_sample_weights is not None:\n per_sample_weights = per_sample_weights.reshape(-1)\n elif input.dim() == 1:\n if offsets is None:\n raise ValueError(\"offsets has to be a 1D Tensor but got None\")\n if offsets.dim() != 1:\n raise ValueError(\"offsets has to be a 1D Tensor\")\n else:\n raise ValueError(\"input has to be 1D or 2D Tensor,\" \" but got Tensor of dimension {}\".format(input.dim()))\n if mode == \"sum\":\n mode_enum = 0\n elif mode == \"mean\":\n mode_enum = 1\n elif mode == \"max\":\n mode_enum = 2\n\n if scale_grad_by_freq:\n raise ValueError(\"max mode does not support scaling the gradient by the frequency\")\n\n if sparse:\n raise ValueError(\"max mode does not support sparse weights\")\n\n else:\n raise ValueError(\"mode has to be one of sum, mean or max\")\n\n if max_norm is not None:\n # XXX: equivalent to\n # with torch.no_grad():\n # torch.nembedding_renorm_\n # remove once script supports set_grad_enabled\n _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)\n\n if per_sample_weights is not None and mode != \"sum\":\n raise NotImplementedError(\n \"embedding_bag: per_sample_weights was not None. \"\n \"per_sample_weights is only supported for mode='sum' \"\n \"(got mode='{}'). Please open a feature request on GitHub.\".format(mode)\n )\n\n ret, _, _, _ = torch.embedding_bag(\n weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights, include_last_offset, padding_idx\n )\n return ret\n\n\nembedding_bag.__doc__ = embedding_bag.__doc__.format(**reproducibility_notes)\n\n\ndef _verify_batch_size(size: List[int]) -> None:\n # XXX: JIT script does not support the reduce from functools, and mul op is a\n # builtin, which cannot be used as a value to a func yet, so rewrite this size\n # check to a simple equivalent for loop\n #\n # TODO: make use of reduce like below when JIT is ready with the missing features:\n # from operator import mul\n # from functools import reduce\n #\n # if reduce(mul, size[2:], size[0]) == 1\n size_prods = size[0]\n for i in range(len(size) - 2):\n size_prods *= size[i + 2]\n if size_prods == 1:\n raise ValueError(\"Expected more than 1 value per channel when training, got input size {}\".format(size))\n\n\ndef batch_norm(\n input: Tensor,\n running_mean: Optional[Tensor],\n running_var: Optional[Tensor],\n weight: Optional[Tensor] = None,\n bias: Optional[Tensor] = None,\n training: bool = False,\n momentum: float = 0.1,\n eps: float = 1e-5,\n) -> Tensor:\n r\"\"\"Applies Batch Normalization for each channel across a batch of data.\n\n See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,\n :class:`~torch.nn.BatchNorm3d` for details.\n \"\"\"\n if has_torch_function_variadic(input, running_mean, running_var, weight, bias):\n return handle_torch_function(\n batch_norm,\n (input, running_mean, running_var, weight, bias),\n input,\n running_mean,\n running_var,\n weight=weight,\n bias=bias,\n training=training,\n momentum=momentum,\n eps=eps,\n )\n if training:\n _verify_batch_size(input.size())\n\n return torch.batch_norm(\n input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled\n )\n\n\ndef _verify_spatial_size(size: List[int]) -> None:\n # Verify that there is > 1 spatial element for instance norm calculation.\n size_prods = 1\n for i in range(2, 
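# Minimal functional batch_norm sketch from user code (illustrative; assumes a
# standard torch install): with training=False the running statistics are
# applied directly, y = (x - running_mean) / sqrt(running_var + eps), and no
# affine transform is applied when weight/bias are omitted.
import torch
import torch.nn.functional as F

x = torch.randn(8, 4)                       # (N, C)
running_mean = torch.zeros(4)
running_var = torch.ones(4)
y = F.batch_norm(x, running_mean, running_var, training=False)
ref = (x - running_mean) / torch.sqrt(running_var + 1e-5)
assert torch.allclose(y, ref, atol=1e-6)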
len(size)):\n size_prods *= size[i]\n if size_prods == 1:\n raise ValueError(\"Expected more than 1 spatial element when training, got input size {}\".format(size))\n\n\ndef instance_norm(\n input: Tensor,\n running_mean: Optional[Tensor] = None,\n running_var: Optional[Tensor] = None,\n weight: Optional[Tensor] = None,\n bias: Optional[Tensor] = None,\n use_input_stats: bool = True,\n momentum: float = 0.1,\n eps: float = 1e-5,\n) -> Tensor:\n r\"\"\"Applies Instance Normalization for each channel in each data sample in a\n batch.\n\n See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,\n :class:`~torch.nn.InstanceNorm3d` for details.\n \"\"\"\n if has_torch_function_variadic(input, running_mean, running_var, weight, bias):\n return handle_torch_function(\n instance_norm,\n (input, running_mean, running_var, weight, bias),\n input,\n running_mean=running_mean,\n running_var=running_var,\n weight=weight,\n bias=bias,\n use_input_stats=use_input_stats,\n momentum=momentum,\n eps=eps,\n )\n if use_input_stats:\n _verify_spatial_size(input.size())\n return torch.instance_norm(\n input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled\n )\n\n\ndef layer_norm(\n input: Tensor,\n normalized_shape: List[int],\n weight: Optional[Tensor] = None,\n bias: Optional[Tensor] = None,\n eps: float = 1e-5,\n) -> Tensor:\n r\"\"\"Applies Layer Normalization for last certain number of dimensions.\n\n See :class:`~torch.nn.LayerNorm` for details.\n \"\"\"\n if has_torch_function_variadic(input, weight, bias):\n return handle_torch_function(\n layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps\n )\n return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)\n\n\ndef group_norm(\n input: Tensor, num_groups: int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: float = 1e-5\n) -> Tensor:\n r\"\"\"Applies Group Normalization for last certain number of dimensions.\n\n See :class:`~torch.nn.GroupNorm` for details.\n \"\"\"\n if has_torch_function_variadic(input, weight, bias):\n return handle_torch_function(group_norm, (input, weight, bias,), input, num_groups, weight=weight, bias=bias, eps=eps)\n _verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:]))\n return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)\n\n\ndef local_response_norm(input: Tensor, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0) -> Tensor:\n r\"\"\"Applies local response normalization over an input signal composed of\n several input planes, where channels occupy the second dimension.\n Applies normalization across channels.\n\n See :class:`~torch.nn.LocalResponseNorm` for details.\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k)\n dim = input.dim()\n if dim < 3:\n raise ValueError(\n \"Expected 3D or higher dimensionality \\\n input (got {} dimensions)\".format(\n dim\n )\n )\n\n if input.numel() == 0:\n return input\n\n div = input.mul(input).unsqueeze(1)\n if dim == 3:\n div = pad(div, (0, 0, size // 2, (size - 1) // 2))\n div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)\n else:\n sizes = input.size()\n div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)\n div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))\n div = avg_pool3d(div, (size, 1, 1), 
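# Illustrative user-code check (assumes a standard torch install): layer_norm
# normalizes over the trailing `normalized_shape` dimensions using a biased
# variance and the default eps of 1e-5.
import torch
import torch.nn.functional as F

x = torch.randn(2, 5, 10)
y = F.layer_norm(x, [10])
mu = x.mean(dim=-1, keepdim=True)
var = x.var(dim=-1, unbiased=False, keepdim=True)
assert torch.allclose(y, (x - mu) / torch.sqrt(var + 1e-5), atol=1e-5)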
stride=1).squeeze(1)\n div = div.view(sizes)\n div = div.mul(alpha).add(k).pow(beta)\n return input / div\n\n\n# loss\n\n\ndef ctc_loss(\n log_probs: Tensor,\n targets: Tensor,\n input_lengths: Tensor,\n target_lengths: Tensor,\n blank: int = 0,\n reduction: str = \"mean\",\n zero_infinity: bool = False,\n) -> Tensor:\n r\"\"\"The Connectionist Temporal Classification loss.\n\n See :class:`~torch.nn.CTCLoss` for details.\n\n Note:\n {cudnn_reproducibility_note}\n\n Note:\n {backward_reproducibility_note}\n\n Args:\n log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet including blank`,\n `T = input length`, and `N = batch size`.\n The logarithmized probabilities of the outputs\n (e.g. obtained with :func:`torch.nn.functional.log_softmax`).\n targets: :math:`(N, S)` or `(sum(target_lengths))`.\n Targets cannot be blank. In the second form, the targets are assumed to be concatenated.\n input_lengths: :math:`(N)` or :math:`()`.\n Lengths of the inputs (must each be :math:`\\leq T`)\n target_lengths: :math:`(N)` or :math:`()`.\n Lengths of the targets\n blank (int, optional):\n Blank label. Default :math:`0`.\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the output losses will be divided by the target lengths and\n then the mean over the batch is taken, ``'sum'``: the output will be\n summed. Default: ``'mean'``\n zero_infinity (bool, optional):\n Whether to zero infinite losses and the associated gradients.\n Default: ``False``\n Infinite losses mainly occur when the inputs are too short\n to be aligned to the targets.\n\n Example::\n\n >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()\n >>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long)\n >>> input_lengths = torch.full((16,), 50, dtype=torch.long)\n >>> target_lengths = torch.randint(10,30,(16,), dtype=torch.long)\n >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)\n >>> loss.backward()\n \"\"\"\n if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths):\n return handle_torch_function(\n ctc_loss,\n (log_probs, targets, input_lengths, target_lengths),\n log_probs, targets, input_lengths, target_lengths,\n blank=blank, reduction=reduction, zero_infinity=zero_infinity\n )\n return torch.ctc_loss(\n log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction), zero_infinity\n )\n\n\nctc_loss.__doc__ = ctc_loss.__doc__.format(**reproducibility_notes)\n\n\ndef nll_loss(\n input: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n ignore_index: int = -100,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"The negative log likelihood loss.\n\n See :class:`~torch.nn.NLLLoss` for details.\n\n Args:\n input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`\n in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \\geq 1`\n in the case of K-dimensional loss. `input` is expected to be log-probabilities.\n target: :math:`(N)` where each value is :math:`0 \\leq \\text{targets}[i] \\leq C-1`,\n or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \\geq 1` for\n K-dimensional loss.\n weight (Tensor, optional): a manual rescaling weight given to each\n class. 
If given, has to be a Tensor of size `C`\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. Default: ``True``\n ignore_index (int, optional): Specifies a target value that is ignored\n and does not contribute to the input gradient. When :attr:`size_average` is\n ``True``, the loss is averaged over non-ignored targets. Default: -100\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n\n Example::\n\n >>> # input is of size N x C = 3 x 5\n >>> input = torch.randn(3, 5, requires_grad=True)\n >>> # each element in target has to have 0 <= value < C\n >>> target = torch.tensor([1, 0, 4])\n >>> output = F.nll_loss(F.log_softmax(input), target)\n >>> output.backward()\n \"\"\"\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(\n nll_loss,\n (input, target, weight),\n input,\n target,\n weight=weight,\n size_average=size_average,\n ignore_index=ignore_index,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)\n\n\ndef poisson_nll_loss(\n input: Tensor,\n target: Tensor,\n log_input: bool = True,\n full: bool = False,\n size_average: Optional[bool] = None,\n eps: float = 1e-8,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"Poisson negative log likelihood loss.\n\n See :class:`~torch.nn.PoissonNLLLoss` for details.\n\n Args:\n input: expectation of underlying Poisson distribution.\n target: random sample :math:`target \\sim \\text{Poisson}(input)`.\n log_input: if ``True`` the loss is computed as\n :math:`\\exp(\\text{input}) - \\text{target} * \\text{input}`, if ``False`` then loss is\n :math:`\\text{input} - \\text{target} * \\log(\\text{input}+\\text{eps})`. Default: ``True``\n full: whether to compute full loss, i. e. to add the Stirling\n approximation term. Default: ``False``\n :math:`\\text{target} * \\log(\\text{target}) - \\text{target} + 0.5 * \\log(2 * \\pi * \\text{target})`.\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. 
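# Illustrative user-code sketch (assumes a standard torch install): with
# reduction='mean' and no class weights, nll_loss averages the negated
# log-probability that `input` assigns to each target class.
import torch
import torch.nn.functional as F

logp = F.log_softmax(torch.randn(3, 5), dim=1)
target = torch.tensor([1, 0, 4])
loss = F.nll_loss(logp, target)
assert torch.allclose(loss, -logp[torch.arange(3), target].mean(), atol=1e-6)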
Default: ``True``\n eps (float, optional): Small value to avoid evaluation of :math:`\\log(0)` when\n :attr:`log_input`\\ =\\ ``False``. Default: 1e-8\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n poisson_nll_loss,\n (input, target),\n input,\n target,\n log_input=log_input,\n full=full,\n size_average=size_average,\n eps=eps,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n if reduction != \"none\" and reduction != \"mean\" and reduction != \"sum\":\n ret = input\n raise ValueError(reduction + \" is not valid\")\n\n ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction))\n return ret\n\n\ndef gaussian_nll_loss(\n input: Tensor,\n target: Tensor,\n var: Tensor,\n full: bool = False,\n eps: float = 1e-6,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"Gaussian negative log likelihood loss.\n\n See :class:`~torch.nn.GaussianNLLLoss` for details.\n\n Args:\n input: expectation of the Gaussian distribution.\n target: sample from the Gaussian distribution.\n var: tensor of positive variance(s), one for each of the expectations\n in the input (heteroscedastic), or a single one (homoscedastic).\n full (bool, optional): include the constant term in the loss calculation. Default: ``False``.\n eps (float, optional): value added to var, for stability. Default: 1e-6.\n reduction (string, optional): specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the output is the average of all batch member losses,\n ``'sum'``: the output is the sum of all batch member losses.\n Default: ``'mean'``.\n \"\"\"\n if has_torch_function_variadic(input, target, var):\n return handle_torch_function(\n gaussian_nll_loss,\n (input, target, var),\n input,\n target,\n var,\n full=full,\n eps=eps,\n reduction=reduction,\n )\n\n # Check var size\n # If var.size == input.size, the case is heteroscedastic and no further checks are needed.\n # Otherwise:\n if var.size() != input.size():\n\n # If var is one dimension short of input, but the sizes match otherwise, then this is a homoscedastic case.\n # e.g. input.size = (10, 2, 3), var.size = (10, 2)\n # -> unsqueeze var so that var.shape = (10, 2, 1)\n # this is done so that broadcasting can happen in the loss calculation\n if input.size()[:-1] == var.size():\n var = torch.unsqueeze(var, -1)\n\n # This checks if the sizes match up to the final dimension, and the final dimension of var is of size 1.\n # This is also a homoscedastic case.\n # e.g. 
input.size = (10, 2, 3), var.size = (10, 2, 1)\n elif input.size()[:-1] == var.size()[:-1] and var.size(-1) == 1: # Heteroscedastic case\n pass\n\n # If none of the above pass, then the size of var is incorrect.\n else:\n raise ValueError(\"var is of incorrect size\")\n\n # Check validity of reduction mode\n if reduction != 'none' and reduction != 'mean' and reduction != 'sum':\n raise ValueError(reduction + \" is not valid\")\n\n # Entries of var must be non-negative\n if torch.any(var < 0):\n raise ValueError(\"var has negative entry/entries\")\n\n # Clamp for stability\n var = var.clone()\n with torch.no_grad():\n var.clamp_(min=eps)\n\n # Calculate the loss\n loss = 0.5 * (torch.log(var) + (input - target)**2 / var)\n if full:\n loss += 0.5 * math.log(2 * math.pi)\n\n if reduction == 'mean':\n return loss.mean()\n elif reduction == 'sum':\n return loss.sum()\n else:\n return loss\n\n\ndef kl_div(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n log_target: bool = False,\n) -> Tensor:\n r\"\"\"The `Kullback-Leibler divergence Loss\n <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`__\n\n See :class:`~torch.nn.KLDivLoss` for details.\n\n Args:\n input: Tensor of arbitrary shape in log-probabilities.\n target: Tensor of the same shape as input. See :attr:`log_target` for\n the target's interpretation.\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. Default: ``True``\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.\n ``'none'``: no reduction will be applied\n ``'batchmean'``: the sum of the output will be divided by the batchsize\n ``'sum'``: the output will be summed\n ``'mean'``: the output will be divided by the number of elements in the output\n Default: ``'mean'``\n log_target (bool): A flag indicating whether ``target`` is passed in the log space.\n It is recommended to pass certain distributions (like ``softmax``)\n in the log space to avoid numerical issues caused by explicit ``log``.\n Default: ``False``\n\n .. note::\n :attr:`size_average` and :attr:`reduce` are in the process of being deprecated,\n and in the meantime, specifying either of those two args will override :attr:`reduction`.\n\n .. 
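# Worked check of the loss computed above (illustrative user-code sketch;
# assumes a standard torch install): with full=False and var well above eps,
# the result is the mean of 0.5 * (log(var) + (input - target)**2 / var).
import torch
import torch.nn.functional as F

mu = torch.randn(4, 2)
target = torch.randn(4, 2)
var = torch.rand(4, 2) + 0.1                # strictly positive, far above eps
loss = F.gaussian_nll_loss(mu, target, var)
ref = (0.5 * (torch.log(var) + (mu - target) ** 2 / var)).mean()
assert torch.allclose(loss, ref, atol=1e-6)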
note::\n :attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use\n :attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.\n In the next major release, ``'mean'`` will be changed to be the same as 'batchmean'.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n kl_div,\n (input, target),\n input,\n target,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n log_target=log_target,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n if reduction == \"mean\":\n warnings.warn(\n \"reduction: 'mean' divides the total loss by both the batch size and the support size.\"\n \"'batchmean' divides only by the batch size, and aligns with the KL div math definition.\"\n \"'mean' will be changed to behave the same as 'batchmean' in the next major release.\"\n )\n\n # special case for batchmean\n if reduction == \"batchmean\":\n reduction_enum = _Reduction.get_enum(\"sum\")\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n\n reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)\n\n if reduction == \"batchmean\" and input.dim() != 0:\n reduced = reduced / input.size()[0]\n\n return reduced\n\n\ndef cross_entropy(\n input: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n ignore_index: int = -100,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n label_smoothing: float = 0.0,\n) -> Tensor:\n r\"\"\"This criterion computes the cross entropy loss between input and target.\n\n See :class:`~torch.nn.CrossEntropyLoss` for details.\n\n Args:\n input (Tensor) : Predicted unnormalized scores (often referred to as logits);\n see Shape section below for supported shapes.\n target (Tensor) : Ground truth class indices or class probabilities;\n see Shape section below for supported shapes.\n weight (Tensor, optional): a manual rescaling weight given to each\n class. If given, has to be a Tensor of size `C`\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. Default: ``True``\n ignore_index (int, optional): Specifies a target value that is ignored\n and does not contribute to the input gradient. When :attr:`size_average` is\n ``True``, the loss is averaged over non-ignored targets. Note that\n :attr:`ignore_index` is only applicable when the target contains class indices.\n Default: -100\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. 
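# Illustrative user-code sketch (assumes a standard torch install): `input` is
# expected in log space; with reduction='batchmean' the summed pointwise term
# target * (log(target) - input) is divided by the batch size, mirroring the
# special-case handling above.
import torch
import torch.nn.functional as F

p_log = torch.randn(4, 10).log_softmax(dim=1)   # log-probabilities
q = torch.randn(4, 10).softmax(dim=1)           # probabilities
loss = F.kl_div(p_log, q, reduction="batchmean")
ref = (q * (q.log() - p_log)).sum() / 4
assert torch.allclose(loss, ref, atol=1e-6)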
Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount\n of smoothing when computing the loss, where 0.0 means no smoothing. The targets\n become a mixture of the original ground truth and a uniform distribution as described in\n `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.\n\n Shape:\n - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \\geq 1`\n in the case of `K`-dimensional loss.\n - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with\n :math:`K \\geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`.\n If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`.\n\n where:\n\n .. math::\n \\begin{aligned}\n C ={} & \\text{number of classes} \\\\\n N ={} & \\text{batch size} \\\\\n \\end{aligned}\n\n Examples::\n\n >>> # Example of target with class indices\n >>> input = torch.randn(3, 5, requires_grad=True)\n >>> target = torch.randint(5, (3,), dtype=torch.int64)\n >>> loss = F.cross_entropy(input, target)\n >>> loss.backward()\n >>>\n >>> # Example of target with class probabilities\n >>> input = torch.randn(3, 5, requires_grad=True)\n >>> target = torch.randn(3, 5).softmax(dim=1)\n >>> loss = F.cross_entropy(input, target)\n >>> loss.backward()\n \"\"\"\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(\n cross_entropy,\n (input, target, weight),\n input,\n target,\n weight=weight,\n size_average=size_average,\n ignore_index=ignore_index,\n reduce=reduce,\n reduction=reduction,\n label_smoothing=label_smoothing,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)\n\n\ndef binary_cross_entropy(\n input: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"Function that measures the Binary Cross Entropy between the target and input\n probabilities.\n\n See :class:`~torch.nn.BCELoss` for details.\n\n Args:\n input: Tensor of arbitrary shape as probabilities.\n target: Tensor of the same shape as input with values between 0 and 1.\n weight (Tensor, optional): a manual rescaling weight\n if provided it's repeated to match input tensor shape\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. Default: ``True``\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. 
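# Illustrative user-code sketch (assumes a standard torch install): with
# class-index targets, no class weights and no label smoothing, cross_entropy
# behaves like log_softmax followed by nll_loss.
import torch
import torch.nn.functional as F

logits = torch.randn(3, 5)
target = torch.randint(5, (3,))
assert torch.allclose(
    F.cross_entropy(logits, target),
    F.nll_loss(F.log_softmax(logits, dim=1), target),
    atol=1e-6,
)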
Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n\n Examples::\n\n >>> input = torch.randn(3, 2, requires_grad=True)\n >>> target = torch.rand(3, 2, requires_grad=False)\n >>> loss = F.binary_cross_entropy(torch.sigmoid(input), target)\n >>> loss.backward()\n \"\"\"\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(\n binary_cross_entropy,\n (input, target, weight),\n input,\n target,\n weight=weight,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n if target.size() != input.size():\n raise ValueError(\n \"Using a target size ({}) that is different to the input size ({}) is deprecated. \"\n \"Please ensure they have the same size.\".format(target.size(), input.size())\n )\n\n if weight is not None:\n new_size = _infer_size(target.size(), weight.size())\n weight = weight.expand(new_size)\n\n return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)\n\n\ndef binary_cross_entropy_with_logits(\n input: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n pos_weight: Optional[Tensor] = None,\n) -> Tensor:\n r\"\"\"Function that measures Binary Cross Entropy between target and input\n logits.\n\n See :class:`~torch.nn.BCEWithLogitsLoss` for details.\n\n Args:\n input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits).\n target: Tensor of the same shape as input with values between 0 and 1\n weight (Tensor, optional): a manual rescaling weight\n if provided it's repeated to match input tensor shape\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when reduce is ``False``. Default: ``True``\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. 
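# Illustrative user-code sketch (assumes a standard torch install): for
# moderate inputs, binary_cross_entropy_with_logits(x, t) agrees with
# binary_cross_entropy(sigmoid(x), t); the logits form is the numerically
# safer choice when inputs can be large in magnitude.
import torch
import torch.nn.functional as F

logits = torch.randn(6)
target = torch.empty(6).random_(2)
a = F.binary_cross_entropy_with_logits(logits, target)
b = F.binary_cross_entropy(torch.sigmoid(logits), target)
assert torch.allclose(a, b, atol=1e-5)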
Default: ``'mean'``\n pos_weight (Tensor, optional): a weight of positive examples.\n Must be a vector with length equal to the number of classes.\n\n Examples::\n\n >>> input = torch.randn(3, requires_grad=True)\n >>> target = torch.empty(3).random_(2)\n >>> loss = F.binary_cross_entropy_with_logits(input, target)\n >>> loss.backward()\n \"\"\"\n if has_torch_function_variadic(input, target, weight, pos_weight):\n return handle_torch_function(\n binary_cross_entropy_with_logits,\n (input, target, weight, pos_weight),\n input,\n target,\n weight=weight,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n pos_weight=pos_weight,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n\n if not (target.size() == input.size()):\n raise ValueError(\"Target size ({}) must be the same as input size ({})\".format(target.size(), input.size()))\n\n return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)\n\n\ndef smooth_l1_loss(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n beta: float = 1.0,\n) -> Tensor:\n r\"\"\"Function that uses a squared term if the absolute\n element-wise error falls below beta and an L1 term otherwise.\n\n See :class:`~torch.nn.SmoothL1Loss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n smooth_l1_loss,\n (input, target),\n input,\n target,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n beta=beta,\n )\n if not (target.size() == input.size()):\n warnings.warn(\n \"Using a target size ({}) that is different to the input size ({}). \"\n \"This will likely lead to incorrect results due to broadcasting. \"\n \"Please ensure they have the same size.\".format(target.size(), input.size()),\n stacklevel=2,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n return torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), beta)\n\n\ndef huber_loss(\n input: Tensor,\n target: Tensor,\n reduction: str = 'mean',\n delta: float = 1.0,\n) -> Tensor:\n r\"\"\"Function that uses a squared term if the absolute\n element-wise error falls below delta and a delta-scaled L1 term otherwise.\n\n See :class:`~torch.nn.HuberLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n huber_loss,\n (input, target),\n input,\n target,\n reduction=reduction,\n delta=delta,\n )\n if not (target.size() == input.size()):\n warnings.warn(\"Using a target size ({}) that is different to the input size ({}). \"\n \"This will likely lead to incorrect results due to broadcasting. 
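# Illustrative user-code sketch (assumes a standard torch install): for
# beta = delta = 1.0 the two robust losses defined here coincide; more
# generally huber_loss with delta equals delta * smooth_l1_loss(beta=delta).
import torch
import torch.nn.functional as F

a, b = torch.randn(8), torch.randn(8)
assert torch.allclose(F.smooth_l1_loss(a, b, beta=1.0),
                      F.huber_loss(a, b, delta=1.0), atol=1e-6)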
\"\n \"Please ensure they have the same size.\".format(target.size(), input.size()),\n stacklevel=2)\n\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta)\n\n\ndef l1_loss(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n Function that takes the mean element-wise absolute value difference.\n\n See :class:`~torch.nn.L1Loss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n l1_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction\n )\n if not (target.size() == input.size()):\n warnings.warn(\n \"Using a target size ({}) that is different to the input size ({}). \"\n \"This will likely lead to incorrect results due to broadcasting. \"\n \"Please ensure they have the same size.\".format(target.size(), input.size()),\n stacklevel=2,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))\n\n\ndef mse_loss(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n Measures the element-wise mean squared error.\n\n See :class:`~torch.nn.MSELoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n mse_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction\n )\n if not (target.size() == input.size()):\n warnings.warn(\n \"Using a target size ({}) that is different to the input size ({}). \"\n \"This will likely lead to incorrect results due to broadcasting. 
\"\n \"Please ensure they have the same size.\".format(target.size(), input.size()),\n stacklevel=2,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n return torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))\n\n\ndef margin_ranking_loss(\n input1: Tensor,\n input2: Tensor,\n target: Tensor,\n margin: float = 0,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.MarginRankingLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input1, input2, target):\n return handle_torch_function(\n margin_ranking_loss,\n (input1, input2, target),\n input1,\n input2,\n target,\n margin=margin,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n if (input1.dim() != input2.dim() or input1.dim() != target.dim()):\n raise RuntimeError(\n (\n \"margin_ranking_loss : All input tensors should have same dimension but got sizes: \"\n \"input1: {}, input2: {}, target: {} \".format(input1.size(), input2.size(), target.size())\n )\n )\n return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)\n\n\ndef hinge_embedding_loss(\n input: Tensor,\n target: Tensor,\n margin: float = 1.0,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.HingeEmbeddingLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n hinge_embedding_loss,\n (input, target),\n input,\n target,\n margin=margin,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch.hinge_embedding_loss(input, target, margin, reduction_enum)\n\n\ndef multilabel_margin_loss(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.MultiLabelMarginLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n multilabel_margin_loss,\n (input, target),\n input,\n target,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)\n\n\ndef soft_margin_loss(\n input: Tensor,\n target: Tensor,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"soft_margin_loss(input, target, 
size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.SoftMarginLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target):\n return handle_torch_function(\n soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch._C._nn.soft_margin_loss(input, target, reduction_enum)\n\n\ndef multilabel_soft_margin_loss(\n input: Tensor,\n target: Tensor,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(\n multilabel_soft_margin_loss,\n (input, target, weight),\n input,\n target,\n weight=weight,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n\n loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))\n\n if weight is not None:\n loss = loss * weight\n\n class_dim = input.dim() - 1\n C = input.size(class_dim)\n loss = loss.sum(dim=class_dim) / C # only return N loss values\n\n if reduction == \"none\":\n ret = loss\n elif reduction == \"mean\":\n ret = loss.mean()\n elif reduction == \"sum\":\n ret = loss.sum()\n else:\n ret = input\n raise ValueError(reduction + \" is not valid\")\n return ret\n\n\ndef cosine_embedding_loss(\n input1: Tensor,\n input2: Tensor,\n target: Tensor,\n margin: float = 0,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.CosineEmbeddingLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input1, input2, target):\n return handle_torch_function(\n cosine_embedding_loss,\n (input1, input2, target),\n input1,\n input2,\n target,\n margin=margin,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)\n\n\ndef multi_margin_loss(\n input: Tensor,\n target: Tensor,\n p: int = 1,\n margin: float = 1.0,\n weight: Optional[Tensor] = None,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor\n\n See :class:`~torch.nn.MultiMarginLoss` for details.\n \"\"\"\n if has_torch_function_variadic(input, target, weight):\n return handle_torch_function(\n multi_margin_loss,\n (input, target, weight),\n input,\n target,\n p=p,\n margin=margin,\n weight=weight,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not 
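# Illustrative user-code checks for two of the losses above (assume a standard
# torch install). margin_ranking_loss penalises pairs where input1 is not
# ranked above input2 (for target = +1) by at least `margin`, i.e. the mean of
# max(0, -target * (input1 - input2) + margin) as documented for
# MarginRankingLoss; and, per the implementation above,
# multilabel_soft_margin_loss averages a per-class sigmoid cross-entropy, so
# with no class weights it matches binary_cross_entropy_with_logits under
# mean reduction.
import torch
import torch.nn.functional as F

x1, x2 = torch.randn(5), torch.randn(5)
target = torch.ones(5)                       # require x1 to rank higher
loss = F.margin_ranking_loss(x1, x2, target, margin=0.5)
ref = torch.clamp(-target * (x1 - x2) + 0.5, min=0).mean()
assert torch.allclose(loss, ref, atol=1e-6)

logits = torch.randn(4, 6)
labels = torch.empty(4, 6).random_(2)
assert torch.allclose(F.multilabel_soft_margin_loss(logits, labels),
                      F.binary_cross_entropy_with_logits(logits, labels),
                      atol=1e-5)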
None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n if p != 1 and p != 2:\n raise ValueError(\"only p == 1 and p == 2 supported\")\n if weight is not None:\n if weight.dim() != 1:\n raise ValueError(\"weight must be one-dimensional\")\n\n return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)\n\n\npixel_shuffle = _add_docstr(\n torch.pixel_shuffle,\n r\"\"\"\npixel_shuffle(input, upscale_factor) -> Tensor\n\nRearranges elements in a tensor of shape :math:`(*, C \\times r^2, H, W)` to a\ntensor of shape :math:`(*, C, H \\times r, W \\times r)`, where r is the :attr:`upscale_factor`.\n\nSee :class:`~torch.nn.PixelShuffle` for details.\n\nArgs:\n input (Tensor): the input tensor\n upscale_factor (int): factor to increase spatial resolution by\n\nExamples::\n\n >>> input = torch.randn(1, 9, 4, 4)\n >>> output = torch.nn.functional.pixel_shuffle(input, 3)\n >>> print(output.size())\n torch.Size([1, 1, 12, 12])\n\"\"\",\n)\n\npixel_unshuffle = _add_docstr(\n torch.pixel_unshuffle,\n r\"\"\"\npixel_unshuffle(input, downscale_factor) -> Tensor\n\nReverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a\ntensor of shape :math:`(*, C, H \\times r, W \\times r)` to a tensor of shape\n:math:`(*, C \\times r^2, H, W)`, where r is the :attr:`downscale_factor`.\n\nSee :class:`~torch.nn.PixelUnshuffle` for details.\n\nArgs:\n input (Tensor): the input tensor\n downscale_factor (int): factor to increase spatial resolution by\n\nExamples::\n\n >>> input = torch.randn(1, 1, 12, 12)\n >>> output = torch.nn.functional.pixel_unshuffle(input, 3)\n >>> print(output.size())\n torch.Size([1, 9, 4, 4])\n\"\"\",\n)\n\nchannel_shuffle = _add_docstr(\n torch.channel_shuffle,\n r\"\"\"\nchannel_shuffle(input, groups) -> Tensor\n\nDivide the channels in a tensor of shape :math:`(*, C , H, W)`\ninto g groups and rearrange them as :math:`(*, C \\frac g, g, H, W)`,\nwhile keeping the original tensor shape.\n\nSee :class:`~torch.nn.ChannelShuffle` for details.\n\nArgs:\n input (Tensor): the input tensor\n groups (int): number of groups to divide channels in and rearrange.\n\nExamples::\n\n >>> input = torch.randn(1, 4, 2, 2)\n >>> print(input)\n [[[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n [[9, 10],\n [11, 12]],\n [[13, 14],\n [15, 16]],\n ]]\n >>> output = torch.nn.functional.channel_shuffle(input, 2)\n >>> print(output)\n [[[[1, 2],\n [3, 4]],\n [[9, 10],\n [11, 12]],\n [[5, 6],\n [7, 8]],\n [[13, 14],\n [15, 16]],\n ]]\n\"\"\",\n)\n\nnative_channel_shuffle = _add_docstr(\n torch.native_channel_shuffle,\n r\"\"\"\nnative_channel_shuffle(input, groups) -> Tensor\n\nNative kernel level implementation of the `channel_shuffle`.\nThis function might become private in future releases, use with caution.\n\nDivide the channels in a tensor of shape :math:`(*, C , H, W)`\ninto g groups and rearrange them as :math:`(*, C \\frac g, g, H, W)`,\nwhile keeping the original tensor shape.\n\nSee :class:`~torch.nn.ChannelShuffle` for details.\n\nArgs:\n input (Tensor): the input tensor\n groups (int): number of groups to divide channels in and rearrange.\n\nExamples::\n\n >>> input = torch.randn(1, 4, 2, 2)\n >>> print(input)\n [[[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n [[9, 10],\n [11, 12]],\n [[13, 14],\n [15, 16]],\n ]]\n >>> output = torch.nn.functional.native_channel_shuffle(input, 2)\n >>> print(output)\n [[[[1, 2],\n [3, 4]],\n [[9, 10],\n [11, 12]],\n [[5, 6],\n [7, 8]],\n [[13, 14],\n 
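# Illustrative user-code round-trip check (assumes a standard torch install):
# pixel_unshuffle reverses pixel_shuffle for a matching factor, as the
# docstrings above state.
import torch
import torch.nn.functional as F

x = torch.randn(1, 9, 4, 4)
assert torch.equal(F.pixel_unshuffle(F.pixel_shuffle(x, 3), 3), x)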
[15, 16]],\n ]]\n\"\"\",\n)\n\n@_overload # noqa: F811\ndef upsample(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = \"nearest\", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811\n pass\n\n\n@_overload # noqa: F811\ndef upsample(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None, mode: str = \"nearest\", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811\n pass\n\n\ndef upsample(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None): # noqa: F811\n r\"\"\"Upsamples the input to either the given :attr:`size` or the given\n :attr:`scale_factor`\n\n .. warning::\n This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.\n This is equivalent with ``nn.functional.interpolate(...)``.\n\n Note:\n {backward_reproducibility_note}\n\n The algorithm used for upsampling is determined by :attr:`mode`.\n\n Currently temporal, spatial and volumetric upsampling are supported, i.e.\n expected inputs are 3-D, 4-D or 5-D in shape.\n\n The input dimensions are interpreted in the form:\n `mini-batch x channels x [optional depth] x [optional height] x width`.\n\n The modes available for upsampling are: `nearest`, `linear` (3D-only),\n `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only)\n\n Args:\n input (Tensor): the input tensor\n size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):\n output spatial size.\n scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.\n mode (string): algorithm used for upsampling:\n ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n ``'trilinear'``. Default: ``'nearest'``\n align_corners (bool, optional): Geometrically, we consider the pixels of the\n input and output as squares rather than points.\n If set to ``True``, the input and output tensors are aligned by the\n center points of their corner pixels, preserving the values at the corner pixels.\n If set to ``False``, the input and output tensors are aligned by the corner\n points of their corner pixels, and the interpolation uses edge value padding\n for out-of-boundary values, making this operation *independent* of input size\n when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`\n is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.\n Default: ``False``\n\n .. note::\n With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce\n negative values or values greater than 255 for images.\n Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot\n when displaying the image.\n\n .. warning::\n With ``align_corners = True``, the linearly interpolating modes\n (`linear`, `bilinear`, and `trilinear`) don't proportionally align the\n output and input pixels, and thus the output values can depend on the\n input size. This was the default behavior for these modes up to version\n 0.3.1. Since then, the default behavior is ``align_corners = False``.\n See :class:`~torch.nn.Upsample` for concrete examples on how this\n affects the outputs.\n\n \"\"\"\n warnings.warn(\"nn.functional.upsample is deprecated. 
Use nn.functional.interpolate instead.\")\n return interpolate(input, size, scale_factor, mode, align_corners)\n\n\nupsample.__doc__ = upsample.__doc__.format(**reproducibility_notes)\n\n\n@_overload # noqa: F811\ndef interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811\n pass\n\n\n@_overload # noqa: F811\ndef interpolate(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811\n pass\n\n\n@_overload # noqa: F811\ndef interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811\n pass\n\n\n@_overload # noqa: F811\ndef interpolate( # noqa: F811\n input: Tensor,\n size: Optional[List[int]] = None,\n scale_factor: Optional[float] = None,\n mode: str = \"nearest\",\n align_corners: Optional[bool] = None,\n recompute_scale_factor: Optional[bool] = None,\n antialias: bool = False,\n) -> Tensor: # noqa: F811\n pass\n\ndef interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811\n r\"\"\"Down/up samples the input to either the given :attr:`size` or the given\n :attr:`scale_factor`\n\n The algorithm used for interpolation is determined by :attr:`mode`.\n\n Currently temporal, spatial and volumetric sampling are supported, i.e.\n expected inputs are 3-D, 4-D or 5-D in shape.\n\n The input dimensions are interpreted in the form:\n `mini-batch x channels x [optional depth] x [optional height] x width`.\n\n The modes available for resizing are: `nearest`, `linear` (3D-only),\n `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area`, `nearest-exact`\n\n Args:\n input (Tensor): the input tensor\n size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):\n output spatial size.\n scale_factor (float or Tuple[float]): multiplier for spatial size. If `scale_factor` is a tuple,\n its length has to match `input.dim()`.\n mode (str): algorithm used for upsampling:\n ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n ``'trilinear'`` | ``'area'`` | ``'nearest-exact'``. Default: ``'nearest'``\n align_corners (bool, optional): Geometrically, we consider the pixels of the\n input and output as squares rather than points.\n If set to ``True``, the input and output tensors are aligned by the\n center points of their corner pixels, preserving the values at the corner pixels.\n If set to ``False``, the input and output tensors are aligned by the corner\n points of their corner pixels, and the interpolation uses edge value padding\n for out-of-boundary values, making this operation *independent* of input size\n when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`\n is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.\n Default: ``False``\n recompute_scale_factor (bool, optional): recompute the scale_factor for use in the\n interpolation calculation. 
If `recompute_scale_factor` is ``True``, then\n `scale_factor` must be passed in and `scale_factor` is used to compute the\n output `size`. The computed output `size` will be used to infer new scales for\n the interpolation. Note that when `scale_factor` is floating-point, it may differ\n from the recomputed `scale_factor` due to rounding and precision issues.\n If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will\n be used directly for interpolation. Default: ``None``.\n antialias (bool, optional): flag to apply anti-aliasing. Default: ``False``. Using anti-alias\n option together with ``align_corners=False``, interpolation result would match Pillow\n result for downsampling operation. Supported modes: ``'bilinear'``, ``'bicubic'``.\n\n .. note::\n With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce\n negative values or values greater than 255 for images.\n Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot\n when displaying the image.\n\n .. note::\n Mode ``mode='nearest-exact'`` matches Scikit-Image and PIL nearest neighbours interpolation\n algorithms and fixes known issues with ``mode='nearest'``. This mode is introduced to keep\n backward compatibility.\n Mode ``mode='nearest'`` matches buggy OpenCV's ``INTER_NEAREST`` interpolation algorithm.\n\n Note:\n {backward_reproducibility_note}\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n interpolate,\n (input,),\n input,\n size=size,\n scale_factor=scale_factor,\n mode=mode,\n align_corners=align_corners,\n recompute_scale_factor=recompute_scale_factor,\n antialias=antialias\n )\n\n if mode in (\"nearest\", \"area\", \"nearest-exact\"):\n if align_corners is not None:\n raise ValueError(\n \"align_corners option can only be set with the \"\n \"interpolating modes: linear | bilinear | bicubic | trilinear\"\n )\n else:\n if align_corners is None:\n align_corners = False\n\n dim = input.dim() - 2 # Number of spatial dimensions.\n\n # Process size and scale_factor. Validate that exactly one is set.\n # Validate its length if it is a list, or expand it if it is a scalar.\n # After this block, exactly one of output_size and scale_factors will\n # be non-None, and it will be a list (or tuple).\n if size is not None and scale_factor is not None:\n raise ValueError(\"only one of size or scale_factor should be defined\")\n elif size is not None:\n assert scale_factor is None\n scale_factors = None\n if isinstance(size, (list, tuple)):\n if len(size) != dim:\n raise ValueError(\n \"Input and output must have the same number of spatial dimensions, but got \"\n f\"input with with spatial dimensions of {list(input.shape[2:])} and output size of {size}. \"\n \"Please provide input tensor in (N, C, d1, d2, ...,dK) format and \"\n \"output size in (o1, o2, ...,oK) format.\"\n\n )\n output_size = size\n else:\n output_size = [size for _ in range(dim)]\n elif scale_factor is not None:\n assert size is None\n output_size = None\n if isinstance(scale_factor, (list, tuple)):\n if len(scale_factor) != dim:\n raise ValueError(\n \"Input and scale_factor must have the same number of spatial dimensions, but \"\n f\"got input with spatial dimensions of {list(input.shape[2:])} and \"\n f\"scale_factor of shape {scale_factor}. 
\"\n \"Please provide input tensor in (N, C, d1, d2, ...,dK) format and \"\n \"scale_factor in (s1, s2, ...,sK) format.\"\n )\n scale_factors = scale_factor\n else:\n scale_factors = [scale_factor for _ in range(dim)]\n else:\n raise ValueError(\"either size or scale_factor should be defined\")\n\n if recompute_scale_factor is not None and recompute_scale_factor and size is not None:\n raise ValueError(\"recompute_scale_factor is not meaningful with an explicit size.\")\n\n # \"area\" mode always requires an explicit size rather than scale factor.\n # Re-use the recompute_scale_factor code path.\n if mode == \"area\" and output_size is None:\n recompute_scale_factor = True\n\n if recompute_scale_factor is not None and recompute_scale_factor:\n # We compute output_size here, then un-set scale_factors.\n # The C++ code will recompute it based on the (integer) output size.\n if not torch.jit.is_scripting() and torch._C._get_tracing_state():\n # make scale_factor a tensor in tracing so constant doesn't get baked in\n output_size = [\n (torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float()))\n for i in range(dim)\n ]\n else:\n assert scale_factors is not None\n output_size = [int(math.floor(float(input.size(i + 2)) * scale_factors[i])) for i in range(dim)]\n scale_factors = None\n\n if antialias and not (mode in (\"bilinear\", \"bicubic\") and input.ndim == 4):\n raise ValueError(\"Anti-alias option is only supported for bilinear and bicubic modes\")\n\n if input.dim() == 3 and mode == \"nearest\":\n return torch._C._nn.upsample_nearest1d(input, output_size, scale_factors)\n if input.dim() == 4 and mode == \"nearest\":\n return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors)\n if input.dim() == 5 and mode == \"nearest\":\n return torch._C._nn.upsample_nearest3d(input, output_size, scale_factors)\n\n if input.dim() == 3 and mode == \"nearest-exact\":\n return torch._C._nn._upsample_nearest_exact1d(input, output_size, scale_factors)\n if input.dim() == 4 and mode == \"nearest-exact\":\n return torch._C._nn._upsample_nearest_exact2d(input, output_size, scale_factors)\n if input.dim() == 5 and mode == \"nearest-exact\":\n return torch._C._nn._upsample_nearest_exact3d(input, output_size, scale_factors)\n\n if input.dim() == 3 and mode == \"area\":\n assert output_size is not None\n return adaptive_avg_pool1d(input, output_size)\n if input.dim() == 4 and mode == \"area\":\n assert output_size is not None\n return adaptive_avg_pool2d(input, output_size)\n if input.dim() == 5 and mode == \"area\":\n assert output_size is not None\n return adaptive_avg_pool3d(input, output_size)\n\n if input.dim() == 3 and mode == \"linear\":\n assert align_corners is not None\n return torch._C._nn.upsample_linear1d(input, output_size, align_corners, scale_factors)\n if input.dim() == 4 and mode == \"bilinear\":\n assert align_corners is not None\n if antialias:\n return torch._C._nn._upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors)\n return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors)\n if input.dim() == 5 and mode == \"trilinear\":\n assert align_corners is not None\n return torch._C._nn.upsample_trilinear3d(input, output_size, align_corners, scale_factors)\n if input.dim() == 4 and mode == \"bicubic\":\n assert align_corners is not None\n if antialias:\n return torch._C._nn._upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors)\n return 
torch._C._nn.upsample_bicubic2d(input, output_size, align_corners, scale_factors)\n\n    if input.dim() == 3 and mode == \"bilinear\":\n        raise NotImplementedError(\"Got 3D input, but bilinear mode needs 4D input\")\n    if input.dim() == 3 and mode == \"trilinear\":\n        raise NotImplementedError(\"Got 3D input, but trilinear mode needs 5D input\")\n    if input.dim() == 4 and mode == \"linear\":\n        raise NotImplementedError(\"Got 4D input, but linear mode needs 3D input\")\n    if input.dim() == 4 and mode == \"trilinear\":\n        raise NotImplementedError(\"Got 4D input, but trilinear mode needs 5D input\")\n    if input.dim() == 5 and mode == \"linear\":\n        raise NotImplementedError(\"Got 5D input, but linear mode needs 3D input\")\n    if input.dim() == 5 and mode == \"bilinear\":\n        raise NotImplementedError(\"Got 5D input, but bilinear mode needs 4D input\")\n\n    raise NotImplementedError(\n        \"Input Error: Only 3D, 4D and 5D input Tensors supported\"\n        \" (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact\"\n        \" (got {})\".format(input.dim(), mode)\n    )\n\n\ninterpolate.__doc__ = interpolate.__doc__.format(**reproducibility_notes)\n\n\n@_overload # noqa: F811\ndef upsample_nearest(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811\n    pass\n\n\n@_overload # noqa: F811\ndef upsample_nearest(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811\n    pass\n\n\ndef upsample_nearest(input, size=None, scale_factor=None): # noqa: F811\n    r\"\"\"Upsamples the input, using nearest neighbours' pixel values.\n\n    .. warning::\n        This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.\n        This is equivalent with ``nn.functional.interpolate(..., mode='nearest')``.\n\n    Currently spatial and volumetric upsampling are supported (i.e. expected\n    inputs are 4 or 5 dimensional).\n\n    Args:\n        input (Tensor): input\n        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial\n            size.\n        scale_factor (int): multiplier for spatial size. Has to be an integer.\n\n    Note:\n        {backward_reproducibility_note}\n    \"\"\"\n    # DeprecationWarning is ignored by default\n    warnings.warn(\"nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.\")\n    return interpolate(input, size, scale_factor, mode=\"nearest\")\n\n\nupsample_nearest.__doc__ = upsample_nearest.__doc__.format(**reproducibility_notes)\n\n\n@_overload # noqa: F811\ndef upsample_bilinear(\n    input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None\n) -> Tensor: # noqa: F811\n    pass\n\n\n@_overload # noqa: F811\ndef upsample_bilinear( # noqa: F811\n    input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None\n) -> Tensor: # noqa: F811\n    pass\n\n\n@_overload # noqa: F811\ndef upsample_bilinear( # noqa: F811\n    input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None\n) -> Tensor: # noqa: F811\n    pass\n\n\n@_overload # noqa: F811\ndef upsample_bilinear( # noqa: F811\n    input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None\n) -> Tensor: # noqa: F811\n    pass\n\n\ndef upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811\n    r\"\"\"Upsamples the input, using bilinear upsampling.\n\n    .. 
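note::\n        A concrete migration sketch (the tensor name ``x`` is illustrative): a call such as\n        ``F.upsample_bilinear(x, scale_factor=2)`` can be written as\n        ``F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)``, which is\n        exactly what this function forwards to.\n\n    .. 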
warning::\n This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.\n This is equivalent with\n ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.\n\n Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` fo\n volumetric (5 dimensional) inputs.\n\n Args:\n input (Tensor): input\n size (int or Tuple[int, int]): output spatial size.\n scale_factor (int or Tuple[int, int]): multiplier for spatial size\n\n Note:\n {backward_reproducibility_note}\n \"\"\"\n # DeprecationWarning is ignored by default\n warnings.warn(\"nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.\")\n return interpolate(input, size, scale_factor, mode=\"bilinear\", align_corners=True)\n\n\nupsample_bilinear.__doc__ = upsample_bilinear.__doc__.format(**reproducibility_notes)\n\nGRID_SAMPLE_INTERPOLATION_MODES = {\n \"bilinear\": 0,\n \"nearest\": 1,\n \"bicubic\": 2,\n}\n\nGRID_SAMPLE_PADDING_MODES = {\n \"zeros\": 0,\n \"border\": 1,\n \"reflection\": 2,\n}\n\n\ndef grid_sample(\n input: Tensor,\n grid: Tensor,\n mode: str = \"bilinear\",\n padding_mode: str = \"zeros\",\n align_corners: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Given an :attr:`input` and a flow-field :attr:`grid`, computes the\n ``output`` using :attr:`input` values and pixel locations from :attr:`grid`.\n\n Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are\n supported.\n\n In the spatial (4-D) case, for :attr:`input` with shape\n :math:`(N, C, H_\\text{in}, W_\\text{in})` and :attr:`grid` with shape\n :math:`(N, H_\\text{out}, W_\\text{out}, 2)`, the output will have shape\n :math:`(N, C, H_\\text{out}, W_\\text{out})`.\n\n For each output location ``output[n, :, h, w]``, the size-2 vector\n ``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``,\n which are used to interpolate the output value ``output[n, :, h, w]``.\n In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the\n ``x``, ``y``, ``z`` pixel locations for interpolating\n ``output[n, :, d, h, w]``. :attr:`mode` argument specifies ``nearest`` or\n ``bilinear`` interpolation method to sample the input pixels.\n\n :attr:`grid` specifies the sampling pixel locations normalized by the\n :attr:`input` spatial dimensions. Therefore, it should have most values in\n the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the\n left-top pixel of :attr:`input`, and values ``x = 1, y = 1`` is the\n right-bottom pixel of :attr:`input`.\n\n If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding\n outputs are handled as defined by :attr:`padding_mode`. Options are\n\n * ``padding_mode=\"zeros\"``: use ``0`` for out-of-bound grid locations,\n * ``padding_mode=\"border\"``: use border values for out-of-bound grid locations,\n * ``padding_mode=\"reflection\"``: use values at locations reflected by\n the border for out-of-bound grid locations. 
For location far away\n from the border, it will keep being reflected until becoming in bound,\n e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``\n and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes\n ``x'' = -0.5``.\n\n Note:\n This function is often used in conjunction with :func:`affine_grid`\n to build `Spatial Transformer Networks`_ .\n\n Note:\n When using the CUDA backend, this operation may induce nondeterministic\n behaviour in its backward pass that is not easily switched off.\n Please see the notes on :doc:`/notes/randomness` for background.\n\n Note:\n NaN values in :attr:`grid` would be interpreted as ``-1``.\n\n Args:\n input (Tensor): input of shape :math:`(N, C, H_\\text{in}, W_\\text{in})` (4-D case)\n or :math:`(N, C, D_\\text{in}, H_\\text{in}, W_\\text{in})` (5-D case)\n grid (Tensor): flow-field of shape :math:`(N, H_\\text{out}, W_\\text{out}, 2)` (4-D case)\n or :math:`(N, D_\\text{out}, H_\\text{out}, W_\\text{out}, 3)` (5-D case)\n mode (str): interpolation mode to calculate output values\n ``'bilinear'`` | ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'``\n Note: ``mode='bicubic'`` supports only 4-D input.\n When ``mode='bilinear'`` and the input is 5-D, the interpolation mode\n used internally will actually be trilinear. However, when the input is 4-D,\n the interpolation mode will legitimately be bilinear.\n padding_mode (str): padding mode for outside grid values\n ``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'``\n align_corners (bool, optional): Geometrically, we consider the pixels of the\n input as squares rather than points.\n If set to ``True``, the extrema (``-1`` and ``1``) are considered as referring\n to the center points of the input's corner pixels. If set to ``False``, they\n are instead considered as referring to the corner points of the input's corner\n pixels, making the sampling more resolution agnostic.\n This option parallels the ``align_corners`` option in\n :func:`interpolate`, and so whichever option is used here\n should also be used there to resize the input image before grid sampling.\n Default: ``False``\n\n Returns:\n output (Tensor): output Tensor\n\n .. _`Spatial Transformer Networks`:\n https://arxiv.org/abs/1506.02025\n\n .. warning::\n When ``align_corners = True``, the grid positions depend on the pixel\n size relative to the input image size, and so the locations sampled by\n :func:`grid_sample` will differ for the same input given at different\n resolutions (that is, after being upsampled or downsampled).\n The default behavior up to version 1.2.0 was ``align_corners = True``.\n Since then, the default behavior has been changed to ``align_corners = False``,\n in order to bring it in line with the default for :func:`interpolate`.\n\n .. note::\n ``mode='bicubic'`` is implemented using the `cubic convolution algorithm`_ with :math:`\\alpha=-0.75`.\n The constant :math:`\\alpha` might be different from packages to packages.\n For example, `PIL`_ and `OpenCV`_ use -0.5 and -0.75 respectively.\n This algorithm may \"overshoot\" the range of values it's interpolating.\n For example, it may produce negative values or values greater than 255 when interpolating input in [0, 255].\n Clamp the results with :func: `torch.clamp` to ensure they are within the valid range.\n .. _`cubic convolution algorithm`: https://en.wikipedia.org/wiki/Bicubic_interpolation\n .. 
_`PIL`: https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/src/libImaging/Resample.c#L51\n .. _`OpenCV`: https://github.com/opencv/opencv/blob/f345ed564a06178670750bad59526cfa4033be55/modules/imgproc/src/resize.cpp#L908\n \"\"\"\n if has_torch_function_variadic(input, grid):\n return handle_torch_function(\n grid_sample, (input, grid), input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners\n )\n if mode != \"bilinear\" and mode != \"nearest\" and mode != \"bicubic\":\n raise ValueError(\n \"nn.functional.grid_sample(): expected mode to be \"\n \"'bilinear', 'nearest' or 'bicubic', but got: '{}'\".format(mode)\n )\n if padding_mode != \"zeros\" and padding_mode != \"border\" and padding_mode != \"reflection\":\n raise ValueError(\n \"nn.functional.grid_sample(): expected padding_mode \"\n \"to be 'zeros', 'border', or 'reflection', \"\n \"but got: '{}'\".format(padding_mode)\n )\n\n if mode == \"bilinear\":\n mode_enum = 0\n elif mode == \"nearest\":\n mode_enum = 1\n else: # mode == 'bicubic'\n mode_enum = 2\n\n if padding_mode == \"zeros\":\n padding_mode_enum = 0\n elif padding_mode == \"border\":\n padding_mode_enum = 1\n else: # padding_mode == 'reflection'\n padding_mode_enum = 2\n\n if align_corners is None:\n warnings.warn(\n \"Default grid_sample and affine_grid behavior has changed \"\n \"to align_corners=False since 1.3.0. Please specify \"\n \"align_corners=True if the old behavior is desired. \"\n \"See the documentation of grid_sample for details.\"\n )\n align_corners = False\n\n return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)\n\n\ndef affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = None) -> Tensor:\n r\"\"\"Generates a 2D or 3D flow field (sampling grid), given a batch of\n affine matrices :attr:`theta`.\n\n .. note::\n This function is often used in conjunction with :func:`grid_sample`\n to build `Spatial Transformer Networks`_ .\n\n Args:\n theta (Tensor): input batch of affine matrices with shape\n (:math:`N \\times 2 \\times 3`) for 2D or\n (:math:`N \\times 3 \\times 4`) for 3D\n size (torch.Size): the target output image size.\n (:math:`N \\times C \\times H \\times W` for 2D or\n :math:`N \\times C \\times D \\times H \\times W` for 3D)\n Example: torch.Size((32, 3, 24, 24))\n align_corners (bool, optional): if ``True``, consider ``-1`` and ``1``\n to refer to the centers of the corner pixels rather than the image corners.\n Refer to :func:`grid_sample` for a more complete description.\n A grid generated by :func:`affine_grid` should be passed to :func:`grid_sample`\n with the same setting for this option.\n Default: ``False``\n\n Returns:\n output (Tensor): output Tensor of size (:math:`N \\times H \\times W \\times 2`)\n\n .. _`Spatial Transformer Networks`:\n https://arxiv.org/abs/1506.02025\n\n .. warning::\n When ``align_corners = True``, the grid positions depend on the pixel\n size relative to the input image size, and so the locations sampled by\n :func:`grid_sample` will differ for the same input given at different\n resolutions (that is, after being upsampled or downsampled).\n The default behavior up to version 1.2.0 was ``align_corners = True``.\n Since then, the default behavior has been changed to ``align_corners = False``,\n in order to bring it in line with the default for :func:`interpolate`.\n .. 
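note::\n        A small usage sketch pairing :func:`affine_grid` with :func:`grid_sample` (the\n        shapes below are illustrative only); an identity ``theta`` simply resamples the\n        input at its own pixel centers when ``align_corners=False``::\n\n            >>> theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])  # (N, 2, 3) batch of 2D affines\n            >>> grid = F.affine_grid(theta, size=(1, 1, 4, 4), align_corners=False)\n            >>> out = F.grid_sample(torch.rand(1, 1, 4, 4), grid, align_corners=False)\n\n    .. 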
warning::\n When ``align_corners = True``, 2D affine transforms on 1D data and\n 3D affine transforms on 2D data (that is, when one of the spatial\n dimensions has unit size) are ill-defined, and not an intended use case.\n This is not a problem when ``align_corners = False``.\n Up to version 1.2.0, all grid points along a unit dimension were\n considered arbitrarily to be at ``-1``.\n From version 1.3.0, under ``align_corners = True`` all grid points\n along a unit dimension are considered to be at ``0``\n (the center of the input image).\n \"\"\"\n if has_torch_function_unary(theta):\n return handle_torch_function(affine_grid, (theta,), theta, size, align_corners=align_corners)\n if align_corners is None:\n warnings.warn(\n \"Default grid_sample and affine_grid behavior has changed \"\n \"to align_corners=False since 1.3.0. Please specify \"\n \"align_corners=True if the old behavior is desired. \"\n \"See the documentation of grid_sample for details.\"\n )\n align_corners = False\n\n # enforce floating point dtype on theta\n if not theta.is_floating_point():\n raise ValueError(\"Expected theta to have floating point type, but got {}\".format(theta.dtype))\n # check that shapes and sizes match\n if len(size) == 4:\n if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3:\n raise ValueError(\n \"Expected a batch of 2D affine matrices of shape Nx2x3 \"\n \"for size {}. Got {}.\".format(size, theta.shape)\n )\n spatial_size = size[-2:] # spatial dimension sizes\n elif len(size) == 5:\n if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4:\n raise ValueError(\n \"Expected a batch of 3D affine matrices of shape Nx3x4 \"\n \"for size {}. Got {}.\".format(size, theta.shape)\n )\n spatial_size = size[-3:] # spatial dimension sizes\n else:\n raise NotImplementedError(\n \"affine_grid only supports 4D and 5D sizes, \"\n \"for 2D and 3D affine transforms, respectively. \"\n \"Got size {}.\".format(size)\n )\n # check for empty span\n if align_corners and min(spatial_size) == 1:\n warnings.warn(\n \"Since version 1.3.0, affine_grid behavior has changed \"\n \"for unit-size grids when align_corners=True. \"\n \"This is not an intended use case of affine_grid. \"\n \"See the documentation of affine_grid for details.\"\n )\n elif min(size) <= 0:\n raise ValueError(\"Expected non-zero, positive output size. 
Got {}\".format(size))\n\n return torch.affine_grid_generator(theta, size, align_corners)\n\n\ndef _pad(input: Tensor, pad: List[int], mode: str = \"constant\", value: float = 0.0) -> Tensor:\n r\"\"\"Pads tensor.\n\n Padding size:\n The padding size by which to pad some dimensions of :attr:`input`\n are described starting from the last dimension and moving forward.\n :math:`\\left\\lfloor\\frac{\\text{len(pad)}}{2}\\right\\rfloor` dimensions\n of ``input`` will be padded.\n For example, to pad only the last dimension of the input tensor, then\n :attr:`pad` has the form\n :math:`(\\text{padding\\_left}, \\text{padding\\_right})`;\n to pad the last 2 dimensions of the input tensor, then use\n :math:`(\\text{padding\\_left}, \\text{padding\\_right},`\n :math:`\\text{padding\\_top}, \\text{padding\\_bottom})`;\n to pad the last 3 dimensions, use\n :math:`(\\text{padding\\_left}, \\text{padding\\_right},`\n :math:`\\text{padding\\_top}, \\text{padding\\_bottom}`\n :math:`\\text{padding\\_front}, \\text{padding\\_back})`.\n\n Padding mode:\n See :class:`torch.nn.ConstantPad2d`, :class:`torch.nn.ReflectionPad2d`, and\n :class:`torch.nn.ReplicationPad2d` for concrete examples on how each of the\n padding modes works. Constant padding is implemented for arbitrary dimensions.\n Replicate and reflection padding is implemented for padding the last 3\n dimensions of 5D input tensor, or the last 2 dimensions of 4D input\n tensor, or the last dimension of 3D input tensor.\n\n Note:\n When using the CUDA backend, this operation may induce nondeterministic\n behaviour in its backward pass that is not easily switched off.\n Please see the notes on :doc:`/notes/randomness` for background.\n\n Args:\n input (Tensor): N-dimensional tensor\n pad (tuple): m-elements tuple, where\n :math:`\\frac{m}{2} \\leq` input dimensions and :math:`m` is even.\n mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.\n Default: ``'constant'``\n value: fill value for ``'constant'`` padding. 
Default: ``0``\n\n Examples::\n\n >>> t4d = torch.empty(3, 3, 4, 2)\n >>> p1d = (1, 1) # pad last dim by 1 on each side\n >>> out = F.pad(t4d, p1d, \"constant\", 0) # effectively zero padding\n >>> print(out.size())\n torch.Size([3, 3, 4, 4])\n >>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2)\n >>> out = F.pad(t4d, p2d, \"constant\", 0)\n >>> print(out.size())\n torch.Size([3, 3, 8, 4])\n >>> t4d = torch.empty(3, 3, 4, 2)\n >>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)\n >>> out = F.pad(t4d, p3d, \"constant\", 0)\n >>> print(out.size())\n torch.Size([3, 9, 7, 3])\n\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(_pad, (input,), input, pad, mode=mode, value=value)\n assert len(pad) % 2 == 0, \"Padding length must be divisible by 2\"\n assert len(pad) // 2 <= input.dim(), \"Padding length too large\"\n if mode == \"constant\":\n return _VF.constant_pad_nd(input, pad, value)\n else:\n assert value == 0.0, 'Padding mode \"{}\"\" doesn\\'t take in value argument'.format(mode)\n if len(pad) == 2 and (input.dim() == 2 or input.dim() == 3):\n if mode == \"reflect\":\n return torch._C._nn.reflection_pad1d(input, pad)\n elif mode == \"replicate\":\n return torch._C._nn.replication_pad1d(input, pad)\n elif mode == \"circular\":\n return _pad_circular(input, pad)\n else:\n raise NotImplementedError\n\n elif len(pad) == 4 and (input.dim() == 3 or input.dim() == 4):\n if mode == \"reflect\":\n return torch._C._nn.reflection_pad2d(input, pad)\n elif mode == \"replicate\":\n return torch._C._nn.replication_pad2d(input, pad)\n elif mode == \"circular\":\n return _pad_circular(input, pad)\n else:\n raise NotImplementedError\n\n elif len(pad) == 6 and (input.dim() == 4 or input.dim() == 5):\n if mode == \"reflect\":\n return torch._C._nn.reflection_pad3d(input, pad)\n elif mode == \"replicate\":\n return torch._C._nn.replication_pad3d(input, pad)\n elif mode == \"circular\":\n return _pad_circular(input, pad)\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError(\"Only 2D, 3D, 4D, 5D padding with non-constant padding are supported for now\")\n\n\n# We define this function as _pad because it takes an argument\n# named pad, which clobbers the recursive reference to the pad\n# function needed for __torch_function__ support\npad = _pad\n\n# distance\n\n\npairwise_distance = _add_docstr(\n torch.pairwise_distance,\n r\"\"\"\npairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor\n\nSee :class:`torch.nn.PairwiseDistance` for details\n\"\"\")\n\n\npdist = _add_docstr(\n torch.pdist,\n r\"\"\"\npdist(input, p=2) -> Tensor\n\nComputes the p-norm distance between every pair of row vectors in the input.\nThis is identical to the upper triangular portion, excluding the diagonal, of\n`torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster\nif the rows are contiguous.\n\nIf input has shape :math:`N \\times M` then the output will have shape\n:math:`\\frac{1}{2} N (N - 1)`.\n\nThis function is equivalent to `scipy.spatial.distance.pdist(input,\n'minkowski', p=p)` if :math:`p \\in (0, \\infty)`. 
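For example, an input of shape :math:`4 \times M` yields :math:`\frac{1}{2} \cdot 4 \cdot 3 = 6` distances, one for each unordered pair of rows, ordered as (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3). 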
When :math:`p = 0` it is\nequivalent to `scipy.spatial.distance.pdist(input, 'hamming') * M`.\nWhen :math:`p = \\infty`, the closest scipy function is\n`scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())`.\n\nArgs:\n input: input tensor of shape :math:`N \\times M`.\n p: p value for the p-norm distance to calculate between each vector pair\n :math:`\\in [0, \\infty]`.\n\"\"\",\n)\n\n\ncosine_similarity = _add_docstr(\n torch.cosine_similarity,\n r\"\"\"\ncosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor\n\nReturns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable\nto a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is\nsqueezed (see :func:`torch.squeeze`), resulting in the\noutput tensor having 1 fewer dimension.\n\n.. math ::\n \\text{similarity} = \\dfrac{x_1 \\cdot x_2}{\\max(\\Vert x_1 \\Vert _2 \\cdot \\Vert x_2 \\Vert _2, \\epsilon)}\n\nSupports :ref:`type promotion <type-promotion-doc>`.\n\nArgs:\n x1 (Tensor): First input.\n x2 (Tensor): Second input.\n dim (int, optional): Dimension along which cosine similarity is computed. Default: 1\n eps (float, optional): Small value to avoid division by zero.\n Default: 1e-8\n\nExample::\n\n >>> input1 = torch.randn(100, 128)\n >>> input2 = torch.randn(100, 128)\n >>> output = F.cosine_similarity(input1, input2)\n >>> print(output)\n\"\"\",\n)\n\n\none_hot = _add_docstr(\n torch._C._nn.one_hot,\n r\"\"\"\none_hot(tensor, num_classes=-1) -> LongTensor\n\nTakes LongTensor with index values of shape ``(*)`` and returns a tensor\nof shape ``(*, num_classes)`` that have zeros everywhere except where the\nindex of last dimension matches the corresponding value of the input tensor,\nin which case it will be 1.\n\nSee also `One-hot on Wikipedia`_ .\n\n.. _One-hot on Wikipedia:\n https://en.wikipedia.org/wiki/One-hot\n\nArguments:\n tensor (LongTensor): class values of any shape.\n num_classes (int): Total number of classes. 
If set to -1, the number\n of classes will be inferred as one greater than the largest class\n value in the input tensor.\n\nReturns:\n LongTensor that has one more dimension with 1 values at the\n index of last dimension indicated by the input, and 0 everywhere\n else.\n\nExamples:\n >>> F.one_hot(torch.arange(0, 5) % 3)\n tensor([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]])\n >>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5)\n tensor([[1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0]])\n >>> F.one_hot(torch.arange(0, 6).view(3,2) % 3)\n tensor([[[1, 0, 0],\n [0, 1, 0]],\n [[0, 0, 1],\n [1, 0, 0]],\n [[0, 1, 0],\n [0, 0, 1]]])\n\"\"\",\n)\n\n\ndef triplet_margin_loss(\n anchor: Tensor,\n positive: Tensor,\n negative: Tensor,\n margin: float = 1.0,\n p: float = 2,\n eps: float = 1e-6,\n swap: bool = False,\n size_average: Optional[bool] = None,\n reduce: Optional[bool] = None,\n reduction: str = \"mean\",\n) -> Tensor:\n r\"\"\"\n See :class:`~torch.nn.TripletMarginLoss` for details\n \"\"\"\n if has_torch_function_variadic(anchor, positive, negative):\n return handle_torch_function(\n triplet_margin_loss,\n (anchor, positive, negative),\n anchor,\n positive,\n negative,\n margin=margin,\n p=p,\n eps=eps,\n swap=swap,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n )\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction_enum)\n\n\ndef triplet_margin_with_distance_loss(\n anchor: Tensor,\n positive: Tensor,\n negative: Tensor,\n *,\n distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,\n margin: float = 1.0,\n swap: bool = False,\n reduction: str = \"mean\"\n) -> Tensor:\n r\"\"\"\n See :class:`~torch.nn.TripletMarginWithDistanceLoss` for details.\n \"\"\"\n if torch.jit.is_scripting():\n raise NotImplementedError(\n \"F.triplet_margin_with_distance_loss does not support JIT scripting: \"\n \"functions requiring Callables cannot be scripted.\"\n )\n\n if has_torch_function_variadic(anchor, positive, negative):\n return handle_torch_function(\n triplet_margin_with_distance_loss,\n (anchor, positive, negative),\n anchor,\n positive,\n negative,\n distance_function=distance_function,\n margin=margin,\n swap=swap,\n reduction=reduction,\n )\n\n distance_function = distance_function if distance_function is not None else pairwise_distance\n\n positive_dist = distance_function(anchor, positive)\n negative_dist = distance_function(anchor, negative)\n\n if swap:\n swap_dist = distance_function(positive, negative)\n negative_dist = torch.min(negative_dist, swap_dist)\n\n output = torch.clamp(positive_dist - negative_dist + margin, min=0.0)\n\n reduction_enum = _Reduction.get_enum(reduction)\n if reduction_enum == 1:\n return output.mean()\n elif reduction_enum == 2:\n return output.sum()\n else:\n return output\n\n\ndef normalize(input: Tensor, p: float = 2.0, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor:\n r\"\"\"Performs :math:`L_p` normalization of inputs over specified dimension.\n\n For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each\n :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as\n\n .. 
math::\n v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}.\n\n With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.\n\n Args:\n input: input tensor of any shape\n p (float): the exponent value in the norm formulation. Default: 2\n dim (int): the dimension to reduce. Default: 1\n eps (float): small value to avoid division by zero. Default: 1e-12\n out (Tensor, optional): the output tensor. If :attr:`out` is used, this\n operation won't be differentiable.\n \"\"\"\n if has_torch_function_variadic(input, out):\n return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out)\n if out is None:\n denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n return input / denom\n else:\n denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input)\n return torch.div(input, denom, out=out)\n\n\ndef assert_int_or_pair(arg: List[int], arg_name: str, message: str) -> None:\n assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)\n\n\ndef unfold(\n input: Tensor, kernel_size: BroadcastingList2[int],\n dilation: BroadcastingList2[int] = 1,\n padding: BroadcastingList2[int] = 0,\n stride: BroadcastingList2[int] = 1\n) -> Tensor:\n r\"\"\"Extracts sliding local blocks from a batched input tensor.\n\n .. warning::\n Currently, only 4-D input tensors (batched image-like tensors) are\n supported.\n\n .. warning::\n\n More than one element of the unfolded tensor may refer to a single\n memory location. As a result, in-place operations (especially ones that\n are vectorized) may result in incorrect behavior. If you need to write\n to the tensor, please clone it first.\n\n\n See :class:`torch.nn.Unfold` for details\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n unfold, (input,), input, kernel_size, dilation=dilation, padding=padding, stride=stride\n )\n if input.dim() == 4:\n msg = \"{} must be int or 2-tuple for 4D input\"\n assert_int_or_pair(kernel_size, \"kernel_size\", msg)\n assert_int_or_pair(dilation, \"dilation\", msg)\n assert_int_or_pair(padding, \"padding\", msg)\n assert_int_or_pair(stride, \"stride\", msg)\n\n return torch._C._nn.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))\n else:\n raise NotImplementedError(\"Input Error: Only 4D input Tensors are supported (got {}D)\".format(input.dim()))\n\n\ndef fold(\n input: Tensor, output_size: BroadcastingList2[int],\n kernel_size: BroadcastingList2[int],\n dilation: BroadcastingList2[int] = 1,\n padding: BroadcastingList2[int] = 0,\n stride: BroadcastingList2[int] = 1\n) -> Tensor:\n r\"\"\"Combines an array of sliding local blocks into a large containing\n tensor.\n\n .. 
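note::\n        A minimal round-trip sketch (shapes below are illustrative only). Because\n        overlapping blocks are summed, ``fold(unfold(x))`` scales each position of ``x``\n        by the number of blocks covering it rather than reproducing ``x`` exactly::\n\n            >>> x = torch.randn(1, 3, 10, 12)\n            >>> blocks = F.unfold(x, kernel_size=(4, 5))  # (1, 3 * 4 * 5, 56)\n            >>> y = F.fold(blocks, output_size=(10, 12), kernel_size=(4, 5))  # same shape as x\n\n    .. 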
warning::\n Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.\n\n See :class:`torch.nn.Fold` for details\n \"\"\"\n if has_torch_function_unary(input):\n return handle_torch_function(\n fold, (input,), input, output_size, kernel_size, dilation=dilation, padding=padding, stride=stride\n )\n if input.dim() == 3 or input.dim() == 2:\n msg = \"{} must be int or 2-tuple for 3D input\"\n assert_int_or_pair(output_size, \"output_size\", msg)\n assert_int_or_pair(kernel_size, \"kernel_size\", msg)\n assert_int_or_pair(dilation, \"dilation\", msg)\n assert_int_or_pair(padding, \"padding\", msg)\n assert_int_or_pair(stride, \"stride\", msg)\n\n return torch._C._nn.col2im(\n input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride)\n )\n else:\n raise NotImplementedError(\"Input Error: Only unbatched (2D) or batched (3D) input Tensors\"\n f\"are supported (got {input.dim()}D)\")\n\n\ndef _pad_circular(input: Tensor, padding: List[int]) -> Tensor:\n \"\"\"Circularly pads tensor.\n\n Tensor values at the beginning are used to pad the end, and values at the\n end are used to pad the beginning. For example, consider a single dimension\n with values [0, 1, 2, 3]. With circular padding of (1, 1) it would be\n padded to [3, 0, 1, 2, 3, 0], and with padding (1, 2) it would be padded to\n [3, 0, 1, 2, 3, 0, 1]. If negative padding is applied then the ends of the\n tensor get removed. With circular padding of (-1, -1) the previous example\n would become [1, 2]. Circular padding of (-1, 1) would produce\n [1, 2, 3, 1].\n\n The first and second dimensions of the tensor are not padded.\n\n Args:\n input: Tensor with shape :math:`(N, C, D[, H, W])`.\n padding: Tuple containing the number of elements to pad each side of\n the tensor. The length of padding must be twice the number of\n paddable dimensions. 
For example, the length of padding should be 4\n for a tensor of shape :math:`(N, C, H, W)`, and the length should\n be 6 for a tensor of shape :math:`(N, C, D, H, W)`.\n\n Examples::\n\n >>> x = torch.tensor([[[[0, 1, 2], [3, 4, 5]]]]) # Create tensor\n >>> # Example 1\n >>> padding = (1, 1, 1, 1)\n >>> y = F.pad(x, padding, mode='circular')\n >>> print(y)\n tensor([[[[5, 3, 4, 5, 3],\n [2, 0, 1, 2, 0],\n [5, 3, 4, 5, 3],\n [2, 0, 1, 2, 0]]]])\n >>> print(y.shape)\n torch.Size([1, 1, 4, 5])\n >>> # Example 2\n >>> padding = (1, 1, 2, 2)\n >>> z = F.pad(x, padding, mode='circular')\n >>> print(z)\n tensor([[[[2, 0, 1, 2, 0],\n [5, 3, 4, 5, 3],\n [2, 0, 1, 2, 0],\n [5, 3, 4, 5, 3],\n [2, 0, 1, 2, 0],\n [5, 3, 4, 5, 3]]]])\n >>> print(z.shape)\n torch.Size([1, 1, 6, 5])\n \"\"\"\n in_shape = input.shape\n paddable_shape = in_shape[2:]\n ndim = len(paddable_shape)\n\n for idx, size in enumerate(paddable_shape):\n # Only supports wrapping around once\n assert padding[-(idx * 2 + 1)] <= size, \"Padding value causes wrapping around more than once.\"\n assert padding[-(idx * 2 + 2)] <= size, \"Padding value causes wrapping around more than once.\"\n # Negative padding should not result in negative sizes\n assert (\n padding[-(idx * 2 + 1)] + padding[-(idx * 2 + 2)] + size >= 0\n ), \"Negative padding value is resulting in an empty dimension.\"\n\n # Get shape of padded tensor\n out_shape = in_shape[:2]\n for idx, size in enumerate(paddable_shape):\n out_shape += (size + padding[-(idx * 2 + 1)] + padding[-(idx * 2 + 2)],)\n\n out = input.new_empty(out_shape)\n\n # Put original array in padded array\n if ndim == 1:\n out_d0 = max(padding[-2], 0)\n out_d1 = out_shape[2] - max(padding[-1], 0)\n\n in_d0 = max(-padding[-2], 0)\n in_d1 = in_shape[2] - max(-padding[-1], 0)\n\n out[..., out_d0:out_d1] = input[..., in_d0:in_d1]\n elif ndim == 2:\n out_d0 = max(padding[-2], 0)\n out_d1 = out_shape[2] - max(padding[-1], 0)\n\n out_h0 = max(padding[-4], 0)\n out_h1 = out_shape[3] - max(padding[-3], 0)\n\n in_d0 = max(-padding[-2], 0)\n in_d1 = in_shape[2] - max(-padding[-1], 0)\n\n in_h0 = max(-padding[-4], 0)\n in_h1 = in_shape[3] - max(-padding[-3], 0)\n\n out[..., out_d0:out_d1, out_h0:out_h1] = input[..., in_d0:in_d1, in_h0:in_h1]\n elif ndim == 3:\n out_d0 = max(padding[-2], 0)\n out_d1 = out_shape[2] - max(padding[-1], 0)\n\n out_h0 = max(padding[-4], 0)\n out_h1 = out_shape[3] - max(padding[-3], 0)\n\n out_w0 = max(padding[-6], 0)\n out_w1 = out_shape[4] - max(padding[-5], 0)\n\n in_d0 = max(-padding[-2], 0)\n in_d1 = in_shape[2] - max(-padding[-1], 0)\n\n in_h0 = max(-padding[-4], 0)\n in_h1 = in_shape[3] - max(-padding[-3], 0)\n\n in_w0 = max(-padding[-6], 0)\n in_w1 = in_shape[4] - max(-padding[-5], 0)\n\n out[..., out_d0:out_d1, out_h0:out_h1, out_w0:out_w1] = input[..., in_d0:in_d1, in_h0:in_h1, in_w0:in_w1]\n\n # The following steps first pad the beginning of the tensor (left side),\n # and then pad the end of the tensor (right side).\n # Note: Corners will be written more than once when ndim > 1.\n\n # Only in cases where padding values are > 0 are when additional copying\n # is required.\n\n # Pad first dimension (depth)\n if padding[-2] > 0:\n i0 = out_shape[2] - padding[-2] - max(padding[-1], 0)\n i1 = out_shape[2] - max(padding[-1], 0)\n o0 = 0\n o1 = padding[-2]\n out[:, :, o0:o1] = out[:, :, i0:i1]\n if padding[-1] > 0:\n i0 = max(padding[-2], 0)\n i1 = max(padding[-2], 0) + padding[-1]\n o0 = out_shape[2] - padding[-1]\n o1 = out_shape[2]\n out[:, :, o0:o1] = out[:, :, i0:i1]\n\n # Pad 
second dimension (height)\n if len(padding) > 2:\n if padding[-4] > 0:\n i0 = out_shape[3] - padding[-4] - max(padding[-3], 0)\n i1 = out_shape[3] - max(padding[-3], 0)\n o0 = 0\n o1 = padding[-4]\n out[:, :, :, o0:o1] = out[:, :, :, i0:i1]\n if padding[-3] > 0:\n i0 = max(padding[-4], 0)\n i1 = max(padding[-4], 0) + padding[-3]\n o0 = out_shape[3] - padding[-3]\n o1 = out_shape[3]\n out[:, :, :, o0:o1] = out[:, :, :, i0:i1]\n\n # Pad third dimension (width)\n if len(padding) > 4:\n if padding[-6] > 0:\n i0 = out_shape[4] - padding[-6] - max(padding[-5], 0)\n i1 = out_shape[4] - max(padding[-5], 0)\n o0 = 0\n o1 = padding[-6]\n out[:, :, :, :, o0:o1] = out[:, :, :, :, i0:i1]\n if padding[-5] > 0:\n i0 = max(padding[-6], 0)\n i1 = max(padding[-6], 0) + padding[-5]\n o0 = out_shape[4] - padding[-5]\n o1 = out_shape[4]\n out[:, :, :, :, o0:o1] = out[:, :, :, :, i0:i1]\n\n return out\n\n#\n# multihead attention\n#\n\ndef _in_projection_packed(\n q: Tensor,\n k: Tensor,\n v: Tensor,\n w: Tensor,\n b: Optional[Tensor] = None,\n) -> List[Tensor]:\n r\"\"\"\n Performs the in-projection step of the attention operation, using packed weights.\n Output is a triple containing projection tensors for query, key and value.\n\n Args:\n q, k, v: query, key and value tensors to be projected. For self-attention,\n these are typically the same tensor; for encoder-decoder attention,\n k and v are typically the same tensor. (We take advantage of these\n identities for performance if they are present.) Regardless, q, k and v\n must share a common embedding dimension; otherwise their shapes may vary.\n w: projection weights for q, k and v, packed into a single tensor. Weights\n are packed along dimension 0, in q, k, v order.\n b: optional projection biases for q, k and v, packed into a single tensor\n in q, k, v order.\n\n Shape:\n Inputs:\n - q: :math:`(..., E)` where E is the embedding dimension\n - k: :math:`(..., E)` where E is the embedding dimension\n - v: :math:`(..., E)` where E is the embedding dimension\n - w: :math:`(E * 3, E)` where E is the embedding dimension\n - b: :math:`E * 3` where E is the embedding dimension\n\n Output:\n - in output list :math:`[q', k', v']`, each output tensor will have the\n same shape as the corresponding input tensor.\n \"\"\"\n E = q.size(-1)\n if k is v:\n if q is k:\n # self-attention\n return linear(q, w, b).chunk(3, dim=-1)\n else:\n # encoder-decoder attention\n w_q, w_kv = w.split([E, E * 2])\n if b is None:\n b_q = b_kv = None\n else:\n b_q, b_kv = b.split([E, E * 2])\n return (linear(q, w_q, b_q),) + linear(k, w_kv, b_kv).chunk(2, dim=-1)\n else:\n w_q, w_k, w_v = w.chunk(3)\n if b is None:\n b_q = b_k = b_v = None\n else:\n b_q, b_k, b_v = b.chunk(3)\n return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)\n\n\ndef _in_projection(\n q: Tensor,\n k: Tensor,\n v: Tensor,\n w_q: Tensor,\n w_k: Tensor,\n w_v: Tensor,\n b_q: Optional[Tensor] = None,\n b_k: Optional[Tensor] = None,\n b_v: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Performs the in-projection step of the attention operation. 
This is simply\n a triple of linear projections, with shape constraints on the weights which\n ensure embedding dimension uniformity in the projected outputs.\n Output is a triple containing projection tensors for query, key and value.\n\n Args:\n q, k, v: query, key and value tensors to be projected.\n w_q, w_k, w_v: weights for q, k and v, respectively.\n b_q, b_k, b_v: optional biases for q, k and v, respectively.\n\n Shape:\n Inputs:\n - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any\n number of leading dimensions.\n - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any\n number of leading dimensions.\n - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any\n number of leading dimensions.\n - w_q: :math:`(Eq, Eq)`\n - w_k: :math:`(Eq, Ek)`\n - w_v: :math:`(Eq, Ev)`\n - b_q: :math:`(Eq)`\n - b_k: :math:`(Eq)`\n - b_v: :math:`(Eq)`\n\n Output: in output triple :math:`(q', k', v')`,\n - q': :math:`[Qdims..., Eq]`\n - k': :math:`[Kdims..., Eq]`\n - v': :math:`[Vdims..., Eq]`\n\n \"\"\"\n Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)\n assert w_q.shape == (Eq, Eq), f\"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}\"\n assert w_k.shape == (Eq, Ek), f\"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}\"\n assert w_v.shape == (Eq, Ev), f\"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}\"\n assert b_q is None or b_q.shape == (Eq,), f\"expecting query bias shape of {(Eq,)}, but got {b_q.shape}\"\n assert b_k is None or b_k.shape == (Eq,), f\"expecting key bias shape of {(Eq,)}, but got {b_k.shape}\"\n assert b_v is None or b_v.shape == (Eq,), f\"expecting value bias shape of {(Eq,)}, but got {b_v.shape}\"\n return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)\n\n\ndef _scaled_dot_product_attention(\n q: Tensor,\n k: Tensor,\n v: Tensor,\n attn_mask: Optional[Tensor] = None,\n dropout_p: float = 0.0,\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"\n Computes scaled dot product attention on query, key and value tensors, using\n an optional attention mask if passed, and applying dropout if a probability\n greater than 0.0 is specified.\n Returns a tensor pair containing attended values and attention weights.\n\n Args:\n q, k, v: query, key and value tensors. See Shape section for shape details.\n attn_mask: optional tensor containing mask values to be added to calculated\n attention. May be 2D or 3D; see Shape section for details.\n dropout_p: dropout probability. 
If greater than 0.0, dropout is applied.\n\n Shape:\n - q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,\n and E is embedding dimension.\n - key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,\n and E is embedding dimension.\n - value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,\n and E is embedding dimension.\n - attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of\n shape :math:`(Nt, Ns)`.\n\n - Output: attention values have shape :math:`(B, Nt, E)`; attention weights\n have shape :math:`(B, Nt, Ns)`\n \"\"\"\n B, Nt, E = q.shape\n q = q / math.sqrt(E)\n # (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)\n attn = torch.bmm(q, k.transpose(-2, -1))\n if attn_mask is not None:\n attn += attn_mask\n attn = softmax(attn, dim=-1)\n if dropout_p > 0.0:\n attn = dropout(attn, p=dropout_p)\n # (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)\n output = torch.bmm(attn, v)\n return output, attn\n\n\ndef _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,\n key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):\n # Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask`\n # and returns if the input is batched or not.\n # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.\n\n # Shape check.\n if query.dim() == 3:\n # Batched Inputs\n is_batched = True\n assert key.dim() == 3 and value.dim() == 3, \\\n (\"For batched (3-D) `query`, expected `key` and `value` to be 3-D\"\n f\" but found {key.dim()}-D and {value.dim()}-D tensors respectively\")\n if key_padding_mask is not None:\n assert key_padding_mask.dim() == 2, \\\n (\"For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D\"\n f\" but found {key_padding_mask.dim()}-D tensor instead\")\n if attn_mask is not None:\n assert attn_mask.dim() in (2, 3), \\\n (\"For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D\"\n f\" but found {attn_mask.dim()}-D tensor instead\")\n elif query.dim() == 2:\n # Unbatched Inputs\n is_batched = False\n assert key.dim() == 2 and value.dim() == 2, \\\n (\"For unbatched (2-D) `query`, expected `key` and `value` to be 2-D\"\n f\" but found {key.dim()}-D and {value.dim()}-D tensors respectively\")\n\n if key_padding_mask is not None:\n assert key_padding_mask.dim() == 1, \\\n (\"For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D\"\n f\" but found {key_padding_mask.dim()}-D tensor instead\")\n\n if attn_mask is not None:\n assert attn_mask.dim() in (2, 3), \\\n (\"For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D\"\n f\" but found {attn_mask.dim()}-D tensor instead\")\n if attn_mask.dim() == 3:\n expected_shape = (num_heads, query.shape[0], key.shape[0])\n assert attn_mask.shape == expected_shape, \\\n (f\"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}\")\n else:\n raise AssertionError(\n f\"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor\")\n\n return is_batched\n\ndef multi_head_attention_forward(\n query: Tensor,\n key: Tensor,\n value: Tensor,\n embed_dim_to_check: int,\n num_heads: int,\n in_proj_weight: Tensor,\n in_proj_bias: Optional[Tensor],\n bias_k: Optional[Tensor],\n bias_v: Optional[Tensor],\n add_zero_attn: bool,\n dropout_p: float,\n out_proj_weight: Tensor,\n out_proj_bias: Optional[Tensor],\n training: bool = True,\n key_padding_mask: Optional[Tensor] = None,\n 
need_weights: bool = True,\n attn_mask: Optional[Tensor] = None,\n use_separate_proj_weight: bool = False,\n q_proj_weight: Optional[Tensor] = None,\n k_proj_weight: Optional[Tensor] = None,\n v_proj_weight: Optional[Tensor] = None,\n static_k: Optional[Tensor] = None,\n static_v: Optional[Tensor] = None,\n average_attn_weights: bool = True,\n) -> Tuple[Tensor, Optional[Tensor]]:\n r\"\"\"\n Args:\n query, key, value: map a query and a set of key-value pairs to an output.\n See \"Attention Is All You Need\" for more details.\n embed_dim_to_check: total dimension of the model.\n num_heads: parallel attention heads.\n in_proj_weight, in_proj_bias: input projection weight and bias.\n bias_k, bias_v: bias of the key and value sequences to be added at dim=0.\n add_zero_attn: add a new batch of zeros to the key and\n value sequences at dim=1.\n dropout_p: probability of an element to be zeroed.\n out_proj_weight, out_proj_bias: the output projection weight and bias.\n training: apply dropout if is ``True``.\n key_padding_mask: if provided, specified padding elements in the key will\n be ignored by the attention. This is an binary mask. When the value is True,\n the corresponding value on the attention layer will be filled with -inf.\n need_weights: output attn_output_weights.\n attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all\n the batches while a 3D mask allows to specify a different mask for the entries of each batch.\n use_separate_proj_weight: the function accept the proj. weights for query, key,\n and value in different forms. If false, in_proj_weight will be used, which is\n a combination of q_proj_weight, k_proj_weight, v_proj_weight.\n q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.\n static_k, static_v: static key and value used for attention operators.\n average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.\n Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect\n when ``need_weights=True.``. Default: True\n\n\n Shape:\n Inputs:\n - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is\n the embedding dimension.\n - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.\n If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions\n will be unchanged. If a BoolTensor is provided, the positions with the\n value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.\n - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.\n 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,\n S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked\n positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend\n while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``\n are not allowed to attend while ``False`` values will be unchanged. 
If a FloatTensor\n is provided, it will be added to the attention weight.\n - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,\n N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.\n - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,\n N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.\n\n Outputs:\n - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,\n E is the embedding dimension.\n - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns\n attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or\n :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and\n :math:`S` is the source sequence length. If ``average_weights=False``, returns attention weights per\n head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.\n \"\"\"\n tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)\n if has_torch_function(tens_ops):\n return handle_torch_function(\n multi_head_attention_forward,\n tens_ops,\n query,\n key,\n value,\n embed_dim_to_check,\n num_heads,\n in_proj_weight,\n in_proj_bias,\n bias_k,\n bias_v,\n add_zero_attn,\n dropout_p,\n out_proj_weight,\n out_proj_bias,\n training=training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask,\n use_separate_proj_weight=use_separate_proj_weight,\n q_proj_weight=q_proj_weight,\n k_proj_weight=k_proj_weight,\n v_proj_weight=v_proj_weight,\n static_k=static_k,\n static_v=static_v,\n )\n\n is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)\n\n # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input\n # is batched, run the computation and before returning squeeze the\n # batch dimension so that the output doesn't carry this temporary batch dimension.\n if not is_batched:\n # unsqueeze if the input is unbatched\n query = query.unsqueeze(1)\n key = key.unsqueeze(1)\n value = value.unsqueeze(1)\n if key_padding_mask is not None:\n key_padding_mask = key_padding_mask.unsqueeze(0)\n\n # set up shape vars\n tgt_len, bsz, embed_dim = query.shape\n src_len, _, _ = key.shape\n assert embed_dim == embed_dim_to_check, \\\n f\"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}\"\n if isinstance(embed_dim, torch.Tensor):\n # embed_dim can be a tensor when JIT tracing\n head_dim = embed_dim.div(num_heads, rounding_mode='trunc')\n else:\n head_dim = embed_dim // num_heads\n assert head_dim * num_heads == embed_dim, f\"embed_dim {embed_dim} not divisible by num_heads {num_heads}\"\n if use_separate_proj_weight:\n # allow MHA to have different embedding dimensions when separate projection weights are used\n assert key.shape[:2] == value.shape[:2], \\\n f\"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}\"\n else:\n assert key.shape == value.shape, f\"key shape {key.shape} does not match value shape {value.shape}\"\n\n #\n # compute in-projection\n #\n if not use_separate_proj_weight:\n q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)\n else:\n assert q_proj_weight is not None, \"use_separate_proj_weight is True but q_proj_weight is None\"\n assert 
k_proj_weight is not None, \"use_separate_proj_weight is True but k_proj_weight is None\"\n assert v_proj_weight is not None, \"use_separate_proj_weight is True but v_proj_weight is None\"\n if in_proj_bias is None:\n b_q = b_k = b_v = None\n else:\n b_q, b_k, b_v = in_proj_bias.chunk(3)\n q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)\n\n # prep attention mask\n if attn_mask is not None:\n if attn_mask.dtype == torch.uint8:\n warnings.warn(\"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.\")\n attn_mask = attn_mask.to(torch.bool)\n else:\n assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \\\n f\"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}\"\n # ensure attn_mask's dim is 3\n if attn_mask.dim() == 2:\n correct_2d_size = (tgt_len, src_len)\n if attn_mask.shape != correct_2d_size:\n raise RuntimeError(f\"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.\")\n attn_mask = attn_mask.unsqueeze(0)\n elif attn_mask.dim() == 3:\n correct_3d_size = (bsz * num_heads, tgt_len, src_len)\n if attn_mask.shape != correct_3d_size:\n raise RuntimeError(f\"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.\")\n else:\n raise RuntimeError(f\"attn_mask's dimension {attn_mask.dim()} is not supported\")\n\n # prep key padding mask\n if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:\n warnings.warn(\"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.\")\n key_padding_mask = key_padding_mask.to(torch.bool)\n\n # add bias along batch dimension (currently second)\n if bias_k is not None and bias_v is not None:\n assert static_k is None, \"bias cannot be added to static key.\"\n assert static_v is None, \"bias cannot be added to static value.\"\n k = torch.cat([k, bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = pad(attn_mask, (0, 1))\n if key_padding_mask is not None:\n key_padding_mask = pad(key_padding_mask, (0, 1))\n else:\n assert bias_k is None\n assert bias_v is None\n\n #\n # reshape q, k, v for multihead attention and make em batch first\n #\n q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)\n if static_k is None:\n k = k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)\n else:\n # TODO finish disentangling control flow so we don't do in-projections when statics are passed\n assert static_k.size(0) == bsz * num_heads, \\\n f\"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}\"\n assert static_k.size(2) == head_dim, \\\n f\"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}\"\n k = static_k\n if static_v is None:\n v = v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)\n else:\n # TODO finish disentangling control flow so we don't do in-projections when statics are passed\n assert static_v.size(0) == bsz * num_heads, \\\n f\"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}\"\n assert static_v.size(2) == head_dim, \\\n f\"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}\"\n v = static_v\n\n # add zero attention along batch dimension (now first)\n if add_zero_attn:\n zero_attn_shape = (bsz * num_heads, 1, head_dim)\n k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, 
device=k.device)], dim=1)\n v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)\n if attn_mask is not None:\n attn_mask = pad(attn_mask, (0, 1))\n if key_padding_mask is not None:\n key_padding_mask = pad(key_padding_mask, (0, 1))\n\n # update source sequence length after adjustments\n src_len = k.size(1)\n\n # merge key padding and attention masks\n if key_padding_mask is not None:\n assert key_padding_mask.shape == (bsz, src_len), \\\n f\"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}\"\n key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \\\n expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)\n if attn_mask is None:\n attn_mask = key_padding_mask\n elif attn_mask.dtype == torch.bool:\n attn_mask = attn_mask.logical_or(key_padding_mask)\n else:\n attn_mask = attn_mask.masked_fill(key_padding_mask, float(\"-inf\"))\n\n # convert mask to float\n if attn_mask is not None and attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n\n # adjust dropout probability\n if not training:\n dropout_p = 0.0\n\n #\n # (deep breath) calculate attention and out projection\n #\n attn_output, attn_output_weights = _scaled_dot_product_attention(q, k, v, attn_mask, dropout_p)\n attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)\n attn_output = linear(attn_output, out_proj_weight, out_proj_bias)\n attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))\n\n if need_weights:\n # optionally average attention weights over heads\n attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)\n if average_attn_weights:\n attn_output_weights = attn_output_weights.sum(dim=1) / num_heads\n\n if not is_batched:\n # squeeze the output if input was unbatched\n attn_output = attn_output.squeeze(1)\n attn_output_weights = attn_output_weights.squeeze(0)\n return attn_output, attn_output_weights\n else:\n if not is_batched:\n # squeeze the output if input was unbatched\n attn_output = attn_output.squeeze(1)\n return attn_output, None\n", "import warnings\nfrom collections import OrderedDict, abc as container_abcs\nfrom itertools import chain, islice\nimport operator\n\nimport torch\nfrom .module import Module\nfrom ..parameter import Parameter\nfrom torch._jit_internal import _copy_to_script_wrapper\n\nfrom typing import Any, Dict, Iterable, Iterator, Mapping, Optional, overload, Tuple, TypeVar, Union\n\nT = TypeVar('T', bound=Module)\n\n\nclass Container(Module):\n\n def __init__(self, **kwargs: Any) -> None:\n super(Container, self).__init__()\n # DeprecationWarning is ignored by default <sigh>\n warnings.warn(\"nn.Container is deprecated. All of it's functionality \"\n \"is now implemented in nn.Module. Subclass that instead.\")\n for key, value in kwargs.items():\n self.add_module(key, value)\n\n\nclass Sequential(Module):\n r\"\"\"A sequential container.\n Modules will be added to it in the order they are passed in the\n constructor. Alternatively, an ``OrderedDict`` of modules can be\n passed in. The ``forward()`` method of ``Sequential`` accepts any\n input and forwards it to the first module it contains. 
It then\n \"chains\" outputs to inputs sequentially for each subsequent module,\n finally returning the output of the last module.\n\n The value a ``Sequential`` provides over manually calling a sequence\n of modules is that it allows treating the whole container as a\n single module, such that performing a transformation on the\n ``Sequential`` applies to each of the modules it stores (which are\n each a registered submodule of the ``Sequential``).\n\n What's the difference between a ``Sequential`` and a\n :class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it\n sounds like--a list for storing ``Module`` s! On the other hand,\n the layers in a ``Sequential`` are connected in a cascading way.\n\n Example::\n\n # Using Sequential to create a small model. When `model` is run,\n # input will first be passed to `Conv2d(1,20,5)`. The output of\n # `Conv2d(1,20,5)` will be used as the input to the first\n # `ReLU`; the output of the first `ReLU` will become the input\n # for `Conv2d(20,64,5)`. Finally, the output of\n # `Conv2d(20,64,5)` will be used as input to the second `ReLU`\n model = nn.Sequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n\n # Using Sequential with OrderedDict. This is functionally the\n # same as the above code\n model = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n \"\"\"\n\n _modules: Dict[str, Module] # type: ignore[assignment]\n\n @overload\n def __init__(self, *args: Module) -> None:\n ...\n\n @overload\n def __init__(self, arg: 'OrderedDict[str, Module]') -> None:\n ...\n\n def __init__(self, *args):\n super(Sequential, self).__init__()\n if len(args) == 1 and isinstance(args[0], OrderedDict):\n for key, module in args[0].items():\n self.add_module(key, module)\n else:\n for idx, module in enumerate(args):\n self.add_module(str(idx), module)\n\n def _get_item_by_idx(self, iterator, idx) -> T:\n \"\"\"Get the idx-th item of the iterator\"\"\"\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(islice(iterator, idx, None))\n\n @_copy_to_script_wrapper\n def __getitem__(self, idx) -> Union['Sequential', T]:\n if isinstance(idx, slice):\n return self.__class__(OrderedDict(list(self._modules.items())[idx]))\n else:\n return self._get_item_by_idx(self._modules.values(), idx)\n\n def __setitem__(self, idx: int, module: Module) -> None:\n key: str = self._get_item_by_idx(self._modules.keys(), idx)\n return setattr(self, key, module)\n\n def __delitem__(self, idx: Union[slice, int]) -> None:\n if isinstance(idx, slice):\n for key in list(self._modules.keys())[idx]:\n delattr(self, key)\n else:\n key = self._get_item_by_idx(self._modules.keys(), idx)\n delattr(self, key)\n\n @_copy_to_script_wrapper\n def __len__(self) -> int:\n return len(self._modules)\n\n @_copy_to_script_wrapper\n def __dir__(self):\n keys = super(Sequential, self).__dir__()\n keys = [key for key in keys if not key.isdigit()]\n return keys\n\n @_copy_to_script_wrapper\n def __iter__(self) -> Iterator[Module]:\n return iter(self._modules.values())\n\n # NB: We can't really type check this function as the type of input\n # may change dynamically (as is tested in\n # TestScript.test_sequential_intermediary_types). 
Cannot annotate\n # with Any as TorchScript expects a more precise type\n def forward(self, input):\n for module in self:\n input = module(input)\n return input\n\n def append(self, module: Module) -> 'Sequential':\n r\"\"\"Appends a given module to the end.\n\n Args:\n module (nn.Module): module to append\n \"\"\"\n self.add_module(str(len(self)), module)\n return self\n\n\nclass ModuleList(Module):\n r\"\"\"Holds submodules in a list.\n\n :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but\n modules it contains are properly registered, and will be visible by all\n :class:`~torch.nn.Module` methods.\n\n Args:\n modules (iterable, optional): an iterable of modules to add\n\n Example::\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])\n\n def forward(self, x):\n # ModuleList can act as an iterable, or be indexed using ints\n for i, l in enumerate(self.linears):\n x = self.linears[i // 2](x) + l(x)\n return x\n \"\"\"\n\n _modules: Dict[str, Module] # type: ignore[assignment]\n\n def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:\n super(ModuleList, self).__init__()\n if modules is not None:\n self += modules\n\n def _get_abs_string_index(self, idx):\n \"\"\"Get the absolute index for the list of modules\"\"\"\n idx = operator.index(idx)\n if not (-len(self) <= idx < len(self)):\n raise IndexError('index {} is out of range'.format(idx))\n if idx < 0:\n idx += len(self)\n return str(idx)\n\n @_copy_to_script_wrapper\n def __getitem__(self, idx: int) -> Union[Module, 'ModuleList']:\n if isinstance(idx, slice):\n return self.__class__(list(self._modules.values())[idx])\n else:\n return self._modules[self._get_abs_string_index(idx)]\n\n def __setitem__(self, idx: int, module: Module) -> None:\n idx = self._get_abs_string_index(idx)\n return setattr(self, str(idx), module)\n\n def __delitem__(self, idx: Union[int, slice]) -> None:\n if isinstance(idx, slice):\n for k in range(len(self._modules))[idx]:\n delattr(self, str(k))\n else:\n delattr(self, self._get_abs_string_index(idx))\n # To preserve numbering, self._modules is being reconstructed with modules after deletion\n str_indices = [str(i) for i in range(len(self._modules))]\n self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))\n\n @_copy_to_script_wrapper\n def __len__(self) -> int:\n return len(self._modules)\n\n @_copy_to_script_wrapper\n def __iter__(self) -> Iterator[Module]:\n return iter(self._modules.values())\n\n def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':\n return self.extend(modules)\n\n def __add__(self, other: Iterable[Module]) -> 'ModuleList':\n combined = ModuleList()\n for i, module in enumerate(chain(self, other)):\n combined.add_module(str(i), module)\n return combined\n\n @_copy_to_script_wrapper\n def __dir__(self):\n keys = super(ModuleList, self).__dir__()\n keys = [key for key in keys if not key.isdigit()]\n return keys\n\n def insert(self, index: int, module: Module) -> None:\n r\"\"\"Insert a given module before a given index in the list.\n\n Args:\n index (int): index to insert.\n module (nn.Module): module to insert\n \"\"\"\n for i in range(len(self._modules), index, -1):\n self._modules[str(i)] = self._modules[str(i - 1)]\n self._modules[str(index)] = module\n\n def append(self, module: Module) -> 'ModuleList':\n r\"\"\"Appends a given module to the end of the list.\n\n Args:\n module (nn.Module): module to append\n 
\"\"\"\n self.add_module(str(len(self)), module)\n return self\n\n def extend(self, modules: Iterable[Module]) -> 'ModuleList':\n r\"\"\"Appends modules from a Python iterable to the end of the list.\n\n Args:\n modules (iterable): iterable of modules to append\n \"\"\"\n if not isinstance(modules, container_abcs.Iterable):\n raise TypeError(\"ModuleList.extend should be called with an \"\n \"iterable, but got \" + type(modules).__name__)\n offset = len(self)\n for i, module in enumerate(modules):\n self.add_module(str(offset + i), module)\n return self\n\n # remove forward alltogether to fallback on Module's _forward_unimplemented\n\n\nclass ModuleDict(Module):\n r\"\"\"Holds submodules in a dictionary.\n\n :class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,\n but modules it contains are properly registered, and will be visible by all\n :class:`~torch.nn.Module` methods.\n\n :class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects\n\n * the order of insertion, and\n\n * in :meth:`~torch.nn.ModuleDict.update`, the order of the merged\n ``OrderedDict``, ``dict`` (started from Python 3.6) or another\n :class:`~torch.nn.ModuleDict` (the argument to\n :meth:`~torch.nn.ModuleDict.update`).\n\n Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping\n types (e.g., Python's plain ``dict`` before Python version 3.6) does not\n preserve the order of the merged mapping.\n\n Args:\n modules (iterable, optional): a mapping (dictionary) of (string: module)\n or an iterable of key-value pairs of type (string, module)\n\n Example::\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.choices = nn.ModuleDict({\n 'conv': nn.Conv2d(10, 10, 3),\n 'pool': nn.MaxPool2d(3)\n })\n self.activations = nn.ModuleDict([\n ['lrelu', nn.LeakyReLU()],\n ['prelu', nn.PReLU()]\n ])\n\n def forward(self, x, choice, act):\n x = self.choices[choice](x)\n x = self.activations[act](x)\n return x\n \"\"\"\n\n _modules: Dict[str, Module] # type: ignore[assignment]\n\n def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:\n super(ModuleDict, self).__init__()\n if modules is not None:\n self.update(modules)\n\n @_copy_to_script_wrapper\n def __getitem__(self, key: str) -> Module:\n return self._modules[key]\n\n def __setitem__(self, key: str, module: Module) -> None:\n self.add_module(key, module)\n\n def __delitem__(self, key: str) -> None:\n del self._modules[key]\n\n @_copy_to_script_wrapper\n def __len__(self) -> int:\n return len(self._modules)\n\n @_copy_to_script_wrapper\n def __iter__(self) -> Iterator[str]:\n return iter(self._modules)\n\n @_copy_to_script_wrapper\n def __contains__(self, key: str) -> bool:\n return key in self._modules\n\n def clear(self) -> None:\n \"\"\"Remove all items from the ModuleDict.\n \"\"\"\n self._modules.clear()\n\n def pop(self, key: str) -> Module:\n r\"\"\"Remove key from the ModuleDict and return its module.\n\n Args:\n key (string): key to pop from the ModuleDict\n \"\"\"\n v = self[key]\n del self[key]\n return v\n\n @_copy_to_script_wrapper\n def keys(self) -> Iterable[str]:\n r\"\"\"Return an iterable of the ModuleDict keys.\n \"\"\"\n return self._modules.keys()\n\n @_copy_to_script_wrapper\n def items(self) -> Iterable[Tuple[str, Module]]:\n r\"\"\"Return an iterable of the ModuleDict key/value pairs.\n \"\"\"\n return self._modules.items()\n\n @_copy_to_script_wrapper\n def values(self) -> Iterable[Module]:\n r\"\"\"Return an iterable of the ModuleDict 
values.\n \"\"\"\n return self._modules.values()\n\n def update(self, modules: Mapping[str, Module]) -> None:\n r\"\"\"Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a\n mapping or an iterable, overwriting existing keys.\n\n .. note::\n If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or\n an iterable of key-value pairs, the order of new elements in it is preserved.\n\n Args:\n modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,\n or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)\n \"\"\"\n if not isinstance(modules, container_abcs.Iterable):\n raise TypeError(\"ModuleDict.update should be called with an \"\n \"iterable of key/value pairs, but got \" +\n type(modules).__name__)\n\n if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):\n for key, module in modules.items():\n self[key] = module\n else:\n # modules here can be a list with two items\n for j, m in enumerate(modules):\n if not isinstance(m, container_abcs.Iterable):\n raise TypeError(\"ModuleDict update sequence element \"\n \"#\" + str(j) + \" should be Iterable; is\" +\n type(m).__name__)\n if not len(m) == 2:\n raise ValueError(\"ModuleDict update sequence element \"\n \"#\" + str(j) + \" has length \" + str(len(m)) +\n \"; 2 is required\")\n # modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]\n # that's too cumbersome to type correctly with overloads, so we add an ignore here\n self[m[0]] = m[1] # type: ignore[assignment]\n\n # remove forward alltogether to fallback on Module's _forward_unimplemented\n\n\nclass ParameterList(Module):\n r\"\"\"Holds parameters in a list.\n\n :class:`~torch.nn.ParameterList` can be used like a regular Python\n list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,\n and will be visible by all :class:`~torch.nn.Module` methods.\n\n Note that the constructor, assigning an element of the list, the\n :meth:`~torch.nn.ParameterDict.append` method and the :meth:`~torch.nn.ParameterDict.extend`\n method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.\n\n Args:\n parameters (iterable, optional): an iterable of elements to add to the list.\n\n Example::\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])\n\n def forward(self, x):\n # ParameterList can act as an iterable, or be indexed using ints\n for i, p in enumerate(self.params):\n x = self.params[i // 2].mm(x) + p.mm(x)\n return x\n \"\"\"\n\n def __init__(self, values: Optional[Iterable[Any]] = None) -> None:\n super(ParameterList, self).__init__()\n self._size = 0\n if values is not None:\n self += values\n\n def _get_abs_string_index(self, idx):\n \"\"\"Get the absolute index for the list of modules\"\"\"\n idx = operator.index(idx)\n if not (-len(self) <= idx < len(self)):\n raise IndexError('index {} is out of range'.format(idx))\n if idx < 0:\n idx += len(self)\n return str(idx)\n\n @overload\n def __getitem__(self, idx: int) -> Any:\n ...\n\n @overload\n def __getitem__(self: T, idx: slice) -> T:\n ...\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n start, stop, step = idx.indices(len(self))\n out = self.__class__()\n for i in range(start, stop, step):\n out.append(self[i])\n return out\n else:\n idx = self._get_abs_string_index(idx)\n return getattr(self, str(idx))\n\n def 
__setitem__(self, idx: int, param: Any) -> None:\n # Note that all other function that add an entry to the list part of\n # the ParameterList end up here. So this is the only place where we need\n # to wrap things into Parameter if needed.\n # Objects added via setattr() are not in the list part and thus won't\n # call into this function.\n idx = self._get_abs_string_index(idx)\n if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):\n param = Parameter(param)\n return setattr(self, str(idx), param)\n\n def __len__(self) -> int:\n return self._size\n\n def __iter__(self) -> Iterator[Any]:\n return iter(self[i] for i in range(len(self)))\n\n def __iadd__(self, parameters: Iterable[Any]) -> 'ParameterList':\n return self.extend(parameters)\n\n def __dir__(self):\n return list(range(self._size))\n\n def append(self, value: Any) -> 'ParameterList':\n \"\"\"Appends a given value at the end of the list.\n\n Args:\n value (Any): value to append\n \"\"\"\n new_idx = len(self)\n self._size += 1\n self[new_idx] = value\n return self\n\n def extend(self, values: Iterable[Any]) -> 'ParameterList':\n \"\"\"Appends values from a Python iterable to the end of the list.\n\n Args:\n values (iterable): iterable of values to append\n \"\"\"\n # Tensor is an iterable but we never want to unpack it here\n if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):\n raise TypeError(\"ParameterList.extend should be called with an \"\n \"iterable, but got \" + type(values).__name__)\n for value in values:\n self.append(value)\n return self\n\n def extra_repr(self) -> str:\n child_lines = []\n for k, p in enumerate(self):\n if isinstance(p, torch.Tensor):\n size_str = 'x'.join(str(size) for size in p.size())\n device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())\n parastr = '{} containing: [{} of size {}{}]'.format(\n \"Parameter\" if isinstance(p, Parameter) else \"Tensor\",\n torch.typename(p), size_str, device_str)\n child_lines.append(' (' + str(k) + '): ' + parastr)\n else:\n child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)\n\n tmpstr = '\\n'.join(child_lines)\n return tmpstr\n\n def __call__(self, *args, **kwargs):\n raise RuntimeError('ParameterList should not be called.')\n\n\nclass ParameterDict(Module):\n r\"\"\"Holds parameters in a dictionary.\n\n ParameterDict can be indexed like a regular Python dictionary, but Parameters it\n contains are properly registered, and will be visible by all Module methods.\n Other objects are treated as would be done by a regular Python dictionary\n\n :class:`~torch.nn.ParameterDict` is an **ordered** dictionary.\n :meth:`~torch.nn.ParameterDict.update` with other unordered mapping\n types (e.g., Python's plain ``dict``) does not preserve the order of the\n merged mapping. 
On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`\n will preserve their ordering.\n\n Note that the constructor, assigning an element of the dictionary and the\n :meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into\n :class:`~torch.nn.Parameter`.\n\n Args:\n values (iterable, optional): a mapping (dictionary) of\n (string : Any) or an iterable of key-value pairs\n of type (string, Any)\n\n Example::\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.params = nn.ParameterDict({\n 'left': nn.Parameter(torch.randn(5, 10)),\n 'right': nn.Parameter(torch.randn(5, 10))\n })\n\n def forward(self, x, choice):\n x = self.params[choice].mm(x)\n return x\n \"\"\"\n\n def __init__(self, parameters: Any = None) -> None:\n super(ParameterDict, self).__init__()\n self._keys: Dict[str, None] = {}\n if parameters is not None:\n self.update(parameters)\n\n def _key_to_attr(self, key: str) -> str:\n if not isinstance(key, str):\n raise TypeError(\"Index given to ParameterDict cannot be used as a key as it is \"\n f\"not a string (type is '{type(key).__name__}'). Open an issue on \"\n \"github if you need non-string keys.\")\n else:\n # Use the key as-is so that `.named_parameters()` returns the right thing\n return key\n\n def __getitem__(self, key: str) -> Any:\n attr = self._key_to_attr(key)\n return getattr(self, attr)\n\n def __setitem__(self, key: str, value: Any) -> None:\n # Note that all other function that add an entry to the dictionary part of\n # the ParameterDict end up here. So this is the only place where we need\n # to wrap things into Parameter if needed.\n # Objects added via setattr() are not in the dictionary part and thus won't\n # call into this function.\n self._keys[key] = None\n attr = self._key_to_attr(key)\n if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):\n value = Parameter(value)\n setattr(self, attr, value)\n\n def __delitem__(self, key: str) -> None:\n del self._keys[key]\n attr = self._key_to_attr(key)\n delattr(self, attr)\n\n def __len__(self) -> int:\n return len(self._keys)\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._keys)\n\n def __reversed__(self) -> Iterator[str]:\n return reversed(list(self._keys))\n\n def copy(self) -> 'ParameterDict':\n \"\"\"Returns a copy of this :class:`~torch.nn.ParameterDict` instance.\n \"\"\"\n # We have to use an OrderedDict because the ParameterDict constructor\n # behaves differently on plain dict vs OrderedDict\n return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))\n\n def __contains__(self, key: str) -> bool:\n return key in self._keys\n\n def setdefault(self, key: str, default: Optional[Any] = None) -> Any:\n \"\"\"If key is in the ParameterDict, return its value.\n If not, insert `key` with a parameter `default` and return `default`.\n `default` defaults to `None`.\n\n Args:\n key (string): key to set default for\n default (Any): the parameter set to the key\n \"\"\"\n\n if key not in self:\n self[key] = default\n return self[key]\n\n def clear(self) -> None:\n \"\"\"Remove all items from the ParameterDict.\n \"\"\"\n for k in self._keys.copy():\n del self[k]\n\n def pop(self, key: str) -> Any:\n r\"\"\"Remove key from the ParameterDict and return its parameter.\n\n Args:\n key (string): key to pop from the ParameterDict\n \"\"\"\n v = self[key]\n del self[key]\n return v\n\n def popitem(self) -> Tuple[str, Any]:\n \"\"\"Remove and return the last inserted `(key, 
parameter)` pair\n from the ParameterDict\n \"\"\"\n k, _ = self._keys.popitem()\n # We need the key in the _keys to be able to access/del\n self._keys[k] = None\n val = self[k]\n del self[k]\n return k, val\n\n def get(self, key: str, default: Optional[Any] = None) -> Any:\n r\"\"\"Return the parameter associated with key if present.\n Otherwise return default if provided, None if not.\n\n Args:\n key (string): key to get from the ParameterDict\n default (Parameter, optional): value to return if key not present\n \"\"\"\n return self[key] if key in self else default\n\n def fromkeys(self, keys: Iterable[str], default: Optional[Any] = None) -> 'ParameterDict':\n r\"\"\"Return a new ParameterDict with the keys provided\n\n Args:\n keys (iterable, string): keys to make the new ParameterDict from\n default (Parameter, optional): value to set for all keys\n \"\"\"\n return ParameterDict(((k, default) for k in keys))\n\n def keys(self) -> Iterable[str]:\n r\"\"\"Return an iterable of the ParameterDict keys.\n \"\"\"\n return self._keys.keys()\n\n def items(self) -> Iterable[Tuple[str, Any]]:\n r\"\"\"Return an iterable of the ParameterDict key/value pairs.\n \"\"\"\n return ((k, self[k]) for k in self._keys)\n\n def values(self) -> Iterable[Any]:\n r\"\"\"Return an iterable of the ParameterDict values.\n \"\"\"\n return (self[k] for k in self._keys)\n\n def update(self, parameters: Union[Mapping[str, Any], 'ParameterDict']) -> None:\n r\"\"\"Update the :class:`~torch.nn.ParameterDict` with the key-value pairs from a\n mapping or an iterable, overwriting existing keys.\n\n .. note::\n If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or\n an iterable of key-value pairs, the order of new elements in it is preserved.\n\n Args:\n parameters (iterable): a mapping (dictionary) from string to\n :class:`~torch.nn.Parameter`, or an iterable of\n key-value pairs of type (string, :class:`~torch.nn.Parameter`)\n \"\"\"\n if not isinstance(parameters, container_abcs.Iterable):\n raise TypeError(\"ParametersDict.update should be called with an \"\n \"iterable of key/value pairs, but got \" +\n type(parameters).__name__)\n\n if isinstance(parameters, (OrderedDict, ParameterDict)):\n for key, parameter in parameters.items():\n self[key] = parameter\n elif isinstance(parameters, container_abcs.Mapping):\n for key, parameter in sorted(parameters.items()):\n self[key] = parameter\n else:\n for j, p in enumerate(parameters):\n if not isinstance(p, container_abcs.Iterable):\n raise TypeError(\"ParameterDict update sequence element \"\n \"#\" + str(j) + \" should be Iterable; is\" +\n type(p).__name__)\n if not len(p) == 2:\n raise ValueError(\"ParameterDict update sequence element \"\n \"#\" + str(j) + \" has length \" + str(len(p)) +\n \"; 2 is required\")\n # parameters as length-2 list too cumbersome to type, see ModuleDict.update comment\n self[p[0]] = p[1] # type: ignore[assignment]\n\n def extra_repr(self) -> str:\n child_lines = []\n for k, p in self.items():\n if isinstance(p, torch.Tensor):\n size_str = 'x'.join(str(size) for size in p.size())\n device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())\n parastr = '{} containing: [{} of size {}{}]'.format(\n \"Parameter\" if isinstance(p, Parameter) else \"Tensor\",\n torch.typename(p), size_str, device_str)\n child_lines.append(' (' + str(k) + '): ' + parastr)\n else:\n child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)\n tmpstr = '\\n'.join(child_lines)\n return tmpstr\n\n def 
__call__(self, input):\n raise RuntimeError('ParameterDict should not be called.')\n\n def __or__(self, other: 'ParameterDict') -> 'ParameterDict':\n copy = self.copy()\n copy.update(other)\n return copy\n\n def __ror__(self, other: 'ParameterDict') -> 'ParameterDict':\n copy = other.copy()\n copy.update(self)\n return copy\n\n def __ior__(self, other : 'ParameterDict') -> 'ParameterDict':\n self.update(other)\n return self\n", "import torch\nfrom .grad_mode import _DecoratorContextManager\nfrom collections import namedtuple\n\nfrom typing import Any\n\n# Global variable used to make the python API simpler to use\n_current_level = -1\n\ndef enter_dual_level():\n r\"\"\"Function that can be used to enter a new forward grad level.\n This level can be used to make and unpack dual Tensors to compute\n forward gradients.\n\n This function also updates the current level that is used by default\n by the other functions in this API.\n \"\"\"\n global _current_level\n new_level = torch._C._enter_dual_level()\n if new_level != _current_level + 1:\n raise RuntimeError(\"Entering a new forward AD level but the current level \"\n \"is not valid. Make sure you did not modified it directly.\")\n _current_level = new_level\n return new_level\n\ndef exit_dual_level(*, level=None):\n r\"\"\"Function that can be used to exit a forward grad level.\n This function deletes all the gradients associated with this\n level. Only deleting the latest entered level is allowed.\n\n This function also updates the current level that is used by default\n by the other functions in this API.\n \"\"\"\n global _current_level\n if level is None:\n level = _current_level\n if level != _current_level:\n raise RuntimeError(\"Trying to exit a forward AD level that was not the last one \"\n \"that was created. This is not supported.\")\n torch._C._exit_dual_level(level=level)\n _current_level = level - 1\n\ndef make_dual(tensor, tangent, *, level=None):\n r\"\"\"Associates a tensor value with a forward gradient, the tangent, to create a\n \"dual tensor\", which is used to compute forward AD gradients.\n The result is a new tensor aliased to :attr:`tensor` with :attr:`tangent` embedded\n as an attribute as-is if it has the same storage layout or copied otherwise.\n The tangent attribute can be recovered with :func:`unpack_dual`.\n\n This function is backward differentiable.\n\n Given a function `f` whose jacobian is `J`, it allows one to compute the Jacobian-vector product (`jvp`)\n between `J` and a given vector `v` as follows.\n\n Example::\n\n >>> with dual_level():\n ... inp = make_dual(x, v)\n ... out = f(inp)\n ... 
y, jvp = unpack_dual(out)\n\n Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__\n for detailed steps on how to use this API.\n\n \"\"\"\n if level is None:\n level = _current_level\n\n if level < 0:\n raise RuntimeError(\"Trying to create a dual Tensor for forward AD but no level \"\n \"exists, make sure to enter_dual_level() first.\")\n\n return torch._VF._make_dual(tensor, tangent, level=level)\n\nUnpackedDualTensor = namedtuple('UnpackedDualTensor', ['primal', 'tangent'])\n\ndef unpack_dual(tensor, *, level=None):\n r\"\"\"Unpacks a \"dual tensor\" to get both its Tensor value and its forward AD gradient.\n The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of\n :attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is.\n Neither of these tensors can be dual tensor of level :attr:`level`.\n\n This function is backward differentiable.\n\n Example::\n\n >>> with dual_level():\n ... inp = make_dual(x, x_t)\n ... out = f(inp)\n ... y, jvp = unpack_dual(out)\n ... jvp = unpack_dual(out).tangent\n\n Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__\n for detailed steps on how to use this API.\n \"\"\"\n if level is None:\n level = _current_level\n\n if level < 0:\n return UnpackedDualTensor(tensor, None)\n\n primal, dual = torch._VF._unpack_dual(tensor, level=level)\n\n return UnpackedDualTensor(primal, dual)\n\nclass dual_level(_DecoratorContextManager):\n r\"\"\"Context-manager that enables forward AD. All forward AD computation must\n be performed in a ``dual_level`` context.\n\n .. Note::\n\n The ``dual_level`` context appropriately enters and exit the dual level to\n controls the current forward AD level, which is used by default by the other\n functions in this API.\n\n We currently don't plan to support nested ``dual_level`` contexts, however, so\n only a single forward AD level is supported. To compute higher-order\n forward grads, one can use `functorch's jvp <https://github.com/pytorch/functorch#jvp>`__.\n\n Example::\n\n >>> x = torch.tensor([1])\n >>> x_t = torch.tensor([1])\n >>> with dual_level():\n ... inp = make_dual(x, x_t)\n ... # Do computations with inp\n ... out = your_fn(inp)\n ... _, grad = unpack_dual(out)\n >>> grad is None\n False\n >>> # After exiting the level, the grad is deleted\n >>> _, grad_after = unpack_dual(out)\n >>> grad is None\n True\n\n Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__\n for detailed steps on how to use this API.\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def __enter__(self):\n return enter_dual_level()\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n exit_dual_level()\n" ]
[ [ "torch.zeros", "torch.max_pool1d_with_indices", "torch._C._add_docstr", "torch.adaptive_max_pool1d", "torch._C._nn.adaptive_avg_pool3d", "torch._C._nn.relu6_", "torch.bmm", "torch.empty_like", "torch._VF.feature_alpha_dropout_", "torch.cosine_embedding_loss", "torch.min", "torch._C._nn._upsample_nearest_exact1d", "torch._C._nn.hardsigmoid_", "torch._C._get_tracing_state", "torch.instance_norm", "torch.margin_ranking_loss", "torch._C._nn.adaptive_max_pool3d", "torch._C._nn._upsample_nearest_exact2d", "torch._VF.feature_alpha_dropout", "torch.any", "torch.broadcast_tensors", "torch.abs", "torch._C._nn.hardswish", "torch._C._nn.hardswish_", "torch.sign", "torch.relu_", "torch._C._nn.relu6", "torch._C._nn.mish", "torch.binary_cross_entropy_with_logits", "torch._VF.feature_dropout_", "torch.embedding_bag", "torch.tensor", "torch.batch_norm", "torch.rrelu", "torch._C._nn.fractional_max_pool2d", "torch.zeros_like", "torch.max_pool3d", "torch.embedding_renorm_", "torch._C._nn.silu_", "torch._C._nn._upsample_nearest_exact3d", "torch._C._nn.upsample_bilinear2d", "torch._C._nn._upsample_bicubic2d_aa", "torch._C._nn.silu", "torch._C._nn.upsample_nearest1d", "torch.selu_", "torch.max_pool2d", "torch.max_pool1d", "torch.jit.is_scripting", "torch._C._nn.replication_pad1d", "torch._C._nn.max_unpool3d", "torch._C._nn.glu", "torch._VF.dropout_", "torch._C._nn.upsample_linear1d", "torch._C._nn.replication_pad3d", "torch._C._nn._upsample_bilinear2d_aa", "torch._C._nn.max_pool3d_with_indices", "torch._C._nn.multilabel_margin_loss", "torch.relu", "torch.affine_grid_generator", "torch.group_norm", "torch._C._nn.binary_cross_entropy", "torch._C._nn.upsample_nearest3d", "torch.celu", "torch._C._nn.elu", "torch.unsqueeze", "torch.log", "torch._C._nn.leaky_relu_", "torch._C._nn.upsample_bicubic2d", "torch._VF.alpha_dropout", "torch._C._nn.hardtanh_", "torch._C._nn.adaptive_max_pool2d", "torch.embedding", "torch.triplet_margin_loss", "torch._C._nn.reflection_pad2d", "torch.grid_sampler", "torch.celu_", "torch.layer_norm", "torch._VF.threshold", "torch.no_grad", "torch._VF.dropout", "torch._C._nn.leaky_relu", "torch._C._nn.soft_margin_loss", "torch._C._nn.replication_pad2d", "torch.rrelu_", "torch._C._nn.fractional_max_pool3d", "torch._C._nn.max_unpool2d", "torch._C._nn.multi_margin_loss", "torch.div", "torch.selu", "torch._VF.constant_pad_nd", "torch.jit.annotate", "torch._C._nn.adaptive_avg_pool2d", "torch._VF.feature_dropout", "torch._C._nn.mish_", "torch._C._nn.upsample_nearest2d", "torch.kl_div", "torch._VF.threshold_", "torch._C._nn.elu_", "torch._C._nn.reflection_pad1d", "torch._VF.alpha_dropout_", "torch._C._nn.reflection_pad3d", "torch._C._nn.hardsigmoid", "torch._C._nn.max_pool2d_with_indices", "torch.hinge_embedding_loss", "torch._C._nn.hardtanh", "torch.clamp", "torch._C._nn.upsample_trilinear3d" ], [ "torch.typename" ], [ "torch._VF._unpack_dual", "torch._C._exit_dual_level", "torch._C._enter_dual_level", "torch._VF._make_dual" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mediocretech/patchwork
[ "ad21c81611f74569e93f563d765cba2259b1d4b3", "ad21c81611f74569e93f563d765cba2259b1d4b3", "ad21c81611f74569e93f563d765cba2259b1d4b3" ]
[ "patchwork/tests/test_sample.py", "patchwork/_fine_tuning_models.py", "patchwork/tests/test_augment.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\nfrom patchwork._sample import find_unlabeled, find_fully_labeled\nfrom patchwork._sample import find_partially_labeled\nfrom patchwork._sample import stratified_sample, unlabeled_sample\n\n\ntestdf = pd.DataFrame({\n \"filepath\":[\"a.jpg\", \"b.jpg\", \"c.jpg\", \"d.jpg\", \"e.jpg\"],\n \"exclude\":[True, False, False, False, False],\n \"validation\":[False, False, False, False, False],\n \"class1\":[None, 1, 0, 1, 1],\n \"class2\":[None, 0, 1, None, None]\n })\n\n\ndef test_find_unlabeled():\n unlab = find_unlabeled(testdf)\n assert unlab.sum() == 1\n assert \"a.jpg\" in testdf[\"filepath\"][unlab].values\n\ndef test_find_fully_labeled():\n flab = find_fully_labeled(testdf)\n assert flab.sum() == 2\n assert \"b.jpg\" in testdf[\"filepath\"][flab].values\n\n\ndef test_partially_unlabeled():\n plab = find_partially_labeled(testdf)\n assert plab.sum() == 2\n assert \"d.jpg\" in testdf[\"filepath\"][plab].values\n\n\n\n\ndef test_stratified_sampler():\n N = 100\n outlist, ys = stratified_sample(testdf, N=N, return_indices=False)\n \n assert len(outlist) == N\n assert ys.shape[0] == N\n assert ys.shape[1] == 2\n #assert isinstance(outlist[0], str)\n #assert False, \"this should definitely be tested\"", "# -*- coding: utf-8 -*-\nimport param\nimport tensorflow as tf\n\nfrom patchwork._layers import _next_layer\n\n\nclass GlobalPooling(param.Parameterized):\n \"\"\"\n Just a single pooling layer.\n \"\"\"\n pooling_type = param.ObjectSelector(default=\"max pool\", objects=[\"max pool\", \"average pool\", \"flatten\"])\n \n description = \"\"\"\n A single pooling or flattening layer to map outputs of a feature extractor to a dense vector. No trainable parameters.\n \"\"\"\n \n def build(self, feature_shape):\n #inpt = tf.keras.layers.Input((None, None, inpt_channels))\n inpt = tf.keras.layers.Input(feature_shape)\n if self.pooling_type == \"max pool\":\n pool = tf.keras.layers.GlobalMaxPool2D()\n elif self.pooling_type == \"average pool\":\n pool = tf.keras.layers.GlobalAvgPool2D()\n else:\n pool = tf.keras.layers.Flatten()\n # store a reference to the model in case we need it later\n self._model = tf.keras.Model(inpt, pool(inpt))\n return self._model\n \n def model_params(self):\n return {\"fine_tuning_type\":\"GlobalPooling\",\n \"pooling_type\":self.pooling_type,\n \"num_params\":self._model.count_params(),\n \"num_layers\":len(self._model.layers)}\n \n \nclass ConvNet(param.Parameterized):\n \"\"\"\n Convolutional network\n \"\"\"\n layers = param.String(default=\"128,p,d,128\", doc=\"Comma-separated list of filters\")\n kernel_size = param.ObjectSelector(default=1, objects=[1,3,5], doc=\"Spatial size of filters\")\n batchnorm = param.Boolean(False, doc=\"Whether to use batch normalization in convolutional layers\")\n separable_convolutions = param.Boolean(False, doc=\"Whether to use depthwise separable convolutions\")\n dropout_rate = param.Number(0.5, bounds=(0.05,0.95), doc=\"Spatial dropout rate.\")\n pooling_type = param.ObjectSelector(default=\"max pool\", objects=[\"max pool\", \"average pool\", \"flatten\"], \n doc=\"Whether to use global mean or max pooling.\")\n \n \n _description = \"\"\"\n Convolutional network with global pooling at the end. Set dropout to 0 to disable.\n \"\"\"\n description = \"\"\"\n Convolutional network with global pooling at the end. 
\\n\\n \n Use a comma-separated list to define layers: integer for a convolution, `p` for 2x2 max pooling, `d` for 2D spatial dropout, and `r` for a residual block.\n \"\"\"\n \n def build(self, feature_shape):\n inpt = tf.keras.layers.Input(feature_shape)\n net = inpt\n for l in self.layers.split(\",\"):\n l = l.strip()\n net = _next_layer(net, l, kernel_size=self.kernel_size,\n dropout_rate=self.dropout_rate,\n separable=self.separable_convolutions,\n batchnorm=self.batchnorm)\n \n if self.pooling_type == \"max pool\":\n net = tf.keras.layers.GlobalMaxPool2D()(net)\n elif self.pooling_type == \"average pool\":\n net = tf.keras.layers.GlobalAvgPool2D()(net)\n else:\n net = tf.keras.layers.Flatten()(net)\n # store reference to model in case we need it later\n self._model = tf.keras.Model(inpt, net)\n return self._model\n \n def model_params(self):\n return {\"fine_tuning_type\":\"ConvNet\",\n \"pooling_type\":self.pooling_type,\n \"num_params\":self._model.count_params(),\n \"num_layers\":len(self._model.layers),\n \"kernel_size\":self.kernel_size,\n \"separable\":self.separable_convolutions,\n \"structure\":self.layers}\n ", "# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\n\nfrom patchwork._augment import augment_function, _poisson, _random_zoom, _choose\nfrom patchwork._augment import _center_crop, _jitter, _random_jpeg_degrade, _gaussian_blur\nfrom patchwork._augment import SINGLE_AUG_FUNC\n\n\ntest_shape = (64,64,3)\ntest_img = np.zeros(test_shape, dtype=np.float32)\ntest_img_tensor = tf.constant(test_img, dtype=tf.float32)\n\n\ndef test_default_augment():\n augfunc = augment_function(test_shape[:2])\n augmented = augfunc(test_img_tensor)\n \n assert isinstance(augmented, tf.Tensor)\n assert augmented.get_shape() == test_img_tensor.get_shape()\n \n \ndef test_poisson():\n c = _poisson(100)\n \n assert c > 0\n assert c.dtype == tf.int32\n \n \ndef test_random_zoom():\n img = np.random.uniform(0,1, (32,32,3)).astype(np.float32)\n \n zoomed = _random_zoom(img, 0.1, (32,32))\n assert img.shape == zoomed.shape\n assert (zoomed.numpy() == img).all() == False\n \n \ndef test_gaussian_blur():\n img = np.random.uniform(0,1, (32,32,3)).astype(np.float32)\n \n blurred = _gaussian_blur(img, 1., (32,32))\n assert img.shape == blurred.shape\n assert (blurred.numpy() == img).all() == False\n \n \ndef test_center_crop():\n img = np.random.uniform(0,1, (32,32,3)).astype(np.float32)\n \n zoomed = _center_crop(img, 0.1, (32,32))\n assert img.shape == zoomed.shape\n assert (zoomed.numpy() == img).all() == False\n \ndef test_choose():\n choice = _choose(0.5)\n assert choice.dtype == tf.bool\n \n \ndef test_jitter():\n img = np.random.uniform(0,1, (32,32,3)).astype(np.float32)\n \n jittered = _jitter(img, 1.)\n assert img.shape == jittered.shape\n assert (jittered.numpy() == img).all() == False\n \n\ndef test_random_jpeg_degrade():\n img = np.random.uniform(0,1, (32,32,3)).astype(np.float32)\n \n degraded = _random_jpeg_degrade(img, 1.)\n assert img.shape == degraded.shape\n assert (degraded.numpy() == img).all() == False" ]
[ [ "pandas.DataFrame" ], [ "tensorflow.keras.layers.GlobalMaxPool2D", "tensorflow.keras.layers.GlobalAvgPool2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ], [ "tensorflow.constant", "numpy.random.uniform", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
jain-harshil/Adapter-BERT
[ "fd74ed0eea21b13034f9a834244191846de6b8d5" ]
[ "trainer.py" ]
[ "import argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\nimport helper\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n get_linear_schedule_with_warmup,\n)\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\nlogger = logging.getLogger(__name__)\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef prepare_optimizer_and_scheduler(model, t_total, params):\n c = params[\"mcqa_config\"]\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": c.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=c.learning_rate, eps=c.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=c.warmup_steps, num_training_steps=t_total)\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(c.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(os.path.join(c.model_name_or_path, \"scheduler.pt\")):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(c.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(c.model_name_or_path, \"scheduler.pt\")))\n\n return optimizer, scheduler\n\ndef save_model(model, optimizer, scheduler, step, dev_scores, params):\n # Save model checkpoint\n c = params[\"mcqa_config\"]\n\n output_dir = os.path.join(c.output_dir, \"best\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n model_to_save = (model.module if hasattr(model, \"module\") else model) \n model_to_save.save_pretrained(output_dir)\n\n #torch.save(c, os.path.join(output_dir, \"training_c.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n global_step_path = os.path.join(c.output_dir, \"best\", \"global_step.txt\")\n helper.write_list(global_step_path, [str(step)])\n\n if dev_scores:\n eval_output_dir = os.path.join(c.output_dir, \"eval\")\n if not os.path.exists(eval_output_dir):\n os.makedirs(eval_output_dir)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_result.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(\"XCOPA EN DEV\"))\n for key in sorted(dev_scores.keys()):\n logger.info(\" %s = %s\", key, str(dev_scores[key]))\n writer.write(\"%s = %s\\n\" % (key, str(dev_scores[key])))\n\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir) \n\ndef train(train_dataset, eval_dataset, model, params):\n \"\"\" Train the model \"\"\"\n c = params[\"mcqa_config\"]\n \n # enabling TensorBoard\n tb_writer = SummaryWriter()\n\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=c.train_batch_size)\n\n if c.max_steps > 0:\n t_total = 
c.max_steps\n c.num_train_epochs = c.max_steps // len(train_dataloader) + 1\n else:\n t_total = len(train_dataloader) * c.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n optimizer, scheduler = prepare_optimizer_and_scheduler(model, t_total, params)\n \n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", c.num_train_epochs)\n logger.info(\" Train batch size = %d\", c.train_batch_size)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n # Check if continuing training from a checkpoint\n if os.path.exists(c.model_name_or_path):\n # set global_step to global_step of last saved checkpoint from model path\n try:\n global_step = int(helper.load_lines(os.path.join(c.model_name_or_path, \"global_step.txt\"))[0].strip())\n except ValueError:\n global_step = 0\n epochs_trained = global_step // len(train_dataloader)\n steps_trained_in_current_epoch = global_step % len(train_dataloader)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n\n train_iterator = trange(epochs_trained, int(c.num_train_epochs), desc=\"Epoch\", disable=False)\n\n set_seed(c.seed) # Added here for reproductibility, every new training starts from the same seed, i.e., same parameter initialization\n\n eval_steps_no_improvement = 0\n stop_training = False\n best_eval_res = -1000000 if c.eval_metric_increasing else 1000000\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=False)\n for step, batch in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train() \n batch = tuple(t.to(c.device) for t in batch)\n\n if params[\"task_type\"] == \"mcqa\":\n outputs = model(batch, params[\"model_params\"])\n else:\n outputs = model(input_ids=batch[0], attention_mask=batch[1], token_type_ids=batch[2], labels=batch[3])\n\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n \n # parameter updates\n loss.backward()\n tr_loss += loss.item()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), c.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n\n model.zero_grad() # zeroing gradients afer update\n\n global_step += 1\n\n if c.logging_steps > 0 and global_step % c.logging_steps == 0:\n print(\"Global step: \" + str(global_step))\n logs = {}\n results = evaluate(eval_dataset, model, params)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / c.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n eval_res = results[c.eval_stop_metric]\n if (c.eval_metric_increasing and eval_res < best_eval_res) or (not 
c.eval_metric_increasing and eval_res > best_eval_res):\n eval_steps_no_improvement += 1\n else:\n eval_steps_no_improvement = 0\n \n if eval_steps_no_improvement == c.num_evals_early_stop:\n print(\"Early stopping training. \")\n stop_training = True\n break\n \n if eval_steps_no_improvement == 0:\n best_eval_res = eval_res\n print(\"New best eval \" + c.eval_stop_metric + \": \" + str(best_eval_res))\n print(\"Saving best model...\")\n save_model(model, optimizer, scheduler, global_step, results, params)\n print(\"New best model saved!\")\n else:\n print(\"No improvement for \" + str(eval_steps_no_improvement) + \" steps!\")\n print(\"Current Eval \" + c.eval_stop_metric + \": \" + str(eval_res))\n print(\"Best Eval \" + c.eval_stop_metric + \" so far: \" + str(best_eval_res))\n \n if c.max_steps > 0 and global_step > c.max_steps:\n epoch_iterator.close()\n break\n \n if (c.max_steps > 0 and global_step > c.max_steps) or stop_training:\n train_iterator.close()\n break\n tb_writer.close()\n\n return global_step, tr_loss / global_step, best_eval_res\n\ndef compute_performance(preds, golds):\n if len(preds) != len(golds):\n raise ValueError(\"Predictions and gold labels not of same length!\")\n\n results = {}\n acc = len([i for i in range(len(preds)) if preds[i] == golds[i]]) / len(preds)\n results[\"Accuracy\"] = acc\n \n return results\n\ndef evaluate(eval_dataset, model, params):\n results = {}\n c = params[\"mcqa_config\"]\n c.eval_batch_size = c.train_batch_size\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=c.eval_batch_size)\n \n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(\"XCOPA VAL\"))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", c.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n golds = None\n \n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(c.device) for t in batch)\n\n with torch.no_grad():\n if params[\"task_type\"] == \"mcqa\":\n outputs = model(batch, params[\"model_params\"])\n else:\n outputs = model(input_ids=batch[0], attention_mask=batch[1], token_type_ids=batch[2], labels=batch[3])\n\n loss = outputs[0]\n logits = outputs[1]\n eval_loss += loss.mean().item()\n nb_eval_steps += 1\n\n if preds is None:\n preds = logits.detach().cpu().numpy()\n golds = batch[-1].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n golds = np.append(golds, batch[-1].detach().cpu().numpy())\n\n eval_loss = eval_loss / nb_eval_steps\n results[\"Loss\"] = eval_loss\n preds = np.argmax(preds, axis=1)\n\n result = compute_performance(preds, golds)\n results.update(result)\n\n return results\n" ]
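(Editor's note on the code field above: its training loop keeps a best dev score and stops after a fixed number of evaluations without improvement. A minimal sketch of that patience pattern, using an invented score sequence and patience value rather than anything from the record:)

best, patience, no_improve = float("-inf"), 2, 0   # metric assumed to be increasing
eval_scores = [0.52, 0.55, 0.54, 0.53, 0.56]       # hypothetical dev accuracies
for step, score in enumerate(eval_scores):
    if score >= best:
        best, no_improve = score, 0                # new best: this is where a checkpoint would be saved
    else:
        no_improve += 1                            # one more eval without improvement
    if no_improve == patience:
        print(f"early stop at eval {step}, best={best}")
        break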
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "numpy.argmax", "torch.no_grad", "torch.cuda.manual_seed_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yzhou2890/demo
[ "7cbc43dc17680a199bc9376690f95cbf27dcafd6", "7cbc43dc17680a199bc9376690f95cbf27dcafd6" ]
[ "demo_CamShift.py", "demo_otsu.py" ]
[ "\n\n\n#-------------------\n# demo of mean-shift and camshift\n# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_video/py_meanshift/py_meanshift.html\n#-------------------\n\n\nimport numpy as np\nimport cv2\n\n\nvcap = cv2.VideoCapture(0)\nnw0 = vcap.get(cv2.CAP_PROP_FRAME_WIDTH)\nnh0 = vcap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\nnheight = 320*4\nnwidth = int(nw0 * nheight / nh0 )\n\nvcap.set(cv2.CAP_PROP_FRAME_WIDTH, nwidth) #set width\nvcap.set(cv2.CAP_PROP_FRAME_HEIGHT, nheight) #set height\n\nvcap.set(cv2.CAP_PROP_FPS, 30) # frame rate\n\n\n# take first frame of the video\nret,frame = vcap.read()\nframe = cv2.flip(frame,1)\n\n\n# setup initial location of window\nr,h,c,w = nheight/4,nheight/3,nwidth/4,nwidth/3 # simply hardcoded the values\ntrack_window = (c,r,w,h)\n\n\n# set up the ROI for tracking\nroi = frame[r:r+h, c:c+w]\nhsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\nmask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))\nroi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])\ncv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)\n\n# Setup the termination criteria, either 10 iteration or move by atleast 1 pt\nterm_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )\n\nwhile(1):\n ret ,frame = vcap.read()\n frame = cv2.flip(frame,1)\n \n if ret == True:\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)\n \n # apply meanshift to get the new location\n #ret, track_window = cv2.meanShift(dst, track_window, term_crit)\n ret, track_window = cv2.CamShift(dst, track_window, term_crit)\n \n # Draw it on image\n x,y,w,h = track_window\n img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)\n cv2.imshow('img2',img2)\n \n k = cv2.waitKey(60) & 0xff\n if k == 27:\n break\n else:\n cv2.imwrite(chr(k)+\".jpg\",img2)\n\n else:\n break\n\ncv2.destroyAllWindows()\nvcap.release()\n\n\n\n", "\n\n#----------\n# demo of ostu threshold to webcam frames\n#----------\n\n\nimport numpy as np\nimport cv2\n\n\ndef UseWebCam():\n\n vcap = cv2.VideoCapture(0)\n\n nw0 = np.float(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n nh0 = np.float(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n print('sensed size:')\n print( np.uint16(nw0), np.uint16(nh0) )\n \n nheight = np.uint16(400)\n nwidth = np.uint16( (nw0/(0.01+nh0) ) * nheight )\n \n print('displayed size:')\n print( nwidth, nheight )\n\n vcap.set(cv2.CAP_PROP_FRAME_WIDTH, nwidth) #set width\n vcap.set(cv2.CAP_PROP_FRAME_HEIGHT, nheight) #set height\n vcap.set(cv2.CAP_PROP_FPS, 30) # frame rate\n\n# vcap.set(cv2.CAP_PROP_EXPOSURE, 0.001) #set exposure time\n\n\n while(True):\n # Capture frame-by-frame\n ret, frame = vcap.read()\n # flip left-right\n frame = cv2.flip(frame, 1)\n \n \n '''HSV space'''\n hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n '''RGB space'''\n rgb = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n '''gray'''\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n \n ret,th = cv2.threshold(gray,0,255,cv2.THRESH_OTSU)\n \n '''X'''\n X = np.vstack((gray, th))\n\n cv2.putText(X,'raw image',(3,30),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,0,0),1,cv2.LINE_AA)\n cv2.putText(X,'Otsu threshold',(3,nheight+30),cv2.FONT_HERSHEY_SIMPLEX,0.5,(128,0,0),1,cv2.LINE_AA)\n \n '''show'''\n cv2.imshow('Otsu threshold - To quit, press \\'q\\'', X)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n # When everything done, release the capture\n vcap.release()\n cv2.destroyAllWindows()\n\n\n#----------\n# M A I N\n#----------\n\nUseWebCam()\n" ]
[ [ "numpy.array" ], [ "numpy.uint16", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ADI10HERO/statsmodels
[ "d932507dc71c93b5d162a678042fb0701ee4bf57", "d932507dc71c93b5d162a678042fb0701ee4bf57", "d932507dc71c93b5d162a678042fb0701ee4bf57" ]
[ "statsmodels/tsa/statespace/simulation_smoother.py", "statsmodels/tsa/statespace/dynamic_factor.py", "statsmodels/multivariate/factor.py" ]
[ "\"\"\"\nState Space Representation, Kalman Filter, Smoother, and Simulation Smoother\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\n\nimport numpy as np\nfrom .kalman_smoother import KalmanSmoother\nfrom . import tools\n\nSIMULATION_STATE = 0x01\nSIMULATION_DISTURBANCE = 0x04\nSIMULATION_ALL = (\n SIMULATION_STATE | SIMULATION_DISTURBANCE\n)\n\n\nclass SimulationSmoother(KalmanSmoother):\n r\"\"\"\n State space representation of a time series process, with Kalman filter\n and smoother, and with simulation smoother.\n\n Parameters\n ----------\n k_endog : {array_like, int}\n The observed time-series process :math:`y` if array like or the\n number of variables in the process if an integer.\n k_states : int\n The dimension of the unobserved state process.\n k_posdef : int, optional\n The dimension of a guaranteed positive definite covariance matrix\n describing the shocks in the measurement equation. Must be less than\n or equal to `k_states`. Default is `k_states`.\n simulation_smooth_results_class : class, optional\n Default results class to use to save output of simulation smoothing.\n Default is `SimulationSmoothResults`. If specified, class must extend\n from `SimulationSmoothResults`.\n simulation_smoother_classes : dict, optional\n Dictionary with BLAS prefixes as keys and classes as values.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices, for Kalman filtering options, for Kalman smoothing\n options, or for Simulation smoothing options.\n See `Representation`, `KalmanFilter`, and `KalmanSmoother` for more\n details.\n \"\"\"\n\n simulation_outputs = [\n 'simulate_state', 'simulate_disturbance', 'simulate_all'\n ]\n\n def __init__(self, k_endog, k_states, k_posdef=None,\n simulation_smooth_results_class=None,\n simulation_smoother_classes=None, **kwargs):\n super(SimulationSmoother, self).__init__(\n k_endog, k_states, k_posdef, **kwargs\n )\n\n if simulation_smooth_results_class is None:\n simulation_smooth_results_class = SimulationSmoothResults\n self.simulation_smooth_results_class = simulation_smooth_results_class\n\n self.prefix_simulation_smoother_map = (\n simulation_smoother_classes\n if simulation_smoother_classes is not None\n else tools.prefix_simulation_smoother_map.copy())\n\n # Holder for an model-level simulation smoother objects, to use in\n # simulating new time series.\n self._simulators = {}\n\n def get_simulation_output(self, simulation_output=None,\n simulate_state=None, simulate_disturbance=None,\n simulate_all=None, **kwargs):\n r\"\"\"\n Get simulation output bitmask\n\n Helper method to get final simulation output bitmask from a set of\n optional arguments including the bitmask itself and possibly boolean\n flags.\n\n Parameters\n ----------\n simulation_output : int, optional\n Simulation output bitmask. If this is specified, it is simply\n returned and the other arguments are ignored.\n simulate_state : bool, optional\n Whether or not to include the state in the simulation output.\n simulate_disturbance : bool, optional\n Whether or not to include the state and observation disturbances\n in the simulation output.\n simulate_all : bool, optional\n Whether or not to include all simulation output.\n \\*\\*kwargs\n Additional keyword arguments. 
Present so that calls to this method\n can use \\*\\*kwargs without clearing out additional arguments.\n \"\"\"\n # If we do not explicitly have simulation_output, try to get it from\n # kwargs\n if simulation_output is None:\n simulation_output = 0\n\n if simulate_state:\n simulation_output |= SIMULATION_STATE\n if simulate_disturbance:\n simulation_output |= SIMULATION_DISTURBANCE\n if simulate_all:\n simulation_output |= SIMULATION_ALL\n\n # Handle case of no information in kwargs\n if simulation_output == 0:\n\n # If some arguments were passed, but we still do not have any\n # simulation output, raise an exception\n argument_set = not all([\n simulate_state is None, simulate_disturbance is None,\n simulate_all is None\n ])\n if argument_set:\n raise ValueError(\"Invalid simulation output options:\"\n \" given options would result in no\"\n \" output.\")\n\n # Otherwise set simulation output to be the same as smoother\n # output\n simulation_output = self.smoother_output\n\n return simulation_output\n\n def _simulate(self, nsimulations, measurement_shocks, state_shocks,\n initial_state):\n # Initialize the filter and representation\n prefix, dtype, create_smoother, create_filter, create_statespace = (\n self._initialize_smoother())\n\n # Initialize the state\n self._initialize_state(prefix=prefix)\n\n # Create the simulator if necessary\n if (prefix not in self._simulators or\n not nsimulations == self._simulators[prefix].nobs):\n\n simulation_output = 0\n # Kalman smoother parameters\n smoother_output = -1\n # Kalman filter parameters\n filter_method = self.filter_method\n inversion_method = self.inversion_method\n stability_method = self.stability_method\n conserve_memory = self.conserve_memory\n filter_timing = self.filter_timing\n loglikelihood_burn = self.loglikelihood_burn\n tolerance = self.tolerance\n\n # Create a new simulation smoother object\n cls = self.prefix_simulation_smoother_map[prefix]\n self._simulators[prefix] = cls(\n self._statespaces[prefix],\n filter_method, inversion_method, stability_method,\n conserve_memory, filter_timing, tolerance, loglikelihood_burn,\n smoother_output, simulation_output, nsimulations\n )\n simulator = self._simulators[prefix]\n\n # Set the disturbance variates\n if measurement_shocks is not None and state_shocks is not None:\n disturbance_variates = np.atleast_1d(np.array(\n np.r_[measurement_shocks.ravel(), state_shocks.ravel()],\n dtype=self.dtype\n ).squeeze())\n simulator.set_disturbance_variates(disturbance_variates,\n pretransformed=True)\n elif measurement_shocks is None and state_shocks is None:\n pass\n elif measurement_shocks is not None:\n raise ValueError('Must set `state_shocks` if `measurement_shocks`'\n ' is set.')\n elif state_shocks is not None:\n raise ValueError('Must set `measurement_shocks` if `state_shocks`'\n ' is set.')\n\n # Set the intial state vector\n initial_state = np.atleast_1d(np.array(\n initial_state, dtype=self.dtype\n ).squeeze())\n simulator.set_initial_state(initial_state)\n\n # Perform simulation smoothing\n # Note: simulation_output=-1 corresponds to whatever was setup when\n # the simulation smoother was constructed\n simulator.simulate(-1)\n\n simulated_obs = np.array(simulator.generated_obs, copy=True)\n simulated_state = np.array(simulator.generated_state, copy=True)\n\n return (\n simulated_obs[:, :nsimulations].T,\n simulated_state[:, :nsimulations].T\n )\n\n def simulation_smoother(self, simulation_output=None,\n results_class=None, prefix=None, **kwargs):\n r\"\"\"\n Retrieve a simulation 
smoother for the statespace model.\n\n Parameters\n ----------\n simulation_output : int, optional\n Determines which simulation smoother output is calculated.\n Default is all (including state and disturbances).\n simulation_smooth_results_class : class, optional\n Default results class to use to save output of simulation\n smoothing. Default is `SimulationSmoothResults`. If specified,\n class must extend from `SimulationSmoothResults`.\n prefix : str\n The prefix of the datatype. Usually only used internally.\n **kwargs\n Additional keyword arguments, used to set the simulation output.\n See `set_simulation_output` for more details.\n\n Returns\n -------\n SimulationSmoothResults\n \"\"\"\n\n # Set the class to be the default results class, if None provided\n if results_class is None:\n results_class = self.simulation_smooth_results_class\n\n # Instantiate a new results object\n if not issubclass(results_class, SimulationSmoothResults):\n raise ValueError('Invalid results class provided.')\n\n # Make sure we have the required Statespace representation\n prefix, dtype, create_smoother, create_filter, create_statespace = (\n self._initialize_smoother())\n\n # Simulation smoother parameters\n simulation_output = self.get_simulation_output(simulation_output,\n **kwargs)\n\n # Kalman smoother parameters\n smoother_output = kwargs.get('smoother_output', simulation_output)\n\n # Kalman filter parameters\n filter_method = kwargs.get('filter_method', self.filter_method)\n inversion_method = kwargs.get('inversion_method',\n self.inversion_method)\n stability_method = kwargs.get('stability_method',\n self.stability_method)\n conserve_memory = kwargs.get('conserve_memory',\n self.conserve_memory)\n filter_timing = kwargs.get('filter_timing',\n self.filter_timing)\n loglikelihood_burn = kwargs.get('loglikelihood_burn',\n self.loglikelihood_burn)\n tolerance = kwargs.get('tolerance', self.tolerance)\n\n # Create a new simulation smoother object\n cls = self.prefix_simulation_smoother_map[prefix]\n simulation_smoother = cls(\n self._statespaces[prefix],\n filter_method, inversion_method, stability_method, conserve_memory,\n filter_timing, tolerance, loglikelihood_burn, smoother_output,\n simulation_output\n )\n\n # Create results object\n results = results_class(self, simulation_smoother)\n\n return results\n\n\nclass SimulationSmoothResults(object):\n r\"\"\"\n Results from applying the Kalman smoother and/or filter to a state space\n model.\n\n Parameters\n ----------\n model : Representation\n A Statespace representation\n simulation_smoother : {{prefix}}SimulationSmoother object\n The Cython simulation smoother object with which to simulation smooth.\n\n Attributes\n ----------\n model : Representation\n A Statespace representation\n dtype : dtype\n Datatype of representation matrices\n prefix : str\n BLAS prefix of representation matrices\n simulation_output : int\n Bitmask controlling simulation output.\n simulate_state : bool\n Flag for if the state is included in simulation output.\n simulate_disturbance : bool\n Flag for if the state and observation disturbances are included in\n simulation output.\n simulate_all : bool\n Flag for if simulation output should include everything.\n generated_measurement_disturbance : array\n Measurement disturbance variates used to genereate the observation\n vector.\n generated_state_disturbance : array\n State disturbance variates used to genereate the state and\n observation vectors.\n generated_obs : array\n Generated observation vector produced as a 
byproduct of simulation\n smoothing.\n generated_state : array\n Generated state vector produced as a byproduct of simulation smoothing.\n simulated_state : array\n Simulated state.\n simulated_measurement_disturbance : array\n Simulated measurement disturbance.\n simulated_state_disturbance : array\n Simulated state disturbance.\n \"\"\"\n\n def __init__(self, model, simulation_smoother):\n self.model = model\n self.prefix = model.prefix\n self.dtype = model.dtype\n self._simulation_smoother = simulation_smoother\n\n # Output\n self._generated_measurement_disturbance = None\n self._generated_state_disturbance = None\n self._generated_obs = None\n self._generated_state = None\n self._simulated_state = None\n self._simulated_measurement_disturbance = None\n self._simulated_state_disturbance = None\n\n @property\n def simulation_output(self):\n return self._simulation_smoother.simulation_output\n\n @simulation_output.setter\n def simulation_output(self, value):\n self._simulation_smoother.simulation_output = value\n\n @property\n def simulate_state(self):\n return bool(self.simulation_output & SIMULATION_STATE)\n\n @simulate_state.setter\n def simulate_state(self, value):\n if bool(value):\n self.simulation_output = self.simulation_output | SIMULATION_STATE\n else:\n self.simulation_output = self.simulation_output & ~SIMULATION_STATE\n\n @property\n def simulate_disturbance(self):\n return bool(self.simulation_output & SIMULATION_DISTURBANCE)\n\n @simulate_disturbance.setter\n def simulate_disturbance(self, value):\n if bool(value):\n self.simulation_output = (\n self.simulation_output | SIMULATION_DISTURBANCE)\n else:\n self.simulation_output = (\n self.simulation_output & ~SIMULATION_DISTURBANCE)\n\n @property\n def simulate_all(self):\n return bool(self.simulation_output & SIMULATION_ALL)\n\n @simulate_all.setter\n def simulate_all(self, value):\n if bool(value):\n self.simulation_output = self.simulation_output | SIMULATION_ALL\n else:\n self.simulation_output = self.simulation_output & ~SIMULATION_ALL\n\n @property\n def generated_measurement_disturbance(self):\n r\"\"\"\n Randomly drawn measurement disturbance variates\n\n Used to construct `generated_obs`.\n\n Notes\n -----\n\n .. math::\n\n \\varepsilon_t^+ ~ N(0, H_t)\n\n If `disturbance_variates` were provided to the `simulate()` method,\n then this returns those variates (which were N(0,1)) transformed to the\n distribution above.\n \"\"\"\n if self._generated_measurement_disturbance is None:\n end = self.model.nobs * self.model.k_endog\n self._generated_measurement_disturbance = np.array(\n self._simulation_smoother.disturbance_variates[:end],\n copy=True).reshape(self.model.nobs, self.model.k_endog)\n return self._generated_measurement_disturbance\n\n @property\n def generated_state_disturbance(self):\n r\"\"\"\n Randomly drawn state disturbance variates, used to construct\n `generated_state` and `generated_obs`.\n\n Notes\n -----\n\n .. 
math::\n\n \\eta_t^+ ~ N(0, Q_t)\n\n If `disturbance_variates` were provided to the `simulate()` method,\n then this returns those variates (which were N(0,1)) transformed to the\n distribution above.\n \"\"\"\n if self._generated_state_disturbance is None:\n start = self.model.nobs * self.model.k_endog\n self._generated_state_disturbance = np.array(\n self._simulation_smoother.disturbance_variates[start:],\n copy=True).reshape(self.model.nobs, self.model.k_posdef)\n return self._generated_state_disturbance\n\n @property\n def generated_obs(self):\n r\"\"\"\n Generated vector of observations by iterating on the observation and\n transition equations, given a random initial state draw and random\n disturbance draws.\n\n Notes\n -----\n\n .. math::\n\n y_t^+ = d_t + Z_t \\alpha_t^+ + \\varepsilon_t^+\n \"\"\"\n if self._generated_obs is None:\n self._generated_obs = np.array(\n self._simulation_smoother.generated_obs, copy=True\n )\n return self._generated_obs\n\n @property\n def generated_state(self):\n r\"\"\"\n Generated vector of states by iterating on the transition equation,\n given a random initial state draw and random disturbance draws.\n\n Notes\n -----\n\n .. math::\n\n \\alpha_{t+1}^+ = c_t + T_t \\alpha_t^+ + \\eta_t^+\n \"\"\"\n if self._generated_state is None:\n self._generated_state = np.array(\n self._simulation_smoother.generated_state, copy=True\n )\n return self._generated_state\n\n @property\n def simulated_state(self):\n r\"\"\"\n Random draw of the state vector from its conditional distribution.\n\n Notes\n -----\n\n .. math::\n\n \\alpha ~ p(\\alpha \\mid Y_n)\n \"\"\"\n if self._simulated_state is None:\n self._simulated_state = np.array(\n self._simulation_smoother.simulated_state, copy=True\n )\n return self._simulated_state\n\n @property\n def simulated_measurement_disturbance(self):\n r\"\"\"\n Random draw of the measurement disturbance vector from its conditional\n distribution.\n\n Notes\n -----\n\n .. math::\n\n \\varepsilon ~ N(\\hat \\varepsilon, Var(\\hat \\varepsilon \\mid Y_n))\n \"\"\"\n if self._simulated_measurement_disturbance is None:\n self._simulated_measurement_disturbance = np.array(\n self._simulation_smoother.simulated_measurement_disturbance,\n copy=True\n )\n return self._simulated_measurement_disturbance\n\n @property\n def simulated_state_disturbance(self):\n r\"\"\"\n Random draw of the state disturbance vector from its conditional\n distribution.\n\n Notes\n -----\n\n .. math::\n\n \\eta ~ N(\\hat \\eta, Var(\\hat \\eta \\mid Y_n))\n \"\"\"\n if self._simulated_state_disturbance is None:\n self._simulated_state_disturbance = np.array(\n self._simulation_smoother.simulated_state_disturbance,\n copy=True\n )\n return self._simulated_state_disturbance\n\n def simulate(self, simulation_output=-1, disturbance_variates=None,\n initial_state_variates=None, pretransformed_variates=False):\n r\"\"\"\n Perform simulation smoothing\n\n Does not return anything, but populates the object's `simulated_*`\n attributes, as specified by simulation output.\n\n Parameters\n ----------\n simulation_output : int, optional\n Bitmask controlling simulation output. Default is to use the\n simulation output defined in object initialization.\n disturbance_variates : array_likes, optional\n Random values to use as disturbance variates, distributed standard\n Normal. Usually only specified if results are to be replicated\n (e.g. to enforce a seed) or for testing. 
If not specified, random\n variates are drawn.\n initial_state_variates : array_likes, optional\n Random values to use as initial state variates. Usually only\n specified if results are to be replicated (e.g. to enforce a seed)\n or for testing. If not specified, random variates are drawn.\n \"\"\"\n # Clear any previous output\n self._generated_measurement_disturbance = None\n self._generated_state_disturbance = None\n self._generated_state = None\n self._generated_obs = None\n self._generated_state = None\n self._simulated_state = None\n self._simulated_measurement_disturbance = None\n self._simulated_state_disturbance = None\n\n # Re-initialize the _statespace representation\n prefix, dtype, create_smoother, create_filter, create_statespace = (\n self.model._initialize_smoother())\n\n # Initialize the state\n self.model._initialize_state(prefix=prefix)\n\n # Draw the (independent) random variates for disturbances in the\n # simulation\n if disturbance_variates is not None:\n self._simulation_smoother.set_disturbance_variates(\n np.array(disturbance_variates, dtype=self.dtype),\n pretransformed=pretransformed_variates\n )\n else:\n self._simulation_smoother.draw_disturbance_variates()\n\n # Draw the (independent) random variates for the initial states in the\n # simulation\n if initial_state_variates is not None:\n self._simulation_smoother.set_initial_state_variates(\n np.array(initial_state_variates, dtype=self.dtype),\n pretransformed=pretransformed_variates\n )\n else:\n self._simulation_smoother.draw_initial_state_variates()\n\n # Perform simulation smoothing\n # Note: simulation_output=-1 corresponds to whatever was setup when\n # the simulation smoother was constructed\n self._simulation_smoother.simulate(simulation_output)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDynamic factor model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom .mlemodel import MLEModel, MLEResults, MLEResultsWrapper\nfrom .tools import (\n is_invertible, prepare_exog,\n constrain_stationary_univariate, unconstrain_stationary_univariate,\n constrain_stationary_multivariate, unconstrain_stationary_multivariate\n)\nfrom statsmodels.multivariate.pca import PCA\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tsa.vector_ar.var_model import VAR\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom statsmodels.tools.tools import Bunch\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.tsa.tsatools import lagmat\nfrom statsmodels.tools.decorators import cache_readonly\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.compat.pandas import Appender\n\n\nclass DynamicFactor(MLEModel):\n r\"\"\"\n Dynamic factor model\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n exog : array_like, optional\n Array of exogenous regressors for the observation equation, shaped\n nobs x k_exog.\n k_factors : int\n The number of unobserved factors.\n factor_order : int\n The order of the vector autoregression followed by the factors.\n error_cov_type : {'scalar', 'diagonal', 'unstructured'}, optional\n The structure of the covariance matrix of the observation error term,\n where \"unstructured\" puts no restrictions on the matrix, \"diagonal\"\n requires it to be any diagonal matrix (uncorrelated errors), and\n \"scalar\" requires it to be a scalar times the identity matrix. 
Default\n is \"diagonal\".\n error_order : int, optional\n The order of the vector autoregression followed by the observation\n error component. Default is None, corresponding to white noise errors.\n error_var : bool, optional\n Whether or not to model the errors jointly via a vector autoregression,\n rather than as individual autoregressions. Has no effect unless\n `error_order` is set. Default is False.\n enforce_stationarity : bool, optional\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model. Default is True.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices or for Kalman filtering options. See `Representation`, and\n `KalmanFilter` for more details.\n\n Attributes\n ----------\n exog : array_like, optional\n Array of exogenous regressors for the observation equation, shaped\n nobs x k_exog.\n k_factors : int\n The number of unobserved factors.\n factor_order : int\n The order of the vector autoregression followed by the factors.\n error_cov_type : {'diagonal', 'unstructured'}\n The structure of the covariance matrix of the error term, where\n \"unstructured\" puts no restrictions on the matrix and \"diagonal\"\n requires it to be a diagonal matrix (uncorrelated errors).\n error_order : int\n The order of the vector autoregression followed by the observation\n error component.\n error_var : bool\n Whether or not to model the errors jointly via a vector autoregression,\n rather than as individual autoregressions. Has no effect unless\n `error_order` is set.\n enforce_stationarity : bool, optional\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model. Default is True.\n\n Notes\n -----\n The dynamic factor model considered here is in the so-called static form,\n and is specified:\n\n .. math::\n\n y_t & = \\Lambda f_t + B x_t + u_t \\\\\n f_t & = A_1 f_{t-1} + \\dots + A_p f_{t-p} + \\eta_t \\\\\n u_t & = C_1 u_{t-1} + \\dots + C_1 f_{t-q} + \\varepsilon_t\n\n where there are `k_endog` observed series and `k_factors` unobserved\n factors. Thus :math:`y_t` is a `k_endog` x 1 vector and :math:`f_t` is a\n `k_factors` x 1 vector.\n\n :math:`x_t` are optional exogenous vectors, shaped `k_exog` x 1.\n\n :math:`\\eta_t` and :math:`\\varepsilon_t` are white noise error terms. In\n order to identify the factors, :math:`Var(\\eta_t) = I`. Denote\n :math:`Var(\\varepsilon_t) \\equiv \\Sigma`.\n\n Options related to the unobserved factors:\n\n - `k_factors`: this is the dimension of the vector :math:`f_t`, above.\n To exclude factors completely, set `k_factors = 0`.\n - `factor_order`: this is the number of lags to include in the factor\n evolution equation, and corresponds to :math:`p`, above. To have static\n factors, set `factor_order = 0`.\n\n Options related to the observation error term :math:`u_t`:\n\n - `error_order`: the number of lags to include in the error evolution\n equation; corresponds to :math:`q`, above. To have white noise errors,\n set `error_order = 0` (this is the default).\n - `error_cov_type`: this controls the form of the covariance matrix\n :math:`\\Sigma`. If it is \"dscalar\", then :math:`\\Sigma = \\sigma^2 I`. If\n it is \"diagonal\", then\n :math:`\\Sigma = \\text{diag}(\\sigma_1^2, \\dots, \\sigma_n^2)`. If it is\n \"unstructured\", then :math:`\\Sigma` is any valid variance / covariance\n matrix (i.e. 
symmetric and positive definite).\n - `error_var`: this controls whether or not the errors evolve jointly\n according to a VAR(q), or individually according to separate AR(q)\n processes. In terms of the formulation above, if `error_var = False`,\n then the matrices :math:C_i` are diagonal, otherwise they are general\n VAR matrices.\n\n References\n ----------\n .. [*] Lütkepohl, Helmut. 2007.\n New Introduction to Multiple Time Series Analysis.\n Berlin: Springer.\n \"\"\"\n\n def __init__(self, endog, k_factors, factor_order, exog=None,\n error_order=0, error_var=False, error_cov_type='diagonal',\n enforce_stationarity=True, **kwargs):\n\n # Model properties\n self.enforce_stationarity = enforce_stationarity\n\n # Factor-related properties\n self.k_factors = k_factors\n self.factor_order = factor_order\n\n # Error-related properties\n self.error_order = error_order\n self.error_var = error_var and error_order > 0\n self.error_cov_type = error_cov_type\n\n # Exogenous data\n (self.k_exog, exog) = prepare_exog(exog)\n\n # Note: at some point in the future might add state regression, as in\n # SARIMAX.\n self.mle_regression = self.k_exog > 0\n\n # We need to have an array or pandas at this point\n if not _is_using_pandas(endog, None):\n endog = np.asanyarray(endog, order='C')\n\n # Save some useful model orders, internally used\n k_endog = endog.shape[1] if endog.ndim > 1 else 1\n self._factor_order = max(1, self.factor_order) * self.k_factors\n self._error_order = self.error_order * k_endog\n\n # Calculate the number of states\n k_states = self._factor_order\n k_posdef = self.k_factors\n if self.error_order > 0:\n k_states += self._error_order\n k_posdef += k_endog\n\n # We can still estimate the model with no dynamic state (e.g. SUR), we\n # just need to have one state that does nothing.\n self._unused_state = False\n if k_states == 0:\n k_states = 1\n k_posdef = 1\n self._unused_state = True\n\n # Test for non-multivariate endog\n if k_endog < 2:\n raise ValueError('The dynamic factors model is only valid for'\n ' multivariate time series.')\n\n # Test for too many factors\n if self.k_factors >= k_endog:\n raise ValueError('Number of factors must be less than the number'\n ' of endogenous variables.')\n\n # Test for invalid error_cov_type\n if self.error_cov_type not in ['scalar', 'diagonal', 'unstructured']:\n raise ValueError('Invalid error covariance matrix type'\n ' specification.')\n\n # By default, initialize as stationary\n kwargs.setdefault('initialization', 'stationary')\n\n # Initialize the state space model\n super(DynamicFactor, self).__init__(\n endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs\n )\n\n # Set as time-varying model if we have exog\n if self.k_exog > 0:\n self.ssm._time_invariant = False\n\n # Initialize the components\n self.parameters = OrderedDict()\n self._initialize_loadings()\n self._initialize_exog()\n self._initialize_error_cov()\n self._initialize_factor_transition()\n self._initialize_error_transition()\n self.k_params = sum(self.parameters.values())\n\n # Cache parameter vector slices\n def _slice(key, offset):\n length = self.parameters[key]\n param_slice = np.s_[offset:offset + length]\n offset += length\n return param_slice, offset\n\n offset = 0\n self._params_loadings, offset = _slice('factor_loadings', offset)\n self._params_exog, offset = _slice('exog', offset)\n self._params_error_cov, offset = _slice('error_cov', offset)\n self._params_factor_transition, offset = (\n _slice('factor_transition', offset))\n 
self._params_error_transition, offset = (\n _slice('error_transition', offset))\n\n # Update _init_keys attached by super\n self._init_keys += ['k_factors', 'factor_order', 'error_order',\n 'error_var', 'error_cov_type',\n 'enforce_stationarity'] + list(kwargs.keys())\n\n def _initialize_loadings(self):\n # Initialize the parameters\n self.parameters['factor_loadings'] = self.k_endog * self.k_factors\n\n # Setup fixed components of state space matrices\n if self.error_order > 0:\n start = self._factor_order\n end = self._factor_order + self.k_endog\n self.ssm['design', :, start:end] = np.eye(self.k_endog)\n\n # Setup indices of state space matrices\n self._idx_loadings = np.s_['design', :, :self.k_factors]\n\n def _initialize_exog(self):\n # Initialize the parameters\n self.parameters['exog'] = self.k_exog * self.k_endog\n\n # If we have exog effects, then the obs intercept needs to be\n # time-varying\n if self.k_exog > 0:\n self.ssm['obs_intercept'] = np.zeros((self.k_endog, self.nobs))\n\n # Setup indices of state space matrices\n self._idx_exog = np.s_['obs_intercept', :self.k_endog, :]\n\n def _initialize_error_cov(self):\n if self.error_cov_type == 'scalar':\n self._initialize_error_cov_diagonal(scalar=True)\n elif self.error_cov_type == 'diagonal':\n self._initialize_error_cov_diagonal(scalar=False)\n elif self.error_cov_type == 'unstructured':\n self._initialize_error_cov_unstructured()\n\n def _initialize_error_cov_diagonal(self, scalar=False):\n # Initialize the parameters\n self.parameters['error_cov'] = 1 if scalar else self.k_endog\n\n # Setup fixed components of state space matrices\n\n # Setup indices of state space matrices\n k_endog = self.k_endog\n k_factors = self.k_factors\n idx = np.diag_indices(k_endog)\n if self.error_order > 0:\n matrix = 'state_cov'\n idx = (idx[0] + k_factors, idx[1] + k_factors)\n else:\n matrix = 'obs_cov'\n self._idx_error_cov = (matrix,) + idx\n\n def _initialize_error_cov_unstructured(self):\n # Initialize the parameters\n k_endog = self.k_endog\n self.parameters['error_cov'] = int(k_endog * (k_endog + 1) / 2)\n\n # Setup fixed components of state space matrices\n\n # Setup indices of state space matrices\n self._idx_lower_error_cov = np.tril_indices(self.k_endog)\n if self.error_order > 0:\n start = self.k_factors\n end = self.k_factors + self.k_endog\n self._idx_error_cov = (\n np.s_['state_cov', start:end, start:end])\n else:\n self._idx_error_cov = np.s_['obs_cov', :, :]\n\n def _initialize_factor_transition(self):\n order = self.factor_order * self.k_factors\n k_factors = self.k_factors\n\n # Initialize the parameters\n self.parameters['factor_transition'] = (\n self.factor_order * self.k_factors**2)\n\n # Setup fixed components of state space matrices\n # VAR(p) for factor transition\n if self.k_factors > 0:\n if self.factor_order > 0:\n self.ssm['transition', k_factors:order, :order - k_factors] = (\n np.eye(order - k_factors))\n\n self.ssm['selection', :k_factors, :k_factors] = np.eye(k_factors)\n # Identification requires constraining the state covariance to an\n # identity matrix\n self.ssm['state_cov', :k_factors, :k_factors] = np.eye(k_factors)\n\n # Setup indices of state space matrices\n self._idx_factor_transition = np.s_['transition', :k_factors, :order]\n\n def _initialize_error_transition(self):\n # Initialize the appropriate situation\n if self.error_order == 0:\n self._initialize_error_transition_white_noise()\n else:\n # Generic setup fixed components of state space matrices\n # VAR(q) for error transition\n # (in the 
individual AR case, we still have the VAR(q) companion\n # matrix structure, but force the coefficient matrices to be\n # diagonal)\n k_endog = self.k_endog\n k_factors = self.k_factors\n _factor_order = self._factor_order\n _error_order = self._error_order\n _slice = np.s_['selection',\n _factor_order:_factor_order + k_endog,\n k_factors:k_factors + k_endog]\n self.ssm[_slice] = np.eye(k_endog)\n _slice = np.s_[\n 'transition',\n _factor_order + k_endog:_factor_order + _error_order,\n _factor_order:_factor_order + _error_order - k_endog]\n self.ssm[_slice] = np.eye(_error_order - k_endog)\n\n # Now specialized setups\n if self.error_var:\n self._initialize_error_transition_var()\n else:\n self._initialize_error_transition_individual()\n\n def _initialize_error_transition_white_noise(self):\n # Initialize the parameters\n self.parameters['error_transition'] = 0\n\n # No fixed components of state space matrices\n\n # Setup indices of state space matrices (just an empty slice)\n self._idx_error_transition = np.s_['transition', 0:0, 0:0]\n\n def _initialize_error_transition_var(self):\n k_endog = self.k_endog\n _factor_order = self._factor_order\n _error_order = self._error_order\n\n # Initialize the parameters\n self.parameters['error_transition'] = _error_order * k_endog\n\n # Fixed components already setup above\n\n # Setup indices of state space matrices\n # Here we want to set all of the elements of the coefficient matrices,\n # the same as in a VAR specification\n self._idx_error_transition = np.s_[\n 'transition',\n _factor_order:_factor_order + k_endog,\n _factor_order:_factor_order + _error_order]\n\n def _initialize_error_transition_individual(self):\n k_endog = self.k_endog\n _error_order = self._error_order\n\n # Initialize the parameters\n self.parameters['error_transition'] = _error_order\n\n # Fixed components already setup above\n\n # Setup indices of state space matrices\n # Here we want to set only the diagonal elements of the coefficient\n # matrices, and we want to set them in order by equation, not by\n # matrix (i.e. 
set the first element of the first matrix's diagonal,\n # then set the first element of the second matrix's diagonal, then...)\n\n # The basic setup is a tiled list of diagonal indices, one for each\n # coefficient matrix\n idx = np.tile(np.diag_indices(k_endog), self.error_order)\n # Now we need to shift the rows down to the correct location\n row_shift = self._factor_order\n # And we need to shift the columns in an increasing way\n col_inc = self._factor_order + np.repeat(\n [i * k_endog for i in range(self.error_order)], k_endog)\n idx[0] += row_shift\n idx[1] += col_inc\n\n # Make a copy (without the row shift) so that we can easily get the\n # diagonal parameters back out of a generic coefficients matrix array\n idx_diag = idx.copy()\n idx_diag[0] -= row_shift\n idx_diag[1] -= self._factor_order\n idx_diag = idx_diag[:, np.lexsort((idx_diag[1], idx_diag[0]))]\n self._idx_error_diag = (idx_diag[0], idx_diag[1])\n\n # Finally, we want to fill the entries in in the correct order, which\n # is to say we want to fill in lexicographically, first by row then by\n # column\n idx = idx[:, np.lexsort((idx[1], idx[0]))]\n self._idx_error_transition = np.s_['transition', idx[0], idx[1]]\n\n def clone(self, endog, exog=None, **kwargs):\n return self._clone_from_init_kwds(endog, exog=exog, **kwargs)\n\n @property\n def _res_classes(self):\n return {'fit': (DynamicFactorResults, DynamicFactorResultsWrapper)}\n\n @property\n def start_params(self):\n params = np.zeros(self.k_params, dtype=np.float64)\n\n endog = self.endog.copy()\n mask = ~np.any(np.isnan(endog), axis=1)\n endog = endog[mask]\n\n # 1. Factor loadings (estimated via PCA)\n if self.k_factors > 0:\n # Use principal components + OLS as starting values\n res_pca = PCA(endog, ncomp=self.k_factors)\n mod_ols = OLS(endog, res_pca.factors)\n res_ols = mod_ols.fit()\n\n # Using OLS params for the loadings tends to gives higher starting\n # log-likelihood.\n params[self._params_loadings] = res_ols.params.T.ravel()\n # params[self._params_loadings] = res_pca.loadings.ravel()\n\n # However, using res_ols.resid tends to causes non-invertible\n # starting VAR coefficients for error VARs\n # endog = res_ols.resid\n endog = endog - np.dot(res_pca.factors, res_pca.loadings.T)\n\n # 2. Exog (OLS on residuals)\n if self.k_exog > 0:\n mod_ols = OLS(endog, exog=self.exog)\n res_ols = mod_ols.fit()\n # In the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...\n params[self._params_exog] = res_ols.params.T.ravel()\n endog = res_ols.resid\n\n # 3. Factors (VAR on res_pca.factors)\n stationary = True\n if self.k_factors > 1 and self.factor_order > 0:\n # 3a. VAR transition (OLS on factors estimated via PCA)\n mod_factors = VAR(res_pca.factors)\n res_factors = mod_factors.fit(maxlags=self.factor_order, ic=None,\n trend='nc')\n # Save the parameters\n params[self._params_factor_transition] = (\n res_factors.params.T.ravel())\n\n # Test for stationarity\n coefficient_matrices = (\n params[self._params_factor_transition].reshape(\n self.k_factors * self.factor_order, self.k_factors\n ).T\n ).reshape(self.k_factors, self.k_factors, self.factor_order).T\n\n stationary = is_invertible([1] + list(-coefficient_matrices))\n elif self.k_factors > 0 and self.factor_order > 0:\n # 3b. 
AR transition\n Y = res_pca.factors[self.factor_order:]\n X = lagmat(res_pca.factors, self.factor_order, trim='both')\n params_ar = np.linalg.pinv(X).dot(Y)\n stationary = is_invertible(np.r_[1, -params_ar.squeeze()])\n params[self._params_factor_transition] = params_ar[:, 0]\n\n # Check for stationarity\n if not stationary and self.enforce_stationarity:\n raise ValueError('Non-stationary starting autoregressive'\n ' parameters found with `enforce_stationarity`'\n ' set to True.')\n\n # 4. Errors\n if self.error_order == 0:\n if self.error_cov_type == 'scalar':\n params[self._params_error_cov] = endog.var(axis=0).mean()\n elif self.error_cov_type == 'diagonal':\n params[self._params_error_cov] = endog.var(axis=0)\n elif self.error_cov_type == 'unstructured':\n cov_factor = np.diag(endog.std(axis=0))\n params[self._params_error_cov] = (\n cov_factor[self._idx_lower_error_cov].ravel())\n elif self.error_var:\n mod_errors = VAR(endog)\n res_errors = mod_errors.fit(maxlags=self.error_order, ic=None,\n trend='nc')\n\n # Test for stationarity\n coefficient_matrices = (\n np.array(res_errors.params.T).ravel().reshape(\n self.k_endog * self.error_order, self.k_endog\n ).T\n ).reshape(self.k_endog, self.k_endog, self.error_order).T\n\n stationary = is_invertible([1] + list(-coefficient_matrices))\n if not stationary and self.enforce_stationarity:\n raise ValueError('Non-stationary starting error autoregressive'\n ' parameters found with'\n ' `enforce_stationarity` set to True.')\n\n # Get the error autoregressive parameters\n params[self._params_error_transition] = (\n np.array(res_errors.params.T).ravel())\n\n # Get the error covariance parameters\n if self.error_cov_type == 'scalar':\n params[self._params_error_cov] = (\n res_errors.sigma_u.diagonal().mean())\n elif self.error_cov_type == 'diagonal':\n params[self._params_error_cov] = res_errors.sigma_u.diagonal()\n elif self.error_cov_type == 'unstructured':\n try:\n cov_factor = np.linalg.cholesky(res_errors.sigma_u)\n except np.linalg.LinAlgError:\n cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (\n res_errors.sigma_u.diagonal().mean()**0.5)\n cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (\n res_errors.sigma_u.diagonal().mean()**0.5)\n params[self._params_error_cov] = (\n cov_factor[self._idx_lower_error_cov].ravel())\n else:\n error_ar_params = []\n error_cov_params = []\n for i in range(self.k_endog):\n mod_error = ARIMA(endog[:, i], order=(self.error_order, 0, 0),\n trend='n', enforce_stationarity=True)\n res_error = mod_error.fit(method='burg')\n error_ar_params += res_error.params[:self.error_order].tolist()\n error_cov_params += res_error.params[-1:].tolist()\n\n params[self._params_error_transition] = np.r_[error_ar_params]\n params[self._params_error_cov] = np.r_[error_cov_params]\n\n return params\n\n @property\n def param_names(self):\n param_names = []\n endog_names = self.endog_names\n\n # 1. Factor loadings\n param_names += [\n 'loading.f%d.%s' % (j+1, endog_names[i])\n for i in range(self.k_endog)\n for j in range(self.k_factors)\n ]\n\n # 2. Exog\n # Recall these are in the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...\n param_names += [\n 'beta.%s.%s' % (self.exog_names[j], endog_names[i])\n for i in range(self.k_endog)\n for j in range(self.k_exog)\n ]\n\n # 3. 
Error covariances\n if self.error_cov_type == 'scalar':\n param_names += ['sigma2']\n elif self.error_cov_type == 'diagonal':\n param_names += [\n 'sigma2.%s' % endog_names[i]\n for i in range(self.k_endog)\n ]\n elif self.error_cov_type == 'unstructured':\n param_names += [\n 'cov.chol[%d,%d]' % (i + 1, j + 1)\n for i in range(self.k_endog)\n for j in range(i+1)\n ]\n\n # 4. Factor transition VAR\n param_names += [\n 'L%d.f%d.f%d' % (i+1, k+1, j+1)\n for j in range(self.k_factors)\n for i in range(self.factor_order)\n for k in range(self.k_factors)\n ]\n\n # 5. Error transition VAR\n if self.error_var:\n param_names += [\n 'L%d.e(%s).e(%s)' % (i+1, endog_names[k], endog_names[j])\n for j in range(self.k_endog)\n for i in range(self.error_order)\n for k in range(self.k_endog)\n ]\n else:\n param_names += [\n 'L%d.e(%s).e(%s)' % (i+1, endog_names[j], endog_names[j])\n for j in range(self.k_endog)\n for i in range(self.error_order)\n ]\n\n return param_names\n\n @property\n def state_names(self):\n names = []\n endog_names = self.endog_names\n\n # Factors and lags\n names += [\n (('f%d' % (j + 1)) if i == 0 else ('f%d.L%d' % (j + 1, i)))\n for i in range(max(1, self.factor_order))\n for j in range(self.k_factors)]\n\n if self.error_order > 0:\n names += [\n (('e(%s)' % endog_names[j]) if i == 0\n else ('e(%s).L%d' % (endog_names[j], i)))\n for i in range(self.error_order)\n for j in range(self.k_endog)]\n\n if self._unused_state:\n names += ['dummy']\n\n return names\n\n def transform_params(self, unconstrained):\n \"\"\"\n Transform unconstrained parameters used by the optimizer to constrained\n parameters used in likelihood evaluation\n\n Parameters\n ----------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer, to be\n transformed.\n\n Returns\n -------\n constrained : array_like\n Array of constrained parameters which may be used in likelihood\n evaluation.\n\n Notes\n -----\n Constrains the factor transition to be stationary and variances to be\n positive.\n \"\"\"\n unconstrained = np.array(unconstrained, ndmin=1)\n dtype = unconstrained.dtype\n constrained = np.zeros(unconstrained.shape, dtype=dtype)\n\n # 1. Factor loadings\n # The factor loadings do not need to be adjusted\n constrained[self._params_loadings] = (\n unconstrained[self._params_loadings])\n\n # 2. Exog\n # The regression coefficients do not need to be adjusted\n constrained[self._params_exog] = (\n unconstrained[self._params_exog])\n\n # 3. Error covariances\n # If we have variances, force them to be positive\n if self.error_cov_type in ['scalar', 'diagonal']:\n constrained[self._params_error_cov] = (\n unconstrained[self._params_error_cov]**2)\n # Otherwise, nothing needs to be done\n elif self.error_cov_type == 'unstructured':\n constrained[self._params_error_cov] = (\n unconstrained[self._params_error_cov])\n\n # 4. 
Factor transition VAR\n # VAR transition: optionally force to be stationary\n if self.enforce_stationarity and self.factor_order > 0:\n # Transform the parameters\n unconstrained_matrices = (\n unconstrained[self._params_factor_transition].reshape(\n self.k_factors, self._factor_order))\n # This is always an identity matrix, but because the transform\n # done prior to update (where the ssm representation matrices\n # change), it may be complex\n cov = self.ssm['state_cov', :self.k_factors, :self.k_factors].real\n coefficient_matrices, variance = (\n constrain_stationary_multivariate(unconstrained_matrices, cov))\n constrained[self._params_factor_transition] = (\n coefficient_matrices.ravel())\n else:\n constrained[self._params_factor_transition] = (\n unconstrained[self._params_factor_transition])\n\n # 5. Error transition VAR\n # VAR transition: optionally force to be stationary\n if self.enforce_stationarity and self.error_order > 0:\n\n # Joint VAR specification\n if self.error_var:\n unconstrained_matrices = (\n unconstrained[self._params_error_transition].reshape(\n self.k_endog, self._error_order))\n start = self.k_factors\n end = self.k_factors + self.k_endog\n cov = self.ssm['state_cov', start:end, start:end].real\n coefficient_matrices, variance = (\n constrain_stationary_multivariate(\n unconstrained_matrices, cov))\n constrained[self._params_error_transition] = (\n coefficient_matrices.ravel())\n # Separate AR specifications\n else:\n coefficients = (\n unconstrained[self._params_error_transition].copy())\n for i in range(self.k_endog):\n start = i * self.error_order\n end = (i + 1) * self.error_order\n coefficients[start:end] = constrain_stationary_univariate(\n coefficients[start:end])\n constrained[self._params_error_transition] = coefficients\n\n else:\n constrained[self._params_error_transition] = (\n unconstrained[self._params_error_transition])\n\n return constrained\n\n def untransform_params(self, constrained):\n \"\"\"\n Transform constrained parameters used in likelihood evaluation\n to unconstrained parameters used by the optimizer.\n\n Parameters\n ----------\n constrained : array_like\n Array of constrained parameters used in likelihood evaluation, to\n be transformed.\n\n Returns\n -------\n unconstrained : array_like\n Array of unconstrained parameters used by the optimizer.\n \"\"\"\n constrained = np.array(constrained, ndmin=1)\n dtype = constrained.dtype\n unconstrained = np.zeros(constrained.shape, dtype=dtype)\n\n # 1. Factor loadings\n # The factor loadings do not need to be adjusted\n unconstrained[self._params_loadings] = (\n constrained[self._params_loadings])\n\n # 2. Exog\n # The regression coefficients do not need to be adjusted\n unconstrained[self._params_exog] = (\n constrained[self._params_exog])\n\n # 3. Error covariances\n # If we have variances, force them to be positive\n if self.error_cov_type in ['scalar', 'diagonal']:\n unconstrained[self._params_error_cov] = (\n constrained[self._params_error_cov]**0.5)\n # Otherwise, nothing needs to be done\n elif self.error_cov_type == 'unstructured':\n unconstrained[self._params_error_cov] = (\n constrained[self._params_error_cov])\n\n # 3. 
Factor transition VAR\n # VAR transition: optionally force to be stationary\n if self.enforce_stationarity and self.factor_order > 0:\n # Transform the parameters\n constrained_matrices = (\n constrained[self._params_factor_transition].reshape(\n self.k_factors, self._factor_order))\n cov = self.ssm['state_cov', :self.k_factors, :self.k_factors].real\n coefficient_matrices, variance = (\n unconstrain_stationary_multivariate(\n constrained_matrices, cov))\n unconstrained[self._params_factor_transition] = (\n coefficient_matrices.ravel())\n else:\n unconstrained[self._params_factor_transition] = (\n constrained[self._params_factor_transition])\n\n # 5. Error transition VAR\n # VAR transition: optionally force to be stationary\n if self.enforce_stationarity and self.error_order > 0:\n\n # Joint VAR specification\n if self.error_var:\n constrained_matrices = (\n constrained[self._params_error_transition].reshape(\n self.k_endog, self._error_order))\n start = self.k_factors\n end = self.k_factors + self.k_endog\n cov = self.ssm['state_cov', start:end, start:end].real\n coefficient_matrices, variance = (\n unconstrain_stationary_multivariate(\n constrained_matrices, cov))\n unconstrained[self._params_error_transition] = (\n coefficient_matrices.ravel())\n # Separate AR specifications\n else:\n coefficients = (\n constrained[self._params_error_transition].copy())\n for i in range(self.k_endog):\n start = i * self.error_order\n end = (i + 1) * self.error_order\n coefficients[start:end] = (\n unconstrain_stationary_univariate(\n coefficients[start:end]))\n unconstrained[self._params_error_transition] = coefficients\n\n else:\n unconstrained[self._params_error_transition] = (\n constrained[self._params_error_transition])\n\n return unconstrained\n\n def _validate_can_fix_params(self, param_names):\n super(DynamicFactor, self)._validate_can_fix_params(param_names)\n\n ix = np.cumsum(list(self.parameters.values()))[:-1]\n (_, _, _, factor_transition_names, error_transition_names) = [\n arr.tolist() for arr in np.array_split(self.param_names, ix)]\n\n if self.enforce_stationarity and self.factor_order > 0:\n if self.k_factors > 1 or self.factor_order > 1:\n fix_all = param_names.issuperset(factor_transition_names)\n fix_any = (\n len(param_names.intersection(factor_transition_names)) > 0)\n if fix_any and not fix_all:\n raise ValueError(\n 'Cannot fix individual factor transition parameters'\n ' when `enforce_stationarity=True`. In this case,'\n ' must either fix all factor transition parameters or'\n ' none.')\n if self.enforce_stationarity and self.error_order > 0:\n if self.error_var or self.error_order > 1:\n fix_all = param_names.issuperset(error_transition_names)\n fix_any = (\n len(param_names.intersection(error_transition_names)) > 0)\n if fix_any and not fix_all:\n raise ValueError(\n 'Cannot fix individual error transition parameters'\n ' when `enforce_stationarity=True`. In this case,'\n ' must either fix all error transition parameters or'\n ' none.')\n\n def update(self, params, transformed=True, includes_fixed=False,\n complex_step=False):\n \"\"\"\n Update the parameters of the model\n\n Updates the representation matrices to fill in the new parameter\n values.\n\n Parameters\n ----------\n params : array_like\n Array of new parameters.\n transformed : bool, optional\n Whether or not `params` is already transformed. If set to False,\n `transform_params` is called. 
Default is True..\n\n Returns\n -------\n params : array_like\n Array of parameters.\n\n Notes\n -----\n Let `n = k_endog`, `m = k_factors`, and `p = factor_order`. Then the\n `params` vector has length\n :math:`[n \\times m] + [n] + [m^2 \\times p]`.\n It is expanded in the following way:\n\n - The first :math:`n \\times m` parameters fill out the factor loading\n matrix, starting from the [0,0] entry and then proceeding along rows.\n These parameters are not modified in `transform_params`.\n - The next :math:`n` parameters provide variances for the error_cov\n errors in the observation equation. They fill in the diagonal of the\n observation covariance matrix, and are constrained to be positive by\n `transofrm_params`.\n - The next :math:`m^2 \\times p` parameters are used to create the `p`\n coefficient matrices for the vector autoregression describing the\n factor transition. They are transformed in `transform_params` to\n enforce stationarity of the VAR(p). They are placed so as to make\n the transition matrix a companion matrix for the VAR. In particular,\n we assume that the first :math:`m^2` parameters fill the first\n coefficient matrix (starting at [0,0] and filling along rows), the\n second :math:`m^2` parameters fill the second matrix, etc.\n \"\"\"\n params = self.handle_params(params, transformed=transformed,\n includes_fixed=includes_fixed)\n\n # 1. Factor loadings\n # Update the design / factor loading matrix\n self.ssm[self._idx_loadings] = (\n params[self._params_loadings].reshape(self.k_endog, self.k_factors)\n )\n\n # 2. Exog\n if self.k_exog > 0:\n exog_params = params[self._params_exog].reshape(\n self.k_endog, self.k_exog).T\n self.ssm[self._idx_exog] = np.dot(self.exog, exog_params).T\n\n # 3. Error covariances\n if self.error_cov_type in ['scalar', 'diagonal']:\n self.ssm[self._idx_error_cov] = (\n params[self._params_error_cov])\n elif self.error_cov_type == 'unstructured':\n error_cov_lower = np.zeros((self.k_endog, self.k_endog),\n dtype=params.dtype)\n error_cov_lower[self._idx_lower_error_cov] = (\n params[self._params_error_cov])\n self.ssm[self._idx_error_cov] = (\n np.dot(error_cov_lower, error_cov_lower.T))\n\n # 4. Factor transition VAR\n self.ssm[self._idx_factor_transition] = (\n params[self._params_factor_transition].reshape(\n self.k_factors, self.factor_order * self.k_factors))\n\n # 5. 
Error transition VAR\n if self.error_var:\n self.ssm[self._idx_error_transition] = (\n params[self._params_error_transition].reshape(\n self.k_endog, self._error_order))\n else:\n self.ssm[self._idx_error_transition] = (\n params[self._params_error_transition])\n\n\nclass DynamicFactorResults(MLEResults):\n \"\"\"\n Class to hold results from fitting an DynamicFactor model.\n\n Parameters\n ----------\n model : DynamicFactor instance\n The fitted model instance\n\n Attributes\n ----------\n specification : dictionary\n Dictionary including all attributes from the DynamicFactor model\n instance.\n coefficient_matrices_var : array\n Array containing autoregressive lag polynomial coefficient matrices,\n ordered from lowest degree to highest.\n\n See Also\n --------\n statsmodels.tsa.statespace.kalman_filter.FilterResults\n statsmodels.tsa.statespace.mlemodel.MLEResults\n \"\"\"\n def __init__(self, model, params, filter_results, cov_type=None,\n **kwargs):\n super(DynamicFactorResults, self).__init__(model, params,\n filter_results, cov_type,\n **kwargs)\n\n self.df_resid = np.inf # attribute required for wald tests\n\n self.specification = Bunch(**{\n # Model properties\n 'k_endog': self.model.k_endog,\n 'enforce_stationarity': self.model.enforce_stationarity,\n\n # Factor-related properties\n 'k_factors': self.model.k_factors,\n 'factor_order': self.model.factor_order,\n\n # Error-related properties\n 'error_order': self.model.error_order,\n 'error_var': self.model.error_var,\n 'error_cov_type': self.model.error_cov_type,\n\n # Other properties\n 'k_exog': self.model.k_exog\n })\n\n # Polynomials / coefficient matrices\n self.coefficient_matrices_var = None\n if self.model.factor_order > 0:\n ar_params = (\n np.array(self.params[self.model._params_factor_transition]))\n k_factors = self.model.k_factors\n factor_order = self.model.factor_order\n self.coefficient_matrices_var = (\n ar_params.reshape(k_factors * factor_order, k_factors).T\n ).reshape(k_factors, k_factors, factor_order).T\n\n self.coefficient_matrices_error = None\n if self.model.error_order > 0:\n ar_params = (\n np.array(self.params[self.model._params_error_transition]))\n k_endog = self.model.k_endog\n error_order = self.model.error_order\n if self.model.error_var:\n self.coefficient_matrices_error = (\n ar_params.reshape(k_endog * error_order, k_endog).T\n ).reshape(k_endog, k_endog, error_order).T\n else:\n mat = np.zeros((k_endog, k_endog * error_order))\n mat[self.model._idx_error_diag] = ar_params\n self.coefficient_matrices_error = (\n mat.T.reshape(error_order, k_endog, k_endog))\n\n @property\n def factors(self):\n \"\"\"\n Estimates of unobserved factors\n\n Returns\n -------\n out : Bunch\n Has the following attributes shown in Notes.\n\n Notes\n -----\n The output is a bunch of the following format:\n\n - `filtered`: a time series array with the filtered estimate of\n the component\n - `filtered_cov`: a time series array with the filtered estimate of\n the variance/covariance of the component\n - `smoothed`: a time series array with the smoothed estimate of\n the component\n - `smoothed_cov`: a time series array with the smoothed estimate of\n the variance/covariance of the component\n - `offset`: an integer giving the offset in the state vector where\n this component begins\n \"\"\"\n # If present, level is always the first component of the state vector\n out = None\n spec = self.specification\n if spec.k_factors > 0:\n offset = 0\n end = spec.k_factors\n res = self.filter_results\n out = Bunch(\n 
filtered=res.filtered_state[offset:end],\n filtered_cov=res.filtered_state_cov[offset:end, offset:end],\n smoothed=None, smoothed_cov=None,\n offset=offset)\n if self.smoothed_state is not None:\n out.smoothed = self.smoothed_state[offset:end]\n if self.smoothed_state_cov is not None:\n out.smoothed_cov = (\n self.smoothed_state_cov[offset:end, offset:end])\n return out\n\n @cache_readonly\n def coefficients_of_determination(self):\n \"\"\"\n Coefficients of determination (:math:`R^2`) from regressions of\n individual estimated factors on endogenous variables.\n\n Returns\n -------\n coefficients_of_determination : array\n A `k_endog` x `k_factors` array, where\n `coefficients_of_determination[i, j]` represents the :math:`R^2`\n value from a regression of factor `j` and a constant on endogenous\n variable `i`.\n\n Notes\n -----\n Although it can be difficult to interpret the estimated factor loadings\n and factors, it is often helpful to use the coefficients of\n determination from univariate regressions to assess the importance of\n each factor in explaining the variation in each endogenous variable.\n\n In models with many variables and factors, this can sometimes lend\n interpretation to the factors (for example sometimes one factor will\n load primarily on real variables and another on nominal variables).\n\n See Also\n --------\n plot_coefficients_of_determination\n \"\"\"\n from statsmodels.tools import add_constant\n spec = self.specification\n coefficients = np.zeros((spec.k_endog, spec.k_factors))\n which = 'filtered' if self.smoothed_state is None else 'smoothed'\n\n for i in range(spec.k_factors):\n exog = add_constant(self.factors[which][i])\n for j in range(spec.k_endog):\n endog = self.filter_results.endog[j]\n coefficients[j, i] = OLS(endog, exog).fit().rsquared\n\n return coefficients\n\n def plot_coefficients_of_determination(self, endog_labels=None,\n fig=None, figsize=None):\n \"\"\"\n Plot the coefficients of determination\n\n Parameters\n ----------\n endog_labels : bool, optional\n Whether or not to label the endogenous variables along the x-axis\n of the plots. Default is to include labels if there are 5 or fewer\n endogenous variables.\n fig : Matplotlib Figure instance, optional\n If given, subplots are created in this figure instead of in a new\n figure. Note that the grid will be created in the provided\n figure using `fig.add_subplot()`.\n figsize : tuple, optional\n If a figure is created, this argument allows specifying a size.\n The tuple is (width, height).\n\n Notes\n -----\n\n Produces a `k_factors` x 1 plot grid. The `i`th plot shows a bar plot\n of the coefficients of determination associated with factor `i`. 
The\n endogenous variables are arranged along the x-axis according to their\n position in the `endog` array.\n\n See Also\n --------\n coefficients_of_determination\n \"\"\"\n from statsmodels.graphics.utils import _import_mpl, create_mpl_fig\n _import_mpl()\n fig = create_mpl_fig(fig, figsize)\n\n spec = self.specification\n\n # Should we label endogenous variables?\n if endog_labels is None:\n endog_labels = spec.k_endog <= 5\n\n # Plot the coefficients of determination\n coefficients_of_determination = self.coefficients_of_determination\n plot_idx = 1\n locations = np.arange(spec.k_endog)\n for coeffs in coefficients_of_determination.T:\n # Create the new axis\n ax = fig.add_subplot(spec.k_factors, 1, plot_idx)\n ax.set_ylim((0, 1))\n ax.set(title='Factor %i' % plot_idx, ylabel=r'$R^2$')\n bars = ax.bar(locations, coeffs)\n\n if endog_labels:\n width = bars[0].get_width()\n ax.xaxis.set_ticks(locations + width / 2)\n ax.xaxis.set_ticklabels(self.model.endog_names)\n else:\n ax.set(xlabel='Endogenous variables')\n ax.xaxis.set_ticks([])\n\n plot_idx += 1\n\n return fig\n\n @Appender(MLEResults.summary.__doc__)\n def summary(self, alpha=.05, start=None, separate_params=True):\n from statsmodels.iolib.summary import summary_params\n spec = self.specification\n\n # Create the model name\n model_name = []\n if spec.k_factors > 0:\n if spec.factor_order > 0:\n model_type = ('DynamicFactor(factors=%d, order=%d)' %\n (spec.k_factors, spec.factor_order))\n else:\n model_type = 'StaticFactor(factors=%d)' % spec.k_factors\n\n model_name.append(model_type)\n if spec.k_exog > 0:\n model_name.append('%d regressors' % spec.k_exog)\n else:\n model_name.append('SUR(%d regressors)' % spec.k_exog)\n\n if spec.error_order > 0:\n error_type = 'VAR' if spec.error_var else 'AR'\n model_name.append('%s(%d) errors' % (error_type, spec.error_order))\n\n summary = super(DynamicFactorResults, self).summary(\n alpha=alpha, start=start, model_name=model_name,\n display_params=not separate_params\n )\n\n if separate_params:\n indices = np.arange(len(self.params))\n\n def make_table(self, mask, title, strip_end=True):\n res = (self, self.params[mask], self.bse[mask],\n self.zvalues[mask], self.pvalues[mask],\n self.conf_int(alpha)[mask])\n\n param_names = [\n '.'.join(name.split('.')[:-1]) if strip_end else name\n for name in\n np.array(self.data.param_names)[mask].tolist()\n ]\n\n return summary_params(res, yname=None, xname=param_names,\n alpha=alpha, use_t=False, title=title)\n\n k_endog = self.model.k_endog\n k_exog = self.model.k_exog\n k_factors = self.model.k_factors\n factor_order = self.model.factor_order\n _factor_order = self.model._factor_order\n _error_order = self.model._error_order\n\n # Add parameter tables for each endogenous variable\n loading_indices = indices[self.model._params_loadings]\n loading_masks = []\n exog_indices = indices[self.model._params_exog]\n exog_masks = []\n for i in range(k_endog):\n # 1. Factor loadings\n # Recall these are in the form:\n # 'loading.f1.y1', 'loading.f2.y1', 'loading.f1.y2', ...\n\n loading_mask = (\n loading_indices[i * k_factors:(i + 1) * k_factors])\n loading_masks.append(loading_mask)\n\n # 2. 
Exog\n # Recall these are in the form:\n # beta.x1.y1, beta.x2.y1, beta.x1.y2, ...\n exog_mask = exog_indices[i * k_exog:(i + 1) * k_exog]\n exog_masks.append(exog_mask)\n\n # Create the table\n mask = np.concatenate([loading_mask, exog_mask])\n title = \"Results for equation %s\" % self.model.endog_names[i]\n table = make_table(self, mask, title)\n summary.tables.append(table)\n\n # Add parameter tables for each factor\n factor_indices = indices[self.model._params_factor_transition]\n factor_masks = []\n if factor_order > 0:\n for i in range(k_factors):\n start = i * _factor_order\n factor_mask = factor_indices[start: start + _factor_order]\n factor_masks.append(factor_mask)\n\n # Create the table\n title = \"Results for factor equation f%d\" % (i+1)\n table = make_table(self, factor_mask, title)\n summary.tables.append(table)\n\n # Add parameter tables for error transitions\n error_masks = []\n if spec.error_order > 0:\n error_indices = indices[self.model._params_error_transition]\n for i in range(k_endog):\n if spec.error_var:\n start = i * _error_order\n end = (i + 1) * _error_order\n else:\n start = i * spec.error_order\n end = (i + 1) * spec.error_order\n\n error_mask = error_indices[start:end]\n error_masks.append(error_mask)\n\n # Create the table\n title = (\"Results for error equation e(%s)\" %\n self.model.endog_names[i])\n table = make_table(self, error_mask, title)\n summary.tables.append(table)\n\n # Error covariance terms\n error_cov_mask = indices[self.model._params_error_cov]\n table = make_table(self, error_cov_mask,\n \"Error covariance matrix\", strip_end=False)\n summary.tables.append(table)\n\n # Add a table for all other parameters\n masks = []\n for m in (loading_masks, exog_masks, factor_masks,\n error_masks, [error_cov_mask]):\n m = np.array(m).flatten()\n if len(m) > 0:\n masks.append(m)\n masks = np.concatenate(masks)\n inverse_mask = np.array(list(set(indices).difference(set(masks))))\n if len(inverse_mask) > 0:\n table = make_table(self, inverse_mask, \"Other parameters\",\n strip_end=False)\n summary.tables.append(table)\n\n return summary\n\n\nclass DynamicFactorResultsWrapper(MLEResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(DynamicFactorResultsWrapper, # noqa:E305\n DynamicFactorResults)\n", "# -*- coding: utf-8 -*-\n\nimport warnings\n\nimport numpy as np\nfrom numpy.linalg import eigh, inv, norm, matrix_rank\nimport pandas as pd\nfrom scipy.optimize import minimize\n\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.base.model import Model\nfrom statsmodels.iolib import summary2\nfrom statsmodels.graphics.utils import _import_mpl\n\nfrom .factor_rotation import rotate_factors, promax\n\n\n_opt_defaults = {'gtol': 1e-7}\n\n\ndef _check_args_1(endog, n_factor, corr, nobs):\n\n msg = \"Either endog or corr must be provided.\"\n if endog is not None and corr is not None:\n raise ValueError(msg)\n if endog is None and corr is None:\n warnings.warn('Both endog and corr are provided, ' +\n 'corr will be used for factor analysis.')\n\n if n_factor <= 0:\n raise ValueError('n_factor must be larger than 0! 
%d < 0' %\n (n_factor))\n\n if nobs is not None and endog is not None:\n warnings.warn(\"nobs is ignored when endog is provided\")\n\n\ndef _check_args_2(endog, n_factor, corr, nobs, k_endog):\n\n if n_factor > k_endog:\n raise ValueError('n_factor cannot be greater than the number'\n ' of variables! %d > %d' %\n (n_factor, k_endog))\n\n if np.max(np.abs(np.diag(corr) - 1)) > 1e-10:\n raise ValueError(\"corr must be a correlation matrix\")\n\n if corr.shape[0] != corr.shape[1]:\n raise ValueError('Correlation matrix corr must be a square '\n '(rows %d != cols %d)' % corr.shape)\n\n\nclass Factor(Model):\n \"\"\"\n Factor analysis\n\n Parameters\n ----------\n endog : array_like\n Variables in columns, observations in rows. May be `None` if\n `corr` is not `None`.\n n_factor : int\n The number of factors to extract\n corr : array_like\n Directly specify the correlation matrix instead of estimating\n it from `endog`. If provided, `endog` is not used for the\n factor analysis, it may be used in post-estimation.\n method : str\n The method to extract factors, currently must be either 'pa'\n for principal axis factor analysis or 'ml' for maximum\n likelihood estimation.\n smc : True or False\n Whether or not to apply squared multiple correlations (method='pa')\n endog_names : str\n Names of endogenous variables. If specified, it will be used\n instead of the column names in endog\n nobs : int\n The number of observations, not used if endog is present. Needs to\n be provided for inference if endog is None.\n missing : 'none', 'drop', or 'raise'\n Missing value handling for endog, default is row-wise deletion 'drop'\n If 'none', no nan checking is done. If 'drop', any observations with\n nans are dropped. If 'raise', an error is raised.\n\n\n Notes\n -----\n **Experimental**\n\n Supported rotations: 'varimax', 'quartimax', 'biquartimax',\n 'equamax', 'oblimin', 'parsimax', 'parsimony', 'biquartimin',\n 'promax'\n\n If method='ml', the factors are rotated to satisfy condition IC3\n of Bai and Li (2012). This means that the scores have covariance\n I, so the model for the covariance matrix is L * L' + diag(U),\n where L are the loadings and U are the uniquenesses. In addition,\n L' * diag(U)^{-1} L must be diagonal.\n\n References\n ----------\n .. [*] Hofacker, C. (2004). Exploratory Factor Analysis, Mathematical\n Marketing. http://www.openaccesstexts.org/pdf/Quant_Chapter_11_efa.pdf\n .. [*] J Bai, K Li (2012). Statistical analysis of factor models of high\n dimension. Annals of Statistics. 
https://arxiv.org/pdf/1205.6617.pdf\n \"\"\"\n def __init__(self, endog=None, n_factor=1, corr=None, method='pa',\n smc=True, endog_names=None, nobs=None, missing='drop'):\n\n _check_args_1(endog, n_factor, corr, nobs)\n\n if endog is not None:\n super(Factor, self).__init__(endog, exog=None, missing=missing)\n endog = self.endog # after preprocessing like missing, asarray\n k_endog = endog.shape[1]\n nobs = endog.shape[0]\n corr = self.corr = np.corrcoef(endog, rowvar=0)\n elif corr is not None:\n corr = self.corr = np.asarray(corr)\n k_endog = self.corr.shape[0]\n self.endog = None\n else:\n msg = \"Either endog or corr must be provided.\"\n raise ValueError(msg)\n\n _check_args_2(endog, n_factor, corr, nobs, k_endog)\n\n self.n_factor = n_factor\n self.loadings = None\n self.communality = None\n self.method = method\n self.smc = smc\n self.nobs = nobs\n self.method = method\n self.corr = corr\n self.k_endog = k_endog\n\n if endog_names is None:\n if hasattr(corr, 'index'):\n endog_names = corr.index\n if hasattr(corr, 'columns'):\n endog_names = corr.columns\n self.endog_names = endog_names\n\n @property\n def endog_names(self):\n \"\"\"Names of endogenous variables\"\"\"\n if self._endog_names is not None:\n return self._endog_names\n else:\n if self.endog is not None:\n return self.data.ynames\n else:\n d = 0\n n = self.corr.shape[0] - 1\n while n > 0:\n d += 1\n n //= 10\n return [('var%0' + str(d) + 'd') % i\n for i in range(self.corr.shape[0])]\n\n @endog_names.setter\n def endog_names(self, value):\n # Check validity of endog_names:\n if value is not None:\n if len(value) != self.corr.shape[0]:\n raise ValueError('The length of `endog_names` must '\n 'equal the number of variables.')\n self._endog_names = np.asarray(value)\n else:\n self._endog_names = None\n\n def fit(self, maxiter=50, tol=1e-8, start=None, opt_method='BFGS',\n opt=None, em_iter=3):\n \"\"\"\n Estimate factor model parameters.\n\n Parameters\n ----------\n maxiter : int\n Maximum number of iterations for iterative estimation algorithms\n tol : float\n Stopping criteria (error tolerance) for iterative estimation\n algorithms\n start : array_like\n Starting values, currently only used for ML estimation\n opt_method : str\n Optimization method for ML estimation\n opt : dict-like\n Keyword arguments passed to optimizer, only used for ML estimation\n em_iter : int\n The number of EM iterations before starting gradient optimization,\n only used for ML estimation.\n\n Returns\n -------\n results: FactorResults\n \"\"\"\n method = self.method.lower()\n if method == 'pa':\n return self._fit_pa(maxiter=maxiter, tol=tol)\n elif method == 'ml':\n return self._fit_ml(start, em_iter, opt_method, opt)\n else:\n msg = \"Unknown factor extraction approach '%s'\" % self.method\n raise ValueError(msg)\n\n def _fit_pa(self, maxiter=50, tol=1e-8):\n \"\"\"\n Extract factors using the iterative principal axis method\n\n Parameters\n ----------\n maxiter : int\n Maximum number of iterations for communality estimation\n tol : float\n If `norm(communality - last_communality) < tolerance`,\n estimation stops\n\n Returns\n -------\n results : FactorResults instance\n \"\"\"\n\n R = self.corr.copy() # inplace modification below\n\n # Parameter validation\n self.n_comp = matrix_rank(R)\n if self.n_factor > self.n_comp:\n raise ValueError('n_factor must be smaller or equal to the rank'\n ' of endog! %d > %d' %\n (self.n_factor, self.n_comp))\n if maxiter <= 0:\n raise ValueError('n_max_iter must be larger than 0! 
%d < 0' %\n (maxiter))\n if tol <= 0 or tol > 0.01:\n raise ValueError('tolerance must be larger than 0 and smaller than'\n ' 0.01! Got %f instead' % (tol))\n\n # Initial communality estimation\n if self.smc:\n c = 1 - 1 / np.diag(inv(R))\n else:\n c = np.ones(len(R))\n\n # Iterative communality estimation\n eigenvals = None\n for i in range(maxiter):\n # Get eigenvalues/eigenvectors of R with diag replaced by\n # communality\n for j in range(len(R)):\n R[j, j] = c[j]\n L, V = eigh(R, UPLO='U')\n c_last = np.array(c)\n ind = np.argsort(L)\n ind = ind[::-1]\n L = L[ind]\n n_pos = (L > 0).sum()\n V = V[:, ind]\n eigenvals = np.array(L)\n\n # Select eigenvectors with positive eigenvalues\n n = np.min([n_pos, self.n_factor])\n sL = np.diag(np.sqrt(L[:n]))\n V = V[:, :n]\n\n # Calculate new loadings and communality\n A = V.dot(sL)\n c = np.power(A, 2).sum(axis=1)\n if norm(c_last - c) < tol:\n break\n\n self.eigenvals = eigenvals\n self.communality = c\n self.uniqueness = 1 - c\n self.loadings = A\n return FactorResults(self)\n\n # Unpacks the model parameters from a flat vector, used for ML\n # estimation. The first k_endog elements of par are the square\n # roots of the uniquenesses. The remaining elements are the\n # factor loadings, packed one factor at a time.\n def _unpack(self, par):\n return (par[0:self.k_endog]**2,\n np.reshape(par[self.k_endog:], (-1, self.k_endog)).T)\n\n # Packs the model parameters into a flat parameter, used for ML\n # estimation.\n def _pack(self, load, uniq):\n return np.concatenate((np.sqrt(uniq), load.T.flat))\n\n def loglike(self, par):\n \"\"\"\n Evaluate the log-likelihood function.\n\n Parameters\n ----------\n par : ndarray or tuple of 2 ndarray's\n The model parameters, either a packed representation of\n the model parameters or a 2-tuple containing a `k_endog x\n n_factor` matrix of factor loadings and a `k_endog` vector\n of uniquenesses.\n\n Returns\n -------\n loglike : float\n \"\"\"\n\n if type(par) is np.ndarray:\n uniq, load = self._unpack(par)\n else:\n load, uniq = par[0], par[1]\n\n loadu = load / uniq[:, None]\n lul = np.dot(load.T, loadu)\n\n # log|GG' + S|\n # Using matrix determinant lemma:\n # |GG' + S| = |I + G'S^{-1}G|*|S|\n lul.flat[::lul.shape[0]+1] += 1\n _, ld = np.linalg.slogdet(lul)\n v = np.sum(np.log(uniq)) + ld\n\n # tr((GG' + S)^{-1}C)\n # Using Sherman-Morrison-Woodbury\n w = np.sum(1 / uniq)\n b = np.dot(load.T, self.corr / uniq[:, None])\n b = np.linalg.solve(lul, b)\n b = np.dot(loadu, b)\n w -= np.trace(b)\n\n # Scaled log-likelihood\n return -(v + w) / (2*self.k_endog)\n\n def score(self, par):\n \"\"\"\n Evaluate the score function (first derivative of loglike).\n\n Parameters\n ----------\n par : ndarray or tuple of 2 ndarray's\n The model parameters, either a packed representation of\n the model parameters or a 2-tuple containing a `k_endog x\n n_factor` matrix of factor loadings and a `k_endog` vector\n of uniquenesses.\n\n Returns\n -------\n score : ndarray\n \"\"\"\n\n if type(par) is np.ndarray:\n uniq, load = self._unpack(par)\n else:\n load, uniq = par[0], par[1]\n\n # Center term of SMW\n loadu = load / uniq[:, None]\n c = np.dot(load.T, loadu)\n c.flat[::c.shape[0]+1] += 1\n d = np.linalg.solve(c, load.T)\n\n # Precompute these terms\n lud = np.dot(loadu, d)\n cu = (self.corr / uniq) / uniq[:, None]\n r = np.dot(cu, load)\n lul = np.dot(lud.T, load)\n luz = np.dot(cu, lul)\n\n # First term\n du = 2*np.sqrt(uniq) * (1/uniq - (d * load.T).sum(0) / uniq**2)\n dl = 2*(loadu - np.dot(lud, loadu))\n\n # Second 
term\n h = np.dot(lud, cu)\n f = np.dot(h, lud.T)\n du -= 2*np.sqrt(uniq) * (np.diag(cu) - 2*np.diag(h) + np.diag(f))\n dl -= 2*r\n dl += 2*np.dot(lud, r)\n dl += 2*luz\n dl -= 2*np.dot(lud, luz)\n\n # Cannot use _pack because we are working with the square root\n # uniquenesses directly.\n return -np.concatenate((du, dl.T.flat)) / (2*self.k_endog)\n\n # Maximum likelihood factor analysis.\n def _fit_ml(self, start, em_iter, opt_method, opt):\n \"\"\"estimate Factor model using Maximum Likelihood\n \"\"\"\n\n # Starting values\n if start is None:\n load, uniq = self._fit_ml_em(em_iter)\n start = self._pack(load, uniq)\n elif len(start) == 2:\n if len(start[1]) != start[0].shape[0]:\n msg = \"Starting values have incompatible dimensions\"\n raise ValueError(msg)\n start = self._pack(start[0], start[1])\n else:\n raise ValueError(\"Invalid starting values\")\n\n def nloglike(par):\n return -self.loglike(par)\n\n def nscore(par):\n return -self.score(par)\n\n # Do the optimization\n if opt is None:\n opt = _opt_defaults\n r = minimize(nloglike, start, jac=nscore, method=opt_method,\n options=opt)\n if not r.success:\n warnings.warn(\"Fitting did not converge\")\n par = r.x\n uniq, load = self._unpack(par)\n\n if uniq.min() < 1e-10:\n warnings.warn(\"Some uniquenesses are nearly zero\")\n\n # Rotate solution to satisfy IC3 of Bai and Li\n load = self._rotate(load, uniq)\n\n self.uniqueness = uniq\n self.communality = 1 - uniq\n self.loadings = load\n self.mle_retvals = r\n\n return FactorResults(self)\n\n def _fit_ml_em(self, iter):\n \"\"\"estimate Factor model using EM algorithm\n \"\"\"\n # Starting values\n np.random.seed(3427)\n load = 0.1*np.random.normal(size=(self.k_endog, self.n_factor))\n uniq = 0.5 * np.ones(self.k_endog)\n\n for k in range(iter):\n\n loadu = load / uniq[:, None]\n\n f = np.dot(load.T, loadu)\n f.flat[::f.shape[0]+1] += 1\n\n r = np.linalg.solve(f, loadu.T)\n q = np.dot(loadu.T, load)\n h = np.dot(r, load)\n\n c = load - np.dot(load, h)\n c /= uniq[:, None]\n\n g = np.dot(q, r)\n e = np.dot(g, self.corr)\n d = np.dot(loadu.T, self.corr) - e\n\n a = np.dot(d, c)\n a -= np.dot(load.T, c)\n a.flat[::a.shape[0]+1] += 1\n\n b = np.dot(self.corr, c)\n\n load = np.linalg.solve(a, b.T).T\n uniq = np.diag(self.corr) - (load * d.T).sum(1)\n\n return load, uniq\n\n def _rotate(self, load, uniq):\n \"\"\"rotate loadings for MLE\n \"\"\"\n # Rotations used in ML estimation.\n load, s, _ = np.linalg.svd(load, 0)\n load *= s\n\n if self.nobs is None:\n nobs = 1\n else:\n nobs = self.nobs\n\n cm = np.dot(load.T, load / uniq[:, None]) / nobs\n _, f = np.linalg.eig(cm)\n load = np.dot(load, f)\n return load\n\n\nclass FactorResults(object):\n \"\"\"\n Factor results class\n\n For result summary, scree/loading plots and factor rotations\n\n Parameters\n ----------\n factor : Factor\n Fitted Factor class\n\n Attributes\n ----------\n uniqueness: ndarray\n The uniqueness (variance of uncorrelated errors unique to\n each variable)\n communality: ndarray\n 1 - uniqueness\n loadings : ndarray\n Each column is the loading vector for one factor\n loadings_no_rot : ndarray\n Unrotated loadings, not available under maximum likelihood\n analysis.\n eigenvalues : ndarray\n The eigenvalues for a factor analysis obtained using\n principal components; not available under ML estimation.\n n_comp : int\n Number of components (factors)\n nbs : int\n Number of observations\n fa_method : str\n The method used to obtain the decomposition, either 'pa' for\n 'principal axes' or 'ml' for maximum 
likelihood.\n df : int\n Degrees of freedom of the factor model.\n\n Notes\n -----\n Under ML estimation, the default rotation (used for `loadings`) is\n condition IC3 of Bai and Li (2012). Under this rotation, the\n factor scores are iid and standardized. If `G` is the canonical\n loadings and `U` is the vector of uniquenesses, then the\n covariance matrix implied by the factor analysis is `GG' +\n diag(U)`.\n\n Status: experimental, Some refactoring will be necessary when new\n features are added.\n \"\"\"\n def __init__(self, factor):\n self.model = factor\n self.endog_names = factor.endog_names\n self.loadings_no_rot = factor.loadings\n if hasattr(factor, \"eigenvals\"):\n self.eigenvals = factor.eigenvals\n\n self.communality = factor.communality\n self.uniqueness = factor.uniqueness\n self.rotation_method = None\n self.fa_method = factor.method\n self.n_comp = factor.loadings.shape[1]\n self.nobs = factor.nobs\n self._factor = factor\n if hasattr(factor, \"mle_retvals\"):\n self.mle_retvals = factor.mle_retvals\n\n p, k = self.loadings_no_rot.shape\n self.df = ((p - k)**2 - (p + k)) // 2\n\n # no rotation, overwritten in `rotate`\n self.loadings = factor.loadings\n self.rotation_matrix = np.eye(self.n_comp)\n\n\n def __str__(self):\n return self.summary().__str__()\n\n def rotate(self, method):\n \"\"\"\n Apply rotation, inplace modification of this Results instance\n\n Parameters\n ----------\n method : str\n Rotation to be applied. Allowed methods are varimax,\n quartimax, biquartimax, equamax, oblimin, parsimax,\n parsimony, biquartimin, promax.\n\n Returns\n -------\n None : nothing returned, modifications are inplace\n\n\n Notes\n -----\n Warning: 'varimax', 'quartimax' and 'oblimin' are verified against R or\n Stata. Some rotation methods such as promax do not produce the same\n results as the R or Stata default functions.\n\n See Also\n --------\n factor_rotation : subpackage that implements rotation methods\n \"\"\"\n self.rotation_method = method\n if method not in ['varimax', 'quartimax', 'biquartimax',\n 'equamax', 'oblimin', 'parsimax', 'parsimony',\n 'biquartimin', 'promax']:\n raise ValueError('Unknown rotation method %s' % (method))\n\n if method in ['varimax', 'quartimax', 'biquartimax', 'equamax',\n 'parsimax', 'parsimony', 'biquartimin']:\n self.loadings, T = rotate_factors(self.loadings_no_rot, method)\n elif method == 'oblimin':\n self.loadings, T = rotate_factors(self.loadings_no_rot,\n 'quartimin')\n elif method == 'promax':\n self.loadings, T = promax(self.loadings_no_rot)\n else:\n raise ValueError('rotation method not recognized')\n\n self.rotation_matrix = T\n\n def _corr_factors(self):\n \"\"\"correlation of factors implied by rotation\n\n If the rotation is oblique, then the factors are correlated.\n\n currently not cached\n\n Returns\n -------\n corr_f : ndarray\n correlation matrix of rotated factors, assuming initial factors are\n orthogonal\n \"\"\"\n T = self.rotation_matrix\n corr_f = T.T.dot(T)\n return corr_f\n\n def factor_score_params(self, method='bartlett'):\n \"\"\"\n Compute factor scoring coefficient matrix\n\n The coefficient matrix is not cached.\n\n Parameters\n ----------\n method : 'bartlett' or 'regression'\n Method to use for factor scoring.\n 'regression' can be abbreviated to `reg`\n\n Returns\n -------\n coeff_matrix : ndarray\n matrix s to compute factors f from a standardized endog ys.\n ``f = ys dot s``\n\n Notes\n -----\n The `regression` method follows the Stata definition.\n Method bartlett and regression are verified against 
Stats.\n Two unofficial methods, 'ols' and 'gls', produce similar factor scores\n but are not verified.\n\n See Also\n --------\n statsmodels.multivariate.factor.FactorResults.factor_scoring\n \"\"\"\n L = self.loadings\n T = self.rotation_matrix.T\n #TODO: check row versus column convention for T\n uni = 1 - self.communality #self.uniqueness\n\n if method == 'bartlett':\n s_mat = np.linalg.inv(L.T.dot(L/(uni[:,None]))).dot((L.T / uni)).T\n elif method.startswith('reg'):\n corr = self.model.corr\n corr_f = self._corr_factors()\n # if orthogonal then corr_f is just eye\n s_mat = corr_f.dot(L.T.dot(np.linalg.inv(corr))).T\n elif method == 'ols':\n # not verified\n corr = self.model.corr\n corr_f = self._corr_factors()\n s_mat = corr_f.dot(np.linalg.pinv(L)).T\n elif method == 'gls':\n # not verified\n #s_mat = np.linalg.inv(1*np.eye(L.shape[1]) + L.T.dot(L/(uni[:,None])))\n corr = self.model.corr\n corr_f = self._corr_factors()\n s_mat = np.linalg.inv(np.linalg.inv(corr_f) + L.T.dot(L/(uni[:,None])))\n s_mat = s_mat.dot(L.T / uni).T\n else:\n raise ValueError('method not available, use \"bartlett ' +\n 'or \"regression\"')\n return s_mat\n\n def factor_scoring(self, endog=None, method='bartlett', transform=True):\n \"\"\"\n factor scoring: compute factors for endog\n\n If endog was not provided when creating the factor class, then\n a standarized endog needs to be provided here.\n\n Parameters\n ----------\n method : 'bartlett' or 'regression'\n Method to use for factor scoring.\n 'regression' can be abbreviated to `reg`\n transform : bool\n If transform is true and endog is provided, then it will be\n standardized using mean and scale of original data, which has to\n be available in this case.\n If transform is False, then a provided endog will be used unchanged.\n The original endog in the Factor class will\n always be standardized if endog is None, independently of `transform`.\n\n Returns\n -------\n factor_score : ndarray\n estimated factors using scoring matrix s and standarized endog ys\n ``f = ys dot s``\n\n Notes\n -----\n Status: transform option is experimental and might change.\n\n See Also\n --------\n statsmodels.multivariate.factor.FactorResults.factor_score_params\n \"\"\"\n\n if transform is False and endog is not None:\n # no transformation in this case\n endog = np.asarray(endog)\n else:\n # we need to standardize with the original mean and scale\n if self.model.endog is not None:\n m = self.model.endog.mean(0)\n s = self.model.endog.std(ddof=1, axis=0)\n if endog is None:\n endog = self.model.endog\n else:\n endog = np.asarray(endog)\n else:\n raise ValueError('If transform is True, then `endog` needs ' +\n 'to be available in the Factor instance.')\n\n endog = (endog - m) / s\n\n s_mat = self.factor_score_params(method=method)\n factors = endog.dot(s_mat)\n return factors\n\n def summary(self):\n \"\"\"Summary\"\"\"\n summ = summary2.Summary()\n summ.add_title('Factor analysis results')\n loadings_no_rot = pd.DataFrame(\n self.loadings_no_rot,\n columns=[\"factor %d\" % (i)\n for i in range(self.loadings_no_rot.shape[1])],\n index=self.endog_names\n )\n if hasattr(self, \"eigenvals\"):\n # eigenvals not available for ML method\n eigenvals = pd.DataFrame(\n [self.eigenvals], columns=self.endog_names, index=[''])\n summ.add_dict({'': 'Eigenvalues'})\n summ.add_df(eigenvals)\n communality = pd.DataFrame([self.communality],\n columns=self.endog_names, index=[''])\n summ.add_dict({'': ''})\n summ.add_dict({'': 'Communality'})\n summ.add_df(communality)\n summ.add_dict({'': 
''})\n summ.add_dict({'': 'Pre-rotated loadings'})\n summ.add_df(loadings_no_rot)\n summ.add_dict({'': ''})\n if self.rotation_method is not None:\n loadings = pd.DataFrame(\n self.loadings,\n columns=[\"factor %d\" % (i)\n for i in range(self.loadings.shape[1])],\n index=self.endog_names\n )\n summ.add_dict({'': '%s rotated loadings' % (self.rotation_method)})\n summ.add_df(loadings)\n return summ\n\n def get_loadings_frame(self, style='display', sort_=True, threshold=0.3,\n highlight_max=True, color_max='yellow',\n decimals=None):\n \"\"\"get loadings matrix as DataFrame or pandas Styler\n\n Parameters\n ----------\n style : 'display' (default), 'raw' or 'strings'\n Style to use for display\n\n * 'raw' returns just a DataFrame of the loadings matrix, no options are\n applied\n * 'display' add sorting and styling as defined by other keywords\n * 'strings' returns a DataFrame with string elements with optional sorting\n and suppressing small loading coefficients.\n\n sort_ : bool\n If True, then the rows of the DataFrame is sorted by contribution of each\n factor. applies if style is either 'display' or 'strings'\n threshold : float\n If the threshold is larger than zero, then loading coefficients are\n either colored white (if style is 'display') or replace by empty\n string (if style is 'strings').\n highlight_max : bool\n This add a background color to the largest coefficient in each row.\n color_max : html color\n default is 'yellow'. color for background of row maximum\n decimals : None or int\n If None, then pandas default precision applies. Otherwise values are\n rounded to the specified decimals. If style is 'display', then the\n underlying dataframe is not changed. If style is 'strings', then\n values are rounded before conversion to strings.\n\n Returns\n -------\n loadings : DataFrame or pandas Styler instance\n The return is a pandas Styler instance, if style is 'display' and\n at least one of highlight_max, threshold or decimals is applied.\n Otherwise, the returned loadings is a DataFrame.\n\n Examples\n --------\n >>> mod = Factor(df, 3, smc=True)\n >>> res = mod.fit()\n >>> res.get_loadings_frame(style='display', decimals=3, threshold=0.2)\n\n To get a sorted DataFrame, all styling options need to be turned off:\n\n >>> df_sorted = res.get_loadings_frame(style='display',\n ... highlight_max=False, decimals=None, threshold=0)\n\n Options except for highlighting are available for plain test or Latex\n usage:\n\n >>> lds = res_u.get_loadings_frame(style='strings', decimals=3,\n ... 
threshold=0.3)\n >>> print(lds.to_latex())\n \"\"\"\n\n loadings_df = pd.DataFrame(\n self.loadings,\n columns=[\"factor %d\" % (i)\n for i in range(self.loadings.shape[1])],\n index=self.endog_names\n )\n\n if style not in ['raw', 'display', 'strings']:\n msg = \"style has to be one of 'raw', 'display', 'strings'\"\n raise ValueError(msg)\n\n if style == 'raw':\n return loadings_df\n\n # add sorting and some formatting\n if sort_ is True:\n loadings_df2 = loadings_df.copy()\n n_f = len(loadings_df2)\n high = np.abs(loadings_df2.values).argmax(1)\n loadings_df2['high'] = high\n loadings_df2['largest'] = np.abs(loadings_df.values[np.arange(n_f), high])\n loadings_df2.sort_values(by=['high', 'largest'], ascending=[True, False], inplace=True)\n loadings_df = loadings_df2.drop(['high', 'largest'], axis=1)\n\n if style == 'display':\n sty = None\n if threshold > 0:\n def color_white_small(val):\n \"\"\"\n Takes a scalar and returns a string with\n the css property `'color: white'` for small values, black otherwise.\n\n takes threshold from outer scope\n \"\"\"\n color = 'white' if np.abs(val) < threshold else 'black'\n return 'color: %s' % color\n\n sty = loadings_df.style.applymap(color_white_small)\n\n if highlight_max is True:\n def highlight_max(s):\n '''\n highlight the maximum in a Series yellow.\n '''\n s = np.abs(s)\n is_max = s == s.max()\n return ['background-color: '+ color_max if v else '' for v in is_max]\n\n if sty is None:\n sty = loadings_df.style\n\n sty = sty.apply(highlight_max, axis=1)\n\n if decimals is not None:\n if sty is None:\n sty = loadings_df.style\n\n sty.format(\"{:.%sf}\" % decimals)\n\n if sty is None:\n return loadings_df\n else:\n return sty\n\n if style == 'strings':\n ld = loadings_df\n if decimals is not None:\n ld = ld.round(decimals)\n ld = ld.astype(str)\n if threshold > 0:\n ld[loadings_df.abs() < threshold] = ''\n return ld\n\n def plot_scree(self, ncomp=None):\n \"\"\"\n Plot of the ordered eigenvalues and variance explained for the loadings\n\n Parameters\n ----------\n ncomp : int, optional\n Number of loadings to include in the plot. If None, will\n included the same as the number of maximum possible loadings\n\n Returns\n -------\n fig : figure\n Handle to the figure\n \"\"\"\n _import_mpl()\n from .plots import plot_scree\n return plot_scree(self.eigenvals, self.n_comp, ncomp)\n\n def plot_loadings(self, loading_pairs=None, plot_prerotated=False):\n \"\"\"\n Plot factor loadings in 2-d plots\n\n Parameters\n ----------\n loading_pairs : None or a list of tuples\n Specify plots. Each tuple (i, j) represent one figure, i and j is\n the loading number for x-axis and y-axis, respectively. If `None`,\n all combinations of the loadings will be plotted.\n plot_prerotated : True or False\n If True, the loadings before rotation applied will be plotted. 
If\n False, rotated loadings will be plotted.\n\n Returns\n -------\n figs : a list of figure handles\n \"\"\"\n _import_mpl()\n from .plots import plot_loadings\n\n if self.rotation_method is None:\n plot_prerotated = True\n loadings = self.loadings_no_rot if plot_prerotated else self.loadings\n if plot_prerotated:\n title = 'Prerotated Factor Pattern'\n else:\n title = '%s Rotated Factor Pattern' % (self.rotation_method)\n var_explained = self.eigenvals / self.n_comp * 100\n\n return plot_loadings(loadings, loading_pairs=loading_pairs,\n title=title, row_names=self.endog_names,\n percent_variance=var_explained)\n\n @cache_readonly\n def fitted_cov(self):\n \"\"\"\n Returns the fitted covariance matrix.\n \"\"\"\n\n c = np.dot(self.loadings, self.loadings.T)\n c.flat[::c.shape[0]+1] += self.uniqueness\n return c\n\n @cache_readonly\n def uniq_stderr(self, kurt=0):\n \"\"\"\n The standard errors of the uniquenesses.\n\n Parameters\n ----------\n kurt: float\n Excess kurtosis\n\n Notes\n -----\n If excess kurtosis is known, provide as `kurt`. Standard\n errors are only available if the model was fit using maximum\n likelihood. If `endog` is not provided, `nobs` must be\n provided to obtain standard errors.\n\n These are asymptotic standard errors. See Bai and Li (2012)\n for conditions under which the standard errors are valid.\n\n The standard errors are only applicable to the original,\n unrotated maximum likelihood solution.\n \"\"\"\n\n if self.fa_method.lower() != \"ml\":\n msg = \"Standard errors only available under ML estimation\"\n raise ValueError(msg)\n\n if self.nobs is None:\n msg = \"nobs is required to obtain standard errors.\"\n raise ValueError(msg)\n\n v = self.uniqueness**2 * (2 + kurt)\n return np.sqrt(v / self.nobs)\n\n @cache_readonly\n def load_stderr(self):\n \"\"\"\n The standard errors of the loadings.\n\n Standard errors are only available if the model was fit using\n maximum likelihood. If `endog` is not provided, `nobs` must be\n provided to obtain standard errors.\n\n These are asymptotic standard errors. See Bai and Li (2012)\n for conditions under which the standard errors are valid.\n\n The standard errors are only applicable to the original,\n unrotated maximum likelihood solution.\n \"\"\"\n\n if self.fa_method.lower() != \"ml\":\n msg = \"Standard errors only available under ML estimation\"\n raise ValueError(msg)\n\n if self.nobs is None:\n msg = \"nobs is required to obtain standard errors.\"\n raise ValueError(msg)\n\n v = np.outer(self.uniqueness, np.ones(self.loadings.shape[1]))\n return np.sqrt(v / self.nobs)\n" ]
[ [ "numpy.array" ], [ "numpy.dot", "numpy.tril_indices", "numpy.arange", "numpy.eye", "numpy.isnan", "numpy.lexsort", "numpy.concatenate", "numpy.linalg.pinv", "numpy.asanyarray", "numpy.array_split", "numpy.linalg.cholesky", "numpy.diag_indices", "numpy.array", "numpy.zeros" ], [ "numpy.diag", "numpy.dot", "numpy.linalg.matrix_rank", "numpy.sqrt", "numpy.asarray", "pandas.DataFrame", "numpy.concatenate", "numpy.trace", "numpy.linalg.svd", "numpy.reshape", "numpy.linalg.slogdet", "numpy.linalg.eig", "numpy.eye", "numpy.arange", "numpy.log", "numpy.min", "numpy.power", "numpy.linalg.inv", "numpy.linalg.eigh", "scipy.optimize.minimize", "numpy.corrcoef", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.abs", "numpy.random.seed", "numpy.linalg.norm", "numpy.ones", "numpy.linalg.pinv", "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
NicRSkinner/tensorflow
[ "4cb0397a8bcd5527a00418e04ac843ecd472de4a" ]
[ "tensorflow/contrib/learn/python/learn/tests/estimators_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Custom optimizer tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport tensorflow as tf\n\nfrom tensorflow.contrib.learn.python import learn\nfrom tensorflow.contrib.learn.python.learn import datasets\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split\n\n\n# TODO(b/29580537): Remove when we deprecate feature column inference.\nclass InferredfeatureColumnTest(tf.test.TestCase):\n \"\"\"Custom optimizer tests.\"\"\"\n\n def testIrisMomentum(self):\n random.seed(42)\n\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = train_test_split(iris.data,\n iris.target,\n test_size=0.2,\n random_state=42)\n\n def custom_optimizer():\n return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)\n\n cont_features = [\n tf.contrib.layers.real_valued_column(\"\", dimension=4)]\n classifier = learn.DNNClassifier(\n feature_columns=cont_features,\n hidden_units=[10, 20, 10],\n n_classes=3,\n optimizer=custom_optimizer,\n config=learn.RunConfig(tf_random_seed=1))\n classifier.fit(x_train, y_train, steps=400)\n score = accuracy_score(y_test, classifier.predict(x_test))\n\n self.assertGreater(score, 0.65, \"Failed with score = {0}\".format(score))\n\n\nclass FeatureEngineeringFunctionTest(tf.test.TestCase):\n \"\"\"Tests feature_engineering_fn.\"\"\"\n\n def testFeatureEngineeringFn(self):\n\n def input_fn():\n return {\"x\": tf.constant(1.)}, {\"y\": tf.constant(11.)}\n\n def feature_engineering_fn(features, targets):\n _, _ = features, targets\n return ({\"transformed_x\": tf.constant([9.])},\n {\"transformed_y\": tf.constant([99.])})\n\n def model_fn(features, targets):\n # dummy variable:\n _ = tf.Variable([0.])\n predictions = features[\"transformed_x\"] + targets[\"transformed_y\"]\n loss = tf.constant([2.])\n return predictions, loss, tf.no_op()\n\n estimator = tf.contrib.learn.Estimator(\n model_fn=model_fn,\n feature_engineering_fn=feature_engineering_fn)\n estimator.fit(input_fn=input_fn, steps=1)\n prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))\n # predictions = transformed_x + transformed_y (108 = 9 + 99)\n self.assertEqual(108., prediction)\n\n def testNoneFeatureEngineeringFn(self):\n\n def input_fn():\n return {\"x\": tf.constant(1.)}, {\"y\": tf.constant(11.)}\n\n def feature_engineering_fn(features, targets):\n _, _ = features, targets\n return ({\"x\": tf.constant([9.])},\n {\"y\": tf.constant([99.])})\n\n def model_fn(features, targets):\n # dummy variable:\n _ = tf.Variable([0.])\n predictions = features[\"transformed_x\"] + targets[\"transformed_y\"]\n loss = tf.constant([2.])\n return predictions, loss, tf.no_op()\n\n 
estimator_with_fe_fn = tf.contrib.learn.Estimator(\n model_fn=model_fn,\n feature_engineering_fn=feature_engineering_fn)\n estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)\n estimator_without_fe_fn = tf.contrib.learn.Estimator(model_fn=model_fn)\n estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)\n\n # predictions = x + y\n prediction_with_fe_fn = next(\n estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))\n self.assertEqual(108., prediction_with_fe_fn)\n prediction_without_fe_fn = next(\n estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))\n self.assertEqual(12., prediction_without_fe_fn)\n\n\nclass CustomOptimizer(tf.test.TestCase):\n \"\"\"Custom optimizer tests.\"\"\"\n\n def testIrisMomentum(self):\n random.seed(42)\n\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = train_test_split(iris.data,\n iris.target,\n test_size=0.2,\n random_state=42)\n\n def custom_optimizer():\n return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)\n\n classifier = learn.DNNClassifier(\n hidden_units=[10, 20, 10],\n feature_columns=learn.infer_real_valued_columns_from_input(x_train),\n n_classes=3,\n optimizer=custom_optimizer,\n config=learn.RunConfig(tf_random_seed=1))\n classifier.fit(x_train, y_train, steps=400)\n score = accuracy_score(y_test, classifier.predict(x_test))\n\n self.assertGreater(score, 0.65, \"Failed with score = {0}\".format(score))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.Variable", "tensorflow.contrib.learn.python.learn.datasets.load_iris", "tensorflow.test.main", "tensorflow.contrib.layers.real_valued_column", "tensorflow.contrib.learn.Estimator", "tensorflow.train.MomentumOptimizer", "tensorflow.no_op", "tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input", "tensorflow.contrib.learn.python.learn.RunConfig", "tensorflow.contrib.learn.python.learn.estimators._sklearn.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
giovanism/pandas
[ "ac3056f2f13e5287dc66d09f31522f7fba592fec" ]
[ "pandas/tests/extension/json/test_json.py" ]
[ "import collections\nimport operator\n\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\n\nfrom .array import JSONArray, JSONDtype, make_data\n\n\[email protected]\ndef dtype():\n return JSONDtype()\n\n\[email protected]\ndef data():\n \"\"\"Length-100 PeriodArray for semantics test.\"\"\"\n data = make_data()\n\n # Why the while loop? NumPy is unable to construct an ndarray from\n # equal-length ndarrays. Many of our operations involve coercing the\n # EA to an ndarray of objects. To avoid random test failures, we ensure\n # that our data is coercible to an ndarray. Several tests deal with only\n # the first two elements, so that's what we'll check.\n\n while len(data[0]) == len(data[1]):\n data = make_data()\n\n return JSONArray(data)\n\n\[email protected]\ndef data_missing():\n \"\"\"Length 2 array with [NA, Valid]\"\"\"\n return JSONArray([{}, {\"a\": 10}])\n\n\[email protected]\ndef data_for_sorting():\n return JSONArray([{\"b\": 1}, {\"c\": 4}, {\"a\": 2, \"c\": 3}])\n\n\[email protected]\ndef data_missing_for_sorting():\n return JSONArray([{\"b\": 1}, {}, {\"a\": 4}])\n\n\[email protected]\ndef na_value(dtype):\n return dtype.na_value\n\n\[email protected]\ndef na_cmp():\n return operator.eq\n\n\[email protected]\ndef data_for_grouping():\n return JSONArray(\n [\n {\"b\": 1},\n {\"b\": 1},\n {},\n {},\n {\"a\": 0, \"c\": 2},\n {\"a\": 0, \"c\": 2},\n {\"b\": 1},\n {\"c\": 2},\n ]\n )\n\n\nclass BaseJSON:\n # NumPy doesn't handle an array of equal-length UserDicts.\n # The default assert_series_equal eventually does a\n # Series.values, which raises. We work around it by\n # converting the UserDicts to dicts.\n @classmethod\n def assert_series_equal(cls, left, right, *args, **kwargs):\n if left.dtype.name == \"json\":\n assert left.dtype == right.dtype\n left = pd.Series(\n JSONArray(left.values.astype(object)), index=left.index, name=left.name\n )\n right = pd.Series(\n JSONArray(right.values.astype(object)),\n index=right.index,\n name=right.name,\n )\n tm.assert_series_equal(left, right, *args, **kwargs)\n\n @classmethod\n def assert_frame_equal(cls, left, right, *args, **kwargs):\n obj_type = kwargs.get(\"obj\", \"DataFrame\")\n tm.assert_index_equal(\n left.columns,\n right.columns,\n exact=kwargs.get(\"check_column_type\", \"equiv\"),\n check_names=kwargs.get(\"check_names\", True),\n check_exact=kwargs.get(\"check_exact\", False),\n check_categorical=kwargs.get(\"check_categorical\", True),\n obj=f\"{obj_type}.columns\",\n )\n\n jsons = (left.dtypes == \"json\").index\n\n for col in jsons:\n cls.assert_series_equal(left[col], right[col], *args, **kwargs)\n\n left = left.drop(columns=jsons)\n right = right.drop(columns=jsons)\n tm.assert_frame_equal(left, right, *args, **kwargs)\n\n\nclass TestDtype(BaseJSON, base.BaseDtypeTests):\n pass\n\n\nclass TestInterface(BaseJSON, base.BaseInterfaceTests):\n def test_custom_asserts(self):\n # This would always trigger the KeyError from trying to put\n # an array of equal-length UserDicts inside an ndarray.\n data = JSONArray(\n [\n collections.UserDict({\"a\": 1}),\n collections.UserDict({\"b\": 2}),\n collections.UserDict({\"c\": 3}),\n ]\n )\n a = pd.Series(data)\n self.assert_series_equal(a, a)\n self.assert_frame_equal(a.to_frame(), a.to_frame())\n\n b = pd.Series(data.take([0, 0, 1]))\n with pytest.raises(AssertionError):\n self.assert_series_equal(a, b)\n\n with pytest.raises(AssertionError):\n self.assert_frame_equal(a.to_frame(), b.to_frame())\n\n\nclass 
TestConstructors(BaseJSON, base.BaseConstructorsTests):\n @pytest.mark.skip(reason=\"not implemented constructor from dtype\")\n def test_from_dtype(self, data):\n # construct from our dtype & string dtype\n pass\n\n\nclass TestReshaping(BaseJSON, base.BaseReshapingTests):\n @pytest.mark.skip(reason=\"Different definitions of NA\")\n def test_stack(self):\n \"\"\"\n The test does .astype(object).stack(). If we happen to have\n any missing values in `data`, then we'll end up with different\n rows since we consider `{}` NA, but `.astype(object)` doesn't.\n \"\"\"\n\n @pytest.mark.xfail(reason=\"dict for NA\")\n def test_unstack(self, data, index):\n # The base test has NaN for the expected NA value.\n # this matches otherwise\n return super().test_unstack(data, index)\n\n\nclass TestGetitem(BaseJSON, base.BaseGetitemTests):\n pass\n\n\nclass TestMissing(BaseJSON, base.BaseMissingTests):\n @pytest.mark.skip(reason=\"Setting a dict as a scalar\")\n def test_fillna_series(self):\n \"\"\"We treat dictionaries as a mapping in fillna, not a scalar.\"\"\"\n\n @pytest.mark.skip(reason=\"Setting a dict as a scalar\")\n def test_fillna_frame(self):\n \"\"\"We treat dictionaries as a mapping in fillna, not a scalar.\"\"\"\n\n\nunhashable = pytest.mark.skip(reason=\"Unhashable\")\n\n\nclass TestReduce(base.BaseNoReduceTests):\n pass\n\n\nclass TestMethods(BaseJSON, base.BaseMethodsTests):\n @unhashable\n def test_value_counts(self, all_data, dropna):\n pass\n\n @unhashable\n def test_sort_values_frame(self):\n # TODO (EA.factorize): see if _values_for_factorize allows this.\n pass\n\n def test_argsort(self, data_for_sorting):\n super().test_argsort(data_for_sorting)\n\n def test_argsort_missing(self, data_missing_for_sorting):\n super().test_argsort_missing(data_missing_for_sorting)\n\n @pytest.mark.parametrize(\"ascending\", [True, False])\n def test_sort_values(self, data_for_sorting, ascending):\n super().test_sort_values(data_for_sorting, ascending)\n\n @pytest.mark.parametrize(\"ascending\", [True, False])\n def test_sort_values_missing(self, data_missing_for_sorting, ascending):\n super().test_sort_values_missing(data_missing_for_sorting, ascending)\n\n @pytest.mark.skip(reason=\"combine for JSONArray not supported\")\n def test_combine_le(self, data_repeated):\n pass\n\n @pytest.mark.skip(reason=\"combine for JSONArray not supported\")\n def test_combine_add(self, data_repeated):\n pass\n\n @pytest.mark.skip(reason=\"combine for JSONArray not supported\")\n def test_combine_first(self, data):\n pass\n\n @unhashable\n def test_hash_pandas_object_works(self, data, kind):\n super().test_hash_pandas_object_works(data, kind)\n\n @pytest.mark.skip(reason=\"broadcasting error\")\n def test_where_series(self, data, na_value):\n # Fails with\n # *** ValueError: operands could not be broadcast together\n # with shapes (4,) (4,) (0,)\n super().test_where_series(data, na_value)\n\n @pytest.mark.skip(reason=\"Can't compare dicts.\")\n def test_searchsorted(self, data_for_sorting):\n super().test_searchsorted(data_for_sorting)\n\n\nclass TestCasting(BaseJSON, base.BaseCastingTests):\n @pytest.mark.skip(reason=\"failing on np.array(self, dtype=str)\")\n def test_astype_str(self):\n \"\"\"This currently fails in NumPy on np.array(self, dtype=str) with\n\n *** ValueError: setting an array element with a sequence\n \"\"\"\n\n\n# We intentionally don't run base.BaseSetitemTests because pandas'\n# internals has trouble setting sequences of values into scalar positions.\n\n\nclass TestGroupby(BaseJSON, 
base.BaseGroupbyTests):\n @unhashable\n def test_groupby_extension_transform(self):\n \"\"\"\n This currently fails in Series.name.setter, since the\n name must be hashable, but the value is a dictionary.\n I think this is what we want, i.e. `.name` should be the original\n values, and not the values for factorization.\n \"\"\"\n\n @unhashable\n def test_groupby_extension_apply(self):\n \"\"\"\n This fails in Index._do_unique_check with\n\n > hash(val)\n E TypeError: unhashable type: 'UserDict' with\n\n I suspect that once we support Index[ExtensionArray],\n we'll be able to dispatch unique.\n \"\"\"\n\n @pytest.mark.parametrize(\"as_index\", [True, False])\n def test_groupby_extension_agg(self, as_index, data_for_grouping):\n super().test_groupby_extension_agg(as_index, data_for_grouping)\n\n\nclass TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):\n def test_error(self, data, all_arithmetic_operators):\n pass\n\n def test_add_series_with_extension_array(self, data):\n ser = pd.Series(data)\n with pytest.raises(TypeError, match=\"unsupported\"):\n ser + data\n\n def test_divmod_series_array(self):\n # GH 23287\n # skipping because it is not implemented\n pass\n\n def _check_divmod_op(self, s, op, other, exc=NotImplementedError):\n return super()._check_divmod_op(s, op, other, exc=TypeError)\n\n\nclass TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):\n pass\n\n\nclass TestPrinting(BaseJSON, base.BasePrintingTests):\n pass\n" ]
[ [ "pandas._testing.assert_series_equal", "pandas._testing.assert_frame_equal", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
quantumjot/napari
[ "3b92d9cba5a178d04c5b5231192448cc316a9bfd", "f87e18641c529f6c592553b052805c9d75231b90" ]
[ "napari/_vispy/vispy_base_layer.py", "napari/components/add_layers_mixin.py" ]
[ "from abc import ABC, abstractmethod\nfrom functools import lru_cache\nimport numpy as np\nfrom vispy.app import Canvas\nfrom vispy.gloo import gl\nfrom vispy.visuals.transforms import STTransform\n\n\nclass VispyBaseLayer(ABC):\n \"\"\"Base object for individual layer views\n\n Meant to be subclassed.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n\n Attributes\n ----------\n layer : napari.layers.Layer\n Layer model.\n node : vispy.scene.VisualNode\n Central node with which to interact with the visual.\n scale : sequence of float\n Scale factors for the layer visual in the scenecanvas.\n translate : sequence of float\n Translation values for the layer visual in the scenecanvas.\n scale_factor : float\n Conversion factor from canvas coordinates to image coordinates, which\n depends on the current zoom level.\n MAX_TEXTURE_SIZE_2D : int\n Max texture size allowed by the vispy canvas during 2D rendering.\n MAX_TEXTURE_SIZE_3D : int\n Max texture size allowed by the vispy canvas during 2D rendering.\n\n Extended Summary\n ----------\n _master_transform : vispy.visuals.transforms.STTransform\n Transform positioning the layer visual inside the scenecanvas.\n \"\"\"\n\n def __init__(self, layer, node):\n super().__init__()\n\n self.layer = layer\n self.node = node\n\n MAX_TEXTURE_SIZE_2D, MAX_TEXTURE_SIZE_3D = get_max_texture_sizes()\n self.MAX_TEXTURE_SIZE_2D = MAX_TEXTURE_SIZE_2D\n self.MAX_TEXTURE_SIZE_3D = MAX_TEXTURE_SIZE_3D\n\n self._position = (0,) * self.layer.dims.ndisplay\n\n self.layer.events.refresh.connect(lambda e: self.node.update())\n self.layer.events.set_data.connect(self._on_data_change)\n self.layer.events.visible.connect(self._on_visible_change)\n self.layer.events.opacity.connect(self._on_opacity_change)\n self.layer.events.blending.connect(self._on_blending_change)\n self.layer.events.scale.connect(self._on_scale_change)\n self.layer.events.translate.connect(self._on_translate_change)\n\n @property\n def _master_transform(self):\n \"\"\"vispy.visuals.transforms.STTransform:\n Central node's firstmost transform.\n \"\"\"\n # whenever a new parent is set, the transform is reset\n # to a NullTransform so we reset it here\n if not isinstance(self.node.transform, STTransform):\n self.node.transform = STTransform()\n\n return self.node.transform\n\n @property\n def order(self):\n \"\"\"int: Order in which the visual is drawn in the scenegraph.\n\n Lower values are closer to the viewer.\n \"\"\"\n return self.node.order\n\n @order.setter\n def order(self, order):\n self.node.order = order\n\n @property\n def scale(self):\n \"\"\"sequence of float: Scale factors.\"\"\"\n return self._master_transform.scale\n\n @scale.setter\n def scale(self, scale):\n # Avoid useless update if nothing changed in the displayed dims\n # Note that the master_transform scale is always a 4-vector so pad\n padded_scale = np.pad(\n scale, ((0, 4 - len(scale))), constant_values=1, mode='constant'\n )\n if self.scale is not None and np.all(self.scale == padded_scale):\n return\n self._master_transform.scale = padded_scale\n\n @property\n def translate(self):\n \"\"\"sequence of float: Translation values.\"\"\"\n return self._master_transform.translate\n\n @translate.setter\n def translate(self, translate):\n # Avoid useless update if nothing changed in the displayed dims\n # Note that the master_transform translate is always a 4-vector so pad\n padded_translate = np.pad(\n translate,\n ((0, 
4 - len(translate))),\n constant_values=1,\n mode='constant',\n )\n if self.translate is not None and np.all(\n self.translate == padded_translate\n ):\n return\n self._master_transform.translate = padded_translate\n\n @property\n def scale_factor(self):\n \"\"\"float: Conversion factor from canvas pixels to data coordinates.\n \"\"\"\n if self.node.canvas is not None:\n transform = self.node.canvas.scene.node_transform(self.node)\n return transform.map([1, 1])[0] - transform.map([0, 0])[0]\n else:\n return 1\n\n @abstractmethod\n def _on_data_change(self, event=None):\n raise NotImplementedError()\n\n def _on_visible_change(self, event=None):\n self.node.visible = self.layer.visible\n\n def _on_opacity_change(self, event=None):\n self.node.opacity = self.layer.opacity\n\n def _on_blending_change(self, event=None):\n self.node.set_gl_state(self.layer.blending)\n self.node.update()\n\n def _on_scale_change(self, event=None):\n scale = self.layer._transforms.simplified.set_slice(\n self.layer.dims.displayed\n ).scale\n # convert NumPy axis ordering to VisPy axis ordering\n self.scale = scale[::-1]\n self.layer.corner_pixels = self.coordinates_of_canvas_corners()\n self.layer.position = self._transform_position(self._position)\n\n def _on_translate_change(self, event=None):\n translate = self.layer._transforms.simplified.set_slice(\n self.layer.dims.displayed\n ).translate\n # convert NumPy axis ordering to VisPy axis ordering\n self.translate = translate[::-1]\n self.layer.corner_pixels = self.coordinates_of_canvas_corners()\n self.layer.position = self._transform_position(self._position)\n\n def _transform_position(self, position):\n \"\"\"Transform cursor position from canvas space (x, y) into image space.\n\n Parameters\n -------\n position : 2-tuple\n Cursor position in canvase (x, y).\n\n Returns\n -------\n coords : tuple\n Coordinates of cursor in image space for displayed dimensions only\n \"\"\"\n nd = self.layer.dims.ndisplay\n if self.node.canvas is not None:\n transform = self.node.canvas.scene.node_transform(self.node)\n # Map and offset position so that pixel center is at 0\n mapped_position = transform.map(list(position))[:nd] - 0.5\n return tuple(mapped_position[::-1])\n else:\n return (0,) * nd\n\n def _reset_base(self):\n self._on_visible_change()\n self._on_opacity_change()\n self._on_blending_change()\n self._on_scale_change()\n self._on_translate_change()\n\n def coordinates_of_canvas_corners(self):\n \"\"\"Find location of the corners of canvas in data coordinates.\n\n This method should only be used during 2D image viewing. 
The result\n depends on the current pan and zoom position.\n\n Returns\n ----------\n corner_pixels : array\n Coordinates of top left and bottom right canvas pixel in the data.\n \"\"\"\n nd = self.layer.dims.ndisplay\n # Find image coordinate of top left canvas pixel\n if self.node.canvas is not None:\n offset = self.translate[:nd] / self.scale[:nd]\n tl_raw = np.floor(self._transform_position([0, 0]) + offset[::-1])\n br_raw = np.ceil(\n self._transform_position(self.node.canvas.size) + offset[::-1]\n )\n else:\n tl_raw = [0] * nd\n br_raw = [1] * nd\n\n top_left = np.zeros(self.layer.ndim)\n bottom_right = np.zeros(self.layer.ndim)\n for d, tl, br in zip(self.layer.dims.displayed, tl_raw, br_raw):\n top_left[d] = tl\n bottom_right[d] = br\n\n return np.array([top_left, bottom_right]).astype(int)\n\n def on_draw(self, event):\n \"\"\"Called whenever the canvas is drawn.\n\n This is triggered from vispy whenever new data is sent to the canvas or\n the camera is moved and is connected in the `QtViewer`.\n \"\"\"\n self.layer.scale_factor = self.scale_factor\n old_corner_pixels = self.layer.corner_pixels\n self.layer.corner_pixels = self.coordinates_of_canvas_corners()\n\n # For 2D multiscale data determine if new data has been requested\n if (\n self.layer.multiscale\n and self.layer.dims.ndisplay == 2\n and self.node.canvas is not None\n ):\n self.layer._update_multiscale(\n corner_pixels=old_corner_pixels,\n shape_threshold=self.node.canvas.size,\n )\n\n\n@lru_cache()\ndef get_max_texture_sizes():\n \"\"\"Get maximum texture sizes for 2D and 3D rendering.\n\n Returns\n -------\n MAX_TEXTURE_SIZE_2D : int or None\n Max texture size allowed by the vispy canvas during 2D rendering.\n MAX_TEXTURE_SIZE_3D : int or None\n Max texture size allowed by the vispy canvas during 2D rendering.\n \"\"\"\n # A canvas must be created to access gl values\n c = Canvas(show=False)\n try:\n MAX_TEXTURE_SIZE_2D = gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)\n finally:\n c.close()\n if MAX_TEXTURE_SIZE_2D == ():\n MAX_TEXTURE_SIZE_2D = None\n # vispy doesn't expose GL_MAX_3D_TEXTURE_SIZE so hard coding\n # MAX_TEXTURE_SIZE_3D = gl.glGetParameter(gl.GL_MAX_3D_TEXTURE_SIZE)\n # if MAX_TEXTURE_SIZE_3D == ():\n # MAX_TEXTURE_SIZE_3D = None\n MAX_TEXTURE_SIZE_3D = 2048\n\n return MAX_TEXTURE_SIZE_2D, MAX_TEXTURE_SIZE_3D\n", "import inspect\nimport itertools\nimport os\nfrom functools import lru_cache\nfrom logging import getLogger\nfrom typing import Any, Dict, List, Optional, Sequence, Set, Union\n\nimport numpy as np\n\nfrom .. import layers\nfrom ..layers.image._image_utils import guess_labels, guess_multiscale\nfrom ..plugins.io import read_data_with_plugins\nfrom ..types import FullLayerData, LayerData\nfrom ..utils import colormaps\nfrom ..utils.colormaps import ensure_colormap_tuple\nfrom ..utils.misc import (\n ensure_iterable,\n ensure_sequence_of_iterables,\n is_sequence,\n)\n\nlogger = getLogger(__name__)\n\n\nclass AddLayersMixin:\n \"\"\"A mixin that adds add_* methods for adding layers to the ViewerModel.\n\n Each method corresponds to adding one or more layers to the viewer.\n Methods that just add a single layer contain the keyword arguments and\n copies of the documentation from that the layer. 
These are copied and\n pasted instead of being autogenerated because IDEs like PyCharm parse the\n source code for docs instead of pulling it up dynamically.\n\n These methods are separated into a mixin to keep the ViewerModel class\n easier to read and make these methods easier to maintain.\n \"\"\"\n\n def add_layer(self, layer: layers.Layer) -> layers.Layer:\n \"\"\"Add a layer to the viewer.\n\n Parameters\n ----------\n layer : :class:`napari.layers.Layer`\n Layer to add.\n\n Returns\n -------\n layer : :class:`napari.layers.Layer` or list\n The layer that was added (same as input).\n \"\"\"\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n return layer\n\n def add_image(\n self,\n data=None,\n *,\n channel_axis=None,\n rgb=None,\n colormap=None,\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n iso_threshold=0.5,\n attenuation=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending=None,\n visible=True,\n multiscale=None,\n ) -> Union[layers.Image, List[layers.Image]]:\n \"\"\"Add an image layer to the layers list.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n a multiscale image.\n channel_axis : int, optional\n Axis to expand image along. If provided, each channel in the data\n will be added as an individual image layer. In channel_axis mode,\n all other parameters MAY be provided as lists, and the Nth value\n will be applied to the Nth channel in the data. If a single value\n is provided, it will be broadcast to all Layers.\n rgb : bool or list\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n If a list then must be same length as the axis that is being\n expanded as channels.\n colormap : str, vispy.Color.Colormap, tuple, dict, list\n Colormaps to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap. If a list then must be same length as the axis that is\n being expanded as channels, and each colormap is applied to each\n new image layer.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image. 
If list of lists then must be same length as the axis\n that is being expanded and then each colormap is applied to each\n image.\n gamma : list, float\n Gamma correction for determining colormap linearity. Defaults to 1.\n If a list then must be same length as the axis that is being\n expanded as channels.\n interpolation : str or list\n Interpolation mode used by vispy. Must be one of our supported\n modes. If a list then must be same length as the axis that is being\n expanded as channels.\n rendering : str or list\n Rendering mode used by vispy. Must be one of our supported\n modes. If a list then must be same length as the axis that is being\n expanded as channels.\n iso_threshold : float or list\n Threshold for isosurface. If a list then must be same length as the\n axis that is being expanded as channels.\n attenuation : float or list\n Attenuation rate for attenuated maximum intensity projection. If a\n list then must be same length as the axis that is being expanded as\n channels.\n name : str or list of str\n Name of the layer. If a list then must be same length as the axis\n that is being expanded as channels.\n metadata : dict or list of dict\n Layer metadata. If a list then must be a list of dicts with the\n same length as the axis that is being expanded as channels.\n scale : tuple of float or list\n Scale factors for the layer. If a list then must be a list of\n tuples of float with the same length as the axis that is being\n expanded as channels.\n translate : tuple of float or list\n Translation values for the layer. If a list then must be a list of\n tuples of float with the same length as the axis that is being\n expanded as channels.\n opacity : float or list\n Opacity of the layer visual, between 0.0 and 1.0. If a list then\n must be same length as the axis that is being expanded as channels.\n blending : str or list\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}. If a list then\n must be same length as the axis that is being expanded as channels.\n visible : bool or list of bool\n Whether the layer visual is currently being displayed.\n If a list then must be same length as the axis that is\n being expanded as channels.\n multiscale : bool\n Whether the data is a multiscale image or not. Multiscale data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be multiscale. The first image in the list\n should be the largest.\n\n Returns\n -------\n layer : :class:`napari.layers.Image` or list\n The newly-created image layer or list of image layers.\n \"\"\"\n\n if colormap is not None:\n # standardize colormap argument(s) to strings, and make sure they\n # are in AVAILABLE_COLORMAPS. This will raise one of many various\n # errors if the colormap argument is invalid. 
See\n # ensure_colormap_tuple for details\n if isinstance(colormap, list):\n colormap = [ensure_colormap_tuple(c)[0] for c in colormap]\n else:\n colormap, _ = ensure_colormap_tuple(colormap)\n\n # doing this here for IDE/console autocompletion in add_image function.\n kwargs = {\n 'rgb': rgb,\n 'colormap': colormap,\n 'contrast_limits': contrast_limits,\n 'gamma': gamma,\n 'interpolation': interpolation,\n 'rendering': rendering,\n 'iso_threshold': iso_threshold,\n 'attenuation': attenuation,\n 'name': name,\n 'metadata': metadata,\n 'scale': scale,\n 'translate': translate,\n 'opacity': opacity,\n 'blending': blending,\n 'visible': visible,\n 'multiscale': multiscale,\n }\n\n # these arguments are *already* iterables in the single-channel case.\n iterable_kwargs = {'scale', 'translate', 'contrast_limits', 'metadata'}\n\n if channel_axis is None:\n kwargs['colormap'] = kwargs['colormap'] or 'gray'\n kwargs['blending'] = kwargs['blending'] or 'translucent'\n # Helpful message if someone tries to add mulit-channel kwargs,\n # but forget the channel_axis arg\n for k, v in kwargs.items():\n if k not in iterable_kwargs and is_sequence(v):\n raise TypeError(\n f\"Received sequence for argument '{k}', \"\n \"did you mean to specify a 'channel_axis'? \"\n )\n\n return self.add_layer(layers.Image(data, **kwargs))\n else:\n # Determine if data is a multiscale\n if multiscale is None:\n multiscale, data = guess_multiscale(data)\n n_channels = (data[0] if multiscale else data).shape[channel_axis]\n kwargs['blending'] = kwargs['blending'] or 'additive'\n\n # turn the kwargs dict into a mapping of {key: iterator}\n # so that we can use {k: next(v) for k, v in kwargs.items()} below\n for key, val in kwargs.items():\n if key == 'colormap' and val is None:\n if n_channels == 1:\n kwargs[key] = iter(['gray'])\n elif n_channels == 2:\n kwargs[key] = iter(colormaps.MAGENTA_GREEN)\n else:\n kwargs[key] = itertools.cycle(colormaps.CYMRGB)\n\n # make sure that iterable_kwargs are a *sequence* of iterables\n # for the multichannel case. For example: if scale == (1, 2) &\n # n_channels = 3, then scale should == [(1, 2), (1, 2), (1, 2)]\n elif key in iterable_kwargs:\n kwargs[key] = iter(\n ensure_sequence_of_iterables(val, n_channels)\n )\n else:\n kwargs[key] = iter(ensure_iterable(val))\n\n layer_list = []\n for i in range(n_channels):\n if multiscale:\n image = [\n np.take(data[j], i, axis=channel_axis)\n for j in range(len(data))\n ]\n else:\n image = np.take(data, i, axis=channel_axis)\n i_kwargs = {k: next(v) for k, v in kwargs.items()}\n layer = self.add_layer(layers.Image(image, **i_kwargs))\n layer_list.append(layer)\n return layer_list\n\n def add_points(\n self,\n data=None,\n *,\n properties=None,\n symbol='o',\n size=10,\n edge_width=1,\n edge_color='black',\n edge_color_cycle=None,\n edge_colormap='viridis',\n edge_contrast_limits=None,\n face_color='white',\n face_color_cycle=None,\n face_colormap='viridis',\n face_contrast_limits=None,\n n_dimensional=False,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ) -> layers.Points:\n \"\"\"Add a points layer to the layers list.\n\n Parameters\n ----------\n data : array (N, D)\n Coordinates for N points in D dimensions.\n properties : dict {str: array (N,)}, DataFrame\n Properties for each point. Each property should be an array of length N,\n where N is the number of points.\n symbol : str\n Symbol to be used for the point markers. 
Must be one of the\n following: arrow, clobber, cross, diamond, disc, hbar, ring,\n square, star, tailed_arrow, triangle_down, triangle_up, vbar, x.\n size : float, array\n Size of the point marker. If given as a scalar, all points are made\n the same size. If given as an array, size must be the same\n broadcastable to the same shape as the data.\n edge_width : float\n Width of the symbol edge in pixels.\n edge_color : str, array-like\n Color of the point marker border. Numeric color values should be RGB(A).\n edge_color_cycle : np.ndarray, list\n Cycle of colors (provided as string name, RGB, or RGBA) to map to edge_color if a\n categorical attribute is used color the vectors.\n edge_colormap : str, vispy.color.colormap.Colormap\n Colormap to set edge_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n edge_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. If set the none, the clims will be set to\n (property.min(), property.max())\n face_color : str, array-like\n Color of the point marker body. Numeric color values should be RGB(A).\n face_color_cycle : np.ndarray, list\n Cycle of colors (provided as string name, RGB, or RGBA) to map to face_color if a\n categorical attribute is used color the vectors.\n face_colormap : str, vispy.color.colormap.Colormap\n Colormap to set face_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n face_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. If set the none, the clims will be set to\n (property.min(), property.max())\n n_dimensional : bool\n If True, renders points not just in central plane but also in all\n n-dimensions according to specified point marker size.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Points`\n The newly-created points layer.\n\n Notes\n -----\n See vispy's marker visual docs for more details:\n http://api.vispy.org/en/latest/visuals.html#vispy.visuals.MarkersVisual\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty([0, ndim])\n\n layer = layers.Points(\n data=data,\n properties=properties,\n symbol=symbol,\n size=size,\n edge_width=edge_width,\n edge_color=edge_color,\n edge_color_cycle=edge_color_cycle,\n edge_colormap=edge_colormap,\n edge_contrast_limits=edge_contrast_limits,\n face_color=face_color,\n face_color_cycle=face_color_cycle,\n face_colormap=face_colormap,\n face_contrast_limits=face_contrast_limits,\n n_dimensional=n_dimensional,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_labels(\n self,\n data,\n *,\n num_colors=50,\n properties=None,\n seed=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n multiscale=None,\n ) -> layers.Labels:\n \"\"\"Add a labels (or segmentation) layer to the layers list.\n\n An image-like layer where every pixel contains an integer ID\n corresponding to the region it belongs to.\n\n Using the viewer's label editing tools (painting, erasing) will\n modify the input-array in-place.\n\n To avoid this, pass a copy as follows:\n layer = viewer.add_labels(data.copy())\n # do some painting/editing\n\n Get the modified labels as follows:\n result = layer.data\n\n Parameters\n ----------\n data : array or list of array\n Labels data as an array or multiscale.\n num_colors : int\n Number of unique colors to use in colormap.\n properties : dict {str: array (N,)}, DataFrame\n Properties for each label. Each property should be an array of length\n N, where N is the number of labels, and the first property corresponds to\n background.\n seed : float\n Seed for colormap random generator.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n multiscale : bool\n Whether the data is a multiscale image or not. Multiscale data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be multiscale. 
The first image in the list\n should be the largest.\n\n Returns\n -------\n layer : :class:`napari.layers.Labels`\n The newly-created labels layer.\n \"\"\"\n layer = layers.Labels(\n data,\n num_colors=num_colors,\n properties=properties,\n seed=seed,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n multiscale=multiscale,\n )\n self.add_layer(layer)\n return layer\n\n def add_shapes(\n self,\n data=None,\n *,\n shape_type='rectangle',\n edge_width=1,\n edge_color='black',\n face_color='white',\n z_index=0,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ) -> layers.Shapes:\n \"\"\"Add a shapes layer to the layers list.\n\n Parameters\n ----------\n data : list or array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions. Can be an 3-dimensional\n array if each shape has the same number of vertices.\n shape_type : string or list\n String of shape shape_type, must be one of \"{'line', 'rectangle',\n 'ellipse', 'path', 'polygon'}\". If a list is supplied it must be\n the same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_width : float or list\n Thickness of lines and edges. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_color : str, array-like\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n face_color : str, array-like\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n z_index : int or list\n Specifier of z order priority. Shapes with higher z order are\n displayed ontop of others. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Shapes`\n The newly-created shapes layer.\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty((0, 0, ndim))\n\n layer = layers.Shapes(\n data=data,\n shape_type=shape_type,\n edge_width=edge_width,\n edge_color=edge_color,\n face_color=face_color,\n z_index=z_index,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_surface(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ) -> layers.Surface:\n \"\"\"Add a surface layer to the layers list.\n\n Parameters\n ----------\n data : 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The third element is the (K0, ..., KL, N)\n array of values used to color vertices where the additional L\n dimensions are used to color the same mesh with different values.\n colormap : str, vispy.Color.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Surface`\n The newly-created surface layer.\n \"\"\"\n layer = layers.Surface(\n data,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_vectors(\n self,\n data,\n *,\n properties=None,\n edge_width=1,\n edge_color='red',\n edge_color_cycle=None,\n edge_colormap='viridis',\n edge_contrast_limits=None,\n length=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ) -> layers.Vectors:\n \"\"\"Add a vectors layer to the layers list.\n\n Parameters\n ----------\n data : (N, 2, D) or (N1, N2, ..., ND, D) array\n An (N, 2, D) array is interpreted as \"coordinate-like\" data and a\n list of N vectors with start point and projections of the vector in\n D dimensions. 
An (N1, N2, ..., ND, D) array is interpreted as\n \"image-like\" data where there is a length D vector of the\n projections at each pixel.\n properties : dict {str: array (N,)}, DataFrame\n Properties for each vector. Each property should be an array of length N,\n where N is the number of vectors.\n edge_width : float\n Width for all vectors in pixels.\n length : float\n Multiplicative factor on projections for length of all vectors.\n edge_color : str\n Color of all of the vectors.\n edge_color_cycle : np.ndarray, list\n Cycle of colors (provided as string name, RGB, or RGBA) to map to edge_color if a\n categorical attribute is used color the vectors.\n edge_colormap : str, vispy.color.colormap.Colormap\n Colormap to set vector color if a continuous attribute is used to set edge_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n edge_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. If set the none, the clims will be set to\n (property.min(), property.max())\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Vectors`\n The newly-created vectors layer.\n \"\"\"\n layer = layers.Vectors(\n data,\n properties=properties,\n edge_width=edge_width,\n edge_color=edge_color,\n edge_color_cycle=edge_color_cycle,\n edge_colormap=edge_colormap,\n edge_contrast_limits=edge_contrast_limits,\n length=length,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def open(\n self,\n path: Union[str, Sequence[str]],\n stack: bool = False,\n plugin: Optional[str] = None,\n layer_type: Optional[str] = None,\n **kwargs,\n ) -> List[layers.Layer]:\n \"\"\"Open a path or list of paths with plugins, and add layers to viewer.\n\n A list of paths will be handed one-by-one to the napari_get_reader hook\n if stack is False, otherwise the full list is passed to each plugin\n hook.\n\n Parameters\n ----------\n path : str or list of str\n A filepath, directory, or URL (or a list of any) to open.\n stack : bool, optional\n If a list of strings is passed and ``stack`` is ``True``, then the\n entire list will be passed to plugins. It is then up to individual\n plugins to know how to handle a list of paths. If ``stack`` is\n ``False``, then the ``path`` list is broken up and passed to plugin\n readers one by one. by default False.\n plugin : str, optional\n Name of a plugin to use. If provided, will force ``path`` to be\n read with the specified ``plugin``. If the requested plugin cannot\n read ``path``, an execption will be raised.\n layer_type : str, optional\n If provided, will force data read from ``path`` to be passed to the\n corresponding ``add_<layer_type>`` method (along with any\n additional) ``kwargs`` provided to this function. 
This *may*\n result in exceptions if the data returned from the path is not\n compatible with the layer_type.\n **kwargs\n All other keyword arguments will be passed on to the respective\n ``add_layer`` method.\n\n Returns\n -------\n layers : list\n A list of any layers that were added to the viewer.\n \"\"\"\n paths = [path] if isinstance(path, str) else path\n paths = [os.fspath(path) for path in paths] # PathObjects -> str\n if not isinstance(paths, (tuple, list)):\n raise ValueError(\n \"'path' argument must be a string, list, or tuple\"\n )\n\n if stack:\n return self._add_layers_with_plugins(\n paths, kwargs, plugin=plugin, layer_type=layer_type\n )\n\n added: List[layers.Layer] = [] # for layers that get added\n for _path in paths:\n added.extend(\n self._add_layers_with_plugins(\n _path, kwargs, plugin=plugin, layer_type=layer_type\n )\n )\n\n return added\n\n def _add_layers_with_plugins(\n self,\n path_or_paths: Union[str, Sequence[str]],\n kwargs: Optional[dict] = None,\n plugin: Optional[str] = None,\n layer_type: Optional[str] = None,\n ) -> List[layers.Layer]:\n \"\"\"Load a path or a list of paths into the viewer using plugins.\n\n This function is mostly called from self.open_path, where the ``stack``\n argument determines whether a list of strings is handed to plugins one\n at a time, or en-masse.\n\n Parameters\n ----------\n path_or_paths : str or list of str\n A filepath, directory, or URL (or a list of any) to open. If a\n list, the assumption is that the list is to be treated as a stack.\n kwargs : dict, optional\n keyword arguments that will be used to overwrite any of those that\n are returned in the meta dict from plugins.\n plugin : str, optional\n Name of a plugin to use. If provided, will force ``path`` to be\n read with the specified ``plugin``. If the requested plugin cannot\n read ``path``, an execption will be raised.\n layer_type : str, optional\n If provided, will force data read from ``path`` to be passed to the\n corresponding ``add_<layer_type>`` method (along with any\n additional) ``kwargs`` provided to this function. This *may*\n result in exceptions if the data returned from the path is not\n compatible with the layer_type.\n\n Returns\n -------\n List[layers.Layer]\n A list of any layers that were added to the viewer.\n \"\"\"\n layer_data = read_data_with_plugins(path_or_paths, plugin=plugin)\n\n # glean layer names from filename. 
These will be used as *fallback*\n # names, if the plugin does not return a name kwarg in their meta dict.\n if isinstance(path_or_paths, str):\n filenames = itertools.repeat(path_or_paths)\n elif is_sequence(path_or_paths):\n if len(path_or_paths) == len(layer_data):\n filenames = iter(path_or_paths)\n else:\n # if a list of paths has been returned as a list of layer data\n # without a 1:1 relationship between the two lists we iterate\n # over the first name\n filenames = itertools.repeat(path_or_paths[0])\n\n # add each layer to the viewer\n added: List[layers.Layer] = [] # for layers that get added\n for data, filename in zip(layer_data, filenames):\n basename, ext = os.path.splitext(os.path.basename(filename))\n _data = _unify_data_and_user_kwargs(\n data, kwargs, layer_type, fallback_name=basename\n )\n # actually add the layer\n new = self._add_layer_from_data(*_data)\n # some add_* methods return a List[Layer], others just a Layer\n # we want to always return a list\n added.extend(new if isinstance(new, list) else [new])\n return added\n\n def _add_layer_from_data(\n self, data, meta: dict = None, layer_type: Optional[str] = None\n ) -> Union[layers.Layer, List[layers.Layer]]:\n \"\"\"Add arbitrary layer data to the viewer.\n\n Primarily intended for usage by reader plugin hooks.\n\n Parameters\n ----------\n data : Any\n Data in a format that is valid for the corresponding `add_*` method\n of the specified ``layer_type``.\n meta : dict, optional\n Dict of keyword arguments that will be passed to the corresponding\n `add_*` method. MUST NOT contain any keyword arguments that are\n not valid for the corresponding method.\n layer_type : str\n Type of layer to add. MUST have a corresponding add_* method on\n on the viewer instance. If not provided, the layer is assumed to\n be \"image\", unless data.dtype is one of (np.int32, np.uint32,\n np.int64, np.uint64), in which case it is assumed to be \"labels\".\n\n Raises\n ------\n ValueError\n If ``layer_type`` is not one of the recognized layer types.\n TypeError\n If any keyword arguments in ``meta`` are unexpected for the\n corresponding `add_*` method for this layer_type.\n\n Examples\n --------\n A typical use case might be to upack a tuple of layer data with a\n specified layer_type.\n\n >>> viewer = napari.Viewer()\n >>> data = (\n ... np.random.random((10, 2)) * 20,\n ... {'face_color': 'blue'},\n ... 'points',\n ... )\n >>> viewer._add_layer_from_data(*data)\n\n \"\"\"\n\n layer_type = (layer_type or '').lower()\n\n # assumes that big integer type arrays are likely labels.\n if not layer_type:\n layer_type = guess_labels(data)\n\n if layer_type not in layers.NAMES:\n raise ValueError(\n f\"Unrecognized layer_type: '{layer_type}'. \"\n f\"Must be one of: {layers.NAMES}.\"\n )\n\n try:\n add_method = getattr(self, 'add_' + layer_type)\n except AttributeError:\n raise NotImplementedError(\n f\"Sorry! 
{layer_type} is a valid layer type, but there is no \"\n f\"viewer.add_{layer_type} available yet.\"\n )\n\n try:\n layer = add_method(data, **(meta or {}))\n except TypeError as exc:\n if 'unexpected keyword argument' in str(exc):\n bad_key = str(exc).split('keyword argument ')[-1]\n raise TypeError(\n \"_add_layer_from_data received an unexpected keyword \"\n f\"argument ({bad_key}) for layer type {layer_type}\"\n ) from exc\n else:\n raise exc\n\n return layer\n\n\n@lru_cache(maxsize=1)\ndef valid_add_kwargs() -> Dict[str, Set[str]]:\n \"\"\"Return a dict where keys are layer types & values are valid kwargs.\"\"\"\n valid = dict()\n for meth in dir(AddLayersMixin):\n if not meth.startswith('add_') or meth[4:] == 'layer':\n continue\n params = inspect.signature(getattr(AddLayersMixin, meth)).parameters\n valid[meth[4:]] = set(params) - {'self', 'kwargs'}\n return valid\n\n\ndef _normalize_layer_data(data: LayerData) -> FullLayerData:\n \"\"\"Accepts any layerdata tuple, and returns a fully qualified tuple.\n\n Parameters\n ----------\n data : LayerData\n 1-, 2-, or 3-tuple with (data, meta, layer_type).\n\n Returns\n -------\n FullLayerData\n 3-tuple with (data, meta, layer_type)\n\n Raises\n ------\n ValueError\n If data has len < 1 or len > 3, or if the second item in ``data`` is\n not a ``dict``, or the third item is not a valid layer_type ``str``\n \"\"\"\n if not isinstance(data, tuple) and 0 < len(data) < 4:\n raise ValueError(\"LayerData must be a 1-, 2-, or 3-tuple\")\n _data = list(data)\n if len(_data) > 1:\n if not isinstance(_data[1], dict):\n raise ValueError(\n \"The second item in a LayerData tuple must be a dict\"\n )\n else:\n _data.append(dict())\n if len(_data) > 2:\n if _data[2] not in layers.NAMES:\n raise ValueError(\n \"The third item in a LayerData tuple must be one of: \"\n f\"{layers.NAMES!r}.\"\n )\n else:\n _data.append(guess_labels(_data[0]))\n return tuple(_data) # type: ignore\n\n\ndef _unify_data_and_user_kwargs(\n data: LayerData,\n kwargs: Optional[dict] = None,\n layer_type: Optional[str] = None,\n fallback_name: str = None,\n) -> FullLayerData:\n \"\"\"Merge data returned from plugins with options specified by user.\n\n If ``data == (_data, _meta, _type)``. Then:\n\n - ``kwargs`` will be used to update ``_meta``\n - ``layer_type`` will replace ``_type`` and, if provided, ``_meta`` keys\n will be pruned to layer_type-appropriate kwargs\n - ``fallback_name`` is used if ``not _meta.get('name')``\n\n .. 
note:\n\n If a user specified both layer_type and additional keyword arguments\n to viewer.open(), it is their responsibility to make sure the kwargs\n match the layer_type.\n\n Parameters\n ----------\n data : LayerData\n 1-, 2-, or 3-tuple with (data, meta, layer_type) returned from plugin.\n kwargs : dict, optional\n User-supplied keyword arguments, to override those in ``meta`` supplied\n by plugins.\n layer_type : str, optional\n A user-supplied layer_type string, to override the ``layer_type``\n declared by the plugin.\n fallback_name : str, optional\n A name for the layer, to override any name in ``meta`` supplied by the\n plugin.\n\n Returns\n -------\n FullLayerData\n Fully qualified LayerData tuple with user-provided overrides.\n \"\"\"\n _data, _meta, _type = _normalize_layer_data(data)\n\n if layer_type:\n # the user has explicitly requested this be a certain layer type\n # strip any kwargs from the plugin that are no longer relevant\n _meta = prune_kwargs(_meta, layer_type)\n _type = layer_type\n\n if kwargs:\n # if user provided kwargs, use to override any meta dict values that\n # were returned by the plugin. We only prune kwargs if the user did\n # *not* specify the layer_type. This means that if a user specified\n # both layer_type and additional keyword arguments to viewer.open(),\n # it is their responsibility to make sure the kwargs match the\n # layer_type.\n _meta.update(prune_kwargs(kwargs, _type) if not layer_type else kwargs)\n\n if not _meta.get('name') and fallback_name:\n _meta['name'] = fallback_name\n return (_data, _meta, _type)\n\n\ndef prune_kwargs(kwargs: Dict[str, Any], layer_type: str) -> Dict[str, Any]:\n \"\"\"Return copy of ``kwargs`` with only keys valid for ``add_<layer_type>``\n\n Parameters\n ----------\n kwargs : dict\n A key: value mapping where some or all of the keys are parameter names\n for the corresponding ``Viewer.add_<layer_type>`` method.\n layer_type : str\n The type of layer that is going to be added with these ``kwargs``.\n\n Returns\n -------\n pruned_kwargs : dict\n A key: value mapping where all of the keys are valid parameter names\n for the corresponding ``Viewer.add_<layer_type>`` method.\n\n Raises\n ------\n ValueError\n If ``AddLayersMixin`` does not provide an ``add_<layer_type>`` method\n for the provided ``layer_type``.\n\n Examples\n --------\n >>> test_kwargs = {\n 'scale': (0.75, 1),\n 'blending': 'additive',\n 'num_colors': 10,\n }\n >>> prune_kwargs(test_kwargs, 'image')\n {'scale': (0.75, 1), 'blending': 'additive'}\n\n >>> # only labels has the ``num_colors`` argument\n >>> prune_kwargs(test_kwargs, 'labels')\n {'scale': (0.75, 1), 'blending': 'additive', 'num_colors': 10}\n \"\"\"\n add_method = getattr(AddLayersMixin, 'add_' + layer_type, None)\n if not add_method or layer_type == 'layer':\n raise ValueError(f\"Invalid layer_type: {layer_type}\")\n\n # get valid params for the corresponding add_<layer_type> method\n valid = valid_add_kwargs()[layer_type]\n return {k: v for k, v in kwargs.items() if k in valid}\n" ]
[ [ "numpy.all", "numpy.array", "numpy.zeros" ], [ "numpy.take", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
binghuang2018/aqml
[ "4901f3bd85db968fb3fc7ab97fd443421909d89d", "4901f3bd85db968fb3fc7ab97fd443421909d89d" ]
[ "io2/__init__.py", "lo/dmx.py" ]
[ "\nimport os,sys,time,re,fnmatch\nimport numpy as np\nimport ase.units as au\n\ndef timer(start,end):\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n print((\"Time elapsed: {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds)))\n return\n\ndef cmdout(cmd):\n return os.popen(cmd).read().strip().split()\n\ndef cmdout1(cmd):\n return os.popen(cmd).read().strip().split('\\n')\n\ndef cmdout2(cmd):\n return os.popen(cmd).read().strip()\n\ndef get_label(k,n):\n fmt = '%%0%dd'%n # equal to '0'*(nd-len(str(i))) + str(i)\n return fmt%k\n\n\nclass Units(object):\n def __init__(self):\n self.a2b = 1/au.Bohr #1.8897261258369282;\n # bohr to angstrom\n self.b2a = au.Bohr #0.5291772105638411;\n\n self.h2e = au.Hartree #27.211386024367243\n self.c2j = au.kcal/au.kJ\n self.j2c = 1.0/self.c2j\n\n # eV to kcal/mol, kj/mol\n self.e2kc = au.eV/(au.kcal/au.mol) #23.060548012069496;\n self.e2kj = au.eV/(au.kJ/au.mol) #96.48533288249877;\n\n # kcal/mol, kj/mol to eV\n self.kc2e = 1.0/self.e2kc #0.04336410390059322;\n self.kj2e = 1.0/self.e2kj #0.010364269574711572;\n\n #\n self.h2kc = self.h2e*self.e2kc #627.5094738898777;\n self.h2kj = self.h2e*self.e2kj #2625.4996387552483;\n\n\nclass Folder(object):\n\n def __init__(self, wd, format, regexp='', use_conf=True):\n assert os.path.exists(wd), '#ERROR: folder does not exist?'\n self.wd = wd[:-1] if wd[-1] == '/' else wd\n self.format = format\n\n #nfs = int(os.popen('\\ls -afq %s/ | wc -l'%wd).read().strip()) - 2\n # The reason that we have to subtract 2 is due to that\n # \"\\ls -afq\" considers \".\" and \"..\" as two files as well\n\n fs = np.sort( os.listdir(wd) )\n fs2 = []; nss = []\n for f in fs:\n if f.endswith(format):\n if regexp != '':\n if not fnmatch.fnmatch(f, regexp): continue\n fi = '%s/%s'%(self.wd, f)\n nss.append( len(fi) )\n fs2.append( fi )\n nss_u = np.unique(nss)\n if len(nss_u) > 1:\n # e.g., for water clusters: frag_001.xyz, frag_001_face.xyz (conformer)\n print(' ** WARNING: The lengths of filenames are not the same')\n if not use_conf:\n fs1 = np.array(fs1)\n nss = np.array(nss)\n fs2 = fs1[ nss == np.min(nss_u) ]\n self.fs = fs2\n self.nc = len(fs2) # number of total molecules (including conformers)\n\n def update_fidx(self):\n \"\"\"\n sometimes, the idx in the filename is not consistanet with the\n line number shown for `ls -1 frag_*xyz | nl`, e.g.,\n 1 frag_001.xyz\n 2 frag_003.xyz\n 3 frag_004.xyz\n now we want the two numbers to be consistent\n\n Attention: the renmaing cannot be parallelized!!\n \"\"\"\n # possible filenames:\n # frag_001.out, frag_001_c0001_raw_u.out, ...\n idxs = np.array([ re.findall('frag_(\\d+)[\\._]', fi)[0] for fi in self.fs ])\n idxs_u = np.sort(np.unique(idxs))\n self.sidxs = idxs\n self.sidxs_u = idxs_u\n self.nm = idxs_u.shape[0]\n if self.nc > self.nm:\n print(' ** WARNING: there are conformers!!')\n\n ims = np.arange(self.nm) + 1\n n = len(str(self.nm))\n fsu = []\n for i in range(self.nc):\n f = self.fs[i]\n sidx = self.sidxs[i]\n sidx_u = get_label( ims[ sidx == self.sidxs_u ], n )\n if sidx != sidx_u:\n fu = sidx_u.join( f.split(sidx) )\n fsu.append(fu)\n cmd = 'mv %s %s'%(f,fu)\n iok = os.system(cmd)\n else:\n fsu.append(f)\n self.fs = fsu\n\n def filter_files(self, idsOnly=[], idsException=[], substitute_fs=[]):\n \"\"\"\n filter molecules further\n \"\"\"\n nmu = self.nc\n fsu = self.fs\n vf3 = np.vectorize(retrieve_idx)\n sidxs = vf3(fsu)\n if not use_conf:\n msg = '#ERROR: there are conformers!!'\n assert len(sidxs) == len(set(list(sidxs))), msg\n for 
fi in substitute_fs:\n sidxi = retrieve_idx(fi)\n iif = sidxs.index(sidxi)\n fsu[iif] = fi\n\n n = len(idsException)\n if n > 0 and n < nmu:\n if not use_conf:\n nmu = nm1 - n\n idxs1 = np.arange(nm1)\n fsu = fsu[ np.setdiff1d(idxs1, np.array(idsException)-1) ]\n else:\n raise '#ERROR: Plz fill code here to process such case'\n\n n2 = len(idsOnly)\n if n2 > 0:\n if not use_conf:\n nmu = n2\n idxs2 = np.array(idsOnly) - 1\n fsu = fsu[ idxs2 ]\n else:\n raise '#ERROR: Plz fill code here to process such case'\n\n #print ' -- use_conf, fsu = ', use_conf, fsu\n\n return nmu, fsu\n\ndef savezWrapper(data, path, compressed = False):\n from numpy import savez,savez_compressed\n if compressed:\n executeSting = \"savez_compressed(f\"\n else:\n executeSting = \"savez(f\"\n for key in list(data.keys()):\n executeSting += \",\" + key + \"= data['\" + key + \"']\"\n executeSting += \")\"\n with open( path, 'wb' ) as f:\n exec(executeSting)\n\ndef loadWrapper(path):\n from numpy import load\n out = {}\n with open( path, 'rb' ) as f:\n data = load(f)\n for key in data.files:\n out[key] = data[key]\n return out\n\ndef remove_files(obj):\n \"\"\"\n delete a file or multiple files\n \"\"\"\n fs = []\n if type(obj) is str:\n if os.path.exists(obj):\n fs.append(obj)\n elif type(obj) is list:\n for obj_i in obj:\n if os.path.exists(obj_i):\n fs.append(obj_i)\n os.system('rm %s'%( ' '.join(fs) ) )\n\n", "\nimport os, sys, ase\nimport numpy as np\nfrom aqml.cheminfo.rw.xyz import *\nimport itertools as itl\nimport cml.fslatm as sl\nimport scipy.spatial.distance as ssd\n\n#np.set_printoptions(precision=4)\n\nT,F = True,False\n\n\ndef get_nzs(ias1, ias2, zs, zsu):\n nzs = []\n nm = len(ias1)\n for i in range(nm):\n ib, ie = ias1[i], ias2[i]\n zsi = zs[ib:ie]\n nzsi = []\n for _zj in zsu:\n nzsi.append( (_zj == np.array(zsi,np.int)).sum() )\n nzs.append(nzsi)\n return np.array(nzs, np.int)\n\ndef get_neighbors(rbcut, coords):\n na = len(coords)\n ds = ssd.squareform( ssd.pdist( coords ) )\n ias = np.arange(na)\n _nbrs = []; ns = []\n nmax = 0\n for i in range(na):\n nbrs_i = ias[ds[i]<= rbcut]; nb = len(nbrs_i)\n if nb > nmax: nmax = nb\n _nbrs.append(nbrs_i); ns.append(nb)\n nbrs = - np.ones((na,nmax)).astype(np.int)\n for i in range(na):\n nbrs[i,:ns[i]] = _nbrs[i]\n return nmax, nbrs\n\n\nclass XData(object):\n\n def __init__(self, nas, zs, coords, wd=None):\n \"\"\" slatm object \"\"\"\n if wd is None:\n self.wd = os.environ['PWD']\n else:\n _wd = wd[:-1] if wd[-1] == '/' else wd\n self.wd = _wd\n self.nm = len(nas)\n self.zs = zs\n zsu = np.unique(self.zs).astype(np.int)\n self.zsu = zsu\n self.nas = nas\n ias2 = np.cumsum(self.nas)\n ias1 = np.concatenate( ([0], ias2[:-1]) )\n self.ias2 = ias2\n self.ias1 = ias1\n self.nat = sum(nas)\n self.nzs = get_nzs(ias1, ias2, zs, zsu)\n self.coords = coords\n\n def get_slatm_mbtypes(self):\n \"\"\" get slatm many-body types\"\"\"\n zsmax = self.zsu\n nzsmax = np.max(self.nzs, axis=0)\n if self.pbc != '000':\n # the PBC will introduce new many-body terms, so set\n # nzmax to 3 if it's less than 3\n nzsmax[ nzsmax <= 2 ] = 3\n boas = [ [zi,] for zi in zsmax ]\n bops = [ [zi,zi] for zi in zsmax ] + list( itl.combinations(zsmax,2) )\n bots = []\n for i in zsmax:\n for bop in bops:\n j,k = bop\n tas = [ [i,j,k], [i,k,j], [j,i,k] ]\n for tasi in tas:\n if (tasi not in bots) and (tasi[::-1] not in bots):\n nzsi = [ (zj == tasi).sum() for zj in zsmax ]\n if np.all(nzsi <= nzsmax):\n bots.append( tasi )\n self.mbs1 = np.array(boas,np.int)\n self.mbs2 = np.array(bops,np.int)\n 
self.mbs3 = np.array(bots,np.int)\n self.n1, self.n2, self.n3 = [len(bag) for bag in [boas, bops, bots]]\n\n def init_dm_pair(self):\n nbmax = 0\n nbrs = []; ns = []\n for i in range(self.nm):\n ib, ie = self.ias1[i], self.ias2[i]\n _nbmax, _nbrs = get_neighbors(self.rbcut, self.coords[ib:ie])\n assert _nbmax>0\n ns.append(_nbmax)\n nbmax = max(nbmax,_nbmax)\n nbrs += list(_nbrs)\n t = - np.ones((self.nat, nbmax)).astype(np.int)\n #print ' ** ias1, ias2 = ', self.ias1,self.ias2\n for i in range(self.nm):\n ib, ie = self.ias1[i], self.ias2[i]; #print ' ** ', nbrs[ib:ie]\n t[ib:ie, :ns[i]] = nbrs[ib:ie]\n self.nbmax = nbmax\n self.nbrs = t\n\n def init_grids(self):\n nsx = [0,0,0]; r0 = 0.1\n # set up grid\n nsx[0] = int((self.racut - r0)/self.dgrids[0]) + 1\n nsx[1] = int((self.rbcut - r0)/self.dgrids[0]) + 1\n #d2r = pi/dble(180) ! degree to rad\n #a0 = -20.0*d2r\n #a1 = pi + 20.0*d2r\n nsx[2] = int((np.pi + 40.0*(np.pi/180.))/self.dgrids[0]) + 1\n self.n = self.n1 + self.n2*nsx[0] + self.n3*nsx[2]\n self.nu = self.n1 + self.n2*nsx[1] + self.n3*nsx[2]\n self.nsx = nsx\n\n def get_x(self, param={'racut':3.2, 'rbcut':4.2}):\n \"\"\" molecular repr\n\n racut -- cutoff radius for atom\n rbcut -- cutoff radius for bond\n \"\"\"\n # init_param first\n _param = {'local':T, 'nbody':3, 'dgrids': [0.04,0.04], 'sigmas':[0.05,0.05],\\\n 'racut':3.2, 'rbcut':4.2, 'alchemy':False, 'iBoA':True, 'rpower2':6, 'coeffs':[1.], \\\n 'rpower3': 3, 'ws':[1.,1./2,1./3], 'pbc':'000', 'kernel':'gaussian', \\\n 'saves':[F,F,F], 'reuses':[F,F,F]}\n for key in param.keys():\n if param[key] != _param[key]:\n _param[key] = param[key]\n self.savex,self.saved,self.savek = _param['saves']\n self.reusex,self.reused,self.reusek = _param['reuses']\n keys = ['dgrids','sigmas','racut','rbcut','rpower2','rpower3', 'ws','kernel','pbc']\n self.dgrids,self.sigmas,self.racut,self.rbcut,self.rpower2,self.rpower3,self.ws,self.kernel,self.pbc = \\\n [ _param[key] for key in keys ]\n self.get_slatm_mbtypes()\n self.init_dm_pair()\n self.init_grids()\n\n mbs1 = self.mbs1\n mbs2 = self.mbs2\n mbs3 = self.mbs3; #print ' ** mbs2, mbs3 = ', mbs2.shape, mbs3.shape\n zs, nas = self.zs, self.nas\n n,nu,n1,n2,n3 = self.n,self.nu,self.n1,self.n2,self.n3\n nsx, dgrid, sigma, rpower2, rpower3 = self.nsx, self.dgrids[0], self.sigmas[0], self.rpower2, self.rpower3\n nbmax = self.nbmax\n rscut = [self.racut, self.rbcut]\n #print ' ++ nbmax = ', nbmax\n xsa = []; xsb = []; labels = []\n for i in range(self.nm):\n ib, ie = self.ias1[i], self.ias2[i]\n _nbrs = self.nbrs[ib:ie]\n zsi = zs[ib:ie]\n _xs1,_xsb1 = sl.fslatm.fget_local_spectrum(zsi,self.coords[ib:ie].T, _nbrs.T, \\\n n,nu, mbs1,mbs2.T,mbs3.T, nsx, rscut,dgrid,sigma, \\\n 1.,rpower2,1.,rpower3, 0, 0)\n #print ' ** shape(_xs), shape(_xsb) = ', _xs.shape, _xsb.shape\n _xs = _xs1.T; _xsb = _xsb1.T\n nul = np.zeros(_xs[0].shape)\n for ia in range(nas[i]):\n\n #xsa.append( _xs[ia] )\n #xsb.append( np.concatenate((_xs[ia],_xs[ia], nul,nul), axis=0) )\n #labels.append( [i,ia,ia,zsi[ia],zsi[ia]] )\n ## Note that the case [ A_I, A_I ] is automatically included below\n ## So the 3 lines above is not necessary\n li = list(_nbrs[ia])\n for ja in _nbrs[ia]:\n if ja > -1:\n lj = list(_nbrs[ja])\n label = [i,ia,ja,zsi[ia],zsi[ja]]; #print ' ** label = ', label\n labels.append(label)\n _ja= li.index(ja)\n _ia= lj.index(ia)\n xi = np.concatenate( (_xs[ia],_xs[ja], _xsb[ia,_ja,:],_xsb[ja,_ia,:]), axis=0 )\n xsb.append(xi)\n self.xsb = xsb\n self.xsa = xsa\n self.labels = np.array(labels, np.int)\n\n\n def 
get_idx(self, keys, ims=None, labels=None, opt='ii'):\n \"\"\"\n opt = 'ii' or 'zz'\n \"\"\"\n\n if labels is None:\n labels = self.labels\n nm = np.max(labels[:,0])\n nlb = len(labels)\n tidx = np.arange(nlb)\n if ims is None: ims = np.arange(nm)\n idxs = []\n if opt in ['ii']:\n for im in ims:\n for key in keys:\n ia, ja = key\n entry = tidx[ reduce(np.logical_and, (labels[:,0]==im, labels[:,1]==ia, labels[:,2]==ja)) ]\n assert len(entry)>0, '#ERROR: no entry found! R_IJ may exceed Rcut'\n idxs.append( entry[0] )\n elif opt in ['zz','z']:\n same_atm = (labels[:,1]==labels[:,2]) if opt=='z' else (labels[:,1]!=labels[:,2])\n for im in ims:\n for key in keys:\n zi, zj = key\n entries = tidx[ reduce(np.logical_and, (same_atm, labels[:,0]==im, labels[:,3]==zi, labels[:,4]==zj)) ]\n idxs += list(entries)\n else:\n raise '#ERROR:'\n return np.array(idxs,np.int)\n\nif __name__ == '__main__':\n import ase.io as aio\n\n m = aio.read('c08h10.xyz')\n nas = [len(m)]; zs = m.numbers; coords = m.positions\n obj = XData(nas, zs, coords)\n obj.get_x(param={'racut':3.2, 'rbcut':4.2})\n\n\n" ]
[ [ "numpy.unique", "numpy.min", "numpy.arange", "numpy.vectorize", "numpy.load", "numpy.array" ], [ "numpy.unique", "numpy.arange", "numpy.cumsum", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.all", "scipy.spatial.distance.pdist", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
mfreiwald/FromMotionToEmotion
[ "6417432ae56f1d9aabab1112e704671df8770a59" ]
[ "PCATest.py" ]
[ "from module.Configuration import Configuration\nfrom dask.distributed import Client\nimport logging\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom module.Evaluation import Evaluation\n# from module.Cluster import Cluster\n\n\ndef main():\n\n c = Client()\n\n # On a SLURM Network, you can call:\n # clu = Cluster()\n # c = clu.cli\n # Check module/Cluster.py for more details\n\n logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)\n\n standard_config = Configuration(window_size=20, window_step=5.0, remove_seconds=0)\n print(standard_config)\n eva = Evaluation(c, standard_config)\n\n df1 = eva.make_preprocessing()\n df2 = eva.make_feature_engineering(df1)\n df3 = eva.make_selection(df2)\n\n for pcaNr in [2, 5, 25, 66, 100, 150, 200]:\n df = df3.copy()\n pcaCols = [\"PC%03d\"%d for d in range(pcaNr)]\n x = df.values\n x = StandardScaler().fit_transform(x)\n pca = PCA(n_components=pcaNr)\n principalComponents = pca.fit_transform(x)\n df4 = pd.DataFrame(data = principalComponents, columns = pcaCols, index=df.index)\n 'PCA:', pcaNr\n results_clf_score, sizes, info_df, importances_df, all_probas = eva.make_classification(df4)\n\n # clu.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "sklearn.decomposition.PCA", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
dnxbjyj/python-basic
[ "14cba06cd84715ebb8acc6dd3cef84789316d81e" ]
[ "gui/wxpython/wxPython-demo-4.0.1/samples/floatcanvas/MovingTriangle.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n\nThis is a small demo, showing how to make an object that can be moved around.\n\n\"\"\"\n\nimport wx\n\n#ver = 'local'\nver = 'installed'\n\nif ver == 'installed': ## import the installed version\n from wx.lib.floatcanvas import NavCanvas, Resources\n from wx.lib.floatcanvas import FloatCanvas as FC\n print(\"using installed version: %s\" % wx.lib.floatcanvas.__version__)\nelif ver == 'local':\n ## import a local version\n import sys\n sys.path.append(\"..\")\n from floatcanvas import NavCanvas, Resources\n from floatcanvas import FloatCanvas as FC\n\nimport numpy as N\n\n## here we create a new DrawObject:\n## code borrowed and adapted from Werner Bruhin\n\nclass ShapeMixin:\n \"\"\"\n just here for added features later\n \"\"\"\n def __init__(self):\n pass\n\nclass TriangleShape1(FC.Polygon, ShapeMixin):\n\n def __init__(self, XY, L):\n\n \"\"\"\n An equilateral triangle object\n\n XY is the middle of the triangle\n\n L is the length of one side of the Triangle\n \"\"\"\n\n XY = N.asarray(XY)\n XY.shape = (2,)\n\n Points = self.CompPoints(XY, L)\n\n FC.Polygon.__init__(self, Points,\n LineColor = \"Black\",\n LineStyle = \"Solid\",\n LineWidth = 2,\n FillColor = \"Red\",\n FillStyle = \"Solid\")\n ShapeMixin.__init__(self)\n\n def CompPoints(self, XY, L):\n c = L/ N.sqrt(3)\n\n Points = N.array(((0, c),\n ( L/2.0, -c/2.0),\n (-L/2.0, -c/2.0)),\n N.float_)\n\n Points += XY\n return Points\n\n\nclass DrawFrame(wx.Frame):\n\n \"\"\"\n A frame used for the FloatCanvas Demo\n\n \"\"\"\n\n def __init__(self,parent, id,title,position,size):\n wx.Frame.__init__(self,parent, id,title,position, size)\n\n self.CreateStatusBar()\n # Add the Canvas\n Canvas = NavCanvas.NavCanvas(self,-1,(500,500),\n ProjectionFun = None,\n Debug = 0,\n BackgroundColor = \"DARK SLATE BLUE\",\n ).Canvas\n\n self.Canvas = Canvas\n\n Canvas.Bind(FC.EVT_MOTION, self.OnMove )\n Canvas.Bind(FC.EVT_LEFT_UP, self.OnLeftUp )\n\n Canvas.AddRectangle((-5,-5),\n (10,10),\n LineColor = \"Red\",\n LineStyle = \"Solid\",\n LineWidth = 2,\n FillColor = \"CYAN\",\n FillStyle = \"Solid\")\n\n Points = N.array(((0,0),\n (1,0),\n (0.5, 1)),\n N.float_)\n\n data = (( (0,0), 1),\n ( (3,3), 2),\n ( (-2,3), 2.5 ),\n )\n\n for p, L in data:\n Tri = TriangleShape1(p, 1)\n Canvas.AddObject(Tri)\n Tri.Bind(FC.EVT_FC_LEFT_DOWN, self.TriHit)\n\n\n self.MoveTri = None\n\n self.Show(True)\n self.Canvas.ZoomToBB()\n\n self.Moving = False\n\n def TriHit(self, object):\n print(\"In TriHit\")\n if not self.Moving:\n self.Moving = True\n self.StartPoint = object.HitCoordsPixel\n self.StartTri = self.Canvas.WorldToPixel(object.Points)\n self.MoveTri = None\n self.MovingTri = object\n\n def OnMove(self, event):\n \"\"\"\n Updates the status bar with the world coordinates\n\n And moves the triangle it it is clicked on\n\n \"\"\"\n self.SetStatusText(\"%.4f, %.4f\"%tuple(event.Coords))\n\n if self.Moving:\n dxy = event.GetPosition() - self.StartPoint\n # Draw the Moving Triangle:\n dc = wx.ClientDC(self.Canvas)\n dc.SetPen(wx.Pen('WHITE', 2, wx.SHORT_DASH))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetLogicalFunction(wx.XOR)\n if self.MoveTri is not None:\n dc.DrawPolygon(self.MoveTri)\n self.MoveTri = self.StartTri + dxy\n dc.DrawPolygon(self.MoveTri)\n\n def OnLeftUp(self, event):\n if self.Moving:\n self.Moving = False\n if self.MoveTri is not None:\n dxy = event.GetPosition() - self.StartPoint\n dxy = self.Canvas.ScalePixelToWorld(dxy)\n self.MovingTri.Move(dxy) ## The Move function has jsut been added\n ## to the 
FloatCanvas PointsObject\n ## It does the next three lines for you.\n #self.Tri.Points += dxy\n #self.Tri.BoundingBox += dxy\n #self.Canvas.BoundingBoxDirty = True\n self.MoveTri = None\n self.Canvas.Draw(True)\n\nif __name__ == \"__main__\":\n app = wx.App(0)\n x = DrawFrame(None, -1, \"FloatCanvas TextBox Test App\", wx.DefaultPosition, (700,700) )\n x.Show()\n app.MainLoop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ansvver/pylufia
[ "0076b4baef1de5371476910c12c1829d694fa2f3" ]
[ "mir/feature/timbre/bark.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport scipy as sp\nfrom pylufia.signal.spectral import *\n\ndef _make_bark_filterbank(fs, framesize):\n \"\"\"\n Calculate Bark-band filterbank\n \"\"\"\n f_centers = sp.array([50,150,250,350,450,570,700,840,1000,1170,1370,1600,1850,2150,2500,2900,3400,4000,4800,5800,7000,8500,10500,13500])\n f_lowers = sp.array([20,100,200,300,400,510,630,770,920,1080,1270,1480,1720,2000,2320,2700,3150,3700,4400,5300,6400,7700,9500,12000])\n f_uppers = sp.array([100,200,300,400,510,630,770,920,1080,1270,1480,1720,2000,2320,2700,3150,3700,4400,5300,6400,7700,9500,12000,15500])\n \n n_freqs = framesize/2\n n_bark_band = len(f_centers)\n \n fidx_centers = (framesize * f_centers / float(fs)).astype('int')\n fidx_lowers = (framesize * f_lowers / float(fs)).astype('int')\n fidx_uppers = (framesize * f_uppers / float(fs)).astype('int')\n \n filterbank = sp.zeros( (n_bark_band, n_freqs) )\n \n for n in range(n_bark_band):\n inc = 1.0 / (fidx_centers[n] - fidx_lowers[n])\n idxs = sp.arange(fidx_lowers[n], fidx_centers[n])\n filterbank[n, fidx_lowers[n]:fidx_centers[n]] = (idxs - fidx_lowers[n]) * inc\n # filterbank[n, fidx_lowers[n]:fidx_centers[n]] = 1.0\n dec = 1.0 / (fidx_uppers[n] - fidx_centers[n])\n idxs = sp.arange(fidx_centers[n], fidx_uppers[n])\n filterbank[n, fidx_centers[n]:fidx_uppers[n]] = 1.0 - (idxs - fidx_centers[n]) * dec\n # filterbank[n, fidx_centers[n]:fidx_uppers[n]] = 1.0\n \n return filterbank\n \ndef bark_spectrogram(input, framesize=1024, hopsize=512, fs=44100):\n \"\"\"\n Calculate bark-scaled spectrogram\n \n Parameters:\n inData: ndarray\n input signal\n framesize: int\n framesize\n hopsize: int\n hopsize\n fs: int\n samplingrate\n \n Returns:\n result: ndarray\n bark-scaled spectrogram\n \"\"\"\n S,F,T = stft(input, framesize, hopsize, fs, 'hann')\n S = sp.absolute(S)\n \n # bark_idx = [int(_hz2bark(F[i])) for i in xrange(len(F))]\n \n # bark_spe = sp.zeros((S.shape[1], n_bark_band))\n # for i in xrange(S.shape[0]):\n # bark_spe[:, bark_idx[i]] += S[i, :]\n \n # for i in xrange(n_bark_band):\n # count = bark_idx.count(i)\n # bark_spe[:, i] /= count\n bark_filterbank = _make_bark_filterbank(fs, framesize)\n bark_spe = sp.dot(S.T, bark_filterbank.T)\n \n return bark_spe\n\ndef _hz2bark(f):\n \"\"\"\n Hz -> Bark\n \"\"\"\n return 13 * sp.arctan(f / 1315.8) + 3.5 * sp.arctan(f / 7518.0)" ]
[ [ "scipy.zeros", "scipy.absolute", "scipy.arctan", "scipy.arange", "scipy.array", "scipy.dot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
juheerizwana/tensorflow
[ "eb67de2735cb0350c2bac9fbdf6635670aabd5ae" ]
[ "tensorflow/python/keras/__init__.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of the Keras API meant to be a high-level API for TensorFlow.\n\nDetailed documentation and user guides are available at\n[keras.io](https://keras.io).\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import tf2\n\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import applications\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import callbacks\nfrom tensorflow.python.keras import callbacks_v1\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import datasets\nfrom tensorflow.python.keras import estimator\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import ops\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras import premade\nfrom tensorflow.python.keras import preprocessing\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.layers import Input\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.models import Sequential\n\nfrom tensorflow.python.util.tf_export import keras_export\n\nif tf2.enabled():\n __version__ = '2.3.0-tf'\nelse:\n __version__ = '2.2.4-tf'\n\nkeras_export('keras.__version__').export_constant(__name__, '__version__')\n\ndel absolute_import\ndel division\ndel print_function\n" ]
[ [ "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.tf2.enabled" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
AlonKellner/hifi-gan
[ "e842836a0c879289c1848e922695ea1117715739", "e842836a0c879289c1848e922695ea1117715739", "e842836a0c879289c1848e922695ea1117715739" ]
[ "src/speech_distillation/custom_layers.py", "src/speech_distillation/validation_visualization_callback.py", "src/speech_distillation/multilabel_wave_dataset.py" ]
[ "from random import randint\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as f\r\nimport math\r\nfrom extra_utils import get_padding_period, get_padding\r\nfrom src.meldataset import mel_spectrogram\r\n\r\n\r\nclass Conv1dRechanneled(nn.Conv1d):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride=None, padding=0, dilation=1, groups=1, bias=True,\r\n padding_mode='zeros', device=None, dtype=None):\r\n common_denominator = math.gcd(in_channels, out_channels)\r\n if stride is None:\r\n stride = out_channels // common_denominator\r\n self.rechanneled = out_channels\r\n super(Conv1dRechanneled, self).__init__(\r\n in_channels=in_channels,\r\n out_channels=stride * in_channels,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n dilation=dilation,\r\n groups=groups,\r\n bias=bias,\r\n padding_mode=padding_mode,\r\n device=device,\r\n dtype=dtype)\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n return self._conv_forward(input, self.weight, self.bias) \\\r\n .transpose(1, 2) \\\r\n .reshape(input.size()[0], -1, self.rechanneled) \\\r\n .transpose(1, 2)\r\n\r\n\r\nclass GroupShuffle1d(nn.Module):\r\n def __init__(self, groups):\r\n self.groups = groups\r\n super(GroupShuffle1d, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n mid_channels = torch.div(channels, self.groups, rounding_mode='floor')\r\n return input\\\r\n .reshape(batch_size, self.groups, mid_channels, -1)\\\r\n .transpose(1, 2)\\\r\n .reshape(batch_size, channels, -1)\r\n\r\n\r\nclass GroupUnshuffle1d(nn.Module):\r\n def __init__(self, groups):\r\n self.groups = groups\r\n super(GroupUnshuffle1d, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n mid_channels = torch.div(channels, self.groups, rounding_mode='floor')\r\n return input\\\r\n .reshape(batch_size, mid_channels, self.groups, -1)\\\r\n .transpose(1, 2)\\\r\n .reshape(batch_size, channels, -1)\r\n\r\n\r\nclass Roll1d(nn.Module):\r\n def __init__(self, period, padding_mode='constant', padding_value=0):\r\n self.period = period\r\n self.padding_mode = padding_mode\r\n self.padding_value = padding_value\r\n super(Roll1d, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n pre_padding, post_padding = get_padding_period(length, self.period)\r\n return f.pad(input, (pre_padding, post_padding), self.padding_mode, self.padding_value) \\\r\n .transpose(1, 2) \\\r\n .reshape(batch_size, -1, channels * self.period) \\\r\n .transpose(1, 2)\r\n\r\n\r\nclass Unroll1d(nn.Module):\r\n def __init__(self, period):\r\n self.period = period\r\n super(Unroll1d, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n return input \\\r\n .transpose(1, 2) \\\r\n .reshape(batch_size, length * self.period, -1) \\\r\n .transpose(1, 2)\r\n\r\n\r\nclass Replicate(nn.Module):\r\n def __init__(self, replica_count):\r\n self.replica_count = replica_count\r\n super(Replicate, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n replicas = [input for i in range(self.replica_count)]\r\n return torch.cat(replicas, dim=1)\r\n\r\n\r\nclass AvgChannels(nn.Module):\r\n def __init__(self):\r\n super(AvgChannels, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> 
torch.Tensor:\r\n return input.mean(dim=1).unsqueeze(1)\r\n\r\n\r\nclass AvgPool1dDilated(nn.Module):\r\n def __init__(self, kernel_size, stride=None, padding=0, dilation=1, ceil_mode: bool = False,\r\n count_include_pad: bool = True):\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n self.padding = padding\r\n self.dilation = dilation\r\n self.ceil_mode = ceil_mode\r\n self.count_include_pad = count_include_pad\r\n super(AvgPool1dDilated, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n stacked_input = input\\\r\n .transpose(1, 2)\\\r\n .reshape(batch_size, -1, self.dilation, channels)\\\r\n .transpose(3, 1)\r\n pooled = f.avg_pool2d(stacked_input, (1, self.kernel_size), (self.stride, 1),\r\n (0, self.padding), self.ceil_mode, self.count_include_pad)\r\n return pooled\\\r\n .transpose(1, 3)\\\r\n .reshape(batch_size, length, channels)\\\r\n .transpose(2, 1)\r\n\r\n\r\nclass MelSpectrogram(nn.Module):\r\n def __init__(self, sampling_rate, output_channels, kernel_size, stride, padding_mode='constant', padding_value=0):\r\n self.sampling_rate = sampling_rate\r\n self.output_channels = output_channels\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n self.padding_mode = padding_mode\r\n self.padding_value = padding_value\r\n super(MelSpectrogram, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n pre_padding, post_padding = get_padding_period(length, self.stride)\r\n padded_input = f.pad(input, (pre_padding, post_padding), self.padding_mode, self.padding_value)\r\n spec = mel_spectrogram(padded_input.squeeze(1),\r\n n_fft=self.kernel_size,\r\n num_mels=self.output_channels,\r\n sampling_rate=self.sampling_rate,\r\n hop_size=self.stride,\r\n win_size=self.kernel_size,\r\n fmin=0,\r\n fmax=None\r\n )\r\n return spec\r\n\r\n\r\nclass Noise1d(nn.Module):\r\n def __init__(self, channels):\r\n self.channels = channels\r\n super(Noise1d, self).__init__()\r\n\r\n def forward(self, input: torch.Tensor) -> torch.Tensor:\r\n batch_size, channels, length = input.size()\r\n return torch.randn(batch_size, self.channels, length, device=input.device)\r\n\r\n\r\nclass OneHot(nn.Module):\r\n def __init__(self, channels, dim=-1):\r\n self.channels = channels\r\n self.dim = dim\r\n super(OneHot, self).__init__()\r\n\r\n def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:\r\n total_dims = len(input_tensor.size())\r\n one_hot = f.one_hot(input_tensor, self.channels).type(torch.FloatTensor).to(input_tensor.device)\r\n if self.dim != -1:\r\n permutation = [i if i < self.dim else i-1 if i > self.dim else -1 for i in range(0, total_dims+1)]\r\n one_hot = one_hot.permute(*permutation)\r\n return one_hot\r\n", "import torch\r\nfrom pytorch_lightning.callbacks import Callback\r\n\r\nfrom src.utils import plot_spectrogram, plot_categorical\r\nfrom logging_utils import rank\r\n\r\n\r\nclass ValidationVisualizationCallback(Callback):\r\n def __init__(self, amounts_to_log):\r\n self.amounts_to_log = amounts_to_log\r\n self.truth_to_log = {}\r\n self.to_log = {}\r\n\r\n def on_validation_batch_end(\r\n self,\r\n trainer,\r\n pl_module,\r\n outputs,\r\n batch,\r\n batch_idx: int,\r\n dataloader_idx: int,\r\n ):\r\n losses, outputs = outputs\r\n\r\n sw = pl_module.logger.experiment\r\n self.visualize(trainer, pl_module, batch, batch_idx, dataloader_idx, sw,\r\n 'wavs', self._visualize_wav, 'few', outputs['wav'])\r\n 
self.visualize(trainer, pl_module, batch, batch_idx, dataloader_idx, sw,\r\n 'mels', self._visualize_mel, 'few', outputs['mel'])\r\n self.visualize(trainer, pl_module, batch, batch_idx, dataloader_idx, sw,\r\n 'labels', self._visualize_label, 'few', outputs['label'], level=1)\r\n self.visualize(trainer, pl_module, batch, batch_idx, dataloader_idx, sw,\r\n 'outputs', self._visualize_output, 'once', outputs)\r\n del outputs\r\n self.visualize_model_parameters(trainer, pl_module, batch, batch_idx, dataloader_idx,\r\n sw)\r\n\r\n def visualize_model_parameters(self, trainer, pl_module, batch, batch_idx, dataloader_idx, sw):\r\n if self._check_to_log(batch_idx, 'parameters', 'once'):\r\n models = pl_module.get_learning_models()\r\n for name, model in models.items():\r\n sw.add_histogram(rank(f'parameters/{name}'), torch.cat([p.detach().view(-1) for p in model.parameters()]),\r\n pl_module.global_step)\r\n\r\n def visualize(self, trainer, pl_module, batch, batch_idx, dataloader_idx, sw, prefix, visualize, log_type, data,\r\n level=1000):\r\n if self._check_to_log(batch_idx, prefix, log_type):\r\n self.visualize_recursive(\r\n logger=sw,\r\n pl_module=pl_module,\r\n prefix=prefix,\r\n visualize=visualize,\r\n batch_idx=batch_idx,\r\n log_type=log_type,\r\n data=data,\r\n level=level\r\n )\r\n\r\n def visualize_recursive(self, logger, pl_module, batch_idx, prefix, data, visualize, log_type, level):\r\n if isinstance(data, dict) and level > 0:\r\n for key, value in data.items():\r\n new_prefix = f'{prefix}/{key}'\r\n if key != 'truth' or self._check_truth_to_log(batch_idx, new_prefix, log_type):\r\n self.visualize_recursive(\r\n logger=logger,\r\n pl_module=pl_module,\r\n batch_idx=batch_idx,\r\n prefix=new_prefix,\r\n log_type=log_type,\r\n visualize=visualize,\r\n data=value,\r\n level=level-1\r\n )\r\n elif isinstance(data, (list, tuple)) and level > 0:\r\n for key, value in enumerate(data):\r\n self.visualize_recursive(\r\n logger=logger,\r\n pl_module=pl_module,\r\n batch_idx=batch_idx,\r\n prefix=f'{prefix}/{key}',\r\n log_type=log_type,\r\n visualize=visualize,\r\n data=value,\r\n level=level-1\r\n )\r\n else:\r\n visualize(logger, pl_module, batch_idx, f'{prefix}/{batch_idx}', data)\r\n\r\n def _check_truth_to_log(self, index, key, log_type):\r\n truth_to_log = self._get_truth_to_log(key, log_type)\r\n if index in truth_to_log:\r\n truth_to_log.remove(index)\r\n return True\r\n return False\r\n\r\n def _get_truth_to_log(self, key, log_type):\r\n if key not in self.truth_to_log:\r\n self.truth_to_log[key] = list(range(self.amounts_to_log[log_type]))\r\n return self.truth_to_log[key]\r\n\r\n def _check_to_log(self, index, key, log_type):\r\n truth_to_log = self._get_truth_to_log(key, log_type)\r\n return index in truth_to_log\r\n\r\n def _get_to_log(self, key, log_type):\r\n if key not in self.truth_to_log:\r\n self.truth_to_log[key] = list(range(self.amounts_to_log[log_type]))\r\n return self.truth_to_log[key]\r\n\r\n def _visualize_wav(self, sw, pl_module, batch_idx, prefix, wav):\r\n for index, sub_wav in enumerate(wav):\r\n sw.add_audio(rank(f'{prefix}/{index}'), sub_wav.cpu().numpy(), pl_module.global_step,\r\n pl_module.config.sampling_rate)\r\n\r\n def _visualize_mel(self, sw, pl_module, batch_idx, prefix, mel):\r\n for index, sub_mel in enumerate(mel):\r\n sw.add_figure(rank(f'{prefix}/{index}'), plot_spectrogram(sub_mel.cpu().numpy()),\r\n pl_module.global_step)\r\n\r\n def _visualize_label(self, sw, pl_module, batch_idx, prefix, label):\r\n cat_label = self._cat_recursive(label)\r\n 
for index, sub_label in enumerate(cat_label):\r\n sw.add_figure(rank(f'{prefix}/{index}'), plot_categorical(sub_label.squeeze().cpu().numpy()),\r\n pl_module.global_step)\r\n\r\n def _visualize_output(self, sw, pl_module, batch_idx, prefix, output):\r\n sw.add_histogram(rank(prefix), output, pl_module.global_step)\r\n\r\n def _cat_recursive(self, label):\r\n if isinstance(label, dict):\r\n label_list = list(label.items())\r\n label_sorted = list(sorted(label_list, key=lambda pair: pair[0]))\r\n values = [self._cat_recursive(value) for key, value in label_sorted]\r\n return torch.cat(values, dim=1)\r\n else:\r\n label = label.squeeze()\r\n if label.dtype not in [torch.int64, torch.int32, torch.int16, torch.int8]:\r\n label = label.argmax(dim=1)\r\n label = label.squeeze().unsqueeze(1)\r\n return label\r\n", "import json\r\nimport math\r\nimport os\r\nimport pickle\r\nimport random\r\nfrom multiprocessing import Pool\r\nfrom pathlib import Path\r\n\r\nimport pandas as pd\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.utils.data\r\n\r\nfrom augmentation.augmentation_methods import \\\r\n NoiseAugmentor, RirAugmentor, CodecAugmentor, \\\r\n LowpassAugmentor, HighpassAugmentor, ReverbAugmentor, \\\r\n HilbertAugmentor\r\nfrom complex_data_parser import get_path_by_glob, parse_complex_data\r\nfrom src.meldataset import load_wav\r\nfrom textgrid_parsing import parse_textgrid\r\n\r\nPHI = (1 + math.sqrt(5))/2\r\n\r\nMAX_WAV_VALUE = 32768.0\r\n\r\nlabels_to_use = ['speaker', 'sex', 'mic-brand']\r\n\r\ntimed_labels_to_use = ['phones']\r\n\r\nlabel_groups = {\r\n 'content': ['speaker', 'sex', 'phones'],\r\n 'style': ['mic-brand']\r\n}\r\naugmentation_label_groups = {\r\n 'content': [],\r\n 'style': ['noise', 'rir', 'lowpass', 'highpass', 'reverb', 'codec', 'hilbert']\r\n}\r\n\r\n\r\nclass MultilabelWaveDataset(torch.utils.data.Dataset):\r\n def __init__(self, data_dir, cache_dir, name, source, segment_length, sampling_rate, embedding_size,\r\n augmentation_config=None, disable_wavs=False, split=True, size=None,\r\n fine_tuning=False, deterministic=False):\r\n self.data_dir = data_dir\r\n self.cache_dir = cache_dir\r\n self.name = name\r\n self.source = source\r\n self.segment_length = segment_length\r\n self.embedding_size = embedding_size\r\n self.sampling_rate = sampling_rate\r\n self.split = split\r\n self.fine_tuning = fine_tuning\r\n self.size = size\r\n self.deterministic = deterministic\r\n self.random = random.Random()\r\n self.disable_wavs = disable_wavs\r\n self.should_augment = augmentation_config is not None\r\n if self.should_augment:\r\n self.aug_options = augmentation_config['options']\r\n self.aug_probs = augmentation_config['probs']\r\n print('Creating [{}] dataset:'.format(self.name))\r\n name_path = Path(os.path.join(cache_dir, name))\r\n if not name_path.exists():\r\n os.mkdir(name_path)\r\n cache_path = Path(os.path.join(cache_dir, name, 'labels_cache'))\r\n if not name_path.exists():\r\n os.mkdir(cache_path)\r\n config_path = f'**/data_configs/{source}/*.json'\r\n self.files_with_labels = self.do_with_pickle_cache(lambda: self.get_files_with_labels(cache_dir, config_path),\r\n os.path.join(cache_dir, name, 'files_with_labels.pickle'))\r\n if self.size is None:\r\n self.size = len(self.files_with_labels)\r\n\r\n self.label_options_weights = self.do_with_pickle_cache(self.get_all_label_options_weights,\r\n os.path.join(cache_dir, name, 'label_options_weights.pickle'))\r\n base_prob = self.aug_probs['prob']\r\n sub_probs = self.aug_probs['sub_probs']\r\n for 
augmentation, augmentation_labels in self.aug_options.items():\r\n sub_prob = sub_probs[augmentation]['prob']\r\n option_prob = 1.0/len(augmentation_labels)\r\n self.label_options_weights[augmentation] = {'none': base_prob*(1-sub_prob), **{\r\n label: base_prob*sub_prob*option_prob for label in augmentation_labels\r\n }}\r\n\r\n all_label_groups = {key: [*label_groups[key], *augmentation_label_groups[key]] for key in label_groups.keys()}\r\n self.label_options_weights_groups = {\r\n key: {label: self.label_options_weights[label] for label in label_group}\r\n for key, label_group in all_label_groups.items()\r\n }\r\n\r\n self.label_options_groups = {\r\n key: {label: tuple(value.keys()) for label, value in label_group.items()}\r\n for key, label_group in self.label_options_weights_groups.items()\r\n }\r\n\r\n self.label_options = {\r\n key: tuple(label_group.keys())\r\n for key, label_group in self.label_options_weights.items()\r\n }\r\n\r\n self.label_weights_groups = {\r\n key: {label: tuple(value.values()) for label, value in label_group.items()}\r\n for key, label_group in self.label_options_weights_groups.items()\r\n }\r\n\r\n self.label_weights = {\r\n key: tuple(label_group.values())\r\n for key, label_group in self.label_options_weights.items()\r\n }\r\n\r\n if self.should_augment:\r\n self.aug_methods = {\r\n 'noise': NoiseAugmentor(self.data_dir, self.label_options).augment,\r\n 'rir': RirAugmentor(self.data_dir).augment,\r\n 'reverb': ReverbAugmentor(self.sampling_rate).augment,\r\n 'lowpass': LowpassAugmentor(self.sampling_rate).augment,\r\n 'highpass': HighpassAugmentor(self.sampling_rate).augment,\r\n 'codec': CodecAugmentor(self.sampling_rate).augment,\r\n 'hilbert': HilbertAugmentor(self.sampling_rate).augment\r\n }\r\n\r\n print('Dataset [{}] is ready!\\n'.format(self.name))\r\n\r\n @staticmethod\r\n def do_with_pickle_cache(func, pickle_path):\r\n pickle_path = Path(pickle_path)\r\n if pickle_path.exists():\r\n with open(pickle_path, 'rb') as pickle_file:\r\n result = pickle.load(pickle_file)\r\n else:\r\n if not pickle_path.parent.exists():\r\n pickle_path.parent.mkdir(parents=True, exist_ok=True)\r\n result = func()\r\n with open(pickle_path, 'wb') as pickle_file:\r\n pickle.dump(result, pickle_file)\r\n return result\r\n\r\n @staticmethod\r\n def create_pickle_cache(func, pickle_path):\r\n pickle_path = Path(pickle_path)\r\n if not pickle_path.exists():\r\n if not pickle_path.parent.exists():\r\n pickle_path.parent.mkdir(parents=True, exist_ok=True)\r\n result = func()\r\n with open(pickle_path, 'wb') as pickle_file:\r\n pickle.dump(result, pickle_file)\r\n\r\n def get_all_label_options_weights(self):\r\n all_label_options = {}\r\n for col in labels_to_use:\r\n all_label_options[col] = dict(self.files_with_labels[col].value_counts(normalize=True))\r\n\r\n with Pool(16) as pool:\r\n for label in timed_labels_to_use:\r\n all_label_options[label] = dict()\r\n results = pool.map(self.get_timed_labels_value_counts_by_index, range(len(self)))\r\n rows_to_remove = []\r\n for i, result in enumerate(results):\r\n if isinstance(result, Exception):\r\n rows_to_remove.append(i)\r\n else:\r\n for label in timed_labels_to_use:\r\n for key, value in result[label].items():\r\n if key not in all_label_options[label]:\r\n all_label_options[label][key] = 0\r\n all_label_options[label][key] += value\r\n for label in timed_labels_to_use:\r\n for key in all_label_options[label]:\r\n all_label_options[label][key] /= len(results)\r\n if len(rows_to_remove) > 0:\r\n self.files_with_labels = 
self.files_with_labels.drop(rows_to_remove).reset_index(drop=True)\r\n pickle_path = os.path.join(self.cache_dir, self.source, 'files_with_labels.pickle')\r\n with open(pickle_path, 'wb') as pickle_file:\r\n pickle.dump(self.files_with_labels, pickle_file)\r\n all_label_options_weights = all_label_options\r\n return all_label_options_weights\r\n\r\n def get_timed_labels_value_counts_by_index(self, i):\r\n try:\r\n labels, timed_labels = self.get_timed_labels(i)\r\n return self.get_labels_value_counts(timed_labels)\r\n except Exception as e:\r\n print('Item {} failed to get timed labels: [{}]'.format(i, e))\r\n return e\r\n\r\n def get_labels_value_counts(self, timed_labels):\r\n result = {}\r\n for label in timed_labels_to_use:\r\n result[label] = dict(timed_labels[label]['text'].value_counts(normalize=True))\r\n return result\r\n\r\n def get_files_with_labels(self, main_dir, config_path):\r\n main_dir = Path(main_dir)\r\n subdir_list = [path for path in main_dir.glob('*/')]\r\n results = None\r\n for subdir in subdir_list:\r\n try:\r\n config_files = [path for path in subdir.glob(config_path)]\r\n for config_file in config_files:\r\n config = config_file.read_text()\r\n config_dict = json.loads(config)\r\n print('Loading [{}]...'.format(config_dict['name']))\r\n complex_data = parse_complex_data(subdir, config_dict['config'], config_dict['result'])\r\n print('[{}] loaded successfully!'.format(config_dict['name']))\r\n if results is None:\r\n results = complex_data\r\n else:\r\n results = pd.concat([results, complex_data], axis=0, ignore_index=True)\r\n except Exception as e:\r\n print(e)\r\n print('Data config was not found or invalid, moving on.')\r\n continue\r\n\r\n return results\r\n\r\n def get_timed_labels(self, index):\r\n all_labels = self.files_with_labels.iloc[[index]].squeeze()\r\n labels = self.get_labels(index)\r\n timed_labels = parse_textgrid(all_labels['subdir'], all_labels['textgrid'])\r\n return labels, {key: value for key, value in timed_labels.items() if key in timed_labels_to_use}\r\n\r\n def get_labels(self, index):\r\n labels = self.files_with_labels[labels_to_use].iloc[[index]].squeeze()\r\n return labels\r\n\r\n def get_grouped_labels(self, index):\r\n labels = self.get_labels(index)\r\n grouped_labels = {group: labels.filter(group_labels).to_dict() for group, group_labels in label_groups.items()}\r\n return grouped_labels\r\n\r\n def __getitem__(self, index):\r\n if self.deterministic:\r\n self.random.seed(index)\r\n if self.size < len(self.files_with_labels):\r\n index = (int(len(self.files_with_labels) / PHI) * index) % len(self.files_with_labels)\r\n\r\n return self.get_augmented_item(index)\r\n\r\n def get_augmented_item(self, index):\r\n wav, wav_path, time_labels, grouped_labels = self.get_cut_item(index)\r\n if self.should_augment:\r\n wav, time_labels, grouped_labels = self.augment_item(wav, time_labels, grouped_labels)\r\n return wav, wav_path, time_labels, grouped_labels\r\n\r\n def create_pickle_label(self, index):\r\n return self.create_pickle_cache(\r\n lambda: self.get_fresh_label(index),\r\n os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))\r\n )\r\n\r\n def get_pickle_label(self, index):\r\n return self.do_with_pickle_cache(\r\n lambda: self.get_fresh_label(index),\r\n os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))\r\n )\r\n\r\n def get_fresh_label(self, index):\r\n labels, timed_labels = self.get_timed_labels(index)\r\n segmented_timed_labels = 
self.get_segmented_timed_labels(timed_labels)\r\n all_segmented_labels = self.add_segmented_labels(segmented_timed_labels, labels)\r\n segmented_tensor = self.convert_segmented_labels_to_tensor(all_segmented_labels, label_groups)\r\n return segmented_tensor\r\n\r\n def __len__(self):\r\n return min(len(self.files_with_labels), self.size)\r\n\r\n def get_segmented_timed_labels(self, timed_labels):\r\n return pd.concat(\r\n [\r\n self.get_segmented_timed_labels_for_single(label_name, timed_label)\r\n for label_name, timed_label in timed_labels.items()\r\n ],\r\n axis=1\r\n )\r\n\r\n def get_segmented_timed_labels_for_single(self, label_name, timed_label):\r\n result_rows = []\r\n time_interval = self.embedding_size / self.sampling_rate\r\n current_index = 0\r\n current_time = 0\r\n while current_index < len(timed_label):\r\n result_rows.append({label_name: timed_label.iloc[[current_index]].squeeze()['text']})\r\n current_time += time_interval\r\n if current_time > timed_label.iloc[[current_index]].squeeze()['end']:\r\n current_index += 1\r\n return pd.DataFrame(result_rows)\r\n\r\n def add_segmented_labels(self, segmented_timed_labels, labels):\r\n for col in labels.axes[0]:\r\n segmented_timed_labels[col] = labels[col]\r\n return segmented_timed_labels\r\n\r\n def convert_segmented_labels_to_tensor(self, all_segmented_labels, given_label_groups):\r\n all_tensors = {}\r\n for key, labels in given_label_groups.items():\r\n tensors = {}\r\n for col in labels:\r\n if col in all_segmented_labels:\r\n index_tensor = torch.tensor(\r\n all_segmented_labels[col].apply(lambda x: self.label_options[col].index(x)).tolist(),\r\n dtype=torch.int64\r\n )\r\n tensors[col] = index_tensor\r\n all_tensors[key] = tensors\r\n return all_tensors\r\n\r\n def get_wav(self, index):\r\n wav_path = get_path_by_glob(self.cache_dir, self.files_with_labels.iloc[[index]].squeeze()['wav'])\r\n if self.disable_wavs:\r\n return torch.zeros((self.segment_length,)), str(wav_path)\r\n audio, sampling_rate = load_wav(wav_path)\r\n\r\n if sampling_rate != self.sampling_rate:\r\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\r\n sampling_rate, self.sampling_rate))\r\n\r\n audio = torch.FloatTensor(audio)\r\n return audio.squeeze(0), str(wav_path)\r\n\r\n def get_cut_item(self, index):\r\n wav, wav_path = self.get_wav(index)\r\n pickle_label_groups = self.get_pickle_label(index)\r\n length = wav.size(0)\r\n embedded_segment_length = self.segment_length // self.embedding_size\r\n embedded_length = min(length // self.embedding_size,\r\n next(iter(next(iter(pickle_label_groups.values())).values())).size(0))\r\n trimed_length = embedded_length * self.embedding_size\r\n trimed_start = 0\r\n if len(wav) > trimed_length:\r\n wav = wav[trimed_start:trimed_start + trimed_length]\r\n length = wav.size(0)\r\n # print(length, self.segment_length, embedded_length, embedded_segment_length)\r\n\r\n if length >= self.segment_length:\r\n max_embedded_start = embedded_length - embedded_segment_length\r\n embedded_start = self.random.randint(0, max_embedded_start)\r\n start = embedded_start * self.embedding_size\r\n # print('trim: ', start, embedded_start)\r\n else:\r\n embedded_padding = embedded_segment_length - embedded_length\r\n prefix_embedded_padding = self.random.randint(0, embedded_padding)\r\n postfix_embedded_padding = embedded_padding - prefix_embedded_padding\r\n padding = embedded_padding * self.embedding_size\r\n prefix_padding = prefix_embedded_padding * self.embedding_size\r\n postfix_padding = 
postfix_embedded_padding * self.embedding_size\r\n\r\n for key, group in pickle_label_groups.items():\r\n for label, label_item in group.items():\r\n label_item = label_item[0:embedded_length]\r\n if length >= self.segment_length:\r\n cut_label_item = label_item[embedded_start:embedded_start + embedded_segment_length]\r\n else:\r\n cut_label_item = torch.nn.functional.pad(label_item,\r\n (prefix_embedded_padding, postfix_embedded_padding),\r\n 'constant')\r\n group[label] = cut_label_item\r\n\r\n if length >= self.segment_length:\r\n wav = wav[start:start + self.segment_length]\r\n else:\r\n wav = torch.nn.functional.pad(wav, (prefix_padding, postfix_padding), 'constant')\r\n\r\n grouped_labels = self.get_grouped_labels(index)\r\n return wav, wav_path, pickle_label_groups, grouped_labels\r\n\r\n def augment_item(self, cut_wav, cut_label, grouped_labels):\r\n options = self.aug_options\r\n probs = self.aug_probs\r\n methods = self.aug_methods\r\n (length,) = next(iter(next(iter(cut_label.values())).values())).size()\r\n augmented_wav = cut_wav\r\n augmented_label = pd.DataFrame(['none'] * length, columns=['none'])\r\n should_augment = probs['prob'] > self.random.random()\r\n for augmentation in options.keys():\r\n augmented_wav, augmented_label, value = self.augment_item_with(augmented_wav, augmented_label, cut_label,\r\n methods, options,\r\n probs, augmentation, should_augment)\r\n for section, current_label_groups in augmentation_label_groups.items():\r\n if augmentation in current_label_groups:\r\n grouped_labels[section][augmentation] = value\r\n augmentation_tensors = self.convert_segmented_labels_to_tensor(augmented_label, augmentation_label_groups)\r\n for key in cut_label.keys():\r\n current_augmentation = augmentation_tensors[key]\r\n for label, value in current_augmentation.items():\r\n cut_label[key][label] = value\r\n return augmented_wav, cut_label, grouped_labels\r\n\r\n def augment_item_with(self, augmented_wav, augmented_label, cut_label, methods, options, probs, aug_type,\r\n should=True):\r\n value = 'none'\r\n probs = probs['sub_probs'][aug_type]\r\n values = options[aug_type]\r\n aug_method = methods[aug_type]\r\n if should and probs['prob'] > self.random.random():\r\n value = self.random.choice(values)\r\n augmented_label, augmented_wav, value = aug_method(\r\n self.random,\r\n augmented_label,\r\n cut_label,\r\n augmented_wav,\r\n value,\r\n self.disable_wavs\r\n )\r\n augmented_label[aug_type] = value\r\n return augmented_wav, augmented_label, value\r\n" ]
[ [ "torch.div", "torch.cat", "torch.randn", "torch.nn.functional.avg_pool2d", "torch.nn.functional.one_hot", "torch.nn.functional.pad" ], [ "torch.cat" ], [ "pandas.concat", "torch.zeros", "pandas.DataFrame", "torch.FloatTensor", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
janmaltel/pytetris
[ "a60475c89ed0fd9666c6e0f1df42c63f62a5b2aa" ]
[ "tetris/board.py" ]
[ "import numpy as np\nimport numba\nfrom numba import njit, jitclass, float64, bool_, int64\n\n\n# Need to specify types for numba's @jitclass\nspec = [\n ('representation', bool_[:, :]),\n ('lowest_free_rows', int64[:]),\n ('changed_lines', int64[:]),\n ('pieces_per_changed_row', int64[:]),\n ('landing_height_bonus', float64),\n ('num_features', int64),\n ('feature_type', numba.types.string),\n ('num_rows', int64),\n ('num_columns', int64),\n ('n_cleared_lines', int64),\n ('anchor_row', int64),\n ('cleared_rows_relative_to_anchor', bool_[:]),\n ('features_are_calculated', bool_),\n ('features', float64[:]),\n ('is_terminal_state', bool_)\n]\n\n\n@jitclass(spec)\nclass Board:\n def __init__(self,\n representation,\n lowest_free_rows,\n changed_lines, #=np.array([0], dtype=np.int64),\n pieces_per_changed_row, #=np.array([0], dtype=np.int64),\n landing_height_bonus, # =0.0,\n num_features, #=8,\n feature_type, #=\"bcts\",\n is_terminal_state, # this is useful to generate a \"terminal state\"\n has_overlapping_fields=False\n ):\n self.is_terminal_state = is_terminal_state\n\n if not self.is_terminal_state:\n self.representation = representation\n self.lowest_free_rows = lowest_free_rows\n self.num_rows, self.num_columns = representation.shape\n if has_overlapping_fields:\n self.num_rows -= 4 # representation input in this case has e.g., 14 rows for a 10x10 board... => put it back to 10\n self.pieces_per_changed_row = pieces_per_changed_row\n self.landing_height_bonus = landing_height_bonus\n self.num_features = num_features\n self.feature_type = feature_type # \"bcts\"\n self.n_cleared_lines = 0 # Gets updated in self.clear_lines()\n self.anchor_row = changed_lines[0]\n self.cleared_rows_relative_to_anchor = self.clear_lines(changed_lines)\n self.features_are_calculated = False\n self.features = np.zeros(self.num_features, dtype=np.float64)\n if has_overlapping_fields:\n self.is_terminal_state = check_terminal(self.representation, self.num_rows)\n self.representation = self.representation[:self.num_rows, ]\n\n def get_features(self, addRBFandIntercept=False):\n # RBF = radial basis function features (see Scherrer et al. 
2015)\n if not self.features_are_calculated:\n if self.feature_type == \"bcts\":\n self.calc_bcts_features()\n self.features_are_calculated = True\n else:\n raise ValueError(\"Feature type must be either bcts or standardized_bcts or simple or super_simple\")\n features = self.features\n if addRBFandIntercept:\n features = np.concatenate((\n np.array([1.]),\n features,\n np.exp(-(np.mean(self.lowest_free_rows) - np.arange(5) * self.num_rows / 4) ** 2 / (2 * (self.num_rows / 5) ** 2))\n ))\n return features\n\n def get_features_order_and_direct(self, direct_by, order_by, addRBF=False):\n if not self.features_are_calculated:\n if self.feature_type == \"bcts\":\n self.calc_bcts_features()\n self.features_are_calculated = True\n else:\n raise ValueError(\"Feature type must be either bcts or standardized_bcts or simple or super_simple\")\n out = self.features * direct_by # .copy()\n out = out[order_by]\n if addRBF:\n out = np.concatenate((\n out,\n np.exp(-(np.mean(self.lowest_free_rows) - np.arange(5) * self.num_rows / 4) ** 2 / (2 * (self.num_rows / 5) ** 2))\n ))\n return out\n\n def get_features_and_direct(self, direct_by, addRBF=False):\n if not self.features_are_calculated:\n if self.feature_type == \"bcts\":\n self.calc_bcts_features()\n self.features_are_calculated = True\n else:\n raise ValueError(\"Feature type must be either bcts or standardized_bcts or simple or super_simple\")\n out = self.features * direct_by # .copy()\n if addRBF:\n out = np.concatenate((\n out,\n np.exp(-(np.mean(self.lowest_free_rows) - np.arange(5) * self.num_rows / 4) ** 2 / (2 * (self.num_rows / 5) ** 2))\n ))\n return out\n\n def clear_lines(self, changed_lines):\n # This function is more convoluted than it should be because of missing numba/numpy functionality as of writing this function.\n num_columns = self.num_columns\n row_sums = np.sum(self.representation[changed_lines, :], axis=1)\n is_full = (row_sums == num_columns)\n full_lines = np.where(is_full)[0]\n n_cleared_lines = len(full_lines)\n if n_cleared_lines > 0:\n # print(self)\n representation = self.representation\n lowest_free_rows = self.lowest_free_rows\n lines_to_clear = changed_lines[full_lines].astype(np.int64)\n mask_keep = np.ones(len(representation), dtype=np.bool_)\n mask_keep[lines_to_clear] = False\n new_cols = np.zeros((n_cleared_lines, num_columns), dtype=np.bool_)\n representation = np.vstack((representation[mask_keep], new_cols))\n for col_ix in range(num_columns): # col_ix = 0\n old_lowest_free_row = lowest_free_rows[col_ix]\n if old_lowest_free_row > lines_to_clear[-1] + 1:\n lowest_free_rows[col_ix] -= n_cleared_lines\n else:\n lowest = 0\n for row_ix in range(old_lowest_free_row - n_cleared_lines - 1, -1, -1):\n if representation[row_ix, col_ix] == 1:\n lowest = row_ix + 1\n break\n lowest_free_rows[col_ix] = lowest\n self.lowest_free_rows = lowest_free_rows\n self.representation = representation\n\n self.n_cleared_lines = n_cleared_lines\n return is_full\n\n def calc_bcts_features(self):\n rows_with_holes_set = {100000} # numba does not like initialization of an empty set. 
The 100000 is removed at the end\n representation = self.representation\n num_rows, num_columns = representation.shape\n lowest_free_rows = self.lowest_free_rows\n col_transitions = 0\n row_transitions = 0\n holes = 0\n hole_depths = 0\n cumulative_wells = 0\n # row_transitions = 0\n for col_ix, lowest_free_row in enumerate(lowest_free_rows):\n # There is at least one column_transition from the highest full cell (or the bottom which is assumed to be full) to \"the top\".\n col_transitions += 1\n if col_ix == 0:\n local_well_streak = 0\n if lowest_free_row > 0:\n col = representation[:lowest_free_row, col_ix]\n cell_below = 1\n\n # Needed for hole_depth\n # TODO: Optimize... only count the first time when an actual hole is found\n number_of_full_cells_above = numba_sum_int(col)\n\n for row_ix, cell in enumerate(col):\n if cell == 0:\n # Holes\n holes += 1\n rows_with_holes_set.add(row_ix)\n hole_depths += number_of_full_cells_above\n\n # Column transitions\n if cell_below:\n col_transitions += 1\n\n # Row transitions and wells\n # Because col_ix == 0, all left_cells are 1\n # row_transitions += 1\n row_transitions += 1\n if representation[row_ix, col_ix + 1]: # if cell to the right is full\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n\n else: # cell is 1!\n local_well_streak = 0\n\n # Keep track of full cells above for hole_depth-feature\n number_of_full_cells_above -= 1\n\n # Column transitions\n if not cell_below:\n col_transitions += 1\n\n # Define 'cell_below' for next (higher!) cell.\n cell_below = cell\n\n # Check wells until lowest_free_row_right\n # Check transitions until lowest_free_row_left\n max_well_possibility = lowest_free_rows[col_ix + 1]\n if max_well_possibility > lowest_free_row:\n for row_ix in range(lowest_free_row, max_well_possibility):\n if representation[row_ix, col_ix + 1]: # if cell to the right is full\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n # # Add row transitions for each empty cell above lowest_free_row\n row_transitions += (num_rows - lowest_free_row)\n\n elif col_ix == num_columns - 1:\n local_well_streak = 0\n if lowest_free_row > 0:\n col = representation[:lowest_free_row, col_ix]\n cell_below = 1\n\n # Needed for hole_depth\n number_of_full_cells_above = numba_sum_int(col)\n\n for row_ix, cell in enumerate(col):\n if cell == 0:\n # Holes\n holes += 1\n rows_with_holes_set.add(row_ix)\n hole_depths += number_of_full_cells_above\n\n # Column transitions\n if cell_below:\n col_transitions += 1\n\n # Wells and row transitions\n # Because this is the last column (the right border is \"full\") and cell == 0:\n row_transitions += 1\n if representation[row_ix, col_ix - 1]: # if cell to the left is full\n row_transitions += 1\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n\n else: # cell is 1!\n local_well_streak = 0\n\n # Keep track of full cells above for hole_depth-feature\n number_of_full_cells_above -= 1\n\n # Column transitions\n if not cell_below:\n col_transitions += 1\n\n # Row transitions\n cell_left = representation[row_ix, col_ix - 1]\n if not cell_left:\n row_transitions += 1\n\n # Define 'cell_below' for next (higher!) 
cell.\n cell_below = cell\n\n # Check wells until minimum(lowest_free_row_left, lowest_free_row_right)\n # Check transitions until lowest_free_row_left\n max_well_possibility = lowest_free_rows[col_ix - 1]\n if max_well_possibility > lowest_free_row:\n for row_ix in range(lowest_free_row, max_well_possibility):\n if representation[row_ix, col_ix - 1]: # if cell to the left is full\n row_transitions += 1\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n # # Add row transitions from last column to border\n row_transitions += (num_rows - lowest_free_row)\n else:\n local_well_streak = 0\n if lowest_free_row > 0:\n col = representation[:lowest_free_row, col_ix]\n cell_below = 1\n\n # Needed for hole_depth\n number_of_full_cells_above = numba_sum_int(col)\n\n for row_ix, cell in enumerate(col):\n if cell == 0:\n # Holes\n holes += 1\n rows_with_holes_set.add(row_ix)\n hole_depths += number_of_full_cells_above\n\n # Column transitions\n if cell_below:\n col_transitions += 1\n\n # Wells and row transitions\n cell_left = representation[row_ix, col_ix - 1]\n if cell_left:\n row_transitions += 1\n cell_right = representation[row_ix, col_ix + 1]\n if cell_right:\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n else:\n local_well_streak = 0\n\n else: # cell is 1!\n local_well_streak = 0\n # Keep track of full cells above for hole_depth-feature\n number_of_full_cells_above -= 1\n\n # Column transitions\n if not cell_below:\n col_transitions += 1\n\n # Row transitions\n cell_left = representation[row_ix, col_ix - 1]\n if not cell_left:\n row_transitions += 1\n\n # Define 'cell_below' for next (higher!) cell.\n cell_below = cell\n # Check wells until minimum(lowest_free_row_left, lowest_free_row_right)\n # Check transitions until lowest_free_row_left\n lowest_free_row_left = lowest_free_rows[col_ix - 1]\n lowest_free_row_right = lowest_free_rows[col_ix + 1]\n max_well_possibility = np.minimum(lowest_free_row_left, lowest_free_row_right)\n\n # Weird case distinction because max_well_possibility always \"includes\" lowest_free_row_left\n # but lowest_free_row_left can be higher than max_well_possibility. 
Don't want to double count.\n if max_well_possibility > lowest_free_row:\n for row_ix in range(lowest_free_row, max_well_possibility):\n cell_left = representation[row_ix, col_ix - 1]\n if cell_left:\n row_transitions += 1\n cell_right = representation[row_ix, col_ix + 1]\n if cell_right:\n local_well_streak += 1\n cumulative_wells += local_well_streak\n else:\n local_well_streak = 0\n else:\n local_well_streak = 0\n if lowest_free_row_left > max_well_possibility:\n for row_ix in range(max_well_possibility, lowest_free_row_left):\n cell_left = representation[row_ix, col_ix - 1]\n if cell_left:\n row_transitions += 1\n elif lowest_free_row_left > lowest_free_row:\n for row_ix in range(lowest_free_row, lowest_free_row_left):\n cell_left = representation[row_ix, col_ix - 1]\n if cell_left:\n row_transitions += 1\n\n rows_with_holes_set.remove(100000)\n rows_with_holes = len(rows_with_holes_set)\n eroded_pieces = numba_sum_int(self.cleared_rows_relative_to_anchor * self.pieces_per_changed_row)\n # n_cleared_lines = numba_sum_int(self.cleared_rows_relative_to_anchor)\n eroded_piece_cells = eroded_pieces * self.n_cleared_lines\n landing_height = self.anchor_row + self.landing_height_bonus\n self.features = np.array([rows_with_holes, col_transitions, holes, landing_height,\n cumulative_wells, row_transitions, eroded_piece_cells, hole_depths])\n\n\n@njit(cache=False)\ndef generate_empty_board(num_rows, num_columns):\n return Board(np.zeros((num_rows, num_columns), dtype=np.bool_), # representation=\n np.zeros(num_columns, dtype=np.int64), # lowest_free_rows=\n np.array([0], dtype=np.int64), # changed_lines=\n np.array([0], dtype=np.int64), # pieces_per_changed_row=\n 0.0, # landing_height_bonus=\n 8, # num_features=\n \"bcts\", # feature_type=\n False, # is_terminal_state=\n False # has_overlapping_fields=\n )\n\n\n@njit(cache=False)\ndef generate_terminal_board():\n return Board(np.ones((2, 2), dtype=np.bool_), # representation=\n np.zeros(2, dtype=np.int64), # lowest_free_rows=\n np.array([0], dtype=np.int64), # changed_lines=\n np.array([0], dtype=np.int64), # pieces_per_changed_row=\n 0.0, # landing_height_bonus=\n 8, # num_features=\n \"bcts\", # feature_type=\n True, # is_terminal_state=\n False # has_overlapping_fields=\n )\n\nspecTerm = [\n ('is_terminal_state', bool_),\n]\n\n\n@jitclass(specTerm)\nclass TerminalBoard:\n def __init__(self):\n self.terminal_state = True\n\n\n@njit(cache=False)\ndef check_terminal(representation, num_rows):\n is_terminal = False\n for ix in representation[num_rows]:\n if ix:\n is_terminal = True\n break\n return is_terminal\n\n\n@njit(fastmath=True, cache=False)\ndef numba_sum_int(int_arr):\n acc = 0\n for i in int_arr:\n acc += i\n return acc\n\n\n" ]
[ [ "numpy.minimum", "numpy.arange", "numpy.vstack", "numpy.ones", "numpy.mean", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdmccabe/netrd
[ "f703c19b02f42c9f54bcab57014381da11dd58da", "f703c19b02f42c9f54bcab57014381da11dd58da" ]
[ "netrd/distance/distributional_nbd.py", "netrd/dynamics/branching_process.py" ]
[ "\"\"\"\ndistributional_nbd.py\n------\n\nDistributional Non-backtracking Spectral Distance.\n\n\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.spatial.distance import euclidean, chebyshev\nfrom ..utilities.graph import unweighted\n\nfrom .base import BaseDistance\n\n\nclass DistributionalNBD(BaseDistance):\n \"\"\"\n Distributional Non-backtracking Spectral Distance.\n\n Computes the distance between two graphs using the empirical spectral density\n of the non-backtracking operator.\n\n See:\n \"Graph Comparison via the Non-backtracking Spectrum\"\n A. Mellor & A. Grusovin\n arXiv:1812.05457 / 10.1103/PhysRevE.99.052309\n\n \"\"\"\n\n VECTOR_DISTANCES = {'euclidean': euclidean, 'chebyshev': chebyshev}\n\n @unweighted\n def dist(\n self,\n G1,\n G2,\n sparse=False,\n shave=True,\n keep_evals=True,\n k=None,\n vector_distance='euclidean',\n **kwargs\n ):\n \"\"\"\n Distributional Non-backtracking Spectral Distance.\n\n Parameters\n ----------\n\n G1, G2 (nx.Graph)\n The two graphs to compare.\n\n sparse (bool)\n If sparse, matrices and eigenvalues found using sparse methods.\n If sparse, parameter 'k' should also be specified.\n Default: False\n\n k (int)\n The number of largest eigenvalues to be calculated for the\n spectral density.\n\n vector_distance (str)\n The distance measure used to compare two empirical distributions.\n Currently available are 'euclidean' and 'chebyshev', implemented\n using SciPy.\n Default: 'euclidean'\n\n keep_evals (bool)\n If True, stores the eigenvalues of the reduced non-backtracking\n matrix in self.results['evals']\n Default: False\n\n\n Returns\n -------\n float\n The distance between `G1` and `G2`\n\n \"\"\"\n B1 = reduced_hashimoto(G1, shave=shave, sparse=sparse, **kwargs)\n B2 = reduced_hashimoto(G2, shave=shave, sparse=sparse, **kwargs)\n\n # Find spectrum\n evals1 = nb_eigenvalues(B1, k=k)\n evals2 = nb_eigenvalues(B2, k=k)\n\n # Save spectrum\n if keep_evals:\n self.results['eigenvalues'] = (evals1, evals2)\n\n # Find rescaled spectral density\n distribution_1 = spectral_distribution(evals1)\n distribution_2 = spectral_distribution(evals2)\n\n # Compute distance\n distance_metric = self.__class__.VECTOR_DISTANCES[vector_distance]\n\n return distance_metric(distribution_1, distribution_2)\n\n\ndef shave_graph(graph):\n \"\"\"\n Returns the two-core of a graph.\n\n Iteratively remove the nodes of degree 0 or 1, until all nodes have\n degree at least 2.\n\n NOTE: duplicated from \"nbd.py\" to avoid excessive imports.\n\n \"\"\"\n core = graph.copy()\n while True:\n to_remove = [node for node, neighbors in core.adj.items() if len(neighbors) < 2]\n core.remove_nodes_from(to_remove)\n if len(to_remove) == 0:\n break\n return core\n\n\ndef pseudo_hashimoto(graph):\n \"\"\"\n Return the pseudo-Hashimoto matrix.\n\n The pseudo Hashimoto matrix of a graph is the block matrix defined as\n B' = [0 D-I]\n [-I A ]\n\n Where D is the degree-diagonal matrix, I is the identity matrix and A\n is the adjacency matrix. 
The eigenvalues of B' are always eigenvalues\n of B, the non-backtracking or Hashimoto matrix.\n\n Parameters\n ----------\n\n graph (nx.Graph): A NetworkX graph object.\n\n Returns\n -------\n\n A sparse matrix in csr format.\n\n NOTE: duplicated from \"nbd.py\" to avoid excessive imports.\n\n \"\"\"\n # Note: the rows of nx.adjacency_matrix(graph) are in the same order as\n # the list returned by graph.nodes().\n degrees = graph.degree()\n degrees = sp.diags([degrees[n] for n in graph.nodes()])\n adj = nx.adjacency_matrix(graph)\n ident = sp.eye(graph.order())\n pseudo = sp.bmat([[None, degrees - ident], [-ident, adj]])\n return pseudo.asformat('csr')\n\n\ndef reduced_hashimoto(graph, shave=True, sparse=True):\n \"\"\"\n\n\n Parameters\n ----------\n\n shave (bool)\n If True, first reduce the graph to its two-core.\n Else graph processed in its entirety.\n\n sparse (bool)\n If True, returned matrix will be sparse,\n else it will be dense.\n\n Returns\n -------\n\n np.ndarray/sp.csr_matrix\n The reduced Hashimoto Matrix.\n\n \"\"\"\n\n if shave:\n graph = shave_graph(graph)\n if len(graph) == 0:\n # We can provide a workaround for this case, however it is best\n # that it is brought to the attention of the user.\n raise NotImplementedError(\n \"Graph two-core is empty: non-backtracking methods unsuitable.\"\n )\n\n B = pseudo_hashimoto(graph)\n\n if not sparse:\n B = B.todense()\n\n return B\n\n\ndef nb_eigenvalues(B, k=None, **kwargs):\n \"\"\"\n Calculates the eigenvalues of a matrix B.\n\n Detects whether B is sparse/dense and uses the appropriate method.\n If B is sparse then parameter 'k' should be provided.\n \"\"\"\n\n if isinstance(B, np.ndarray):\n return np.linalg.eigvals(B)\n\n elif isinstance(B, sp.csr_matrix):\n random_state = np.random.RandomState(\n 1\n ) # Ensures that eigenvalue calculation is deterministic.\n return sp.linalg.eigs(\n B, k=k, v0=random_state.random(B.shape[0]), return_eigenvectors=False\n )\n else:\n raise Exception(\"Matrix must be of type np.ndarray or scipy.sparse.csr\")\n\n\ndef logr(r, rmax):\n \"\"\"\n Logarithm to the base r.\n\n NOTE:Maps zero to zero as a special case.\n \"\"\"\n\n if r == 0:\n return 0\n return np.log(r) / np.log(rmax)\n\n\ndef spectral_distribution(points, cumulative=True):\n \"\"\"\n Returns the distribution of complex values (in r,theta-space).\n \"\"\"\n\n points = np.array([(np.abs(z), np.angle(z)) for z in points])\n r, theta = np.split(points, 2, axis=1)\n\n r = np.array([logr(x, r.max()) for x in r])\n\n Z, R, THETA = np.histogram2d(\n x=r[:, 0],\n y=theta[:, 0],\n bins=(np.linspace(0, 1, 101), np.linspace(0, np.pi, 101)),\n )\n\n if cumulative:\n Z = Z.cumsum(axis=0).cumsum(axis=1)\n Z = Z / Z.max()\n\n return Z.flatten()\n", "\"\"\"\nbranching_process.py\n--------------------\n\nAdapted from:\nLevina, Anna, and Viola Priesemann. 
\"Subsampling scaling.\"\nNature communications 8 (2017): 15140.\nat [this link](https://www.nature.com/articles/ncomms15140)\n\nauthor: Brennan Klein\nemail: brennanjamesklein at gmail dot com\nsubmitted as part of the 2019 NetSI Collabathon\n\"\"\"\n\nfrom .base import BaseDynamics\nimport networkx as nx\nimport numpy as np\n\n\nclass BranchingModel(BaseDynamics):\n \"\"\"A sand-pile-like branching process.\"\"\"\n\n def simulate(\n self,\n G,\n L,\n initial_fraction=0.1,\n m=0.9975,\n target_Ahat=0.2,\n distribution_type='unif',\n scale=0.95,\n noise=True,\n ):\n r\"\"\"Simulate a (sand-pile-like) branching process dynamics .\n\n The results dictionary also stores the ground truth network as\n `'ground_truth'`.\n\n Parameters\n ----------\n\n G (nx.Graph)\n directed or undirected ground truth graph\n\n L (int)\n desired length of time series\n\n initial_fraction (float)\n fraction of nodes that start as active\n\n m (float)\n branching ratio of the dynamical process. :math:`m=1.0` means\n the system will be at criticality\n\n target_Ahat (float)\n desired average activity. This will ensure the process does not\n reach a stationary state and will always have some external\n drive.\n\n num_edges (int)\n the length of the cache, which should correspond to the\n combination of all possible activity over the simulation.\n\n distribution_type (str)\n string describing which type of random numbers\n\n scale (float)\n scale for how likely nodes are to topple\n\n noise (bool)\n add nonzero values to the time series\n\n Returns\n -------\n TS (np.ndarray)\n an :math:`N \\times L` time series\n\n References\n ----------\n\n .. [1] Levina, Anna, and Viola Priesemann. \"Subsampling scaling.\"\n Nature communications 8 (2017) 15140.\n https://www.nature.com/articles/ncomms15140\n\n \"\"\"\n N = G.number_of_nodes() # number of nodes\n M = G.number_of_edges() # number of edges\n A = nx.to_numpy_array(G) # adjacency matrix\n W = np.zeros(A.shape) # transition probability matrix (for weights)\n for i in range(A.shape[0]):\n if A[i].sum() > 0:\n W[i] = A[i] / A[i].sum()\n Gw = nx.from_numpy_array(W)\n G = nx.to_directed(Gw) # convert back into a graph object\n\n TS = initialize_history(N, L, initial_fraction, m, target_Ahat, noise)\n\n # because there's noise added, dont want to get false positives\n new_activity_times = np.nonzero(np.round(TS[:, 1:].sum(axis=0), 3))[0]\n\n # store\n cache = initialize_threshold_cache(M * L, distribution_type, scale)\n\n # now run dynamics\n\n for t in range(L - 1):\n if t not in list(new_activity_times):\n current_state = TS[:, t]\n\n # because there's noise added, dont want to get false positives\n active_nodes = list(np.nonzero(np.round(current_state, 3))[0])\n active_edges = G.out_edges(nbunch=active_nodes, data=True)\n\n if len(active_edges) != 0:\n current_targets = list(list(zip(*active_edges))[1])\n weights_array = np.array([j[2]['weight'] for j in active_edges])\n\n if len(cache) <= len(weights_array):\n cache = initialize_threshold_cache(\n M * L, distribution_type, scale\n )\n\n # find edges with edges that will exceed the weights cache\n # and thus will successfully propagate the information\n over_the_threshold = weights_array > cache[: len(weights_array)]\n cache = cache[(len(weights_array) + 1) :] # update the cache\n\n next_active_units = np.unique(\n np.array(current_targets)[over_the_threshold]\n )\n TS[next_active_units, t + 1] = 1\n\n # save the ground-truth network to results\n self.results['ground_truth'] = G\n # save the timeseries data to results\n 
self.results['TS'] = TS\n\n return TS\n\n\ndef initialize_history(N, L, initial_fraction, m, target_Ahat, noise):\n \"\"\"\n Initializes the TS of this simulation based on a configuration of\n parameters corresponding to the initial_fraction of active nodes,\n the branching ratio, m, and the target number of avalanches.\n\n Parameters\n ----------\n N (int): number of nodes\n L (int): desired length of time series\n initial_fraction (float): fraction of nodes that start as active\n m (float): branching ratio of the branching process\n target_Ahat (float): desired average activity. This will ensure the\n process does not reach a stationary state and\n will always have some external drive.\n noise (bool): add nonzero values to the time series\n\n\n Returns\n -------\n TS_init (np.ndarray): an N x L time series with nonzero entries in\n the first column\n\n \"\"\"\n\n TS_init = np.zeros((N, L))\n num_init = np.round(initial_fraction * N).astype('int')\n\n TS_init[np.random.permutation(N)[0:num_init], 0] = 1\n\n # maybe also here initialize TS_init with external drives?\n if m != 1.0:\n if N > 1000:\n N_nodes = N\n else:\n N_nodes = 1000\n h_vals = np.random.poisson(target_Ahat * N_nodes * np.abs(1 - m), L)\n else:\n if N > 100:\n N_nodes = N\n else:\n N_nodes = 100\n h_vals = np.random.poisson(0.01, L)\n # h_vals = np.random.poisson(target_Ahat*N_nodes * 0.01, L)\n\n sum_h_vals = sum(h_vals)\n external_drive_timestamps = sorted(list(np.nonzero(h_vals)[0]))\n external_drive_activenodes = list(np.random.choice(N, sum_h_vals))\n\n for timestamp in external_drive_timestamps:\n num_pops = h_vals[timestamp]\n active_nodes = [external_drive_activenodes.pop() for i in range(num_pops)]\n TS_init[active_nodes, timestamp] = 1 # or maybe equals 1?\n\n if noise:\n TS_init += np.random.uniform(-np.exp(-12), np.exp(-12), TS_init.shape)\n\n return TS_init\n\n\ndef initialize_threshold_cache(num_edges, distribution_type='unif', scale=1.0):\n \"\"\"\n A cache of random numbers. This is useful for speed, as calling the numpy\n random number generator can get costly with large networks and time series.\n\n Parameters\n ----------\n num_edges (int): the length of the cache, which should correspond to the\n combination of all possible activity over the simulation.\n distribution_type (str): string that describes which type of random numbers.\n scale (float): scale for how likely nodes are to topple\n\n Returns\n -------\n edges (np.ndarray): a vector of probability thresholds, above which the node\n will topple and send information to the following node.\n\n \"\"\"\n\n if distribution_type == 'unif':\n edges = scale * np.random.rand(num_edges)\n return edges\n\n elif distribution_type == 'normal':\n edges = scale * np.random.randn(num_edges)\n return edges\n" ]
[ [ "numpy.log", "numpy.linalg.eigvals", "numpy.split", "numpy.abs", "numpy.linspace", "numpy.angle", "scipy.sparse.bmat", "numpy.random.RandomState" ], [ "numpy.abs", "numpy.nonzero", "numpy.random.choice", "numpy.round", "numpy.random.poisson", "numpy.random.permutation", "numpy.random.randn", "numpy.random.rand", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
katherine-atwell/DiscourseSenser
[ "73008c9437a0466005be5f6d130050eece4069a6" ]
[ "dsenser/judge/judge.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8; mode: python; -*-\n\n\"\"\"Module providing class for judges classification.\n\nAttributes:\nJudge (class):\n class for merging judgments of different classifiers\n\n\"\"\"\n\n##################################################################\n# Imports\nfrom __future__ import absolute_import, print_function\n\n# from dsenser.judge.base import BaseJudge\nfrom dsenser.utils import is_explicit, timeit\n\nimport numpy as np\n\n##################################################################\n# Classes\n# class ExplicitJudge(BaseJudge):\n\n# @timeit(\"Training explicit judge...\")\n# def train(self, *args, **kwargs):\n# super(ExplicitJudge, self).train(*args, **kwargs)\n\n\n# class ImplicitJudge(BaseJudge):\n\n# @timeit(\"Training implicit judge...\")\n# def train(self, *args, **kwargs):\n# super(ImplicitJudge, self).train(*args, **kwargs)\n\n\nclass Judge(object):\n \"\"\"Meta-classifier for re-estimating decisions.\n\n Attrs:\n __init__(): class constructor\n train(): method for training meta-classifiers\n test(): method for joint predictions\n explicit: meta-classifier for explicit relations\n implicit: meta-classifier for implicit relations\n\n \"\"\"\n\n def __init__(self, a_n_x, a_n_y):\n \"\"\"Class constructor.\n\n Args:\n a_n_x (int):\n number of underlying cassifiers\n a_n_y (int):\n number of classes to predict\n\n\n \"\"\"\n pass\n # self.explicit = ImplicitJudge(a_n_x, a_n_y)\n # self.implicit = ExplicitJudge(a_n_x, a_n_y)\n\n def train(self, a_train, a_dev=()):\n \"\"\"Method for training the model.\n\n Args:\n a_train (list(3-tuple(x, rel, y))):\n list of training instances\n a_dev (2-tuple(dict, dict) or None):\n list of development instances\n\n Returns:\n (void)\n\n \"\"\"\n return\n # # divide training set into explicit and implicit relations\n # exp_train, imp_train = self._divide_data(a_train)\n # exp_dev, imp_dev = self._divide_data(a_dev)\n # # train explicit judge\n # self.explicit.train(exp_train, exp_dev)\n # # train implicit judge\n # self.implicit.train(imp_train, imp_dev)\n\n def predict(self, a_rel, a_x):\n \"\"\"Method for predicting sense of single relation.\n\n Args:\n a_rel (dict):\n input relation to classify\n a_x (np.array):\n (submodels x class) array of input predictions\n\n Returns:\n str:\n most probable sense of discourse relation\n\n \"\"\"\n ret = np.mean(a_x, axis=0)\n return (np.argmax(ret), ret)\n if is_explicit(a_rel):\n return self.explicit.predict(a_x)\n return self.implicit.predict(a_x)\n\n def _free(self):\n \"\"\"Free resources used by the model.\n\n Args:\n (void):\n\n Returns:\n (void):\n\n \"\"\"\n del self.explicit\n del self.implicit\n\n def _divide_data(self, a_data):\n \"\"\"Separate dataset into explicit and implicit instances.\n\n Args:\n a_data (2-tuple(dict, dict)):\n list of gold relations and dict with parses\n\n Returns:\n (2-tuple(list, list)):\n lists of explicit and implicit training instances\n\n \"\"\"\n if a_data is None:\n return ((), ())\n explicit_instances = []\n implicit_instances = []\n for x_i, irel, y_i in a_data:\n if is_explicit(irel):\n explicit_instances.append((x_i, y_i))\n else:\n implicit_instances.append((x_i, y_i))\n return (explicit_instances, implicit_instances)\n" ]
[ [ "numpy.argmax", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
adowaconan/variational_autoencoder_spindles
[ "0410fe86372ed50c5d136e7bbb13bbdf4dc4cc7b" ]
[ "old stuff/activiationFuns.py" ]
[ "\"\"\"Activations for TensorFlow.\nParag K. Mital, Jan 2016.\"\"\"\nimport tensorflow as tf\n\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n \"\"\"Leaky rectifier.\n\n Parameters\n ----------\n x : Tensor\n The tensor to apply the nonlinearity to.\n leak : float, optional\n Leakage parameter.\n name : str, optional\n Variable scope to use.\n\n Returns\n -------\n x : Tensor\n Output of the nonlinearity.\n \"\"\"\n with tf.variable_scope(name):\n f1 = 0.5 * (1 + leak)\n f2 = 0.5 * (1 - leak)\n return f1 * x + f2 * abs(x)" ]
[ [ "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
jemiar/surgery-gesture-recog
[ "83b98c2ccd937c898eb731ccdf28c9248ce3df8d" ]
[ "transit_classification.py" ]
[ "import cv2\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow.keras.layers as layers\nfrom data_generator import DataGenerator\n\nbase_directory = './'\n\n# function used to read video data and save normal or transit blocks to folder\ndef save_data(fromarray=transcriptions, height=240, width=320, folder='data_001/', idnumber=1):\n # array to store ids of normal blocks\n normals = []\n # array to store ids of transit blocks\n transits = []\n # dictionary to store target y value (class) of each block\n labels = {}\n os.chdir(base_directory + 'Suturing/video/')\n # for each element in fromarray (store video file names)\n for arr in fromarray:\n # use CV2 to capture the video file\n cap = cv2.VideoCapture(arr['file'][:-4] + '_capture1.avi')\n i = 1\n # Initialize numpy array to store frames of Red, Green, Blue channels of video\n red_frames = np.empty((0, height, width))\n green_frames = np.empty((0, height, width))\n blue_frames = np.empty((0, height, width))\n # while reading the capture, only store 1 frame for every 3 frames\n while cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n break\n if i%3 == 1:\n # Resize the frame to reduce the computation during training\n frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)\n # Cast the frame as a numpy array\n f = np.asarray(frame)\n # Update the color of frame from BGR to RGB\n f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)\n # Apprend frame to its appropriate channel\n red_frames = np.append(red_frames, np.expand_dims(f[:,:,0], axis=0), axis=0)\n green_frames = np.append(green_frames, np.expand_dims(f[:,:,1], axis=0), axis=0)\n blue_frames = np.append(blue_frames, np.expand_dims(f[:,:,2], axis=0), axis=0)\n i += 1\n # Release the capture when finishing reading\n cap.release()\n # Normalize the value of each element to range [0, 1]\n red_frames = red_frames / 255.0\n green_frames = green_frames / 255.0\n blue_frames = blue_frames / 255.0\n\n # For each transciption (transcribe where each gesture starts and ends)\n for k, t in enumerate(arr['transcription']):\n # Save the normal block\n # Calculate the left most frame of 1 gesture\n left = (t[0] + 1) // 3\n # Calculate the right most frame of 1 gesture\n right = (t[1] - 1) // 3\n # Calculate the number of normal blocks in a gesture\n num_blocks = (right - left + 1) // 10\n for index in range(num_blocks):\n # Each block has shape (10, height, width, 3)\n block = np.expand_dims(red_frames[left+index*10:left+(index+1)*10,:,:], axis=3)\n block = np.append(block, np.expand_dims(green_frames[left+index*10:left+(index+1)*10,:,:], axis=3), axis=3)\n block = np.append(block, np.expand_dims(blue_frames[left+index*10:left+(index+1)*10,:,:], axis=3), axis=3)\n # Store normal block\n npy_name = 'id_' + str(idnumber)\n temp_obj = {'id': npy_name, 'file': arr['file'], 'label': 0}\n normals.append(temp_obj)\n labels[npy_name] = 0\n np.save(base_directory + folder + npy_name + '.npy', block)\n idnumber += 1\n\n # Save transit blocks\n if k < (len(arr['transcription']) - 1):\n # Each transit block has the last 5 frames of 1 gesture and the 1st 5 frames of the next gesture\n # Calculate the left most frame of a transit block\n ind = (t[1] - 1) // 3 - 4\n block = np.expand_dims(red_frames[ind:ind+10,:,:], axis=3)\n block = np.append(block, np.expand_dims(green_frames[ind:ind+10,:,:], axis=3), axis=3)\n block = np.append(block, np.expand_dims(blue_frames[ind:ind+10,:,:], axis=3), axis=3)\n # Store transit block\n npy_name = 'id_' + 
str(idnumber)\n temp_obj = {'id': npy_name, 'file': arr['file'], 'label': 1}\n transits.append(temp_obj)\n labels[npy_name] = 1\n np.save(base_directory + folder + npy_name + '.npy', block)\n idnumber += 1\n\n return normals, transits, labels\n\n# function to create 3D CNN model to classify normal and transit blocks\ndef create_model(height=240, width=320):\n # shape of input: 1 block has 10 frames x height x width x 3 channels (RGB)\n input = tf.keras.Input((10, height, width, 3))\n\n # 1st Conv3D block includes Conv3D with 8 filters, MaxPool3D and BatchNormalization\n x = layers.Conv3D(filters=8, kernel_size=(3,3,3), activation='relu')(input)\n x = layers.MaxPool3D(pool_size=(2,2,2))(x)\n x = layers.BatchNormalization()(x)\n\n # 2nd Conv3D block includes Conv3D with 16 filters, MaxPool3D and BatchNormalization\n x = layers.Conv3D(filters=16, kernel_size=(3,3,3), activation='relu')(x)\n x = layers.MaxPool3D(pool_size=(2,2,2))(x)\n x = layers.BatchNormalization()(x)\n\n # 3rd Conv3D block includes Conv3D with 32 filters, MaxPool3D and BatchNormalization\n x = layers.Conv3D(filters=32, kernel_size=(3,3,3), activation='relu')(input)\n x = layers.MaxPool3D(pool_size=(1,2,2))(x)\n x = layers.BatchNormalization()(x)\n\n # Fully-connected block includes GlobalAveragePooling3D, Fully-Connected layer with 512 units and DropOut for Regularization\n x = layers.GlobalAveragePooling3D()(x)\n x = layers.Dense(units=512, activation='relu')(x)\n x = layers.DropOut(0.7)(x)\n\n # output shape (1,) produces value between [0, 1]\n output = layers.Dense(units=1, activation='sigmoid')(x)\n\n model = tf.keras.Model(input, output, name='3DCNN')\n return model\n\n# Create model\nmodel = create_model(240, 320)\n\n# Create data generator for training and validation\nparams = {\n 'dim': (10, 240, 320),\n 'batch_size': 16,\n 'n_classes': 2,\n 'n_channels': 3,\n 'folder': 'data_001/',\n 'shuffle': True\n}\ntrain_generator = DataGenerator(training_ids, labels, **params)\nval_generator = DataGenerator(validation_ids, labels, **params)\n\nlearning_rate = 0.001\nmetrics = [keras.metrics.Accuracy(), keras.metrics.Precision(), keras.metrics.Recall()]\n# Compile model, using binary cross-entropy for loss\nmodel.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=learning_rate), metrics=metrics)\n# Train model in 100 epochs\nmodel.fit_generator(generator=train_generator, validation_data=val_generator, epochs=100, shuffle=True)\n" ]
[ [ "numpy.expand_dims", "tensorflow.keras.Input", "tensorflow.keras.layers.MaxPool3D", "numpy.asarray", "tensorflow.keras.layers.GlobalAveragePooling3D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.DropOut", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.Model", "numpy.save", "tensorflow.keras.metrics.Accuracy", "tensorflow.keras.metrics.Precision", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.metrics.Recall", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Akshaykumarcp/ML-hyperparameter-optimization
[ "9348b5b497a9f545f2e0cfe58450fd837ba42cb2" ]
[ "0.3_search_based_CV/0.3.3.1_grid_search.py" ]
[ "\"\"\" Grid Search for Hyperparameters \"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n# https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)\r\nfrom sklearn.datasets import load_breast_cancer\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import (\r\n GridSearchCV,\r\n train_test_split,\r\n)\r\n\r\n# in short, classification problem, trying to predict whether the tumor is malignant or benign\r\n\r\n# load dataset\r\nbreast_cancer_X, breast_cancer_y = load_breast_cancer(return_X_y=True)\r\n\r\nX = pd.DataFrame(breast_cancer_X)\r\ny = pd.Series(breast_cancer_y).map({0:1, 1:0})\r\n\r\nX.head()\r\n\"\"\" \r\n 0 1 2 3 4 5 6 7 8 9 10 11 ... 18 19 20 21 22 23 24 25 26 27 28 29\r\n0 17.99 10.38 122.80 1001.0 0.11840 0.27760 0.3001 0.14710 0.2419 0.07871 1.0950 0.9053 ... 0.03003 0.006193 25.38 17.33 184.60 2019.0 0.1622 0.6656 0.7119 0.2654 0.4601 0.11890 \r\n1 20.57 17.77 132.90 1326.0 0.08474 0.07864 0.0869 0.07017 0.1812 0.05667 0.5435 0.7339 ... 0.01389 0.003532 24.99 23.41 158.80 1956.0 0.1238 0.1866 0.2416 0.1860 0.2750 0.08902 \r\n2 19.69 21.25 130.00 1203.0 0.10960 0.15990 0.1974 0.12790 0.2069 0.05999 0.7456 0.7869 ... 0.02250 0.004571 23.57 25.53 152.50 1709.0 0.1444 0.4245 0.4504 0.2430 0.3613 0.08758 \r\n3 11.42 20.38 77.58 386.1 0.14250 0.28390 0.2414 0.10520 0.2597 0.09744 0.4956 1.1560 ... 0.05963 0.009208 14.91 26.50 98.87 567.7 0.2098 0.8663 0.6869 0.2575 0.6638 0.17300 \r\n4 20.29 14.34 135.10 1297.0 0.10030 0.13280 0.1980 0.10430 0.1809 0.05883 0.7572 0.7813 ... 0.01756 0.005115 22.54 16.67 152.20 1575.0 0.1374 0.2050 0.4000 0.1625 0.2364 0.07678 \r\n\r\n[5 rows x 30 columns] \"\"\"\r\n\r\n# percentage of benign (0) and malign tumors (1)\r\n\r\ny.value_counts() / len(y)\r\n\"\"\" \r\n0 0.627417\r\n1 0.372583\r\ndtype: float64 \"\"\"\r\n# split dataset into a train and test set\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\r\n\r\nX_train.shape, X_test.shape\r\n# ((398, 30), (171, 30))\r\n\r\n\"\"\" Grid Search \"\"\"\r\n\r\n# Let's use Grid Search to find the best hyperparameters for a Gradient Boosting Classifier.\r\n\r\n# set up the model\r\ngbm = GradientBoostingClassifier(random_state=0)\r\n\r\n# determine the hyperparameter space\r\nparam_grid = dict(\r\n n_estimators=[10, 20, 50, 100],\r\n min_samples_split=[0.1, 0.3, 0.5],\r\n max_depth=[1,2,3,4,None],\r\n )\r\n\r\nprint('Number of hyperparam combinations: ', len(param_grid['n_estimators'])*len(param_grid['min_samples_split'])*len(param_grid['max_depth']))\r\n# Number of hyperparam combinations: 60\r\n\r\n# set up the search\r\nsearch = GridSearchCV(gbm, param_grid, scoring='roc_auc', cv=5, refit=True)\r\n\r\n# find best hyperparameters\r\nsearch.fit(X_train, y_train)\r\nGridSearchCV(cv=5, estimator=GradientBoostingClassifier(random_state=0),\r\n param_grid={'max_depth': [1, 2, 3, 4, None],\r\n 'min_samples_split': [0.1, 0.3, 0.5],\r\n 'n_estimators': [10, 20, 50, 100]},\r\n scoring='roc_auc')\r\n\r\n# the best hyperparameters are stored in an attribute\r\nsearch.best_params_\r\n# {'max_depth': 2, 'min_samples_split': 0.1, 'n_estimators': 100}\r\n\r\n# we also find the data for all models evaluated\r\nresults = pd.DataFrame(search.cv_results_)\r\n\r\nprint(results.shape)\r\n# (60, 16)\r\n\r\nresults.head()\r\n\"\"\" \r\n mean_fit_time std_fit_time mean_score_time std_score_time param_max_depth ... 
split3_test_score split4_test_score mean_test_score std_test_score rank_test_score\r\n0 0.010000 2.780415e-07 0.001600 0.000800 1 ... 0.983103 0.940136 0.964248 0.016026 58\r\n1 0.018000 1.784161e-07 0.001400 0.000490 1 ... 0.986897 0.970068 0.971193 0.011607 52\r\n2 0.042800 1.599980e-03 0.002000 0.000001 1 ... 0.993103 0.980272 0.983475 0.011647 33\r\n3 0.081746 3.862444e-04 0.002007 0.000013 1 ... 0.997241 0.983673 0.988783 0.009298 18\r\n4 0.010002 1.974129e-06 0.001792 0.000411 1 ... 0.983103 0.940136 0.964248 0.016026 58\r\n\r\n[5 rows x 16 columns] \"\"\"\r\n\r\n# we can order the different models based on their performance\r\nresults.sort_values(by='mean_test_score', ascending=False, inplace=True)\r\nresults.reset_index(drop=True, inplace=True)\r\nresults[[\r\n 'param_max_depth', 'param_min_samples_split', 'param_n_estimators',\r\n 'mean_test_score', 'std_test_score',\r\n]].head()\r\n\"\"\" \r\n param_max_depth param_min_samples_split param_n_estimators mean_test_score std_test_score\r\n0 2 0.1 100 0.992415 0.006426\r\n1 2 0.3 100 0.992013 0.006461\r\n2 3 0.5 100 0.991949 0.006547\r\n3 4 0.5 100 0.991620 0.007117\r\n4 2 0.5 100 0.991545 0.006363 \"\"\"\r\n\r\nresults[[\r\n 'param_max_depth', 'param_min_samples_split', 'param_n_estimators',\r\n 'mean_test_score', 'std_test_score',\r\n]].tail()\r\n\"\"\" \r\n param_max_depth param_min_samples_split param_n_estimators mean_test_score std_test_score\r\n55 2 0.3 10 0.971111 0.011933\r\n56 2 0.5 10 0.965857 0.009779\r\n57 1 0.3 10 0.964248 0.016026\r\n58 1 0.5 10 0.964248 0.016026\r\n59 1 0.1 10 0.964248 0.016026 \"\"\"\r\n\r\n# plot model performance and error\r\nresults['mean_test_score'].plot(yerr=[results['std_test_score'], results['std_test_score']], subplots=True)\r\nplt.ylabel('Mean test score')\r\nplt.xlabel('Hyperparameter combinations')\r\nplt.show()\r\n\r\nX_train_preds = search.predict_proba(X_train)[:,1]\r\nX_test_preds = search.predict_proba(X_test)[:,1]\r\n\r\nprint('Train roc_auc: ', roc_auc_score(y_train, X_train_preds))\r\nprint('Test roc_auc: ', roc_auc_score(y_test, X_test_preds))\r\n\"\"\" \r\nTrain roc_auc: 1.0\r\nTest roc_auc: 0.996766607877719 \"\"\"\r\n\r\n# let's make a function to evaluate the model performance based on\r\n# single hyperparameters\r\n\r\ndef summarize_by_param(hparam):\r\n \r\n tmp = pd.concat([\r\n results.groupby(hparam)['mean_test_score'].mean(),\r\n results.groupby(hparam)['mean_test_score'].std(),\r\n ], axis=1)\r\n\r\n tmp.columns = ['mean_test_score', 'std_test_score']\r\n \r\n return tmp\r\n\r\n# performance change for n_estimators\r\ntmp = summarize_by_param('param_n_estimators')\r\n\r\ntmp.head()\r\n\"\"\" \r\n mean_test_score std_test_score\r\nparam_n_estimators\r\n10 0.973527 0.006351\r\n20 0.980913 0.005263\r\n50 0.987444 0.002510\r\n100 0.990123 0.002077 \"\"\"\r\n\r\ntmp['mean_test_score'].plot(yerr=[tmp['std_test_score'], tmp['std_test_score']], subplots=True)\r\nplt.ylabel('roc-auc')\r\nplt.show()\r\n\r\n# The optimal hyperparameter seems to be somewhere between 60 and 100.\r\n\r\ntmp = summarize_by_param('param_max_depth')\r\ntmp['mean_test_score'].plot(yerr=[tmp['std_test_score'], tmp['std_test_score']], subplots=True)\r\nplt.ylabel('roc-auc')\r\nplt.show()\r\n\r\n# The optimal hyperparameter seems to be between 2 and 3.\r\n\r\ntmp = summarize_by_param('param_min_samples_split')\r\ntmp['mean_test_score'].plot(yerr=[tmp['std_test_score'], tmp['std_test_score']], subplots=True)\r\n# array([<AxesSubplot:xlabel='param_min_samples_split'>], dtype=object)\r\nplt.show()\r\n\r\n# 
This parameter does not seem to improve performance much.\r\n\r\n# determine the hyperparameter space\r\nparam_grid = dict(\r\n n_estimators=[60, 80, 100, 120],\r\n max_depth=[2,3],\r\n loss = ['deviance', 'exponential'],\r\n )\r\n\r\n# set up the search\r\nsearch = GridSearchCV(gbm, param_grid, scoring='roc_auc', cv=5, refit=True)\r\n\r\n# find best hyperparameters\r\nsearch.fit(X_train, y_train)\r\nGridSearchCV(cv=5, estimator=GradientBoostingClassifier(random_state=0),\r\n param_grid={'loss': ['deviance', 'exponential'],\r\n 'max_depth': [2, 3],\r\n 'n_estimators': [60, 80, 100, 120]},\r\n scoring='roc_auc')\r\n# the best hyperparameters are stored in an attribute\r\n\r\nsearch.best_params_\r\n# {'loss': 'exponential', 'max_depth': 2, 'n_estimators': 120}\r\nresults = pd.DataFrame(search.cv_results_)\r\nresults.sort_values(by='mean_test_score', ascending=False, inplace=True)\r\nresults.reset_index(drop=True, inplace=True)\r\nresults[[\r\n 'param_max_depth', 'param_loss', 'param_n_estimators',\r\n 'mean_test_score', 'std_test_score',\r\n]].head(8)\r\n\"\"\" \r\n param_max_depth param_loss param_n_estimators mean_test_score std_test_score\r\n0 2 exponential 120 0.993095 0.006174\r\n1 2 exponential 100 0.992828 0.006021\r\n2 2 exponential 80 0.992765 0.006340\r\n3 2 deviance 120 0.992556 0.006791\r\n4 2 deviance 100 0.992149 0.006904\r\n5 2 exponential 60 0.991210 0.007305\r\n6 2 deviance 60 0.991066 0.006638\r\n7 2 deviance 80 0.990797 0.006979 \"\"\"\r\n \r\nresults['mean_test_score'].plot(yerr=[results['std_test_score'], results['std_test_score']], subplots=True)\r\n\r\nplt.ylabel('Mean test score')\r\nplt.xlabel('Hyperparameter combinations')\r\nplt.show()\r\n\r\nX_train_preds = search.predict_proba(X_train)[:,1]\r\nX_test_preds = search.predict_proba(X_test)[:,1]\r\n\r\nprint('Train roc_auc: ', roc_auc_score(y_train, X_train_preds))\r\nprint('Test roc_auc: ', roc_auc_score(y_test, X_test_preds))\r\n\"\"\" \r\nTrain roc_auc: 0.9999999999999999\r\nTest roc_auc: 0.9973544973544973 \"\"\"" ]
[ [ "sklearn.metrics.roc_auc_score", "sklearn.model_selection.GridSearchCV", "sklearn.datasets.load_breast_cancer", "pandas.Series", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.ensemble.GradientBoostingClassifier", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
pk1601cs33/baseline
[ "3cbe990533dad15e67018b4e529d26845574fb67" ]
[ "python/baseline/tf/tfy.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.layers import core as layers_core\nfrom baseline.utils import lookup_sentence, beam_multinomial\nimport os\n\n\ndef _find_files_by_type(model_file, filetype):\n \"\"\"Find all files by type, removing suffix\n\n we rely on the fact that vocab files end in .vocab.\n\n :return: the file names without the filetype.\n \"\"\"\n matching_files = []\n\n filetype_ending = \".\" + filetype\n basepath = get_basepath_or_cwd(model_file)\n\n for filename in os.listdir(basepath):\n if filename.endswith(filetype_ending):\n filename_without_ending = filename[:-len(filetype_ending)]\n matching_files.append(os.path.join(basepath, filename_without_ending))\n\n if not matching_files:\n raise ValueError(\"no vocab files found in directory %s. \\\nPlease specify the model as path-like. e.g. /data/model/model-name-1234\" % basepath)\n\n return matching_files\n\ndef get_basepath_or_cwd(model_file):\n \"\"\"\n inspects the model_file variable for a directory name.\n\n if no directory is found, returns current working dir.\n \"\"\"\n basepath = os.path.dirname(model_file)\n if not os.path.isdir(basepath):\n basepath = os.getcwd()\n\n return basepath\n\n\ndef get_vocab_file_suffixes(model_file):\n\n \"\"\"Because our operations assume knowledge of the model name, we\n only need to return the suffix appended onto the end of the model\n name in the file.\n\n we make the assumption that a suffix is denoted with a hyphen.\n\n e.g. a vocab file name = tagger-model-tf-30803-word.vocab\n would return ['word']\n\n :param model_file: the nonspecific path to the model. this could be\n /data/model/<model_name>. we need to remove the model name.\n :return:\n \"\"\"\n filenames = _find_files_by_type(model_file, 'vocab')\n # mmodel_file can be path like or a string for just the model name.\n name_parts = model_file.split('/')\n model_name = name_parts[-1]\n # the length of the name plus 1 for the hyphen separating the suffix.\n return [x.split('/')[-1][len(model_name)+1:] for x in filenames]\n\n\ndef optimizer(loss_fn, **kwargs):\n\n global_step = tf.Variable(0, trainable=False)\n clip = kwargs.get('clip', None)\n mom = kwargs.get('mom', 0.9)\n optim = kwargs.get('optim', 'sgd')\n eta = kwargs.get('eta', kwargs.get('lr', 0.01))\n decay_type = kwargs.get('decay_type', None)\n decay_fn = None\n\n if decay_type == 'piecewise':\n boundaries = kwargs.get('bounds', None)\n decay_values = kwargs.get('decay_values', None)\n decay_fn = lambda lr, global_step: tf.train.piecewise_constant(global_step, boundaries, decay_values)\n\n elif decay_type == 'staircase':\n at_step = int(kwargs.get('bounds', 16000))\n decay_rate = float(kwargs.get('decay_rate', 0.5))\n decay_fn = lambda lr, global_step: tf.train.exponential_decay(lr, global_step, at_step, decay_rate, staircase=True)\n\n elif decay_type == 'invtime':\n decay_rate = float(kwargs.get('decay_rate', 0.05))\n at_step = int(kwargs.get('bounds', 16000))\n decay_fn = lambda lr, global_step: tf.train.inverse_time_decay(lr, global_step, at_step, decay_rate, staircase=False)\n\n # warm restarts in master, not in 1.5 yet\n #elif decay_type == 'sgdr':\n # at_step = kwargs.get('bounds', 1000)\n # decay_fn = lambda lr, global_step: tf.train.cosine_decay_restarts(lr, global_step, first_decay_steps=at_step)\n\n elif decay_type == 'cosine':\n at_step = kwargs.get('bounds', 1000)\n decay_fn = lambda lr, global_step: tf.train.cosine_decay(lr, global_step, at_step)\n\n elif decay_type == 'lincos':\n at_step = kwargs.get('bounds', 
1000)\n decay_fn = lambda lr, global_step: tf.train.linear_cosine_decay(lr, global_step, at_step)\n\n elif decay_type == 'zaremba':\n boundaries = kwargs.get('bounds', None)\n decay_rate = float(kwargs.get('decay_rate', None))\n values = [eta/(decay_rate**i) for i in range(len(boundaries)+1)]\n print('Learning rate schedule:')\n print('B', len(boundaries), boundaries)\n print('V', len(values), values)\n decay_fn = lambda lr, global_step: tf.train.piecewise_constant(global_step, boundaries, values)\n\n if optim == 'adadelta':\n print('adadelta', eta)\n optz = lambda lr: tf.train.AdadeltaOptimizer(lr, 0.95, 1e-6)\n elif optim == 'adam':\n print('adam', eta)\n optz = lambda lr: tf.train.AdamOptimizer(lr)\n elif mom > 0:\n print('sgd-mom', eta, mom)\n optz = lambda lr: tf.train.MomentumOptimizer(lr, mom)\n else:\n print('sgd')\n optz = lambda lr: tf.train.GradientDescentOptimizer(lr)\n\n print('clip', clip)\n print('decay', decay_fn)\n return global_step, tf.contrib.layers.optimize_loss(loss_fn, global_step, eta, optz,\n clip_gradients=clip, learning_rate_decay_fn=decay_fn)\n\n\ndef tensor2seq(tensor):\n return tf.unstack(tf.transpose(tensor, perm=[1, 0, 2]))\n\n\ndef seq2tensor(sequence):\n return tf.transpose(tf.stack(sequence), perm=[1, 0, 2])\n\n\ndef dense_layer(output_layer_depth):\n output_layer = layers_core.Dense(output_layer_depth, use_bias=False, dtype=tf.float32, name=\"dense\")\n return output_layer\n\n\ndef lstm_cell(hsz, forget_bias=1.0):\n return tf.contrib.rnn.BasicLSTMCell(hsz, forget_bias=forget_bias, state_is_tuple=True)\n\n\ndef lstm_cell_w_dropout(hsz, pkeep, forget_bias=1.0):\n return tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(hsz, forget_bias=forget_bias, state_is_tuple=True), output_keep_prob=pkeep)\n\n\ndef stacked_lstm(hsz, pkeep, nlayers):\n return tf.contrib.rnn.MultiRNNCell([lstm_cell_w_dropout(hsz, pkeep) if i < nlayers - 1 else lstm_cell(hsz) for i in range(nlayers)], state_is_tuple=True)\n\n\ndef stacked_cnn(inputs, hsz, pkeep, nlayers, activation_fn=tf.nn.relu, filts=[5]):\n with tf.variable_scope(\"StackedCNN\"):\n layers = []\n for filt in filts:\n layer = tf.nn.dropout(tf.layers.conv1d(inputs, hsz, filt, activation=activation_fn, padding=\"same\", reuse=False), pkeep)\n\n for i in range(1, nlayers):\n layer = layer + tf.nn.dropout(tf.layers.conv1d(inputs, hsz, filt, activation=activation_fn, padding=\"same\", reuse=False), pkeep)\n layers += [layer]\n\n return tf.concat(values=layers, axis=2)\n\n\ndef rnn_cell_w_dropout(hsz, pkeep, rnntype, st=None):\n if st is not None:\n cell = tf.contrib.rnn.BasicLSTMCell(hsz, state_is_tuple=st) if rnntype.endswith('lstm') else tf.contrib.rnn.GRUCell(hsz)\n else:\n cell = tf.contrib.rnn.LSTMCell(hsz) if rnntype.endswith('lstm') else tf.contrib.rnn.GRUCell(hsz)\n return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=pkeep)\n\n\ndef multi_rnn_cell_w_dropout(hsz, pkeep, rnntype, num_layers):\n return tf.contrib.rnn.MultiRNNCell([rnn_cell_w_dropout(hsz, pkeep, rnntype) for _ in range(num_layers)], state_is_tuple=True)\n\n\n# This function should never be used for decoding. 
It exists only so that the training model can greedily decode\n# It is super slow and doesnt use maintain a beam of hypotheses\ndef show_examples_tf(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):\n si = np.random.randint(0, len(es))\n\n batch_dict = es[si]\n src_array = batch_dict['src']\n tgt_array = batch_dict['dst']\n src_len = batch_dict['src_len']\n\n if max_examples > 0:\n max_examples = min(max_examples, src_array.shape[0])\n src_array = src_array[0:max_examples]\n tgt_array = tgt_array[0:max_examples]\n src_len = src_len[0:max_examples]\n\n GO = embed2.vocab['<GO>']\n EOS = embed2.vocab['<EOS>']\n\n for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):\n\n print('========================================================================')\n\n sent = lookup_sentence(rlut1, src_i, reverse=reverse)\n print('[OP] %s' % sent)\n sent = lookup_sentence(rlut2, tgt_i)\n print('[Actual] %s' % sent)\n dst_i = np.zeros((1, mxlen))\n src_i = src_i[np.newaxis,:]\n src_len_i = np.array([src_len_i])\n next_value = GO\n for j in range(mxlen):\n dst_i[0, j] = next_value\n tgt_len_i = np.array([j+1])\n output = model.step({'src': src_i, 'src_len': src_len_i, 'dst': dst_i, 'dst_len': tgt_len_i})[j]\n if sample is False:\n next_value = np.argmax(output)\n else:\n # This is going to zero out low prob. events so they are not\n # sampled from\n next_value = beam_multinomial(prob_clip, output)\n\n if next_value == EOS:\n break\n\n sent = lookup_sentence(rlut2, dst_i.squeeze())\n print('Guess: %s' % sent)\n print('------------------------------------------------------------------------')\n\n\ndef skip_conns(inputs, wsz_all, n, activation_fn=tf.nn.relu):\n for i in range(n):\n with tf.variable_scope(\"skip-%d\" % i):\n W_p = tf.get_variable(\"W_p\", [wsz_all, wsz_all])\n b_p = tf.get_variable(\"B_p\", [1, wsz_all], initializer=tf.constant_initializer(0.0))\n proj = activation_fn(tf.matmul(inputs, W_p) + b_p, \"skip_activation\")\n\n inputs = inputs + proj\n return inputs\n\n\ndef highway_conns(inputs, wsz_all, n):\n for i in range(n):\n with tf.variable_scope(\"highway-%d\" % i):\n W_p = tf.get_variable(\"W_p\", [wsz_all, wsz_all])\n b_p = tf.get_variable(\"B_p\", [1, wsz_all], initializer=tf.constant_initializer(0.0))\n proj = tf.nn.relu(tf.matmul(inputs, W_p) + b_p, \"relu-proj\")\n\n W_t = tf.get_variable(\"W_t\", [wsz_all, wsz_all])\n b_t = tf.get_variable(\"B_t\", [1, wsz_all], initializer=tf.constant_initializer(-2.0))\n transform = tf.nn.sigmoid(tf.matmul(inputs, W_t) + b_t, \"sigmoid-transform\")\n\n inputs = tf.multiply(transform, proj) + tf.multiply(inputs, 1 - transform)\n return inputs\n\n\ndef parallel_conv(input_, filtsz, dsz, motsz, activation_fn=tf.nn.relu):\n \"\"\"Do parallel convolutions with multiple filter widths and max-over-time pooling.\n\n :param input_: The inputs in the shape [B, T, H].\n :param filtsz: The list of filter widths to use.\n :param dsz: The depths of the input (H).\n :param motsz: The number of conv filters to use (can be an int or a list to allow for various sized filters)\n\n :Keyword Arguments:\n * *activation_fn* -- (``callable``) The activation function to apply after the convolution and bias add\n \"\"\"\n if not isinstance(motsz, list):\n motsz = [motsz] * len(filtsz)\n DUMMY_AXIS = 1\n TIME_AXIS = 2\n FEATURE_AXIS = 3\n expanded = tf.expand_dims(input_, DUMMY_AXIS)\n mots = []\n for fsz, cmotsz in zip(filtsz, motsz):\n with tf.variable_scope('cmot-%s' % fsz):\n kernel_shape = [1, fsz, dsz, cmotsz]\n W = 
tf.get_variable('W', kernel_shape)\n b = tf.get_variable(\n 'b', [cmotsz],\n initializer=tf.constant_initializer(0.0)\n )\n conv = tf.nn.conv2d(\n expanded, W,\n strides=[1, 1, 1, 1],\n padding=\"SAME\", name=\"CONV\"\n )\n activation = activation_fn(tf.nn.bias_add(conv, b), 'activation')\n mot = tf.reduce_max(activation, [TIME_AXIS], keep_dims=True)\n mots.append(mot)\n motsz_all = sum(motsz)\n combine = tf.reshape(tf.concat(values=mots, axis=FEATURE_AXIS), [-1, motsz_all])\n return combine\n\n\ndef char_word_conv_embeddings(char_vec, filtsz, char_dsz, wsz, activation_fn=tf.nn.tanh):\n combine = parallel_conv(char_vec, filtsz, char_dsz, wsz, activation_fn)\n wsz_all = wsz * len(filtsz)\n joined = skip_conns(combine, wsz_all, 1)\n return joined\n\n\ndef tf_activation(name):\n if name == \"tanh\":\n return tf.nn.tanh\n if name == \"sigmoid\":\n return tf.nn.sigmoid\n return tf.nn.relu\n\n\ndef char_word_conv_embeddings_var_fm(char_vec, filtsz, char_dsz, nfeat_factor, max_feat=200, activation_fn=tf.nn.tanh):\n nfeats = [min(nfeat_factor * fsz, max_feat) for fsz in filtsz]\n wsz_all = sum(nfeats)\n combine = parallel_conv(char_vec, filtsz, char_dsz, nfeats, activation_fn)\n joined = highway_conns(combine, wsz_all, 2)\n return joined\n\n\ndef pool_chars(xch, Wch, ce0, char_dsz, **kwargs):\n \"\"\"Take in a tensor of characters (B x maxs x maxw) and do character convolution\n\n :param xch: TF tensor for input characters, (B x maxs x maxw)\n :param Wch: A character embeddings matrix\n :param ce0: A control dependency for the embeddings that keeps the <PAD> value 0\n :param char_dsz: The character embedding dsz\n :param kwargs:\n :return: The character compositional embedding and the number of hidden units as a tuple\n \"\"\"\n wsz = kwargs.get('wsz', 30)\n filtsz = kwargs.get('cfiltsz', [3])\n mxlen = int(kwargs.get('maxs', kwargs.get('mxlen', 100)))\n mxwlen = kwargs.get('maxw', kwargs.get('mxwlen', 40))\n activation_type = kwargs.get('activation', 'tanh')\n with tf.variable_scope(\"Chars2Word\"):\n with tf.control_dependencies([ce0]):\n char_bt_x_w = tf.reshape(xch, [-1, mxwlen])\n cembed = tf.nn.embedding_lookup(Wch, char_bt_x_w, name=\"embeddings\")\n cmot = char_word_conv_embeddings(cembed, filtsz, char_dsz, wsz,\n activation_fn=tf_activation(activation_type))\n word_char = tf.reshape(cmot, [-1, mxlen, len(filtsz) * wsz])\n\n return word_char, len(filtsz) * wsz\n\n\ndef shared_char_word(Wch, xch_i, filtsz, char_dsz, wsz, reuse):\n\n with tf.variable_scope(\"SharedCharWord\", reuse=reuse):\n cembed = tf.nn.embedding_lookup(Wch, xch_i)\n if len(filtsz) == 0 or filtsz[0] == 0:\n return tf.reduce_sum(cembed, [1])\n return char_word_conv_embeddings(cembed, filtsz, char_dsz, wsz)\n\n\ndef shared_char_word_var_fm(Wch, xch_i, filtsz, char_dsz, wsz, reuse):\n\n with tf.variable_scope(\"SharedCharWord\", reuse=reuse):\n cembed = tf.nn.embedding_lookup(Wch, xch_i)\n if len(filtsz) == 0 or filtsz[0] == 0:\n return tf.reduce_sum(cembed, [1])\n return char_word_conv_embeddings_var_fm(cembed, filtsz, char_dsz, wsz)\n" ]
[ [ "tensorflow.layers.conv1d", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.contrib.rnn.GRUCell", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.train.AdamOptimizer", "tensorflow.python.layers.core.Dense", "tensorflow.nn.conv2d", "tensorflow.train.cosine_decay", "tensorflow.Variable", "tensorflow.train.exponential_decay", "tensorflow.train.piecewise_constant", "numpy.argmax", "tensorflow.train.MomentumOptimizer", "numpy.zeros", "tensorflow.matmul", "tensorflow.train.AdadeltaOptimizer", "tensorflow.train.GradientDescentOptimizer", "numpy.array", "tensorflow.nn.embedding_lookup", "tensorflow.nn.bias_add", "tensorflow.train.linear_cosine_decay", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.contrib.rnn.LSTMCell", "tensorflow.variable_scope", "tensorflow.train.inverse_time_decay", "tensorflow.contrib.layers.optimize_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
luxrck/ML-and-DL-course-work
[ "81b691cb0b4f6f63b4178f05cf309271a420e0dc" ]
[ "base/bert.py" ]
[ "import torch\nfrom torch.autograd import Variable\nfrom torch import nn, optim\nimport torch.nn.functional as F\n\nfrom transformers import BertForSequenceClassification, BertModel\n\n\nclass BertClassification(nn.Module):\n def __init__(self, model=\"bert-base-uncased\"):\n super().__init__()\n self.bert = BertForSequenceClassification.from_pretrained(model)\n\n def forward(self, x, x_length=[]):\n output = self.bert(x)\n return output[0]\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, out_dim, pretrained_model=\"bert-base-uncased\", dropout_p=0.1):\n super().__init__()\n\n self.out_dim = out_dim\n self.bert = BertModel.from_pretrained(pretrained_model)\n\n # for p in self.bert.parameters():\n # p.requires_grad = False\n\n self.out = nn.Sequential(\n nn.Dropout(p=dropout_p),\n nn.Linear(self.bert.config.hidden_size, out_dim))\n \n def forward(self, x, x_length=[]):\n x_input = x[:,0,:]\n x_attn = x[:,1,:]\n x, _ = self.bert(x_input, attention_mask=x_attn)\n # out_pooled = outputs[0][:,0]\n x = x[:, 0]\n x = self.out(x)\n return x\n \n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WYVERN2742/pl_curves
[ "4688b3132318e28e51c9a80e32e0452f4caf972e" ]
[ "tests/test_calculate_cumulative_prop_trf.py" ]
[ "#!/usr/bin/env python3\nfrom pl_curve import calculate_cumulative_relative_abundance\nfrom pl_curve import remove_cumulative_abundance_over_one\nfrom pl_curve import calculate_cumulative_prop_trf\nimport pandas\nimport numpy as np\n\n\ndef test_calculate_cumulative_prop_trf():\n\n data1 = np.array([['', 'Step I'],\n ['219', 0.239709],\n ['218', 0.190986]])\n\n data2 = np.array([['', 'Step II'],\n ['219', 0.434289],\n ['218', 0.193835]])\n\n # create a dataframe, each sub list is one row\n df1 = pandas.DataFrame(data=data1[1:, 1:], index=data1[1:, 0],\n columns=data1[0, 1:]).astype(np.dtype(np.float64))\n\n df2 = pandas.DataFrame(data=data2[1:, 1:], index=data2[1:, 0],\n columns=data2[0, 1:]).astype(np.dtype(np.float64))\n\n samples = [df1, df2]\n\n samples = calculate_cumulative_relative_abundance(samples)\n samples = remove_cumulative_abundance_over_one(samples)\n result = calculate_cumulative_prop_trf(samples)\n\n df1_res = result[0]\n df2_res = result[1]\n\n assert 'Cum Prop TRFs' in df1_res.columns\n assert 'Cum Prop TRFs' in df2_res.columns\n\n assert df1_res.loc['219', 'Cum Prop TRFs'] == 0.5\n assert df1_res.loc['218', 'Cum Prop TRFs'] == 1.0\n\n assert df2_res.loc['219', 'Cum Prop TRFs'] == 0.5\n assert df2_res.loc['218', 'Cum Prop TRFs'] == 1.0\n" ]
[ [ "numpy.array", "numpy.dtype", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
vibhavk/DrishtiShakti
[ "55f5c4ae48575ae58b8e4a4dde1d85be3d27a1f1" ]
[ "video_demo.py" ]
[ "from __future__ import division\nimport time\nimport torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2 \nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random \nimport pickle as pkl\nimport argparse\n\n\ndef get_test_input(input_dim, CUDA):\n img = cv2.imread(\"dog-cycle-car.png\")\n img = cv2.resize(img, (input_dim, input_dim)) \n img_ = img[:,:,::-1].transpose((2,0,1))\n img_ = img_[np.newaxis,:,:,:]/255.0\n img_ = torch.from_numpy(img_).float()\n img_ = Variable(img_)\n \n if CUDA:\n img_ = img_.cuda()\n \n return img_\n\ndef prep_image(img, inp_dim):\n \"\"\"\n Prepare image for inputting to the neural network. \n \n Returns a Variable \n \"\"\"\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef write(x, img):\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n return img\n\ndef arg_parse():\n \"\"\"\n Parse arguements to the detect module\n \n \"\"\"\n \n \n parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')\n \n parser.add_argument(\"--video\", dest = 'video', help = \n \"Video to run detection upon\",\n default = \"video.avi\", type = str)\n parser.add_argument(\"--dataset\", dest = \"dataset\", help = \"Dataset on which the network has been trained\", default = \"pascal\")\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.5)\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n parser.add_argument(\"--cfg\", dest = 'cfgfile', help = \n \"Config file\",\n default = \"cfg/yolov3.cfg\", type = str)\n parser.add_argument(\"--weights\", dest = 'weightsfile', help = \n \"weightsfile\",\n default = \"yolov3.weights\", type = str)\n parser.add_argument(\"--reso\", dest = 'reso', help = \n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"416\", type = str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = arg_parse()\n confidence = float(args.confidence)\n nms_thesh = float(args.nms_thresh)\n start = 0\n\n CUDA = torch.cuda.is_available()\n\n num_classes = 80\n\n CUDA = torch.cuda.is_available()\n \n bbox_attrs = 5 + num_classes\n \n print(\"Loading network.....\")\n model = Darknet(args.cfgfile)\n model.load_weights(args.weightsfile)\n print(\"Network successfully loaded\")\n\n model.net_info[\"height\"] = args.reso\n inp_dim = int(model.net_info[\"height\"])\n assert inp_dim % 32 == 0 \n assert inp_dim > 32\n\n if CUDA:\n model.cuda()\n \n model(get_test_input(inp_dim, CUDA), CUDA)\n\n model.eval()\n \n videofile = args.video\n \n cap = cv2.VideoCapture(videofile)\n \n assert cap.isOpened(), 'Cannot capture source'\n \n frames = 0\n start = time.time() \n while cap.isOpened():\n \n ret, frame = cap.read()\n if ret:\n \n\n img, orig_im, dim = prep_image(frame, inp_dim)\n \n im_dim = torch.FloatTensor(dim).repeat(1,2) \n \n \n if CUDA:\n im_dim = im_dim.cuda()\n img = img.cuda()\n \n \n output = model(Variable(img, volatile = True), CUDA)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n if type(output) == int:\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \n \n\n \n \n output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim))\n \n im_dim = im_dim.repeat(output.size(0), 1)/inp_dim\n output[:,1:5] *= im_dim\n \n classes = load_classes('data/coco.names')\n colors = pkl.load(open(\"pallete\", \"rb\"))\n \n list(map(lambda x: write(x, orig_im), output))\n \n \n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n \n else:\n break\n \n\n \n \n\n" ]
[ [ "torch.FloatTensor", "torch.from_numpy", "torch.cuda.is_available", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Sebashndz/Divisa
[ "928f634adffa09e3d14a3b8914ab716dc8e6aa92" ]
[ "Divisa_Functions.py" ]
[ "\n#Librerias para webservice_request(nit)\nfrom zeep import Client\nimport os\nimport codecs\n\n#Funciones para lectura de XML e\n#inserción del mismo en BD\n\nimport logging\nimport pyodbc\n\nimport pandas as pd\nimport numpy as np\n#xml\nfrom collections import OrderedDict\nimport xml.etree.ElementTree as ET\nimport collections\nfrom lxml import etree\n#Permite extraer la fecha para colombia\nfrom datetime import datetime\nfrom pytz import timezone\n#Sistema Operativo\nimport os\nimport glob\n\n\ndef validate_route():\n\t\"\"\" Valida las rutas de almacenamoento de los archivos \"\"\"\n\n\tDirectorio = os.getcwd() + \"\\\\\"\n\t#PathCarpetaConsultas = Directorio +\"Consultas\\\\\"\n\tPathCarpetaConsultas = \"/var/www/html/flask/Consultas/\"\n\t#PathCarpetaConsultas = \"C:\\\\Python_Flask\\\\envDivisa\\\\Consultas\\\\\"\n\t#PathCarpetaResultados = Directorio +\"Resultados\\\\\"\n\t#PathCarpetaResultados = \"C:\\\\Python_Flask\\\\envDivisa\\\\Resultados\\\\\"\n\n\tif not os.path.isdir(PathCarpetaConsultas):\n\t\t\tos.mkdir(PathCarpetaConsultas)\n \n\t#if not os.path.isdir(PathCarpetaResultados):\n\t#\t\tos.mkdir(PathCarpetaResultados)\n\treturn True\n\n\ndef webservice_request(nit):\n\t\"\"\" Realiza petición al web service de Informa Colombia \"\"\"\n\tvalidate_route()\n\tDirectorio = os.getcwd() + \"\\\\\"\n\t#PathCarpetaConsultas = Directorio +\"Consultas\\\\\"\n\tPathCarpetaConsultas = \"/var/www/html/flask/Consultas/\"\n\n\t#PathCarpetaConsultas = \"C:\\\\Python_Flask\\\\envDivisa\\\\Consultas\\\\\"\n\t\n\t#Directorio = os.getcwd() + \"\\\\\"\n\t#print(\"###### LA RUTA ES: ##### \"+Directorio)\n\n\t#PathCarpetaConsultas = Directorio +\"Consultas\\\\\"\n\n\n\twsdl = \"https://www.informacolombia.com/InformaIntWeb/services/ProductoXML?wsdl\"\n\tclient = Client(wsdl)\n\n\tstringxml = \"\"\"\n\t\t<PETICION_PRODUCTO>\n\t\t<IDENTIFICACION>\n\t\t<USUARIO>C16134</USUARIO>\n\t <PASSWORD>Infor48925</PASSWORD>\n\t\t</IDENTIFICACION>\n\t\t<USUARIO_ORIGEN>Equipo_BI</USUARIO_ORIGEN>\n\t\t<PRODUCTO>\n\t\t<NOMBRE>INFORME_FINANCIERO_INTERNACIONAL_XML</NOMBRE>\n\t\t<IDENTIFICADOR>{}</IDENTIFICADOR>\n\t\t<IDIOMA>01</IDIOMA>\n\t\t</PRODUCTO>\n\t\t<PROVINCIA>11</PROVINCIA>\n\t\t<LOCALIDAD>BOGOTA</LOCALIDAD>\n\t\t<CODPOSTAL>0511</CODPOSTAL>\n\t\t</PETICION_PRODUCTO>\"\"\".format(nit)\n\n\tresponse = client.service.obtenerProductoXMLUncoded(stringxml)\n\tprint(response)\n\n\tprint(\"copiando el archivo-----------\")\n\n\tif not os.path.exists(PathCarpetaConsultas):\n\t\t\tos.makedirs(PathCarpetaConsultas)\n\t \n\n\twith codecs.open(PathCarpetaConsultas + nit + \".xml\", 'w', encoding='latin-1') as f:\n\t\t\tf.write(response)\n\treturn True\n\n\ndef connectionDB():\n\t\"\"\" Conexión a la BD \"\"\"\n\tconn = pyodbc.connect(\n 'DRIVER=FreeTDS;SERVER=instancia-divisa.cdjlf4mo9nan.sa-east-1.rds.amazonaws.com;PORT=1433;DATABASE=Divisa_Copia_2019_11-14;UID=admin;PWD=admindivisa;')\n\tcursor = conn.cursor()\n\t\n\treturn (conn,cursor)\n\n################\n#Funciones Base #\n################\ndef Validar_Formato_Tabla(df, DicFormatoEntrada):\n\t\"\"\"Funcion que recibe un dataframe y un diccionario, \n\t compara el df contra el diccionario y lo acomoda a ese formato, \n\t si algun campo del df no existe lo crea con null o si le sobra lo elimina para que los \n\t campos tengan estrictamente la estructura de entrada \n\t\"\"\"\n\tdfOrdenado = pd.DataFrame(columns=DicFormatoEntrada)\n\tdf = Append(df1=dfOrdenado, df2=df)\n\tdf = df[dfOrdenado.columns]\n\tdf = df.astype(object).where(pd.notnull(df),None)\n\treturn 
df\n\ndef ConsultaElemento(root,PathElemento):\n\t\"\"\" Extrae todos los elementos dentro del nivel espefifico \"\"\"\n\t#PathCarpetaConsultas = \"C:\\\\Python_Flask\\\\envDivisa\\\\Consultas\\\\\"\n\t#PathXml = PathCarpetaConsultas +\"8600259002.xml\"\n\t#tree = ET.parse(PathXml)\n\t#root = tree.getroot()\n \n\n\tfor Elemento in root.findall(PathElemento):\n\t\tElementoTexto = Elemento.text\n\treturn (ElementoTexto)\n\n\ndef Extraer_Label(PathEtiqueta):\t\n\t\"\"\" extrae la etiqueta que será utilizada para guardar el archivo csv \"\"\"\n\ttemporal_label = PathEtiqueta.split('/')\n\ttemporal_label.reverse()\n\treturn temporal_label[0]\n\ndef Extraer_Dataframe(Directorio,tree,PathDataFrame):\n\t\"\"\" Extrae dataframe en csv. NO se debe agregar \"/\" al final del path \"\"\"\n\t\n\ttags = []\n\toutput = []\n\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\ttags.append(root.tag)\n\n\ttag = OrderedDict((x, 1) for x in tags).keys()\n\tdf = pd.DataFrame(columns=tag)\n\n\tfor root in tree.findall(PathDataFrame):\n\t\tdata = list(root)\n\t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n\t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n\t\tdf = df.append(df_table, ignore_index = True, sort=False)\n\n\tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n\treturn df\n\ndef Extraer_Dataframe_Atributos(Directorio,tree,PathDataFrame,Atributo):\n\t\"\"\"Extrae toda la informacion que la funcion Extraer_Dataframe, \n\t pero además al pasarle la lista de atributos, extrae los resultados de los mismos\n\t\"\"\"\n\n\t#Directorio=\"C:\\\\Python_Flask\\\\envDivisa\\\\\"\n\ttags = []\n\tdicAtributo = {}\n\td1 = collections.OrderedDict()\n\toutput = []\n\tdf1 = pd.DataFrame()\n\n\tfor item in Atributo:\n\t\ttags.append(item)\n\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\ttags.append(root.tag)\n\n\ttag = OrderedDict((x, 1) for x in tags).keys()\n\tdf = pd.DataFrame(columns=tag)\n\n\tfor root in tree.findall(PathDataFrame):\n\t\tdata = list(root)\n\t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n\n\t\tfor item in Atributo:\n\t\t\tatributo = root.get(item)\n\t\t\tif atributo != None:\n\t\t\t\tdicAtributo[item] = atributo\n\t\t\t\td1.update(dicAtributo)\n\t\t\t\tdfatributo = pd.DataFrame(data=dicAtributo, columns = [item], index=[\"1\"])\n\t\t\t\tdicAtributo = {}\n\t\t\t\tdf1 = pd.concat([df1,dfatributo], axis=1)\n\n\t\tlistado.update(d1)\n\t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n\t\tdf = df.append(df_table, ignore_index = True, sort=False)\n\n\t\tdf1 = pd.DataFrame()\n\n\tdf.reset_index(drop=True, inplace=True)\n\tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n\n\treturn df\n\ndef Extraer_Dataframe_Atributos_Iterativo(PathDataFrame,Atributo):\n\t\"\"\" Extrae toda la informacion que la funcion Extraer_Dataframe_Atributos, \n\t\tpero pero en vez de traer valores extrae de cada child los mismos atributos de la clase madre\"\"\"\n\n\tdf1 = Extraer_Dataframe_Atributos(Directorio,PathDataFrame,Atributo)\n\tdf1.dropna(axis='columns', how='all', inplace=True)\n\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\tPathchild= PathDataFrame + \"/\"+ root.tag\n\t\tdf2 = Extraer_Dataframe_Atributos(Directorio,Pathchild, Atributo)\n\t\tdf2.dropna(axis='columns', how='all', inplace=True)\n\t\tdf2.columns += \"/\"+ root.tag\n\t\tdf1 = Concatenar(df1=df1,df2=df2)\n\t\tdf1.columns += \"/\"+ Extraer_Label(PathDataFrame)\n\n\treturn df1\n\ndef 
Extraer_Dataframe_Todos(PathRaiz):\n\t\"\"\" Extrae todos los dataframes dentro de un directorio y los guarda con csv\n\t\tNOTA: busca además los campos nulos asumiendo que son subdominios y extrae tambien esos dataframes\n\t\tNOTA: NO ESTA FUNCIONANDO PORQUE RETORNA MULTIPLES DATAFRAMES\"\"\"\n\n\tListado_Col = pd.DataFrame(Extraer_Dataframe(Directorio,tree,PathRaiz))\n\tdf = pd.DataFrame()\n\tfor col in Listado_Col:\n\t\tif Listado_Col[col].isnull().any():\n\t\t\truta = PathRaiz +\"/\"+ str(col)\n\t\t\tdf = Extraer_Dataframe(Directorio,tree,ruta)\n\n\treturn True\n\n\ndef Extraer_Dataframe_iteracion(PathDataFrame,NombreTag):\n\t\"\"\"\" Función para extraer un dataframe con los resultados de una ruta que contiene el mismo nombre para todos los elementos\n\t\t por lo tanto se tiene que iterar la respuesta\"\"\"\n\n\ttags = {}\n\toutput = []\n\tdf = pd.DataFrame(columns = [NombreTag])\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\tfor neighbor in root.iter(NombreTag):\n\t\t\ttags[NombreTag] = neighbor.text\n\t\t\ttagsdf = pd.DataFrame(list(tags.items()))\n\t\t\tdf = df.append(tags, ignore_index = True, sort=False)\n\n\tdf.columns += \"/\"+ Extraer_Label(PathDataFrame)\n\tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n\treturn df\n\ndef Extraer_Dataframe_Dic(Directorio,tree,root,PathDataFrame, Dic):\n\t\"\"\" Extrae un dataframe y seleciona solo las columnas asignadas en el diccionario \"\"\"\n\ttags = []\n\toutput = []\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\ttags.append(root.tag)\n\n\ttag = OrderedDict((x, 1) for x in tags).keys()\n\tdf = pd.DataFrame(columns=tag)\n\n\tfor root in tree.findall(PathDataFrame):\n\t\tdata = list(root)\n\t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n\t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n\t\tdf = df.append(df_table, ignore_index = True, sort=False)\n\t\tdf\n\n\tdf = df.filter(items=Dic)\n\tdf\n\tdf.columns += \"/\"+ Extraer_Label(PathDataFrame)\n\tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n\treturn df\n\n\ndef Extraer_Dataframe_Dic_Atrib(PathDataFrame, Dic, Atributo):\n\t\"\"\" Extrae un dataframe y seleciona solo las columnas asignadas en el diccionario, para el atributo indicado\"\"\"\n\t#Directorio=\"C:\\\\Python_Flask\\\\envDivisa\\\\\"\n\tDirectorio = os.getcwd() + \"\\\\\"\n\ttags = []\n\toutput = []\n\n\tfor root in tree.findall(PathDataFrame + \"/\"):\n\t\tAtrib = root.get(Atributo)\n\t\tprint(\"Depuracion.......\")\n\t\tprint(\"atrib: \", root.get(Atributo))\n\t\tif Atrib == \"1\":\n\t\t\ttags.append(root.tag)\n\n\ttag = OrderedDict((x, 1) for x in tags).keys()\n\tdf = pd.DataFrame(columns=tag)\n\n\tfor root in tree.findall(PathDataFrame):\n\t\tAtrib = root.get(Atributo)\n\t\tprint(Atrib)\n\t\tif Atrib == \"1\":\n\t\t\tdata = list(root)\n\t\t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n\t\t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n\t\t\tdf = df.append(df_table, ignore_index = True, sort=False)\n\t\t\tdf\n\n\tdf = df.filter(items=Dic)\n\tdf.columns += \"/\"+ Extraer_Label(PathDataFrame)\n\tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n\treturn df\n\ndef Completar_Espacios(df):\n\t\"\"\" Rellena los espacios con el dato anterior \"\"\"\n\tdf = df.fillna(method='ffill')\n\treturn df\n\n#para ejecutar usar el siguiente metodo-> Concatenar(df1=nombredf1, df2=nombredf2, ... 
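# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): the two steps that
# Extraer_Dataframe_Dic and Completar_Espacios perform -- keep only the
# whitelisted columns, then forward-fill gaps. df.ffill() is the current
# spelling of fillna(method='ffill'), which newer pandas versions deprecate.
import pandas as pd

raw = pd.DataFrame({"PERIODO": ["2019", None, "2020"],
                    "FUENTE": ["DIAN", None, None],
                    "NO_USADA": [1, 2, 3]})
subset = raw.filter(items=["PERIODO", "FUENTE"]).ffill()
print(subset)
# ----------------------------------------------------------------------------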
, dfn=nombredfn)\ndef Concatenar(**kwargs):\n\t\"\"\" Recibe los nombres de los dataframes que se quieren concatenar \"\"\"\n\tdfs = list(kwargs.values())\n\tmergedf = pd.concat(dfs, sort=False, axis=1)\n\treturn mergedf\n\n#para ejecutar usar el siguiente metodo-> Append(df1=nombredf1, df2=nombredf2, ... , dfn=nombredfn)\ndef Append(**kwargs):\n\t\"\"\" Recibe los nombres de los dataframes que se quieren añadir uno bajo el otro\"\"\"\n\n\tappenddf = pd.DataFrame()\n\tdfs = list(kwargs.values())\n\tappenddf = appenddf.append(dfs, sort=False)\n\tappenddf.reset_index(drop=True, inplace=True)\n\tappenddf = appenddf.astype(object).where(pd.notnull(appenddf),None)\n\treturn appenddf\n\ndef Eliminar_Columnas(df, Dic):\n\t\"\"\" Recibe el dataframe y el listado de las columnas que se quieren eliminar\n \t\"\"\"\n\tdf = df.drop(columns=Dic)\n\treturn df\n\ndef Convertir_Numeros(df, Dic):\n\t\"\"\" Limpia los numeros del dataframe \"\"\"\n\n\tdf = pd.DataFrame(df)\n\tfor item in Dic:\n\t\t#print(\"LISTADO:\", item)\n\t\tdf[item] = df[item].str.replace('.','')\n\t\tdf[item] = df[item].str.replace(',','.', regex=True)\n\t\tdf[item] = df[item].astype(float)\n\treturn df\n\ndef Extraer_Dataframe_Evolucion(PathDataFrame, Atributo):\n\t\"\"\" extraer un df con la evolucion anual, el atributo \"YEAR\" es un parametro de la etiqueta\n \t\"\"\"\n\tcol1 = Extraer_Label(PathDataFrame)\n\tcol2 = col1 + \"_\" + Atributo\n\tDic = [col1, col2]\n\t\n\tdf = pd.DataFrame(columns=Dic)\n\tfor root in tree.findall(PathDataFrame):\n\t\tAnnoActual = datetime.date.today().year\n\t\tAnno = root.get(Atributo)\n\t\t#print(int(Anno))\n\t\t\n\t\tif int(Anno)>=(AnnoActual-3):\t\n\t\t\tlista = [(root.text, Anno)]\n\t\t\tdf1 = pd.DataFrame(lista, columns = Dic, index=[\"1\"])\n\t\t\tdf = df.append(df1, ignore_index = True, sort=False)\n #df.columns += \"/\"+ Extraer_Label(PathDataFrame)\n\treturn\tdf\n\ndef Combinar_Registros(df, PathDataFrame):\n\t\"\"\" Combina todos los registros del dataframe en una sola columna con el nombre del path\"\"\"\n\n\tnombrecol = str(Extraer_Label(PathDataFrame))\n\tdf = df.astype(str)\n\tdf2 = pd.DataFrame(columns=[nombrecol])\n\tlistcol = df.columns.values.tolist()\n\tdf2[nombrecol] = df[listcol].apply(lambda x: ' '.join(x), axis = 1)\n\treturn df2\n\ndef Extraer_Dataframe_Evolucion_sin_Atrib(PathDataFrame):\n\n\tcol1 = Extraer_Label(PathDataFrame)\n\tDic = [col1]\n\t\n\tdf = pd.DataFrame(columns=Dic)\n\tfor root in tree.findall(PathDataFrame):\n\t\tlista = [(root.text)]\n\t\tdf1 = pd.DataFrame(lista, columns = Dic, index=[\"1\"])\n\t\tdf = df.append(df1, ignore_index = True, sort=False)\n\t#print(\"DEPURACION FUNCION EVOLSIN ATRIBUTO ##########################################\")\n\t#print(df)\n\n\treturn\tdf\n\ndef Extraer_Dataframe_Subtipo(PathInfoFinan):\n df = pd.DataFrame(columns=['SUBTIPO','TIPO'])\n for content in tree.findall(PathInfoFinan+\"/\"):\n listado = {'SUBTIPO': content.tag, 'TIPO': content.get(\"SUBTIPO\")}\n df1 = pd.DataFrame(listado, columns = ['SUBTIPO','TIPO'], index=[\"1\"])\n #print(df)\n df = pd.concat([df,df1]).drop_duplicates().reset_index(drop=True)\n df.columns += \"/\"+ Extraer_Label(PathInfoFinan)\n return df\n\ndef Extraer_Dataframe_ActivoCorriente(PathActivoCorriente):\n temp = []\n dictlist = [] \n df = pd.DataFrame()\n for content in tree.findall(PathActivoCorriente+\"/\"):\n nombres = content.tag\n #print(nombres)\n listado = content.attrib\n #print(listado)\n for key, value in listado.items():\n temp = [key,value]\n dictlist.append(temp)\n 
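# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): the number
# normalisation that Convertir_Numeros aims for -- "1.234,56" (thousands '.'
# and decimal ',') becomes 1234.56. regex=False is passed explicitly because,
# with the regex default of older pandas, replacing '.' would treat it as the
# regex wildcard and strip every character.
import pandas as pd

def clean_decimal_columns(df, columns):
    out = df.copy()
    for col in columns:
        out[col] = (out[col].str.replace(".", "", regex=False)
                            .str.replace(",", ".", regex=False)
                            .astype(float))
    return out

print(clean_decimal_columns(pd.DataFrame({"IMPORTE": ["1.234,56", "10,00"]}),
                            ["IMPORTE"]))
# ----------------------------------------------------------------------------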
#print(\"listado\")\n #print(dictlist)\n \n #df = pd.DataFrame(listado, columns = [content.tag])\n\n df1 = pd.DataFrame(columns=[content.tag + \"_VALOR\", content.tag], data=dictlist)\n dictlist = []\n df = Concatenar(df0=df, df1=df1)\n df.columns += \"/\"+ Extraer_Label(PathActivoCorriente)\n\n return df\n\ndef Guardar_csv(df, PathCarpeta, NombreArchivo):\n df = df\n #if not os.path.exists(PathCarpetaResultados):\n # os.makedirs(PathCarpetaResultados)\n # print(\"Carpeta Creada\")\n\n #Dataframe que se debe rellenar con la ultima fila\n df.to_csv(PathCarpeta + NombreArchivo, index = False, encoding='utf-8-sig')\n df\n return print(\"Archivo creado \", PathCarpeta + NombreArchivo)\n\ndef Extraer_Dataframe_1Atributo(tree,PathDataFrame, Atributo):\n \"\"\" Extraer de una ruta, todos los nombres y los valores de 1 atributo y crear\n\t\tun dataframe con estos datos en columnas y filas respectivamente \n \t\"\"\"\n Diccionario = {}\n columnas = []\n for root in tree.findall(PathDataFrame+\"/\"):\n \n Atrib = root.get(Atributo)\n if Atrib != None:\n columnas.append(root.tag)\n #print(columnas)\n Diccionario[root.tag] = Atrib\n df = pd.DataFrame(Diccionario, index=[0])\n df.columns += \"/\"+ Extraer_Label(PathDataFrame)\n return df\n\n\ndef Dict_to_Df_Financiero(Dictodf):\n try:\n df = pd.DataFrame([Dictodf[\"VALOR\"]], columns = [Dictodf[\"DESC\"]])\n df.reset_index(drop=True, inplace=True)\n df\n except:\n df = pd.DataFrame([np.nan], columns=[\"SIN ATRIBUTO\"])\n #print(\"@@@@ERROR@@@@@:Campo sin Atributo Financiero\")\n #try:\n # print(\"VALOR: \", Dictodf[\"VALOR\"])\n #except:\n # print()\n return df\n\n \ndef Financiero_Activos(Fecha_Captura,Id_Cliente,tree,PathBalancesPrio, subbalance, Id_Activo, Id_Info_Financiera):\n \"\"\"\tDescarga los dataframes de balance de activos especificos de partidas que se necesiten por ejemplo si se pasa el argumento\n \tAC entonces se extrae para cada año toda la informacion dentro de este dominio (ACC y ACL respectivamente para todos los años)\"\"\"\n Años = tree.findall(PathBalancesPrio)\n #print(\"$$$$$$$$$$$$\" + Años + \"$$$$$$$$$\")\n if Años:\n \t#print(\"lleno\")\n \tdf = pd.DataFrame()\n \ti = 0\n \tfor Año in Años:\n \t\t#print(Año)\n \t\ttags = []\n \t\ttry:\n \t\t\tdfNormaContable = pd.DataFrame([Año.attrib['NIIF']], columns=[\"NIIF\"])\n \t\texcept:\n \t\t\tdfNormaContable = pd.DataFrame([\"0\"], columns=[\"NIIF\"])\n \t\t\tprint(\"No NIFF\")\n \t\ti = i +1\n \t\tfor dato in Año:\n \t\t\ttags.append(dato.tag)\n\n \t\tdfFinanciero = pd.DataFrame(columns=tags)\n \t\tlistado = OrderedDict((dato.tag, dato.text) for dato in Año)\n \t\tdf_table = pd.DataFrame(listado, columns = tags, index=[\"1\"])\n \t\tdfFinanciero = dfFinanciero.append(df_table, ignore_index = True, sort=False)\n\n \t\ttry:\n \t\t\tdel dfFinanciero['PARTIDAS']\n \t\texcept:\n \t\t\tprint(\"Sin Columna PARTIDAS\")\n\n \t\tdfFinanciero = Concatenar(df1 = dfNormaContable, df2 = dfFinanciero)\n \t\tdfFinanciero[\"Id_Cliente\"]= Id_Cliente\n \t\tdfFinanciero[\"Fecha_Captura\"]= Fecha_Captura\n \t\tdicFinanciero = ['NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente', 'Id_Cliente', 'Fecha_Captura']\n \t\tdfFinanciero.columns = dicFinanciero\n \t\tdfFinanciero = dfFinanciero[['Id_Cliente', 'Fecha_Captura', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente']]\n \t\ttry:\n \t\t\tpartidas = Año.find(\".//PARTIDAS\")\n \t\t\tdicpartidas = partidas[0].attrib\n \t\texcept:\n \t\t\tdicpartidas = {'DESC': np.nan, 'VALOR': np.nan}\n\n \t\tdfpartidas = 
Dict_to_Df_Financiero(dicpartidas)\n\n \t\tdfactivos = Concatenar(df1= dfFinanciero, df2=dfpartidas)\n \t\ttry:\n \t\t\tfor partida in partidas.findall('.//' + subbalance):\n \t\t\t\tdicpartida = partida[0].attrib\n \t\t\t\tdfpartida = Dict_to_Df_Financiero(dicpartida)\n \t\t\t\tfor child in partida.findall('./'):\n \t\t\t\t\tdicchild = partida.find(child.tag).attrib\n \t\t\t\t\tdfchild = Dict_to_Df_Financiero(dicchild)\n \t\t\t\t\tdfchild.columns += \"/\" + child.tag\n \t\t\t\t\tdfactivos = Concatenar(df1=dfactivos, df2=dfchild)\n \t\t\t\t\tfor part in child:\n \t\t\t\t\t\tdicpart = part.attrib\n \t\t\t\t\t\tdfpart = Dict_to_Df_Financiero(dicpart)\n \t\t\t\t\t\tdfpart.columns += \"/\" + child.tag\n \t\t\t\t\t\tdfpart\n \t\t\t\t\t\tdfactivos = Concatenar(df1=dfactivos, df2=dfpart)\n \t\texcept:\n \t\t\tdfactivos = pd.DataFrame()\n\n \t\tif i == 1:\n \t\t\tdf1 = dfactivos\n \t\t\t#print(\"depurando fi=1\")\n \t\t\tprint(list(df1.columns))\n \t\telse:\n \t\t\t#print(\"depurando fi=2\")\n \t\t\t#print(list(df1.columns))\n \t\t\t#print(list(dfactivos.columns))\n \t\t\tdicdf = ['Id_Cliente', 'Fecha_Captura', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente', 'TOTAL ACTIVO', 'TOTAL ACTIVO CORRIENTE/ACC', 'CUENTAS POR COBRAR - DEUDORES/ACC', 'INVENTARIOS/ACC', 'OTROS ACTIVOS/ACC', 'OTROS ACTIVOS NO FINANCIEROS/ACC', 'ACTIVOS POR IMPUESTOS CORRIENTES/ACC', 'EFECTIVO Y EQUIVALENTES AL EFECTIVO/ACC', 'SIN ATRIBUTO/ACC', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACC', 'TOTAL ACTIVO NO CORRIENTE/ACL', 'INVERSIONES/ACL', 'INVERSIONES CONTABILIZADAS UTILIZANDO EL/ACL', 'INVERSIONES EN SUBSIDIARIAS, NEGOCIOS CO/ACL', 'PROPIEDADES PLANTA Y EQUIPO/ACL', 'PROPIEDAD DE INVERSIÓN/ACL', 'ACTIVOS INTANGIBLES DISTINTOS DE LA PLUS/ACL', 'INVERSIONES NO CORRIENTES/ACL', 'CUENTAS POR COBRAR NO CORRIENTES/ACL', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACL']\n\n \t\t\ttry:\n \t\t\t\tdfactivos = Validar_Formato_Tabla(dfactivos, dicdf)\n \t\t\t\tdf1 = df1.set_index('Fecha_Efecto').combine_first(dfactivos.set_index('Fecha_Efecto')).reset_index()\n \t\t\texcept:\n \t\t\t\tdfactivos = pd.DataFrame(columns=dicdf)\n \t\t\t\tdfactivos = Validar_Formato_Tabla(dfactivos, dicdf)\n \t\t\t\tprint(\"error en df Activo\")\n\n \t\tdf = Append(df1=df, df2=dfFinanciero)\n \t\tdf1[\"Id_Activo\"] = Id_Activo\n \t\tdf1[\"Id_Info_Financiera\"] = Id_Info_Financiera\n \t\tDicActivosOrdenado = ['Id_Info_Financiera', 'Fecha_Efecto', 'TOTAL ACTIVO', 'TOTAL ACTIVO CORRIENTE/ACC', 'CUENTAS POR COBRAR - DEUDORES/ACC', 'INVENTARIOS/ACC', 'DIFERIDOS/ACC', 'GASTOS PAGADOS POR ANTICIPADO/ACC', 'OTROS ACTIVOS/ACC', 'OTROS ACTIVOS FINANCIEROS/ACC', 'OTROS ACTIVOS NO FINANCIEROS/ACC', 'ACTIVOS POR IMPUESTOS CORRIENTES/ACC', 'ACTIVOS CLASIFICADOS COMO MANTENIDOS PAR/ACC', 'EFECTIVO Y EQUIVALENTES AL EFECTIVO/ACC', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACC', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACC', 'TOTAL ACTIVO NO CORRIENTE/ACL', 'INVERSIONES/ACL', 'INVERSIONES EN SUBSIDIARIAS, NEGOCIOS CO/ACL', 'INVERSIONES CONTABILIZADAS UTILIZANDO EL/ACL', 'PROPIEDADES PLANTA Y EQUIPO/ACL', 'DIFERIDOS/ACL', 'GASTOS PAGADOS POR ANTICIPADO/ACL', 'OTROS ACTIVOS/ACL', 'PROPIEDAD DE INVERSIÓN/ACL', 'PLUSVALÍA/ACL', 'ACTIVOS INTANGIBLES DISTINTOS DE LA PLUS/ACL', 'ACTIVOS POR IMPUESTOS DIFERIDOS/ACL', 'INVERSIONES NO CORRIENTES/ACL', 'CUENTAS POR COBRAR NO CORRIENTES/ACL', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACL', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACL', 'OTROS ACTIVOS NO FINANCIEROS/ACL', 'OTROS ACTIVOS FINANCIEROS/ACL', 
'Fecha_Captura']\n \t\tdf1 = Validar_Formato_Tabla(df1,DicActivosOrdenado)\n \treturn df1\n\n else:\n \tDicActivosOrdenado = ['Id_Info_Financiera', 'Fecha_Efecto', 'TOTAL ACTIVO', 'TOTAL ACTIVO CORRIENTE/ACC', 'CUENTAS POR COBRAR - DEUDORES/ACC', 'INVENTARIOS/ACC', 'DIFERIDOS/ACC', 'GASTOS PAGADOS POR ANTICIPADO/ACC', 'OTROS ACTIVOS/ACC', 'OTROS ACTIVOS FINANCIEROS/ACC', 'OTROS ACTIVOS NO FINANCIEROS/ACC', 'ACTIVOS POR IMPUESTOS CORRIENTES/ACC', 'ACTIVOS CLASIFICADOS COMO MANTENIDOS PAR/ACC', 'EFECTIVO Y EQUIVALENTES AL EFECTIVO/ACC', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACC', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACC', 'TOTAL ACTIVO NO CORRIENTE/ACL', 'INVERSIONES/ACL', 'INVERSIONES EN SUBSIDIARIAS, NEGOCIOS CO/ACL', 'INVERSIONES CONTABILIZADAS UTILIZANDO EL/ACL', 'PROPIEDADES PLANTA Y EQUIPO/ACL', 'DIFERIDOS/ACL', 'GASTOS PAGADOS POR ANTICIPADO/ACL', 'OTROS ACTIVOS/ACL', 'PROPIEDAD DE INVERSIÓN/ACL', 'PLUSVALÍA/ACL', 'ACTIVOS INTANGIBLES DISTINTOS DE LA PLUS/ACL', 'ACTIVOS POR IMPUESTOS DIFERIDOS/ACL', 'INVERSIONES NO CORRIENTES/ACL', 'CUENTAS POR COBRAR NO CORRIENTES/ACL', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACL', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACL', 'OTROS ACTIVOS NO FINANCIEROS/ACL', 'OTROS ACTIVOS FINANCIEROS/ACL', 'Fecha_Captura']\n \treturn pd.DataFrame(columns=DicActivosOrdenado)\n\ndef Financiero_Pasivos_Patrimonio(tree,Id_Cliente,Fecha_Captura,PathBalancesPrio, subbalance, Id_PasivoPatrimonio, Id_Info_Financiera):\n Años = tree.findall(PathBalancesPrio)\n try:\n df = pd.DataFrame()\n i = 0\n for Año in Años:\n tags = []\n #print ('_________________________Prioritario Año:', Año.attrib['EJERCICIO'], \"NIIF: \", Año.attrib['NIIF'])\n try:\n dfNormaContable = pd.DataFrame([Año.attrib['NIIF']], columns=[\"NIIF\"])\n except:\n dfNormaContable = pd.DataFrame([\"0\"], columns=[\"NIIF\"])\n i = i +1\n for dato in Año:\n tags.append(dato.tag)\n\n dfFinanciero = pd.DataFrame(columns=tags)\n\n listado = OrderedDict((dato.tag, dato.text) for dato in Año)\n df_table = pd.DataFrame(listado, columns = tags, index=[\"1\"])\n dfFinanciero = dfFinanciero.append(df_table, ignore_index = True, sort=False)\n del dfFinanciero['PARTIDAS']\n\n dfFinanciero = Concatenar(df1 = dfNormaContable, df2 = dfFinanciero)\n dfFinanciero[\"Id_Cliente\"]= Id_Cliente\n dfFinanciero[\"Fecha_Captura\"]= Fecha_Captura\n dicFinanciero = ['NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente', 'Id_Cliente', 'Fecha_Captura']\n dfFinanciero.columns = dicFinanciero\n dfFinanciero = dfFinanciero[['Id_Cliente', 'Fecha_Captura', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente']]\n\n partidas = Año.find(\".//PARTIDAS\")\n dicpartidas = partidas[0].attrib\n dfpartidas = Dict_to_Df_Financiero(dicpartidas)\n dfactivos = Concatenar(df1= dfFinanciero, df2=dfpartidas)\n for partida in partidas.findall('.//' + subbalance):\n #print(\"Nivel 1________________\")\n dicpartida = partida.attrib\n #PASIVO y PATRIMONIO\n dicpartida = partidas.find(partida.tag).attrib\n dfpartida = Dict_to_Df_Financiero(dicpartida)\n #print(dfpartida) \n dfactivos = Concatenar(df1=dfactivos, df2=dfpartida)\n for child in partida.findall('./'):\n #print(\"Nivel 2___________\")\n #print(child.tag)\n dicchild = partida.find(child.tag).attrib\n #print(child.tag)\n dfchild = Dict_to_Df_Financiero(dicchild)\n dfchild.columns += \"/\" + child.tag\n #print(dfchild)\n dfactivos = Concatenar(df1=dfactivos, df2=dfchild)\n \n for childsub in child.findall('./'):\n #print(\"Nivel 3 ___________________\")\n 
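# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): the nested
# "Nivel 1/2/3/4" loops in Financiero_Activos and Financiero_Pasivos_Patrimonio
# flatten a tree of PARTIDAS into one wide row per year. A recursive version of
# the same idea on an invented mini balance: every node carrying DESC/VALOR
# attributes contributes one column, named here after its tag path.
import xml.etree.ElementTree as ET
import pandas as pd

SAMPLE = """<PARTIDA DESC="PASIVO + PATRIMONIO" VALOR="300">
  <PS DESC="PASIVO" VALOR="120"><PSC DESC="PASIVO A CORTO PLAZO" VALOR="80"/></PS>
  <PT DESC="PATRIMONIO" VALOR="180"/>
</PARTIDA>"""

def flatten(node, path=""):
    cols = {}
    if "DESC" in node.attrib and "VALOR" in node.attrib:
        cols[(path + "/" + node.tag).lstrip("/")] = node.attrib["VALOR"]
    for child in node:
        cols.update(flatten(child, path + "/" + node.tag))
    return cols

print(pd.DataFrame([flatten(ET.fromstring(SAMPLE))]))
# ----------------------------------------------------------------------------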
#print(\"tag\", childsub.tag)\n dicchildsub = child.find(childsub.tag).attrib\n dfchildsub = Dict_to_Df_Financiero(dicchildsub)\n if child.tag == \"PT\":\n dfchildsub.columns += \"/\" + child.tag\n else:\n dfchildsub.columns += \"/\" + child.tag +\"/\" + childsub.tag\n #print(dfchildsub)\n dfactivos = Concatenar(df1=dfactivos, df2=dfchildsub)\n #for part in childsub:\n # dicpart = part.attrib\n # dfpart = Dict_to_Df_Financiero(dicpart)\n # dfpart.columns += \"/\" + childsub.tag\n # dfpart\n for part in childsub:\n #print(\"Nivel 4 ___________________\")\n dicpart = childsub.find(part.tag).attrib\n dfpart = Dict_to_Df_Financiero(dicpart)\n dfpart.columns += \"/\" + child.tag +\"/\" + childsub.tag\n dfpart\n #print(dfpart)\n dfactivos = Concatenar(df1=dfactivos, df2=dfpart)\n #print(dfactivos)\n\n if i == 1:\n df1 = dfactivos\n\n else:\n\n df1 = df1.set_index('Fecha_Efecto').combine_first(dfactivos.set_index('Fecha_Efecto')).reset_index()\n #print(df1[\"Fecha_Efecto\"])\n df = Append(df1=df, df2=dfFinanciero)\n\n df1[\"Id_PasivoPatrimonio\"] = Id_PasivoPatrimonio\n df1[\"Id_Info_Financiera\"] = Id_Info_Financiera \n #print(df.to_string())\n return df1\n except:\n DicPasivosPatrimonioOrdenado = ['Id_PasivoPatrimonio', 'Id_Info_Financiera', 'Fecha_Efecto', 'PASIVO + PATRIMONIO', 'PASIVO/PS', 'PASIVO A CORTO PLAZO/PS/PSC', 'OBLIGACIONES FINANCIERAS/PS/PSC', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSC', 'PROVISIONES DIVERSAS/PS/PSC', 'OTROS PASIVOS FINANCIEROS/PS/PSC', 'OTROS PASIVOS NO FINANCIEROS/PS/PSC', 'CUENTAS POR PAGAR CORRIENTE/PS/PSC', 'CUENTAS COMERCIALES POR PAGAR Y OTRAS CU/PS/PSC', 'CUENTAS POR PAGAR A ENTIDADES RELACIONAD/PS/PSC', 'PASIVOS POR IMPUESTOS CORRIENTES/PS/PSC', 'PROVISIONES CORRIENTES POR BENEFICIOS A/PS/PSC', 'OTROS PASIVOS CORRIENTES/PS/PSC', 'PASIVO A LARGO PLAZO/PS/PSL', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSL', 'OTROS PASIVOS FINANCIEROS/PS/PSL', 'OTROS PASIVOS NO FINANCIEROS/PS/PSL', 'PASIVO POR IMPUESTOS DIFERIDOS/PS/PSL', 'OBLIGACIONES FINANCIEROS NO CORRIENTES/PS/PSL', 'PROVISIONES NO CORRIENTES POR BENEFICIOS/PS/PSL', 'OTRAS PROVISIONES/PS/PSL', 'OTROS PASIVOS NO CORRIENTES/PS/PSL', 'PATRIMONIO/PT', 'CAPITAL SOCIAL/PT', 'SUPERµVIT DE CAPITAL/PT', 'RESERVAS/PT', 'RESULTADO EJERCICIO/PT', 'COTIZACIONES-AUXIL./APORTES NO VINC./PT','OTROS RUBROS DEL PATRIMONIO/PT', 'ACCIONES PROPIAS EN CARTERA/PT', 'OTRO RESULTADO INTEGRAL ACUMULADO/PT', 'OTRAS PARTICIPACIONES EN EL PATRIMONIO/PT', 'PRIMA DE EMISIÓN/PT', 'GANANCIAS ACUMULADAS/PT', 'CAPITAL EMITIDO/PT', 'Fecha_Captura']\n return pd.DataFrame(columns=DicPasivosPatrimonioOrdenado)\n\ndef Financiero_Resultados(tree,Id_Cliente,Fecha_Captura,PathBalancesPrio, subbalance, Id_Result_Ejercicio, Id_Info_Financiera):\n Años = tree.findall(PathBalancesPrio)\n try:\n df = pd.DataFrame()\n i = 0\n for Año in Años:\n tags = []\n #print ('Prioritario Año____________________________________:', Año.attrib['EJERCICIO'], \"NIIF: \", Año.attrib['NIIF'])\n try:\n dfNormaContable = pd.DataFrame([Año.attrib['NIIF']], columns=[\"NIIF\"])\n except:\n dfNormaContable = pd.DataFrame([\"0\"], columns=[\"NIIF\"])\n i = i +1\n for dato in Año:\n\n tags.append(dato.tag)\n\n dfFinanciero = pd.DataFrame(columns=tags)\n\n listado = OrderedDict((dato.tag, dato.text) for dato in Año)\n df_table = pd.DataFrame(listado, columns = tags, index=[\"1\"])\n dfFinanciero = dfFinanciero.append(df_table, ignore_index = True, sort=False)\n dicFinanciero = ['FEC_CIERRE', 'DURACION', 'COD_DIVISA', 'DESC_FUENTE', 'PARTIDAS']\n dfFinanciero = 
Validar_Formato_Tabla(dfFinanciero, dicFinanciero)\n del dfFinanciero['PARTIDAS']\n\n dfFinanciero = Concatenar(df1 = dfNormaContable, df2 = dfFinanciero)\n dfFinanciero[\"Id_Cliente\"]= Id_Cliente\n dfFinanciero[\"Fecha_Captura\"]= Fecha_Captura\n dicFinanciero = ['NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente', 'Id_Cliente', 'Fecha_Captura']\n dfFinanciero.columns = dicFinanciero\n dfFinanciero = dfFinanciero[['Id_Cliente', 'Fecha_Captura', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente']]\n\n partidas = Año.find(\".//PARTIDAS\")\n dfactivos = dfFinanciero\n\n for partida in partidas.findall('.//' + subbalance):\n dicpartida = partida.attrib\n dfpartida = Dict_to_Df_Financiero(dicpartida)\n dfactivos = Concatenar(df1=dfactivos, df2=dfpartida)\n\n for child in partida:\n dicchild = child.attrib\n dfchild = Dict_to_Df_Financiero(dicchild)\n dfchild.columns += \"/\" + partida.tag\n dfactivos = Concatenar(df1=dfactivos, df2=dfchild)\n\n if i == 1:\n df1 = dfactivos\n #print(df1)\n else:\n df1 = df1.set_index('Fecha_Efecto').combine_first(dfactivos.set_index('Fecha_Efecto')).reset_index()\n\n\n #print(df1)\n df = Append(df1=df, df2=dfFinanciero) \n df1[\"Id_Result_Ejercicio\"] = Id_Result_Ejercicio\n df1[\"Id_Info_Financiera\"] = Id_Info_Financiera \n #print(df.columns)\n return df1\n except:\n DicResultadosOrdenado = ['Id_Info_Financiera', 'Fecha_Efecto', 'RESULTADO DEL EJERCICIO', 'RESULTADO ANTES DE IMPUESTOS/R', 'RESULTADOS OPERACIONALES/R', 'TOTAL GASTOS/R', 'COSTOS Y GASTOS OPERACIONALES/R', 'GASTOS DE ADMINISTRACION/R', 'GASTOS DE VENTAS/R','GASTOS DE DISTRIBUCIÓN/R', 'GASTOS POR BENEFICIOS A LOS EMPLEADOS/R', 'OTROS GASTOS OPERATIVOS/R', 'COSTO DE VENTAS/R', 'NO OPERACIONALES/R', 'GASTOS FINANCIEROS/R', 'TOTAL INGRESOS/R', 'INGRESOS OPERACIONALES/R', 'VENTAS/R', 'OTROS INGRESOS OPERACIONALES/R', 'INGRESOS NO OPERACIONALES/R', 'INGRESOS EXTRAORDINARIOS/R', 'INGRESOS FINANCIEROS/R', 'RESULTADO NO OPERACIONAL/R', 'RESULTADO FINANCIERO/R', 'RESULTADO DE IMPUESTOS/R', 'AJUSTES POR INFLACIàN/R', 'IMPUESTO DE RENTA Y COMPLEMENTARIOS/R', 'Fecha_Captura']\n return pd.DataFrame(columns=DicResultadosOrdenado)\n\ndef FinancieroEncabezados(Fecha_Captura,tree,PathBalancesPrio, NIT):\n Años = tree.findall(PathBalancesPrio)\n df = pd.DataFrame()\n i = 0\n for Año in Años:\n tags = []\n #print(Año.attrib)\n #print ('Prioritario Año:', Año.attrib['EJERCICIO'], \"NIIF: \", Año.tag[0])\n try:\n dfNormaContable = pd.DataFrame([Año.attrib['NIIF']], columns=[\"NIIF\"])\n except:\n dfNormaContable = pd.DataFrame([\"0\"], columns=[\"NIIF\"])\n i = i +1\n for dato in Año:\n tags.append(dato.tag)\n\n dfFinanciero = pd.DataFrame(columns=tags)\n dicFinanciero = ['FEC_CIERRE', 'DURACION', 'COD_DIVISA', 'DESC_FUENTE', 'PARTIDAS']\n dfFinanciero = Validar_Formato_Tabla(dfFinanciero, dicFinanciero)\n listado = OrderedDict((dato.tag, dato.text) for dato in Año)\n df_table = pd.DataFrame(listado, columns = tags, index=[\"1\"])\n dfFinanciero = dfFinanciero.append(df_table, ignore_index = True, sort=False)\n del dfFinanciero['PARTIDAS']\n\n dfFinanciero = Concatenar(df1 = dfNormaContable, df2 = dfFinanciero)\n dfFinanciero[\"Nit_Cliente\"]= NIT\n dfFinanciero[\"Fecha_Captura\"]= Fecha_Captura\n dicFinanciero = ['NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente', 'Nit_Cliente', 'Fecha_Captura']\n dfFinanciero.columns = dicFinanciero\n \n dfFinanciero = dfFinanciero[['Nit_Cliente', 'Fecha_Captura', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Unidades', 'Fuente']]\n df = 
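# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): the repeated
# df1.set_index('Fecha_Efecto').combine_first(...) step in the Financiero_*
# functions merges the statements of several years into one frame, keeping the
# first non-null value per (closing date, column). Tiny invented example:
import pandas as pd

y1 = pd.DataFrame({"Fecha_Efecto": ["2018", "2019"], "TOTAL ACTIVO": [100, None]})
y2 = pd.DataFrame({"Fecha_Efecto": ["2019", "2020"], "TOTAL ACTIVO": [110, 120]})
merged = (y1.set_index("Fecha_Efecto")
            .combine_first(y2.set_index("Fecha_Efecto"))
            .reset_index())
print(merged)
# ----------------------------------------------------------------------------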
Append(df1=df, df2=dfFinanciero)\n #df[\"Id_Info_Financiera\"] = Id\n return df\n\n#Actividad_Exterior(tree,path de actividad comercial externa, escoger si es importa o exporta)\ndef Actividad_Exterior(tree,PathActividad, Actividad):\n Años = tree.findall(PathActividad)\n df = pd.DataFrame()\n i = 0\n for Año in Años:\n tags = []\n texto = []\n #print(Año.text)\n for dato in Año:\n tags.append(dato.tag)\n texto.append(dato.text)\n dictionary = dict(zip(tags, texto))\n dfActividad = pd.DataFrame.from_dict(dictionary, orient = 'index').T\n dicActividad = [\"ANYO\"]\n dfActividad = Validar_Formato_Tabla(dfActividad, dicActividad)\n #df = Append(df1=df, df2=dfActividad)\n #print(dfActividad)\n for ActivExt in Año.findall('.//' + Actividad):\n #print(ActivExt)\n tags2 = []\n texto2 = []\n for child in ActivExt:\n tags2.append(child.tag)\n texto2.append(child.text)\n dictionary = dict(zip(tags2, texto2))\n dfActividadExt = pd.DataFrame.from_dict(dictionary, orient = 'index').T\n dicActividadExt = [\"FEC_CAMBIO\",\"PRODUCTOS\",\"IMPORTE\",\"DIVISA\"]\n dfActividadExt = Validar_Formato_Tabla(dfActividadExt, dicActividadExt)\n dfActividad = Concatenar(df1=dfActividad, df2=dfActividadExt)\n #print(dfActividad.to_string())\n tags3 = []\n texto3 = []\n for paises in ActivExt.findall('.//' + \"PAISES\"):\n dfActividadpais = pd.DataFrame()\n\n for pais in paises:\n #print(pais.tag)\n #print(pais.text)\n tags3.append(pais.tag)\n texto3.append(pais.text)\n \n dictionary = dict(zip(tags3, texto3))\n df2 = pd.DataFrame.from_dict(dictionary, orient = 'index').T\n dfActividadpais = Append(df1=dfActividadpais, df2=df2)\n dicActividadpais = [\"DESC_PAIS\"]\n dfActividadpais = Validar_Formato_Tabla(dfActividadpais, dicActividadpais)\n #print(dfActividadpais)\n \n dfActividad = Concatenar(df1=dfActividad, df2=dfActividadpais)\n dfActividad = Completar_Espacios(dfActividad)\n df = Append(df1= df,df2=dfActividad)\n dicActivExt = ['ANYO', 'FEC_CAMBIO', 'PRODUCTOS', 'IMPORTE', 'DIVISA', 'DESC_PAIS']\n df = Validar_Formato_Tabla(df,dicActivExt)\n return df\n\ndef Extraer_Dataframe_Actividades(tree,PathDataFrame, Dic):\n \t\"\"\" Extrae todas las actividades comerciales de la empresa SEA PRIMARIA O SECUNDARIA\"\"\"\n \t\"\"\"Extrae un dataframe y seleciona solo las columnas asignadas en el diccionario\"\"\"\n \ttags = []\n \toutput = []\n\n \tfor root in tree.findall(PathDataFrame + \"/ACTIVIDAD/\"):\n \t\ttags.append(root.tag)\n\n \ttag = OrderedDict((x, 1) for x in tags).keys()\n \tdf = pd.DataFrame(columns=tag)\n\n \tdf_tipo = pd.DataFrame(columns=['Tipo_Actividad'])\n \tfor root in tree.findall(PathDataFrame + \"/\"):\n \t\tfor name in root.attrib:\n \t\t\ttipo_actividad = name\n\n \t\tdf_tipo = df_tipo.append({'Tipo_Actividad': name}, ignore_index=True)\n\n \tdf_tipo\n\n \tfor root in tree.findall(PathDataFrame + \"/ACTIVIDAD\"):\n \t\tdata = list(root)\n \t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n \t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n \t\tdf = df.append(df_table, ignore_index = True, sort=False)\n\n \tdf = df.filter(items=Dic)\n \tdf\n \tdf.columns += \"/\"+ Extraer_Label(PathDataFrame)\n \tdf = Concatenar(df1=df,df2=df_tipo)\n \treturn df\n \ndef Extraer_Dataframe_Obligaciones(tree,PathDataFrame, des_situ,des_tipo):\n \t\"\"\" Extrae todas las obligaciones y los atributos tipo y descripcion de las etiquetas \"\"\"\n \ttags = []\n \toutput = []\n\n \tfor root in tree.findall(PathDataFrame + \"/OBLIGACION/\"):\n \t\ttags.append(root.tag)\n\n \ttag = 
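# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): Actividad_Exterior
# above turns each export/import block into one row per (year, country). The
# mini XML below is invented but shaped like the fields that function reads
# (ANYO, IMPORTE, DIVISA, PAISES/DESC_PAIS); the EJERCICIO/EXPORTA tag names are
# assumptions for the example.
import xml.etree.ElementTree as ET
import pandas as pd

SAMPLE = """<COMERCIO_EXTERIOR>
  <EJERCICIO><ANYO>2020</ANYO>
    <EXPORTA><IMPORTE>5000</IMPORTE><DIVISA>USD</DIVISA>
      <PAISES><DESC_PAIS>PERU</DESC_PAIS><DESC_PAIS>CHILE</DESC_PAIS></PAISES>
    </EXPORTA>
  </EJERCICIO>
</COMERCIO_EXTERIOR>"""

rows = []
for ejercicio in ET.fromstring(SAMPLE).findall("./EJERCICIO"):
    anyo = ejercicio.findtext("ANYO")
    for bloque in ejercicio.findall("./EXPORTA"):
        base = {"ANYO": anyo,
                "IMPORTE": bloque.findtext("IMPORTE"),
                "DIVISA": bloque.findtext("DIVISA")}
        for pais in bloque.findall("./PAISES/DESC_PAIS"):
            rows.append({**base, "DESC_PAIS": pais.text})
print(pd.DataFrame(rows))
# ----------------------------------------------------------------------------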
OrderedDict((x, 1) for x in tags).keys()\n \tdf = pd.DataFrame(columns=tag)\n\n \tdf_tipo = pd.DataFrame(columns=['situ','tipo'])\n \tfor root in tree.findall(PathDataFrame + \"/\"):\n \t\tatrib_tipo = root.get(des_tipo)\n \t\tatrib_situ = root.get(des_situ)\n \t\tdf_tipo = df_tipo.append({'situ': atrib_situ, 'tipo': atrib_tipo}, ignore_index=True)\n\n \tfor root in tree.findall(PathDataFrame + \"/OBLIGACION\"):\n \t\tdata = list(root)\n \t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n \t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n \t\tdf = df.append(df_table, ignore_index = True, sort=False)\n \t\tdf\n \tdf.columns += \"/\"+ Extraer_Label(PathDataFrame)\n \tdf = Concatenar(df1=df,df2=df_tipo)\n \tdicObligaciones = [\"PERIODO/OBLIGACIONES\",\"FECHA_EJECUCION/OBLIGACIONES\",\"FUENTE/OBLIGACIONES\",\"situ\",\"tipo\"]\n \tdf = Validar_Formato_Tabla(df,dicObligaciones)\n \treturn df\n \ndef Extraer_Dataframe_Politica_Ccial(NIT,Fecha_Captura,Directorio,tree,PathDataFrame):\n \t\"\"\" Extrae la informacion de la politica comercial para las ventas o las compras, se debe pasar el path completo de cada uno, determina tambien el porcentaje de nacional e internacional con manejo de errores cuando no hay politicas\"\"\"\n\n \ttags = []\n \toutput = []\n\n \tfor root in tree.findall(PathDataFrame + \"/\"):\n \t\ttags.append(root.tag)\n\n \ttag = OrderedDict((x, 1) for x in tags).keys()\n \tdf = pd.DataFrame(columns=tag)\n\n \tdf_politica = pd.DataFrame(columns=['Porc_Nacional_Pol_Ccial','Porc_Internacional_Pol_Ccial'])\n \tfor root in tree.findall(PathDataFrame):\n \t\tdata = list(root)\n \t\tlistado = OrderedDict((content.tag, content.text) for content in data)\n \t\t#print(listado)\n \t\tdf_table = pd.DataFrame(listado, columns = tag, index=[\"1\"])\n \t\tdf = df.append(df_table, ignore_index = True, sort=False)\n \t\t#df['Nit_Pagador'] = ConsultaElemento(PathNit)\n \t\t#df.set_index('Nit_Pagador')\n \t\ttry:\n \t\t\tnacional = root.find(\".//NACIONAL/PORCENTAJE\")\n \t\t\tnacional = nacional.text\n \t\texcept:\n \t\t\tprint(\"Sin Porcentaje Nacional\")\n \t\t\tnacional =np.nan\n \t\ttry:\n \t\t\tinternacional = root.find(\".//INTERNACIONAL/PORCENTAJE\")\n \t\t\tinternacional = internacional.text\n \t\texcept:\n \t\t\tinternacional = np.nan\n \t\t\tprint(\"Sin Porcentaje Internacional\")\n\n \t\t#print(root)\n \t\tdf_politica = df_politica.append({'Porc_Nacional_Pol_Ccial': nacional, 'Porc_Internacional_Pol_Ccial': internacional}, ignore_index=True)\n \t\tdf = Concatenar(df1=df,df2=df_politica)\n\n \t\t#print(df)\n \t#df.columns += \"/\"+ Extraer_Label(PathDataFrame)\n \t#print(\"########################################\")\n \tPath = Directorio +\"_\"+ Extraer_Label(PathDataFrame) + \".csv\"\n \t#df.to_csv(Path, index = False, encoding='utf-8-sig')\n \t#print(\"Archivo Creado con Exito en \\n\" + Path)\n \tdf[\"Nit_Cliente\"]=NIT\n \tdf[\"Tipo_Pol_Ccial\"]=Extraer_Label(PathDataFrame)\n \tdf[\"Fecha_Captura\"]=Fecha_Captura\n \tdicPolitica = [\"PRODUCTOS\",\"POLITICA\",\"FEC_EFECTO\",\"Porc_Nacional_Pol_Ccial\",\"Porc_Internacional_Pol_Ccial\",\"Nit_Cliente\",\"Tipo_Pol_Ccial\",\"Fecha_Captura\"]\n \t#print(dicPolitica)\n \tdf = Validar_Formato_Tabla(df,dicPolitica)\n \tdicPolitica = [\"Producto_Pol_Ccial\",\"Politica_Pol_CCial\",\"Fecha_Efecto_Pol_Ccial\",\"Porc_Nacional_Pol_Ccial\",\"Porc_Internacional_Pol_Ccial\",\"Nit_Cliente\",\"Tipo_Pol_Ccial\",\"Fecha_Captura\"]\n \tdf.columns = dicPolitica\n\n \treturn df\n\ndef 
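# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): a None-safe
# variant of the percentage lookup in Extraer_Dataframe_Politica_Ccial above.
# Element.find() returns None when the node is missing, so an explicit None
# check covers the case the broad try/except currently handles.
import xml.etree.ElementTree as ET
import numpy as np

ventas = ET.fromstring(
    "<VENTAS><NACIONAL><PORCENTAJE>70</PORCENTAJE></NACIONAL></VENTAS>")

def porcentaje(root, path):
    nodo = root.find(path)
    return nodo.text if nodo is not None else np.nan

print(porcentaje(ventas, ".//NACIONAL/PORCENTAJE"),
      porcentaje(ventas, ".//INTERNACIONAL/PORCENTAJE"))
# ----------------------------------------------------------------------------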
Financiero_Indicadores(tree,PathIndicadoresFinancieros, dic):\n Años = tree.findall(PathIndicadoresFinancieros)\n #print(\"LOS AÑOS SON......\")\n #print(Años)\n dic = dic\n df = pd.DataFrame(columns = dic)\n i = 0\n for Año in Años:\n tags = []\n try:\n dfAnno = pd.DataFrame([Año.attrib['ANYO']], columns=[\"Fecha_Efecto_Indicador_Fro\"])\n #print(\"EL DF ANNO ES...\")\n #print(dfAnno)\n except:\n print(\"error al intentar extraer el año\")\n dfAnno = pd.DataFrame([np.nan], columns=[\"Fecha_Efecto_Indicador_Fro\"])\n dfdato = pd.DataFrame()\n for dato in Año:\n tags.append(dato.tag)\n\n df1 = pd.DataFrame()\n for child in dato.findall('./'):\n #print(child.tag)\n #print(child.text)\n dfchild = pd.DataFrame([child.text], columns = [child.tag])\n df1 = Concatenar(df1=df1,df2=dfchild)\n \n df1.columns += \"/\" + dato.tag\n dfdato = Concatenar(df1=dfdato,df2=df1)\n #print(dfdato.to_string())\n #print(\"for item\")\n df2 = Concatenar(df1=dfAnno,df2=dfdato)\n df2 = Validar_Formato_Tabla(df2,dic)\n #print(df2.to_string())\n df = Append(df1=df,df2=df2)\n #print(df.to_string()) \n return df\n\n\"\"\" FUNCIONES DE INSERCIÓN A BD\"\"\"\n\ndef ing_tbl_F_Info_Consulta(conn,cursor,tbl_F_Info_Consulta):\n \tif tbl_F_Info_Consulta.empty:\n \t\tprint(\"tbl_F_Info_Consulta esta vacio\")\n \t\t#return True\n \telse:\n \t\tfor index,row in tbl_F_Info_Consulta.iterrows():\n \t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Info_Consulta([Nit_Cliente],[Nombre_Consulta],[Usuario],[Fecha_Captura]) values (?,?,?,?)\",\n \t\t\t\trow['Nit_Cliente'],\n \t\t\t\trow['Nombre_Consulta'],\n \t\t\t\trow['Usuario'],\n \t\t\t\trow['Fecha_Captura']\n \t\t\t\t)\n \t\tconn.commit()\n \t#conn.close()\n \treturn True\n\ndef ing_tbl_F_Referencias_Cciales(conn,cursor,tbl_F_Referencias_Cciales):\n \tif tbl_F_Referencias_Cciales.empty:\n \t\tprint(\"tbl_F_Referencias_Cciales esta vacio\")\n \telse:\n \t\tfor index,row in tbl_F_Referencias_Cciales.iterrows():\n \t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Referencias_Cciales([Fecha_Efecto], [Nombre_Proveedor], [Importe_Proveedor], [Forma_Pago], [Plazo_Pago], [Fecha_Ultimo_Pago], [Producto], [Opinion_Proveedor], [Nit_Proveedor], [Nit_Cliente], [Fecha_Captura]) values (?,?,?,?,?,?,?,?,?,?,?)\",\n \t\t\t\trow['Fecha_Efecto'],\n \t\t\t\trow['Nombre_Proveedor'],\n \t\t\t\trow['Importe_Proveedor'],\n \t\t\t\trow['Forma_Pago'],\n \t\t\t\trow['Plazo_Pago'],\n \t\t\t\trow['Fecha_Ultimo_Pago'],\n \t\t\t\trow['Producto'],\n \t\t\t\trow['Opinion_Proveedor'],\n \t\t\t\trow['Nit_Proveedor'],\n \t\t\t\trow['Nit_Cliente'],\n \t\t\t\trow['Fecha_Captura']\n \t\t\t\t)\n \t\t\tconn.commit()\n \t#conn.close()\n \treturn True\n\n\ndef ing_tbl_F_Evolucion_Empleados(conn,cursor,tbl_F_Evolucion_Empleados):\n\tif tbl_F_Evolucion_Empleados.empty:\n\t\tprint(\"tbl_F_Evolucion_Empleados esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Evolucion_Empleados.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Evolucion_Empleados([Fecha_Efecto], [Nit_Cliente], [Cantidad_Empleados], [Fecha_Captura]) values (?,?,?,?)\",\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Cantidad_Empleados'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\t\tconn.commit()\n\t#conn.close()\n\treturn True\n\ndef ing_tbl_F_Riesgo_Comercial(conn,cursor,tbl_F_Riesgo_Comercial):\n\tif tbl_F_Riesgo_Comercial.empty:\n\t\tprint(\"tbl_F_Riesgo_Comercial esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Riesgo_Comercial.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Riesgo_Comercial([Fecha_Efecto], [Nit_Cliente], 
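# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): the ing_tbl_*
# helpers in this section all repeat the same row-by-row parameterised INSERT.
# A single generic helper with cursor.executemany covers that pattern;
# `insert_dataframe` is an illustrative name, the table and column identifiers
# are expected to come from the trusted schema lists already used in this
# module, and only the values are bound as parameters.
import pandas as pd

def insert_dataframe(conn, table, df):
    if df.empty:
        print(f"{table} is empty")
        return False
    cols = list(df.columns)
    sql = "INSERT INTO {} ({}) VALUES ({})".format(
        table,
        ", ".join(f"[{c}]" for c in cols),
        ", ".join("?" * len(cols)))
    rows = list(df.astype(object).where(pd.notnull(df), None)
                  .itertuples(index=False, name=None))
    cursor = conn.cursor()
    cursor.executemany(sql, rows)
    conn.commit()
    return True
# ----------------------------------------------------------------------------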
[Situacion_Financiera], [Evolucion_Empresa], [Calificacion_Informa], [Riesgo_Informa], [Incidentes], [Info_Complementaria], [Fecha_Captura]) values (?,?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Nit_Cliente'],row['Situacion_Financiera'],\n\t\t\t\trow['Evolucion_Empresa'],\n\t\t\t\trow['Calificacion_Informa'],\n\t\t\t\trow['Riesgo_Informa'],\n\t\t\t\trow['Incidentes'],\n\t\t\t\trow['Info_Complementaria'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\t\tconn.commit()\n\t#conn.close()\n\treturn True\n\ndef ing_tbl_D_Clientes(conn,cursor,tbl_D_Clientes):\n\tif tbl_D_Clientes.empty:\n\t\tprint(\"tbl_D_Clientes esta vacio\")\n\telse:\n\t\tfor index,row in tbl_D_Clientes.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_D_Clientes([Nit_Cliente],[Duns_Cliente],[Nombre_Cliente],[Direccion_Cliente],[Municipio_Cliente],[Departamento_Cliente],[Pais_Cliente],[Telefono_Cliente],[Email_Cliente],[Direccion_Web_Cliente],[Fecha_Constitucion],[Forma_Juridica_Cliente],[Cod_ICI_Cliente],[Estado_Empresa],[Cod_Actividad_Ccial],[Actividad_Ccial],[Objeto_Social],[Tipo_Empresa]) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t\trow['Nit_Cliente'],\n\t\t\trow['Duns_Cliente'],\n\t\t\trow['Nombre_Cliente'],\n\t\t\trow['Direccion_Cliente'],\n\t\t\trow['Municipio_Cliente'],\n\t\t\trow['Departamento_Cliente'],\n\t\t\trow['Pais_Cliente'],\n\t\t\trow['Telefono_Cliente'],\n\t\t\trow['Email_Cliente'],\n\t\t\trow['Direccion_Web_Cliente'],\n\t\t\trow['Fecha_Constitucion'],\n\t\t\trow['Forma_Juridica_Cliente'],\n\t\t\trow['Cod_ICI_Cliente'],\n\t\t\trow['Estado_Empresa'],\n\t\t\trow['Cod_Actividad_Ccial'],\n\t\t\trow['Actividad_Ccial'],\n\t\t\trow['Objeto_Social'],\n\t\t\trow['Tipo_Empresa']\n\t\t\t)\n\t\t\tconn.commit()\n\t#conn.close()\n\treturn True\n\ndef ing_tbl_F_Info_Financiera(conn,cursor,tbl_F_Info_Financiera):\n\tfor index,row in tbl_F_Info_Financiera.iterrows():\n\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Info_Financiera([Nit_Cliente], [Unidades], [NormaContable], [Fecha_Efecto], [Duracion], [Fuente], [Fecha_Captura]) values(?,?,?,?,?,?,?)\",\n\t\t\trow['Nit_Cliente'],\n\t\t\trow['Unidades'],\n\t\t\trow['NormaContable'],\n\t\t\trow['Fecha_Efecto'],\n\t\t\trow['Duracion'],\n\t\t\trow['Fuente'],\n\t\t\trow['Fecha_Captura']\n\t\t\t)\n\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Activos(conn,cursor,tbl_F_Activos):\n\tfor index,row in tbl_F_Activos.iterrows():\n\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Activos([Id_Info_Financiera],[Nit_Cliente],[Fecha_Efecto],[Total_Activos],[Total_Activos_Cte],[Cuentas_x_Cobrar_Cte],[Inventarios_Cte],[Diferidos_Cte],[Gastos_Pagados_Ant_Cte],[Otros_Activos_Cte],[Otros_Activos_Financ_Cte],[Otros_Activos_No_Financ_Cte],[Activos_Imptos_Cte],[Activos_Calsif_Mantenido_Venta_Cte],[Efectivo_Equivalente_Cte],[Cuentas_x_Cobrar_Otras_Cte],[Cuentas_x_Cobrar_Partes_Rel_Cte],[Total_Activos_No_Cte],[Inversiones_No_Cte],[Inversiones_Asociadas_No_Cte],[Inversiones_Contabilizadas_No_Cte],[Propiedad_Planta_Equipo_No_Cte],[Diferidos_No_Cte],[Gastos_Pagados_Anticipado_No_Cte],[Otros_Activos_No_Cte],[Propiedad_Inversion_No_Cte],[Plusvalia_No_Cte],[Activos_Intangibles_No_Plusv_No_Cte],[Activos_Imptos_Diferido_No_Cte],[Inv_No_Cte],[Cuentas_x_Cobrar_No_Cte],[Cunetas_x_Cobrar_Otras_No_Cte],[Cuentas_x_Cobrar_Partes_Rel_No_Cte],[Otros_Activos_No_Fro],[Otros_Activos_Fro],[Fecha_Captura]) values 
(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t\trow['Id_Info_Financiera'],\n\t\t\trow['Nit_Cliente'],\n\t\t\trow['Fecha_Efecto'],\n\t\t\trow['Total_Activos'],\n\t\t\trow['Total_Activos_Cte'],\n\t\t\trow['Cuentas_x_Cobrar_Cte'],\n\t\t\trow['Inventarios_Cte'],\n\t\t\trow['Diferidos_Cte'],\n\t\t\trow['Gastos_Pagados_Ant_Cte'],\n\t\t\trow['Otros_Activos_Cte'],\n\t\t\trow['Otros_Activos_Financ_Cte'],\n\t\t\trow['Otros_Activos_No_Financ_Cte'],\n\t\t\trow['Activos_Imptos_Cte'],\n\t\t\trow['Activos_Calsif_Mantenido_Venta_Cte'],\n\t\t\trow['Efectivo_Equivalente_Cte'],\n\t\t\trow['Cuentas_x_Cobrar_Otras_Cte'],\n\t\t\trow['Cuentas_x_Cobrar_Partes_Rel_Cte'],\n\t\t\trow['Total_Activos_No_Cte'],\n\t\t\trow['Inversiones_No_Cte'],\n\t\t\trow['Inversiones_Asociadas_No_Cte'],\n\t\t\trow['Inversiones_Contabilizadas_No_Cte'],\n\t\t\trow['Propiedad_Planta_Equipo_No_Cte'],\n\t\t\trow['Diferidos_No_Cte'],\n\t\t\trow['Gastos_Pagados_Anticipado_No_Cte'],\n\t\t\trow['Otros_Activos_No_Cte'],\n\t\t\trow['Propiedad_Inversion_No_Cte'],\n\t\t\trow['Plusvalia_No_Cte'],\n\t\t\trow['Activos_Intangibles_No_Plusv_No_Cte'],\n\t\t\trow['Activos_Imptos_Diferido_No_Cte'],\n\t\t\trow['Inv_No_Cte'],\n\t\t\trow['Cuentas_x_Cobrar_No_Cte'],\n\t\t\trow['Cunetas_x_Cobrar_Otras_No_Cte'],\n\t\t\trow['Cuentas_x_Cobrar_Partes_Rel_No_Cte'],\n\t\t\trow['Otros_Activos_No_Fro'],\n\t\t\trow['Otros_Activos_Fro'],\n\t\t\trow['Fecha_Captura']\n\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Pasivos_Patrimonio(conn,cursor,tbl_F_Pasivos_Patrimonio):\n\tfor index,row in tbl_F_Pasivos_Patrimonio.iterrows():\n\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Pasivos_Patrimonio([Id_Info_Financiera],[Nit_Cliente],[Fecha_Efecto],[Total_Pasivo_Patrimonio],[Total_Pasivo],[Total_Pasivo_Cte],[Obligaciones_Fra],[Pasivo_Est_Provi],[Provi_Diversa],[Otro_Pasivo_Fro],[Otro_Pasivo_No_Fro],[Cuentas_x_Pagar_Cte],[Otras_Cuentas_x_Pagar_Cte],[Cuentas_x_Pagar_Ent_Rel],[Pasivo_Impto_Cte],[Provi_Cte_Empleado],[Otro_Pasivo_Cte],[Total_Pasivo_No_Cte],[Pasivo_Estimado_Provisiones_No_Cte],[Otro_Pasivo_Fro_No_Cte],[Otro_Pasivo_No_Fro_No_Cte],[Pasivo_Impto_Diferido_No_Cte],[Obligaciones_Fro_No_Cte],[Provisiones_Beneficios_No_Cte],[Otras_Provisiones_No_Cte],[Otro_Pasivo_No_Cte],[Patrimonio],[Capital_Social_Pt],[Superavit_Capital_Pt],[Reserva_Pt],[Resultado_Ejercicio_Pt],[Cotiza_Aux_Aporte_No_Vinc_Pt],[Otros_Rubros_Pt],[Acciones_Propias_Cartera_Pt],[Otro_Resultado_Integral_Acum_Pt],[Otras_Participaciones_Pt],[Primas_Emision_Pt],[Ganancias_Acum_Pt],[Capital_Emitido_Pt],[Fecha_Captura]) values 
(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t\trow['Id_Info_Financiera'],\n\t\t\trow['Nit_Cliente'],\n\t\t\trow['Fecha_Efecto'],\n\t\t\trow['Total_Pasivo_Patrimonio'],\n\t\t\trow['Total_Pasivo'],\n\t\t\trow['Total_Pasivo_Cte'],\n\t\t\trow['Obligaciones_Fra'],\n\t\t\trow['Pasivo_Est_Provi'],\n\t\t\trow['Provi_Diversa'],\n\t\t\trow['Otro_Pasivo_Fro'],\n\t\t\trow['Otro_Pasivo_No_Fro'],\n\t\t\trow['Cuentas_x_Pagar_Cte'],\n\t\t\trow['Otras_Cuentas_x_Pagar_Cte'],\n\t\t\trow['Cuentas_x_Pagar_Ent_Rel'],\n\t\t\trow['Pasivo_Impto_Cte'],\n\t\t\trow['Provi_Cte_Empleado'],\n\t\t\trow['Otro_Pasivo_Cte'],\n\t\t\trow['Total_Pasivo_No_Cte'],\n\t\t\trow['Pasivo_Estimado_Provisiones_No_Cte'],\n\t\t\trow['Otro_Pasivo_Fro_No_Cte'],\n\t\t\trow['Otro_Pasivo_No_Fro_No_Cte'],\n\t\t\trow['Pasivo_Impto_Diferido_No_Cte'],\n\t\t\trow['Obligaciones_Fro_No_Cte'],\n\t\t\trow['Provisiones_Beneficios_No_Cte'],\n\t\t\trow['Otras_Provisiones_No_Cte'],\n\t\t\trow['Otro_Pasivo_No_Cte'],\n\t\t\trow['Patrimonio'],\n\t\t\trow['Capital_Social_Pt'],\n\t\t\trow['Superavit_Capital_Pt'],\n\t\t\trow['Reserva_Pt'],\n\t\t\trow['Resultado_Ejercicio_Pt'],\n\t\t\trow['Cotiza_Aux_Aporte_No_Vinc_Pt'],\n\t\t\trow['Otros_Rubros_Pt'],\n\t\t\trow['Acciones_Propias_Cartera_Pt'],\n\t\t\trow['Otro_Resultado_Integral_Acum_Pt'],\n\t\t\trow['Otras_Participaciones_Pt'],\n\t\t\trow['Primas_Emision_Pt'],\n\t\t\trow['Ganancias_Acum_Pt'],\n\t\t\trow['Capital_Emitido_Pt'],\n\t\t\trow['Fecha_Captura']\n\t\t\t)\n\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Resultados_Ejercicio(conn,cursor,tbl_F_Resultados_Ejercicio):\n\tfor index,row in tbl_F_Resultados_Ejercicio.iterrows():\n\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Resultados_Ejercicio([Id_Info_Financiera],[Nit_Cliente],[Fecha_Efecto],[Resultado_Ejercicio],[Resultado_Antes_Impto],[Resultado_Op],[Total_Gastos],[Costos_Gastos_Op],[Gastos_Op_Admin],[Gastos_Op_Venta],[Gastos_Dist],[Gastos_Beneficio_Empl],[Otros_Gastos_Op],[Costos_Venta],[Gastos_No_Op],[Gastos_Fro],[Total_Ingresos],[Ingresos_Operacional],[Ventas],[Otros_Ingresos_Op],[Ingresos_No_Op],[Ingresos_Extraordinarios],[Ingresos_Fro],[Resultados_No_Op],[Resultados_Fro],[Resultados_Impuesto],[Ajuste_Inflacion],[Impto_Renta],[Fecha_Captura]) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t\trow['Id_Info_Financiera'],\n\t\t\trow['Nit_Cliente'],\n\t\t\trow['Fecha_Efecto'],\n\t\t\trow['Resultado_Ejercicio'],\n\t\t\trow['Resultado_Antes_Impto'],\n\t\t\trow['Resultado_Op'],\n\t\t\trow['Total_Gastos'],\n\t\t\trow['Costos_Gastos_Op'],\n\t\t\trow['Gastos_Op_Admin'],\n\t\t\trow['Gastos_Op_Venta'],\n\t\t\trow['Gastos_Dist'],\n\t\t\trow['Gastos_Beneficio_Empl'],\n\t\t\trow['Otros_Gastos_Op'],\n\t\t\trow['Costos_Venta'],\n\t\t\trow['Gastos_No_Op'],\n\t\t\trow['Gastos_Fro'],\n\t\t\trow['Total_Ingresos'],\n\t\t\trow['Ingresos_Operacional'],\n\t\t\trow['Ventas'],\n\t\t\trow['Otros_Ingresos_Op'],\n\t\t\trow['Ingresos_No_Op'],\n\t\t\trow['Ingresos_Extraordinarios'],\n\t\t\trow['Ingresos_Fro'],\n\t\t\trow['Resultados_No_Op'],\n\t\t\trow['Resultados_Fro'],\n\t\t\trow['Resultados_Impuesto'],\n\t\t\trow['Ajuste_Inflacion'],\n\t\t\trow['Impto_Renta'],\n\t\t\trow['Fecha_Captura']\n\t\t\t)\n\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Participantes(conn,cursor,tbl_F_Participantes):\n\tif tbl_F_Participantes.empty:\n\t\tprint(\"tbl_F_Participantes esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Participantes.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Participantes([Nit_Cliente], 
[Nombre_Participante], [Doc_Participante], [Porcentaje], [Fecha_Efecto], [Fecha_Captura]) values (?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Nombre_Participante'],\n\t\t\t\trow['Doc_Participante'],\n\t\t\t\trow['Porcentaje'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Accionistas(conn,cursor,tbl_F_Accionistas):\n\tif tbl_F_Accionistas.empty:\n\t\tprint(\"tbl_F_Accionistas esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Accionistas.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Accionistas([Nit_Cliente],[Doc_Accionista],[Nombre_Accionista],[Fecha_Efecto],[Fecha_Captura]) values (?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Doc_Accionista'],\n\t\t\t\trow['Nombre_Accionista'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Administradores(conn,cursor,tbl_F_Administradores):\n\tif tbl_F_Administradores.empty:\n\t\tprint(\"tbl_F_Administradores esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Administradores.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Administradores([Nit_Cliente],[Fecha_Actualizacion],[Doc_Administrador],[Nombre_Administrador],[Cargo_Administrador],[Fecha_Efecto],[Fecha_Captura]) values (?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Fecha_Actualizacion'],\n\t\t\t\trow['Doc_Administrador'],\n\t\t\t\trow['Nombre_Administrador'],\n\t\t\t\trow['Cargo_Administrador'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Establecimientos(conn,cursor,tbl_F_Establecimientos):\n\tif tbl_F_Establecimientos.empty:\n\t\tprint(\"tbl_F_Establecimientos esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Establecimientos.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Establecimientos([Nit_Cliente],[Nombre_Establecimiento],[Tipo_Explotacion],[Departamento],[Fecha_Efecto],[Fecha_Captura]) values (?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Nombre_Establecimiento'],\n\t\t\t\trow[\"Tipo_Explotacion\"],\n\t\t\t\trow[\"Departamento\"],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Incidencias(conn,cursor,tbl_F_Incidencias):\n\tif tbl_F_Incidencias.empty:\n\t\tprint(\"tbl_F_Incidencias esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Incidencias.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Incidencias([Nit_Cliente],[Fecha_Efecto],[Estado_Incidencia],[Municipio],[Cod_Incidencia],[Tipo_Incidencia],[Descripcion_Incidencia],[Demandante],[Total_Incidencias],[Fecha_Captura]) values (?,?,?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Estado_Incidencia'],\n\t\t\t\trow['Municipio'],\n\t\t\t\trow['Cod_Incidencia'],\n\t\t\t\trow['Tipo_Incidencia'],\n\t\t\t\trow['Descripcion_Incidencia'],\n\t\t\t\trow['Demandante'],\n\t\t\t\trow['Total_Incidencias'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Importaciones(conn,cursor,tbl_F_Importaciones):\n\tif tbl_F_Importaciones.empty:\n\t\tprint(\"tbl_F_Importaciones esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Importaciones.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Importaciones([Nit_Cliente],[Anno],[Fecha_Efecto],[Producto],[Pais],[Valor],[Divisa],[Fecha_Captura]) values 
(?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Anno'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Producto'],\n\t\t\t\trow['Pais'],\n\t\t\t\trow['Valor'],\n\t\t\t\trow['Divisa'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Exportaciones(conn,cursor,tbl_F_Exportaciones):\n\tif tbl_F_Exportaciones.empty:\n\t\tprint(\"tbl_F_Exportaciones esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Exportaciones.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Exportaciones([Nit_Cliente],[Anno],[Fecha_Efecto],[Producto],[Pais],[Valor],[Divisa],[Fecha_Captura]) values (?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Anno'],\n\t\t\t\trow['Fecha_Efecto'],\n\t\t\t\trow['Producto'],\n\t\t\t\trow['Pais'],\n\t\t\t\trow['Valor'],\n\t\t\t\trow['Divisa'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Actividades(conn,cursor,tbl_F_Actividades):\n\tif tbl_F_Actividades.empty:\n\t\tprint(\"tbl_F_Actividades esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Actividades.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Actividades([Nit_Cliente],[Tipo_Actividad],[Cod_Actividad],[Descripcion_Actividad],[Fecha_Captura]) values (?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Tipo_Actividad'],\n\t\t\t\trow['Cod_Actividad'],\n\t\t\t\trow['Descripcion_Actividad'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Obligaciones(conn,cursor,tbl_F_Obligaciones):\n\tif tbl_F_Obligaciones.empty:\n\t\tprint(\"tbl_F_Obligaciones esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Obligaciones.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Obligaciones([Nit_Cliente],[Tipo_Obligacion],[Periodo_Obligacion],[Situacion_Obligacion],[Fecha_Ejecucion_Obligacion],[Fuente_Obligacion],[Fecha_Captura]) values (?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Tipo_Obligacion'],\n\t\t\t\trow['Periodo_Obligacion'],\n\t\t\t\trow['Situacion_Obligacion'],\n\t\t\t\trow['Fecha_Ejecucion_Obligacion'],\n\t\t\t\trow['Fuente_Obligacion'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Politica_Comercial(conn,cursor,tbl_F_Politica_Comercial):\n\tif tbl_F_Politica_Comercial.empty:\n\t\tprint(\"tbl_F_Politica_Comercial esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Politica_Comercial.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Politica_Comercial([Nit_Cliente],[Tipo_Pol_Ccial],[Producto_Pol_Ccial],[Politica_Pol_CCial],[Fecha_Efecto_Pol_Ccial],[Porc_Nacional_Pol_Ccial],[Porc_Internacional_Pol_Ccial],[Fecha_Captura]) values (?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Tipo_Pol_Ccial'],\n\t\t\t\trow['Producto_Pol_Ccial'],\n\t\t\t\trow['Politica_Pol_CCial'],\n\t\t\t\trow['Fecha_Efecto_Pol_Ccial'],\n\t\t\t\trow['Porc_Nacional_Pol_Ccial'],\n\t\t\t\trow[\"Porc_Internacional_Pol_Ccial\"],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Publicaciones_Prensa(conn,cursor,tbl_F_Publicaciones_Prensa):\n\tif tbl_F_Publicaciones_Prensa.empty:\n\t\tprint(\"tbl_F_Publicaciones_Prensa esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Publicaciones_Prensa.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Publicaciones_Prensa([Nit_Cliente],[Fecha_Publicacion],[Fuente],[Tipo_Articulo],[Resumen_Publicacion],[Fecha_Captura]) values 
(?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Fecha_Publicacion'],\n\t\t\t\trow['Fuente'],\n\t\t\t\trow['Tipo_Articulo'],\n\t\t\t\trow['Resumen_Publicacion'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Publicaciones_Legales(conn,cursor,tbl_F_Publicaciones_Legales):\n\tif tbl_F_Publicaciones_Legales.empty:\n\t\tprint(\"tbl_F_Publicaciones_Legales esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Publicaciones_Legales.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Publicaciones_Legales([Nit_Cliente],[Tipo_Acto],[Fecha_Acto],[Referencia],[Fuente],[Lugar_Publicacion],[Fecha_Captura]) values (?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Tipo_Acto'],\n\t\t\t\trow['Fecha_Acto'],\n\t\t\t\trow['Referencia'],\n\t\t\t\trow['Fuente'],\n\t\t\t\trow[\"Lugar_Publicacion\"],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Relaciones_Terceros(conn,cursor,tbl_F_Relaciones_Terceros):\n\tif tbl_F_Relaciones_Terceros.empty:\n\t\tprint(\"tbl_F_Relaciones_Terceros esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Relaciones_Terceros.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Relaciones_Terceros([Nit_Cliente],[Tipo_Relacion],[Razon_Social],[Nit_Razon_Social],[Fecha_Captura]) values (?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Tipo_Relacion'],\n\t\t\t\trow['Razon_Social'],\n\t\t\t\trow['Nit_Razon_Social'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef ing_tbl_F_Indicadores_Financieros(conn,cursor,tbl_F_Indicadores_Financieros):\n\tif tbl_F_Indicadores_Financieros.empty:\n\t\tprint(\"tbl_F_Indicadores_Financieros esta vacio\")\n\telse:\n\t\tfor index,row in tbl_F_Indicadores_Financieros.iterrows():\n\t\t\tcursor.execute(\"INSERT INTO dbo.tbl_F_Indicadores_Financieros([Nit_Cliente],[Fecha_Efecto_Indicador_Fro],[Evolucion_Ventas],[Evolucion_Utilidad_Neta],[Rentabilidad],[Rentabilidad_Operacional],[Rentabilidad_Patrimonio],[Rentabilidad_Activo_Total],[Cobertura_Gastos_Fro],[EBIT],[EBITDA],[Endeudamiento],[Concentracion_Corto_Plazo],[Endeudamiento_Sin_Valorizacion],[Apalancamiento_Fro],[Carga_Fra],[Capital_Trabajo],[Razon_Cte],[Prueba_Acida],[Dias_Rotacion_Inventario],[Dias_Ciclo_Operacional],[Rotacion_Activos],[Fecha_Captura]) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n\t\t\t\trow['Nit_Cliente'],\n\t\t\t\trow['Fecha_Efecto_Indicador_Fro'],\n\t\t\t\trow['Evolucion_Ventas'],\n\t\t\t\trow['Evolucion_Utilidad_Neta'],\n\t\t\t\trow['Rentabilidad'],\n\t\t\t\trow['Rentabilidad_Operacional'],\n\t\t\t\trow['Rentabilidad_Patrimonio'],\n\t\t\t\trow['Rentabilidad_Activo_Total'],\n\t\t\t\trow['Cobertura_Gastos_Fro'],\n\t\t\t\trow['EBIT'],\n\t\t\t\trow['EBITDA'],\n\t\t\t\trow['Endeudamiento'],\n\t\t\t\trow['Concentracion_Corto_Plazo'],\n\t\t\t\trow['Endeudamiento_Sin_Valorizacion'],\n\t\t\t\trow['Apalancamiento_Fro'],\n\t\t\t\trow['Carga_Fra'],\n\t\t\t\trow['Capital_Trabajo'],\n\t\t\t\trow['Razon_Cte'],\n\t\t\t\trow['Prueba_Acida'],\n\t\t\t\trow['Dias_Rotacion_Inventario'],\n\t\t\t\trow['Dias_Ciclo_Operacional'],\n\t\t\t\trow['Rotacion_Activos'],\n\t\t\t\trow['Fecha_Captura']\n\t\t\t\t)\n\t\tconn.commit()\n\treturn True\n\ndef Uptate_Tbls_Financieras(conn,cursor):\n\t#Update a Activos\n\tcursor.execute(f\"\"\"update [dbo].[tbl_F_Activos]\n\t\tset [dbo].[tbl_F_Activos].Id_Info_Financiera=ifi.Id_Info_Financiera\n\t\tfrom [dbo].[tbl_F_Activos] a\n\t\tinner join [dbo].[tbl_F_Info_Financiera] ifi\n\t\ton 
(a.Fecha_Captura=ifi.Fecha_Captura\n\t\tand a.Nit_Cliente=ifi.Nit_Cliente\n\t\tand a.Fecha_Efecto=ifi.Fecha_Efecto)\"\"\")\n\tconn.commit()\n\n\t#Update a Pasivo_Patrimonio\n\tcursor.execute(f\"\"\"update [dbo].[tbl_F_Pasivos_Patrimonio]\n\t\tset [dbo].[tbl_F_Pasivos_Patrimonio].Id_Info_Financiera=ifi.Id_Info_Financiera\n\t\tfrom [dbo].[tbl_F_Pasivos_Patrimonio] pp\n\t\tinner join [dbo].[tbl_F_Info_Financiera] ifi\n\t\ton (pp.Fecha_Captura=ifi.Fecha_Captura\n\t\tand pp.Nit_Cliente=ifi.Nit_Cliente\n\t\tand pp.Fecha_Efecto=ifi.Fecha_Efecto)\"\"\")\n\tconn.commit()\n\n\t#Update a Resultados Ejercicio\n\tcursor.execute(f\"\"\"update [dbo].[tbl_F_Resultados_Ejercicio]\n\t\tset [dbo].[tbl_F_Resultados_Ejercicio].Id_Info_Financiera=ifi.Id_Info_Financiera\n\t\tfrom [dbo].[tbl_F_Resultados_Ejercicio] re\n\t\tinner join [dbo].[tbl_F_Info_Financiera] ifi\n\t\ton (re.Fecha_Captura=ifi.Fecha_Captura\n\t\tand re.Nit_Cliente=ifi.Nit_Cliente\n\t\tand re.Fecha_Efecto=ifi.Fecha_Efecto)\"\"\")\n\tconn.commit()\n\n\"\"\"FUNCIÓN PRINCIPAL DE EJECUCIÓN\"\"\"\n\ndef save_dataframe(nit):\n\t\"\"\" Guarda los csv de los df \"\"\"\n\t#Directorio=\"C:\\\\Python_Flask\\\\envDivisa\\\\\"\n\tDirectorio = os.getcwd() + \"\\\\\"\n\t#PathCarpetaConsultas = \"C:\\\\Python_Flask\\\\envDivisa\\\\Consultas\\\\\"\n\t# PathCarpetaConsultas = Directorio +\"Consultas\\\\\"\n\tPathCarpetaConsultas = \"/var/www/html/flask/Consultas/\"\n\t#PathCarpetaResultados = \"C:\\\\Python_Flask\\\\envDivisa\\\\Resultados\\\\\"\n\tPathCarpetaResultados = Directorio +\"Resultados\\\\\"\n\t\n\tNIT = nit\n\tId_Cliente = NIT\n\tPathXml = PathCarpetaConsultas + NIT +\".xml\"\n\tconn,cursor=connectionDB()\n\n\n\t_NIT = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ID_ANEXA/IDFISCAL/VALOR\"\n\tNombreEmpresa = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/DENOMINACION/RAZONSOCIAL/VALOR\"\n\n\ttree = ET.parse(PathXml)\n\troot = tree.getroot()\n\n\t_NIT = ConsultaElemento(root,_NIT)\n\t_NombreEmpresa = ConsultaElemento(root,NombreEmpresa).replace(\" \", \"\")\n\n\tNombreArchivo = _NIT+\"_\"+_NombreEmpresa+\".csv\"\n\tNombreArchivofill = \"_\"+_NIT+\"_\"+_NombreEmpresa+\".csv\"\n\tFecha_Captura_UTC = datetime.now(timezone('UTC'))\n\tFecha_Captura = Fecha_Captura_UTC.astimezone(timezone('America/Bogota')).strftime('%Y-%m-%d %H:%M:%S')\n\t#print(\"FECHA:\", Fecha_Captura)\n\n\t\"\"\"###tbl_F_Info_Consulta\"\"\"\n\n\tdicInfoConsulta = {'Nit_Cliente': [NIT], 'Nombre_Consulta': [\"INFORME FINANCIERO\"], 'Usuario':['C16134'], 'Fecha_Captura':[Fecha_Captura]}\n\ttbl_F_Info_Consulta = pd.DataFrame.from_dict(dicInfoConsulta)\n\t#Guardar_csv(tbl_F_Info_Consulta, PathCarpetaResultados, f\"{NIT}_tbl_F_Info_Consulta.csv\")\n\ting_tbl_F_Info_Consulta(conn,cursor,tbl_F_Info_Consulta)\n\n\n\t\"\"\" Referencias Comerciales \"\"\"\n\t\n\tId_Ref_Comercial = np.nan\n\tPathReferenciasComerciales = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/REFCOMERCIAL/COMERCIAL/PROVEEDOR\"\n\tDicReferenciasComerciales = [\"IDENT_EMPRESA\",\"RAZONSOCIAL\",\"IMPORTE\",\"FORMA_PAGO_LOCAL\",\"PLAZO_PAGO_LOCAL\",\"FEC_ULT_PAGO\",\"FEC_EFECTO\",\"PRODUCTO\",\"COMPOR_PAGO_LOCAL\"]\n\tdfReferenciasComerciales = Extraer_Dataframe_Dic(Directorio,tree,root,PathReferenciasComerciales, DicReferenciasComerciales)\n\n\tDicReferenciasComerciales = 
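# ----------------------------------------------------------------------------
# Minimal illustrative sketch (not from the original module): validate_route and
# save_dataframe mix Windows-style "\\" paths with the hard-coded Linux path
# "/var/www/html/flask/Consultas/". Building the paths with os.path.join keeps a
# single code path working on both systems; DIVISA_BASE_DIR is an assumed
# environment variable, and the NIT value is only an example.
import os

base_dir = os.environ.get("DIVISA_BASE_DIR", os.getcwd())
consultas_dir = os.path.join(base_dir, "Consultas")
os.makedirs(consultas_dir, exist_ok=True)
path_xml = os.path.join(consultas_dir, "8600259002.xml")
print(path_xml)
# ----------------------------------------------------------------------------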
[\"IDENT_EMPRESA/PROVEEDOR\",\"RAZONSOCIAL/PROVEEDOR\",\"IMPORTE/PROVEEDOR\",\"FORMA_PAGO_LOCAL/PROVEEDOR\",\"PLAZO_PAGO_LOCAL/PROVEEDOR\",\"FEC_ULT_PAGO/PROVEEDOR\",\"FEC_EFECTO/PROVEEDOR\",\"PRODUCTO/PROVEEDOR\",\"COMPOR_PAGO_LOCAL/PROVEEDOR\"]\n\tdfReferenciasComerciales = Validar_Formato_Tabla(dfReferenciasComerciales,DicReferenciasComerciales)\n\n\tdfReferenciasComerciales.columns = ['Nit_Proveedor', 'Nombre_Proveedor', 'Importe_Proveedor', 'Forma_Pago', 'Plazo_Pago', 'Fecha_Ultimo_Pago', 'Fecha_Efecto', 'Producto', 'Opinion_Proveedor']\n\tdfReferenciasComerciales['Fecha_Captura'] = Fecha_Captura\n\tdfReferenciasComerciales['Nit_Cliente'] = NIT\n\n\tDictbl_F_Referencias_Cciales = ['Fecha_Efecto', 'Nombre_Proveedor', 'Importe_Proveedor', 'Forma_Pago', 'Plazo_Pago', 'Fecha_Ultimo_Pago', 'Producto', 'Opinion_Proveedor', 'Nit_Proveedor', 'Nit_Cliente', 'Fecha_Captura']\n\ttbl_F_Referencias_Cciales = Validar_Formato_Tabla(dfReferenciasComerciales,Dictbl_F_Referencias_Cciales)\n\t#Guardar_csv(tbl_F_Referencias_Cciales, PathCarpetaResultados, f\"{NIT}_tbl_F_Referencias_Cciales.csv\")\n\ting_tbl_F_Referencias_Cciales(conn,cursor,tbl_F_Referencias_Cciales)\n\n\t\"\"\"###tbl_F_Evolucion_Empleados\"\"\"\n\n\tPathEvolucionEmpleados_Actual = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/EMPLEADOS/ACTUAL\"\n\tDicEvolucionEmpleados_Actual = [\"FIJOS\",\"FEC_EFECTO\"]\n\ttry:\n\t\tdfEvolucionEmpleados_Actual = Extraer_Dataframe_Dic(Directorio,tree,root,PathEvolucionEmpleados_Actual, DicEvolucionEmpleados_Actual)\n\t\tdfEvolucionEmpleados_Actual.columns = ['Cantidad_Empleados', 'Fecha_Efecto']\n\texcept:\n\t\tdfEvolucionEmpleados_Actual = pd.DataFrame(columns=['Cantidad_Empleados', 'Fecha_Efecto'])\n\n\ttry:\n\t\tPathEvolucionEmpleados_Hist = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/EMPLEADOS/ANTERIOR\"\n\t\tDicEvolucionEmpleados_Hist = [\"FIJOS\",\"FEC_EFECTO\"]\n\t\tdfEvolucionEmpleados_Hist = Extraer_Dataframe_Dic(Directorio,tree,root,PathEvolucionEmpleados_Hist, DicEvolucionEmpleados_Hist)\n\t\tdfEvolucionEmpleados_Hist.columns = ['Cantidad_Empleados', 'Fecha_Efecto']\n\t\tdfEvolucionEmpleados = Append(df1= dfEvolucionEmpleados_Actual, df2= dfEvolucionEmpleados_Hist)\n\texcept:\n\t\tdfEvolucionEmpleados = dfEvolucionEmpleados_Actual\n\n\tId_Evolucion_Empleado = np.nan\n\n\tdfEvolucionEmpleados[\"Nit_Cliente\"] = NIT\n\tdfEvolucionEmpleados[\"Fecha_Captura\"] = Fecha_Captura\n\tDictbl_F_Evolucion_Empleados = [\"Fecha_Efecto\", \"Nit_Cliente\", \"Cantidad_Empleados\", 'Fecha_Captura']\n\ttbl_F_Evolucion_Empleados = Validar_Formato_Tabla(dfEvolucionEmpleados,Dictbl_F_Evolucion_Empleados)\n\n\t#Guardar_csv(tbl_F_Evolucion_Empleados, PathCarpetaResultados, f\"{NIT}_tbl_F_Evolucion_Empleados.csv\")\n\ting_tbl_F_Evolucion_Empleados(conn,cursor,tbl_F_Evolucion_Empleados)\n\n\t\"\"\"###tbl_F_Riesgo_Comercial\"\"\"\n\n\tId_Riesgo_Comercial = np.nan\n\n\tPathRiesgoComercial = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/EVALUACION\"\n\tAtribRiesgoComercial = \"DES\"\n\tdfRiesgoComercial = Extraer_Dataframe_1Atributo(tree,PathRiesgoComercial, AtribRiesgoComercial)\n\tdicRiesgoComercial = [\"SINTESIS_SITUACION_FINANCIERA/EVALUACION\", \"SINTESIS_TIPOLOGIA/EVALUACION\", \"SINTESIS_TRAYECTORIA/EVALUACION\", \"SINTESIS_INCIDENTES/EVALUACION\"]\n\tdfRiesgoComercial = Validar_Formato_Tabla(dfRiesgoComercial,dicRiesgoComercial)\n\t##display(HTML(dfRiesgoComercial.to_html()))\n\n\tdfRiesgoComercialCalificacion = 
Extraer_Dataframe(Directorio,tree,PathRiesgoComercial)\n\ttry:\n\t\tdfRiesgoComercialCalificacion = dfRiesgoComercialCalificacion[[\"FEC_CALCULO\", \"NOTA\", \"TEXTO_NOTA\"]]\n\texcept:\n\t\tdfRiesgoComercialCalificacion = dfRiesgoComercialCalificacion[[\"FEC_CALCULO\", \"TEXTO_NOTA\"]]\n\t\tdfRiesgoComercialCalificacion['NOTA'] = np.nan\n\t\tdfRiesgoComercialCalificacion = dfRiesgoComercialCalificacion[[\"FEC_CALCULO\", \"NOTA\", \"TEXTO_NOTA\"]]\n\n\tdfRiesgoComercialCalificacion.columns = [\"Fecha_Efecto\", \"Calificacion_Informa\", \"Riesgo_Informa\"]\n\n\ttbl_F_Riesgo_Comercial = dfRiesgoComercial.drop([\"SINTESIS_TIPOLOGIA/EVALUACION\"], axis=1)\n\ttbl_F_Riesgo_Comercial.columns = [\"Situacion_Financiera\", \"Evolucion_Empresa\", \"Incidentes\"]\n\n\ttbl_F_Riesgo_Comercial[\"Nit_Cliente\"] = NIT\n\n\ttbl_F_Riesgo_Comercial = Concatenar(df1= tbl_F_Riesgo_Comercial, df2= dfRiesgoComercialCalificacion)\n\ttbl_F_Riesgo_Comercial[\"Fecha_Captura\"] = Fecha_Captura\n\n\tPathRiesgoComercialCLinton = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/EVALUACION/OPINION_CLIENTE\"\n\tRiesgoComercialCLinton = ConsultaElemento(root,PathRiesgoComercialCLinton)\n\ttbl_F_Riesgo_Comercial[\"Info_Complementaria\"] = RiesgoComercialCLinton\n\n\tDictbl_F_Riesgo_Comercial = ['Fecha_Efecto', 'Nit_Cliente', 'Situacion_Financiera', 'Evolucion_Empresa', 'Calificacion_Informa', 'Riesgo_Informa', 'Incidentes', 'Info_Complementaria', 'Fecha_Captura']\n\ttbl_F_Riesgo_Comercial = Validar_Formato_Tabla(tbl_F_Riesgo_Comercial,Dictbl_F_Riesgo_Comercial)\n\n\t#Guardar_csv(tbl_F_Riesgo_Comercial, PathCarpetaResultados, f\"{NIT}_tbl_F_Riesgo_Comercial.csv\")\n\ting_tbl_F_Riesgo_Comercial(conn,cursor,tbl_F_Riesgo_Comercial)\n\n\t\"\"\"###tbl_D_Clientes\"\"\"\n\n\tCod_ICI_Cliente = np.nan\n\n\tPathIdentificacionCaracteristicasNIT = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ID_ANEXA/IDFISCAL\"\n\tDicIdentificacionCaracteristicasNIT = [\"VALOR\"]\n\tdfIdentificacionCaracteristicasNIT = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasNIT, DicIdentificacionCaracteristicasNIT)\n\n\ttbl_D_Clientes = dfIdentificacionCaracteristicasNIT\n\ttbl_D_Clientes.columns = ['Nit_Cliente']\n\ttbl_D_Clientes['Id_Cliente'] = Id_Cliente\n\n\tPathIdentificacionCaracteristicasRAZONZ = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/DENOMINACION/RAZONSOCIAL\"\n\tDicIdentificacionCaracteristicasRAZONZ = [\"VALOR\", \"FEC_EFECTO\"]\n\tdfIdentificacionCaracteristicasRAZONZ = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasRAZONZ, DicIdentificacionCaracteristicasRAZONZ)\n\ttbl_D_Clientes['Nombre_Cliente'] = dfIdentificacionCaracteristicasRAZONZ[\"VALOR/RAZONSOCIAL\"].iloc[0]\n\n\tPathIdentificacionCaracteristicasDIR = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/DIRECCION/SEDESOCIAL\"\n\tdfIdentificacionCaracteristicasDIR = Extraer_Dataframe(Directorio,tree,PathIdentificacionCaracteristicasDIR)\n\tdfIdentificacionCaracteristicasDIR[\"Numero\"] = \" # \"\n\ttry:\n\t\tMunicipio = dfIdentificacionCaracteristicasDIR[\"LOCALIDAD\"].iloc[0]\n\t\tDepartamento = dfIdentificacionCaracteristicasDIR[\"DESC_PROVINCIA\"].iloc[0]\n\t\tPais = dfIdentificacionCaracteristicasDIR[\"DESC_PAIS\"].iloc[0]\n\texcept:\n\t\tMunicipio = np.nan\n\t\tDepartamento = np.nan\n\t\tPais = np.nan\n\n\tdicDireccion = [\"DESC_TIPOVIA\",\"VIA\", \"Numero\", \"NUMEROVIA\"]\n\tdfIdentificacionCaracteristicasDIR = 
Validar_Formato_Tabla(dfIdentificacionCaracteristicasDIR,dicDireccion)\n\n\tdfIdentificacionCaracteristicasDIR = Combinar_Registros(dfIdentificacionCaracteristicasDIR, PathIdentificacionCaracteristicasDIR)\n\n\tdfIdentificacionCaracteristicasDIR = dfIdentificacionCaracteristicasDIR[0:1][:]\n\ttbl_D_Clientes['Direccion_Cliente'] = dfIdentificacionCaracteristicasDIR[\"SEDESOCIAL\"].iloc[0]\n\n\tPathIdentificacionCaracteristicasWEB = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/DIRECCION/WEB\"\n\tDicIdentificacionCaracteristicasWEB = [\"VALOR\"]\n\tdfIdentificacionCaracteristicasWEB = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasWEB, DicIdentificacionCaracteristicasWEB)\n\ttry:\n\t\ttbl_D_Clientes[\"Direccion_Web_Cliente\"] = dfIdentificacionCaracteristicasWEB['VALOR/WEB'].iloc[0].lower()\n\texcept:\n\t\ttbl_D_Clientes[\"Direccion_Web_Cliente\"] = np.nan\n\n\tPathIdentificacionCaracteristicasTEL = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/CONTACTO/TELEFONO\"\n\tDicIdentificacionCaracteristicasTEL = [\"VALOR\"]\n\tdfIdentificacionCaracteristicasTEL = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasTEL, DicIdentificacionCaracteristicasTEL)\n\ttbl_D_Clientes['Telefono_Cliente'] = dfIdentificacionCaracteristicasTEL['VALOR/TELEFONO'].iloc[0]\n\n\tPathIdentificacionCaracteristicasEMAIL = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/CONTACTO/EMAIL\"\n\tDicIdentificacionCaracteristicasEMAIL = [\"VALOR\"]\n\tdfIdentificacionCaracteristicasEMAIL = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasEMAIL, DicIdentificacionCaracteristicasEMAIL)\n\ttbl_D_Clientes['Email_Cliente'] = dfIdentificacionCaracteristicasEMAIL['VALOR/EMAIL'].iloc[0]\n\n\tPathIdentificacionCaracteristicasJURID = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/FORJUR/\"\n\tDicIdentificacionCaracteristicasJURID = [\"DES_TIPO\", \"FEC_EFECTO\"]\n\tdfIdentificacionCaracteristicasJURID = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasJURID, DicIdentificacionCaracteristicasJURID)\n\ttbl_D_Clientes['Forma_Juridica_Cliente'] = dfIdentificacionCaracteristicasJURID[\"DES_TIPO/\"].iloc[0]\n\ttbl_D_Clientes['Fecha_Constitucion'] = dfIdentificacionCaracteristicasJURID[\"FEC_EFECTO/\"].iloc[0]\n\n\tPathIdentificacionCaracteristicasACTIV = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ACTIVIDADES/CODIGO/ACTIVIDAD\"\n\tDicIdentificacionCaracteristicasACTIV = [\"CODIGO\",\"DESC_FORMATO_LOCAL\"]\n\tdfIdentificacionCaracteristicasACTIV = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasACTIV, DicIdentificacionCaracteristicasACTIV)\n\ttbl_D_Clientes['Cod_Actividad_Ccial'] = dfIdentificacionCaracteristicasACTIV[\"CODIGO/ACTIVIDAD\"].iloc[0]\n\ttbl_D_Clientes['Actividad_Ccial'] = dfIdentificacionCaracteristicasACTIV[\"DESC_FORMATO_LOCAL/ACTIVIDAD\"].iloc[0]\n\n\tPathIdentificacionCaracteristicasESTADO = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ESTADOEMPRESA\"\n\tDicIdentificacionCaracteristicasESTADO = [\"DESESTADO\"]\n\tdfIdentificacionCaracteristicasESTADO = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasESTADO, DicIdentificacionCaracteristicasESTADO)\n\ttbl_D_Clientes['Estado_Empresa'] = dfIdentificacionCaracteristicasESTADO[\"DESESTADO/ESTADOEMPRESA\"].iloc[0]\n\n\t#Actividades Objeto 
Social\n\n\tPathActividadesObjetoSocial = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ACTIVIDADES/TEXTO/ACTIVIDAD\"\n\tDicActividadesObjetoSocial = [\"TEXTO\"]\n\tdfActividadesObjetoSocial = Extraer_Dataframe_Dic(Directorio,tree,root,PathActividadesObjetoSocial, DicActividadesObjetoSocial)\n\n\ttbl_D_Clientes[\"Tipo_Empresa\"] = dfRiesgoComercial[\"SINTESIS_TIPOLOGIA/EVALUACION\"].iloc[0]\n\ttry:\n\t\ttbl_D_Clientes[\"Objeto_Social\"] = dfActividadesObjetoSocial[\"TEXTO/ACTIVIDAD\"].iloc[0]\n\texcept:\n\t\tprint(\"Sin información en la columna TEXTO\")\n\t\tDicActividadesObjetoSocial = [\"TEXTO/ACTIVIDAD\"]\n\t\tdfActividadesObjetoSocial = Validar_Formato_Tabla(dfActividadesObjetoSocial, DicActividadesObjetoSocial)\n\t\ttbl_D_Clientes[\"Objeto_Social\"] = np.nan\n\n\ttbl_D_Clientes[\"Cod_ICI_Cliente\"] = Cod_ICI_Cliente\n\ttbl_D_Clientes[\"Fecha_Captura\"] = Fecha_Captura\n\n\tPathIdentificacionCaracteristicasDUNS = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ID_ANEXA/DUNS\"\n\tDicIdentificacionCaracteristicasDUNS = [\"VALOR\"]\n\tdfIdentificacionCaracteristicasDUNS = Extraer_Dataframe_Dic(Directorio,tree,root,PathIdentificacionCaracteristicasDUNS, DicIdentificacionCaracteristicasDUNS)\n\ttbl_D_Clientes[\"Duns_Cliente\"] = dfIdentificacionCaracteristicasDUNS[\"VALOR/DUNS\"].iloc[0]\n\ttbl_D_Clientes[\"Municipio_Cliente\"] = Municipio\n\ttbl_D_Clientes[\"Departamento_Cliente\"] = Departamento\n\ttbl_D_Clientes[\"Pais_Cliente\"] = Pais\n\tDictbl_D_Clientes = ['Nit_Cliente','Duns_Cliente','Nombre_Cliente', 'Direccion_Cliente', 'Municipio_Cliente','Departamento_Cliente','Pais_Cliente','Telefono_Cliente','Email_Cliente','Direccion_Web_Cliente','Fecha_Constitucion','Forma_Juridica_Cliente','Cod_ICI_Cliente','Estado_Empresa','Cod_Actividad_Ccial','Actividad_Ccial','Objeto_Social','Tipo_Empresa']\n\ttbl_D_Clientes = Validar_Formato_Tabla(tbl_D_Clientes,Dictbl_D_Clientes)\n\n\t#Guardar_csv(tbl_D_Clientes, PathCarpetaResultados, f\"{NIT}_tbl_D_Clientes.csv\")\n\ting_tbl_D_Clientes(conn,cursor,tbl_D_Clientes)\n\n\t\"\"\"###tbl_F_Info_Financiera\"\"\"\n\n\tId_Info_Financiera = np.nan\n\tPathBalances = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/BALANCES/PRIORITARIO\"\n\tdicFinancieroEncabezadoOrdenado = ['Nit_Cliente', 'Unidades', 'NormaContable', 'Fecha_Efecto', 'Duracion', 'Fuente', 'Fecha_Captura']\n\tdfFinancieroEncabezado = FinancieroEncabezados(Fecha_Captura,tree,PathBalances, NIT)\n\ttbl_F_Info_Financiera = Validar_Formato_Tabla(dfFinancieroEncabezado, dicFinancieroEncabezadoOrdenado)\n\ttry:\n\t\tPathBalancesActual = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/BALANCES/ACTUAL\"\n\t\tdfFinancieroEncabezadoActual = FinancieroEncabezados(Fecha_Captura,tree,PathBalancesActual, NIT)\n\t\ttbl_F_Info_FinancieraActual = Validar_Formato_Tabla(dfFinancieroEncabezadoActual, dicFinancieroEncabezadoOrdenado)\n\t\ttbl_F_Info_Financiera=Append(df1=tbl_F_Info_FinancieraActual,df2=tbl_F_Info_Financiera)\n\texcept:\n\t\tprint(\"Sin informacion Actual\")\n\t\ttbl_F_Info_Financiera=tbl_F_Info_Financiera.sort_values('Fecha_Efecto',ascending=False)\n\n\t#Guardar_csv(tbl_F_Info_Financiera, PathCarpetaResultados, f\"{NIT}_tbl_F_Info_Financiera.csv\")\n\ting_tbl_F_Info_Financiera(conn,cursor,tbl_F_Info_Financiera)\n\n\t\"\"\"###tbl_F_Activos\"\"\"\n\t\n\tBalanceActivos = \"AC\"\n\tId_Activo = None\n\n\tDicActivosOrdenado = ['Id_Info_Financiera', 'Fecha_Efecto', 'TOTAL ACTIVO', 'TOTAL ACTIVO 
CORRIENTE/ACC', 'CUENTAS POR COBRAR - DEUDORES/ACC', 'INVENTARIOS/ACC', 'DIFERIDOS/ACC', 'GASTOS PAGADOS POR ANTICIPADO/ACC', 'OTROS ACTIVOS/ACC', 'OTROS ACTIVOS FINANCIEROS/ACC', 'OTROS ACTIVOS NO FINANCIEROS/ACC', 'ACTIVOS POR IMPUESTOS CORRIENTES/ACC', 'ACTIVOS CLASIFICADOS COMO MANTENIDOS PAR/ACC', 'EFECTIVO Y EQUIVALENTES AL EFECTIVO/ACC', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACC', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACC', 'TOTAL ACTIVO NO CORRIENTE/ACL', 'INVERSIONES/ACL', 'INVERSIONES EN SUBSIDIARIAS, NEGOCIOS CO/ACL', 'INVERSIONES CONTABILIZADAS UTILIZANDO EL/ACL', 'PROPIEDADES PLANTA Y EQUIPO/ACL', 'DIFERIDOS/ACL', 'GASTOS PAGADOS POR ANTICIPADO/ACL', 'OTROS ACTIVOS/ACL', 'PROPIEDAD DE INVERSIÓN/ACL', 'PLUSVALÍA/ACL', 'ACTIVOS INTANGIBLES DISTINTOS DE LA PLUS/ACL', 'ACTIVOS POR IMPUESTOS DIFERIDOS/ACL', 'INVERSIONES NO CORRIENTES/ACL', 'CUENTAS POR COBRAR NO CORRIENTES/ACL', 'CUENTAS COMERCIALES POR COBRAR Y OTRAS C/ACL', 'CUENTAS POR COBRAR PARTES RELACIONADAS Y/ACL', 'OTROS ACTIVOS NO FINANCIEROS/ACL', 'OTROS ACTIVOS FINANCIEROS/ACL', 'Fecha_Captura']\n\tdfActivos = Financiero_Activos(Fecha_Captura,Id_Cliente,tree,PathBalances, BalanceActivos, Id_Activo, Id_Info_Financiera)\n\ttbl_F_Activos = Validar_Formato_Tabla(dfActivos, DicActivosOrdenado)\n\ttbl_F_Activos.columns = ['Id_Info_Financiera','Fecha_Efecto','Total_Activos','Total_Activos_Cte','Cuentas_x_Cobrar_Cte','Inventarios_Cte','Diferidos_Cte','Gastos_Pagados_Ant_Cte','Otros_Activos_Cte','Otros_Activos_Financ_Cte','Otros_Activos_No_Financ_Cte','Activos_Imptos_Cte','Activos_Calsif_Mantenido_Venta_Cte','Efectivo_Equivalente_Cte','Cuentas_x_Cobrar_Otras_Cte','Cuentas_x_Cobrar_Partes_Rel_Cte','Total_Activos_No_Cte','Inversiones_No_Cte','Inversiones_Asociadas_No_Cte','Inversiones_Contabilizadas_No_Cte','Propiedad_Planta_Equipo_No_Cte','Diferidos_No_Cte','Gastos_Pagados_Anticipado_No_Cte','Otros_Activos_No_Cte','Propiedad_Inversion_No_Cte','Plusvalia_No_Cte','Activos_Intangibles_No_Plusv_No_Cte','Activos_Imptos_Diferido_No_Cte','Inv_No_Cte','Cuentas_x_Cobrar_No_Cte','Cunetas_x_Cobrar_Otras_No_Cte','Cuentas_x_Cobrar_Partes_Rel_No_Cte','Otros_Activos_No_Fro','Otros_Activos_Fro','Fecha_Captura']\n\n\ttry:\n\t\tdfActivosActual = Financiero_Activos(Fecha_Captura,Id_Cliente,tree,PathBalancesActual, BalanceActivos, Id_Activo, Id_Info_Financiera)\n\t\ttbl_F_ActivosActual = Validar_Formato_Tabla(dfActivosActual, DicActivosOrdenado)\n\t\tDicActivosActualOrdenado = ['Id_Info_Financiera','Fecha_Efecto','Total_Activos','Total_Activos_Cte','Cuentas_x_Cobrar_Cte','Inventarios_Cte','Diferidos_Cte','Gastos_Pagados_Ant_Cte','Otros_Activos_Cte','Otros_Activos_Financ_Cte','Otros_Activos_No_Financ_Cte','Activos_Imptos_Cte','Activos_Calsif_Mantenido_Venta_Cte','Efectivo_Equivalente_Cte','Cuentas_x_Cobrar_Otras_Cte','Cuentas_x_Cobrar_Partes_Rel_Cte','Total_Activos_No_Cte','Inversiones_No_Cte','Inversiones_Asociadas_No_Cte','Inversiones_Contabilizadas_No_Cte','Propiedad_Planta_Equipo_No_Cte','Diferidos_No_Cte','Gastos_Pagados_Anticipado_No_Cte','Otros_Activos_No_Cte','Propiedad_Inversion_No_Cte','Plusvalia_No_Cte','Activos_Intangibles_No_Plusv_No_Cte','Activos_Imptos_Diferido_No_Cte','Inv_No_Cte','Cuentas_x_Cobrar_No_Cte','Cunetas_x_Cobrar_Otras_No_Cte','Cuentas_x_Cobrar_Partes_Rel_No_Cte','Otros_Activos_No_Fro','Otros_Activos_Fro','Fecha_Captura']\n\t\ttbl_F_ActivosActual.columns = DicActivosActualOrdenado\n\t\ttbl_F_Activos = Append(df1=tbl_F_Activos,df2=tbl_F_ActivosActual)\n\t\ttbl_F_Activos = 
Validar_Formato_Tabla(tbl_F_Activos,DicActivosActualOrdenado)\n\texcept:\n\t\tprint(\"Sin Informacion Actual\")\n\n\tDicActivosOrdenado = ['Id_Info_Financiera','Nit_Cliente','Fecha_Efecto','Total_Activos','Total_Activos_Cte','Cuentas_x_Cobrar_Cte','Inventarios_Cte','Diferidos_Cte','Gastos_Pagados_Ant_Cte','Otros_Activos_Cte','Otros_Activos_Financ_Cte','Otros_Activos_No_Financ_Cte','Activos_Imptos_Cte','Activos_Calsif_Mantenido_Venta_Cte','Efectivo_Equivalente_Cte','Cuentas_x_Cobrar_Otras_Cte','Cuentas_x_Cobrar_Partes_Rel_Cte','Total_Activos_No_Cte','Inversiones_No_Cte','Inversiones_Asociadas_No_Cte','Inversiones_Contabilizadas_No_Cte','Propiedad_Planta_Equipo_No_Cte','Diferidos_No_Cte','Gastos_Pagados_Anticipado_No_Cte','Otros_Activos_No_Cte','Propiedad_Inversion_No_Cte','Plusvalia_No_Cte','Activos_Intangibles_No_Plusv_No_Cte','Activos_Imptos_Diferido_No_Cte','Inv_No_Cte','Cuentas_x_Cobrar_No_Cte','Cunetas_x_Cobrar_Otras_No_Cte','Cuentas_x_Cobrar_Partes_Rel_No_Cte','Otros_Activos_No_Fro','Otros_Activos_Fro','Fecha_Captura']\n\ttbl_F_Activos[\"Nit_Cliente\"] = NIT\n\ttbl_F_Activos = Validar_Formato_Tabla(tbl_F_Activos, DicActivosOrdenado)\n\n\ttbl_F_Activos = tbl_F_Activos.sort_values('Fecha_Efecto',ascending=False)\n\ttbl_F_Activos.replace({pd.np.NaN:None},inplace=True)\n\ttbl_F_Activos = tbl_F_Activos.astype(object).where(pd.notnull(tbl_F_Activos),None)\n\t#Guardar_csv(tbl_F_Activos, PathCarpetaResultados, f\"{NIT}_tbl_F_Activos.csv\")\n\ting_tbl_F_Activos(conn,cursor,tbl_F_Activos)\n\n\t\"\"\"###tbl_F_Pasivos_Patrimonio\"\"\"\n\n\tId_PasivoPatrimonio = np.nan\n\tBalancePasivosPatrimonio = \"P\"\n\tDicPasivosPatrimonioOrdenado = ['Id_PasivoPatrimonio', 'Id_Info_Financiera', 'Fecha_Efecto', 'PASIVO + PATRIMONIO', 'PASIVO/PS', 'PASIVO A CORTO PLAZO/PS/PSC', 'OBLIGACIONES FINANCIERAS/PS/PSC', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSC', 'PROVISIONES DIVERSAS/PS/PSC', 'OTROS PASIVOS FINANCIEROS/PS/PSC', 'OTROS PASIVOS NO FINANCIEROS/PS/PSC', 'CUENTAS POR PAGAR CORRIENTE/PS/PSC', 'CUENTAS COMERCIALES POR PAGAR Y OTRAS CU/PS/PSC', 'CUENTAS POR PAGAR A ENTIDADES RELACIONAD/PS/PSC', 'PASIVOS POR IMPUESTOS CORRIENTES/PS/PSC', 'PROVISIONES CORRIENTES POR BENEFICIOS A/PS/PSC', 'OTROS PASIVOS CORRIENTES/PS/PSC', 'PASIVO A LARGO PLAZO/PS/PSL', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSL', 'OTROS PASIVOS FINANCIEROS/PS/PSL', 'OTROS PASIVOS NO FINANCIEROS/PS/PSL', 'PASIVO POR IMPUESTOS DIFERIDOS/PS/PSL', 'OBLIGACIONES FINANCIEROS NO CORRIENTES/PS/PSL', 'PROVISIONES NO CORRIENTES POR BENEFICIOS/PS/PSL', 'OTRAS PROVISIONES/PS/PSL', 'OTROS PASIVOS NO CORRIENTES/PS/PSL', 'PATRIMONIO/PT', 'CAPITAL SOCIAL/PT', 'SUPERµVIT DE CAPITAL/PT', 'RESERVAS/PT', 'RESULTADO EJERCICIO/PT', 'COTIZACIONES-AUXIL./APORTES NO VINC./PT','OTROS RUBROS DEL PATRIMONIO/PT', 'ACCIONES PROPIAS EN CARTERA/PT', 'OTRO RESULTADO INTEGRAL ACUMULADO/PT', 'OTRAS PARTICIPACIONES EN EL PATRIMONIO/PT', 'PRIMA DE EMISIÓN/PT', 'GANANCIAS ACUMULADAS/PT', 'CAPITAL EMITIDO/PT', 'Fecha_Captura']\n\n\tdfPasivosPatrimonio = Financiero_Pasivos_Patrimonio(tree,Id_Cliente,Fecha_Captura,PathBalances, BalancePasivosPatrimonio, Id_PasivoPatrimonio, Id_Info_Financiera)\n\ttbl_F_Pasivos_Patrimonio = Validar_Formato_Tabla(dfPasivosPatrimonio, DicPasivosPatrimonioOrdenado)\n\tDicPasivosPatrimonioOrdenado = 
['Id_Pasivo_Patrimonio','Id_Info_Financiera','Fecha_Efecto','Total_Pasivo_Patrimonio','Total_Pasivo','Total_Pasivo_Cte','Obligaciones_Fra','Pasivo_Est_Provi','Provi_Diversa','Otro_Pasivo_Fro','Otro_Pasivo_No_Fro','Cuentas_x_Pagar_Cte','Otras_Cuentas_x_Pagar_Cte','Cuentas_x_Pagar_Ent_Rel','Pasivo_Impto_Cte','Provi_Cte_Empleado','Otro_Pasivo_Cte','Total_Pasivo_No_Cte','Pasivo_Estimado_Provisiones_No_Cte','Otro_Pasivo_Fro_No_Cte','Otro_Pasivo_No_Fro_No_Cte','Pasivo_Impto_Diferido_No_Cte','Obligaciones_Fro_No_Cte','Provisiones_Beneficios_No_Cte','Otras_Provisiones_No_Cte','Otro_Pasivo_No_Cte','Patrimonio','Capital_Social_Pt','Superavit_Capital_Pt','Reserva_Pt','Resultado_Ejercicio_Pt','Cotiza_Aux_Aporte_No_Vinc_Pt','Otros_Rubros_Pt','Acciones_Propias_Cartera_Pt','Otro_Resultado_Integral_Acum_Pt','Otras_Participaciones_Pt','Primas_Emision_Pt','Ganancias_Acum_Pt','Capital_Emitido_Pt','Fecha_Captura']\n\ttbl_F_Pasivos_Patrimonio.columns = DicPasivosPatrimonioOrdenado\n\tDicPasivosPatrimonioOrdenado = ['Id_Info_Financiera','Fecha_Efecto','Total_Pasivo_Patrimonio','Total_Pasivo','Total_Pasivo_Cte','Obligaciones_Fra','Pasivo_Est_Provi','Provi_Diversa','Otro_Pasivo_Fro','Otro_Pasivo_No_Fro','Cuentas_x_Pagar_Cte','Otras_Cuentas_x_Pagar_Cte','Cuentas_x_Pagar_Ent_Rel','Pasivo_Impto_Cte','Provi_Cte_Empleado','Otro_Pasivo_Cte','Total_Pasivo_No_Cte','Pasivo_Estimado_Provisiones_No_Cte','Otro_Pasivo_Fro_No_Cte','Otro_Pasivo_No_Fro_No_Cte','Pasivo_Impto_Diferido_No_Cte','Obligaciones_Fro_No_Cte','Provisiones_Beneficios_No_Cte','Otras_Provisiones_No_Cte','Otro_Pasivo_No_Cte','Patrimonio','Capital_Social_Pt','Superavit_Capital_Pt','Reserva_Pt','Resultado_Ejercicio_Pt','Cotiza_Aux_Aporte_No_Vinc_Pt','Otros_Rubros_Pt','Acciones_Propias_Cartera_Pt','Otro_Resultado_Integral_Acum_Pt','Otras_Participaciones_Pt','Primas_Emision_Pt','Ganancias_Acum_Pt','Capital_Emitido_Pt','Fecha_Captura']\n\ttbl_F_Pasivos_Patrimonio = Validar_Formato_Tabla(tbl_F_Pasivos_Patrimonio, DicPasivosPatrimonioOrdenado)\n\n\ttry:\n\t\tDicPasivosPatrimonioOrdenadoActual = ['Id_PasivoPatrimonio', 'Id_Info_Financiera', 'Fecha_Efecto', 'PASIVO + PATRIMONIO', 'PASIVO/PS', 'PASIVO A CORTO PLAZO/PS/PSC', 'OBLIGACIONES FINANCIERAS/PS/PSC', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSC', 'PROVISIONES DIVERSAS/PS/PSC', 'OTROS PASIVOS FINANCIEROS/PS/PSC', 'OTROS PASIVOS NO FINANCIEROS/PS/PSC', 'CUENTAS POR PAGAR CORRIENTE/PS/PSC', 'CUENTAS COMERCIALES POR PAGAR Y OTRAS CU/PS/PSC', 'CUENTAS POR PAGAR A ENTIDADES RELACIONAD/PS/PSC', 'PASIVOS POR IMPUESTOS CORRIENTES/PS/PSC', 'PROVISIONES CORRIENTES POR BENEFICIOS A/PS/PSC', 'OTROS PASIVOS CORRIENTES/PS/PSC', 'PASIVO A LARGO PLAZO/PS/PSL', 'PASIVOS ESTIMADOS Y PROVISIONES/PS/PSL', 'OTROS PASIVOS FINANCIEROS/PS/PSL', 'OTROS PASIVOS NO FINANCIEROS/PS/PSL', 'PASIVO POR IMPUESTOS DIFERIDOS/PS/PSL', 'OBLIGACIONES FINANCIEROS NO CORRIENTES/PS/PSL', 'PROVISIONES NO CORRIENTES POR BENEFICIOS/PS/PSL', 'OTRAS PROVISIONES/PS/PSL', 'OTROS PASIVOS NO CORRIENTES/PS/PSL', 'PATRIMONIO/PT', 'CAPITAL SOCIAL/PT', 'SUPERµVIT DE CAPITAL/PT', 'RESERVAS/PT', 'RESULTADO EJERCICIO/PT', 'COTIZACIONES-AUXIL./APORTES NO VINC./PT','OTROS RUBROS DEL PATRIMONIO/PT', 'ACCIONES PROPIAS EN CARTERA/PT', 'OTRO RESULTADO INTEGRAL ACUMULADO/PT', 'OTRAS PARTICIPACIONES EN EL PATRIMONIO/PT', 'PRIMA DE EMISIÓN/PT', 'GANANCIAS ACUMULADAS/PT', 'CAPITAL EMITIDO/PT', 'Fecha_Captura']\n\t\tdfPasivosPatrimonioActual = Financiero_Pasivos_Patrimonio(tree,Id_Cliente,Fecha_Captura,PathBalancesActual, BalancePasivosPatrimonio, Id_PasivoPatrimonio, 
Id_Info_Financiera)\n\t\ttbl_F_Pasivos_PatrimonioActual = Validar_Formato_Tabla(dfPasivosPatrimonioActual, DicPasivosPatrimonioOrdenadoActual)\n\t\tDicPasivosPatrimonioOrdenadoActual = ['Id_Pasivo_Patrimonio','Id_Info_Financiera','Fecha_Efecto','Total_Pasivo_Patrimonio','Total_Pasivo','Total_Pasivo_Cte','Obligaciones_Fra','Pasivo_Est_Provi','Provi_Diversa','Otro_Pasivo_Fro','Otro_Pasivo_No_Fro','Cuentas_x_Pagar_Cte','Otras_Cuentas_x_Pagar_Cte','Cuentas_x_Pagar_Ent_Rel','Pasivo_Impto_Cte','Provi_Cte_Empleado','Otro_Pasivo_Cte','Total_Pasivo_No_Cte','Pasivo_Estimado_Provisiones_No_Cte','Otro_Pasivo_Fro_No_Cte','Otro_Pasivo_No_Fro_No_Cte','Pasivo_Impto_Diferido_No_Cte','Obligaciones_Fro_No_Cte','Provisiones_Beneficios_No_Cte','Otras_Provisiones_No_Cte','Otro_Pasivo_No_Cte','Patrimonio','Capital_Social_Pt','Superavit_Capital_Pt','Reserva_Pt','Resultado_Ejercicio_Pt','Cotiza_Aux_Aporte_No_Vinc_Pt','Otros_Rubros_Pt','Acciones_Propias_Cartera_Pt','Otro_Resultado_Integral_Acum_Pt','Otras_Participaciones_Pt','Primas_Emision_Pt','Ganancias_Acum_Pt','Capital_Emitido_Pt','Fecha_Captura']\n\t\ttbl_F_Pasivos_PatrimonioActual.columns = DicPasivosPatrimonioOrdenadoActual\n\t\tDicPasivosPatrimonioOrdenadoActual = ['Id_Info_Financiera','Fecha_Efecto','Total_Pasivo_Patrimonio','Total_Pasivo','Total_Pasivo_Cte','Obligaciones_Fra','Pasivo_Est_Provi','Provi_Diversa','Otro_Pasivo_Fro','Otro_Pasivo_No_Fro','Cuentas_x_Pagar_Cte','Otras_Cuentas_x_Pagar_Cte','Cuentas_x_Pagar_Ent_Rel','Pasivo_Impto_Cte','Provi_Cte_Empleado','Otro_Pasivo_Cte','Total_Pasivo_No_Cte','Pasivo_Estimado_Provisiones_No_Cte','Otro_Pasivo_Fro_No_Cte','Otro_Pasivo_No_Fro_No_Cte','Pasivo_Impto_Diferido_No_Cte','Obligaciones_Fro_No_Cte','Provisiones_Beneficios_No_Cte','Otras_Provisiones_No_Cte','Otro_Pasivo_No_Cte','Patrimonio','Capital_Social_Pt','Superavit_Capital_Pt','Reserva_Pt','Resultado_Ejercicio_Pt','Cotiza_Aux_Aporte_No_Vinc_Pt','Otros_Rubros_Pt','Acciones_Propias_Cartera_Pt','Otro_Resultado_Integral_Acum_Pt','Otras_Participaciones_Pt','Primas_Emision_Pt','Ganancias_Acum_Pt','Capital_Emitido_Pt','Fecha_Captura']\n\t\ttbl_F_Pasivos_PatrimonioActual = Validar_Formato_Tabla(tbl_F_Pasivos_PatrimonioActual, DicPasivosPatrimonioOrdenadoActual)\n\t\ttbl_F_Pasivos_Patrimonio = Append(df1=tbl_F_Pasivos_PatrimonioActual,df2=tbl_F_Pasivos_Patrimonio)\n\texcept:\n\t\tprint(\"Sin informacion Actual\")\n\n\tDicPasivosPatrimonioOrdenado = ['Id_Info_Financiera','Nit_Cliente','Fecha_Efecto','Total_Pasivo_Patrimonio','Total_Pasivo','Total_Pasivo_Cte','Obligaciones_Fra','Pasivo_Est_Provi','Provi_Diversa','Otro_Pasivo_Fro','Otro_Pasivo_No_Fro','Cuentas_x_Pagar_Cte','Otras_Cuentas_x_Pagar_Cte','Cuentas_x_Pagar_Ent_Rel','Pasivo_Impto_Cte','Provi_Cte_Empleado','Otro_Pasivo_Cte','Total_Pasivo_No_Cte','Pasivo_Estimado_Provisiones_No_Cte','Otro_Pasivo_Fro_No_Cte','Otro_Pasivo_No_Fro_No_Cte','Pasivo_Impto_Diferido_No_Cte','Obligaciones_Fro_No_Cte','Provisiones_Beneficios_No_Cte','Otras_Provisiones_No_Cte','Otro_Pasivo_No_Cte','Patrimonio','Capital_Social_Pt','Superavit_Capital_Pt','Reserva_Pt','Resultado_Ejercicio_Pt','Cotiza_Aux_Aporte_No_Vinc_Pt','Otros_Rubros_Pt','Acciones_Propias_Cartera_Pt','Otro_Resultado_Integral_Acum_Pt','Otras_Participaciones_Pt','Primas_Emision_Pt','Ganancias_Acum_Pt','Capital_Emitido_Pt','Fecha_Captura']\n\ttbl_F_Pasivos_Patrimonio[\"Nit_Cliente\"]=NIT\n\ttbl_F_Pasivos_Patrimonio = Validar_Formato_Tabla(tbl_F_Pasivos_Patrimonio, 
DicPasivosPatrimonioOrdenado)\n\ttbl_F_Pasivos_Patrimonio=tbl_F_Pasivos_Patrimonio.sort_values('Fecha_Efecto',ascending=False)\n\n\t#Guardar_csv(tbl_F_Pasivos_Patrimonio, PathCarpetaResultados, f\"{NIT}_tbl_F_Pasivos_Patrimonio.csv\")\n\ting_tbl_F_Pasivos_Patrimonio(conn,cursor,tbl_F_Pasivos_Patrimonio)\n\n\t\"\"\"###tbl_F_Resultados_Ejercicio\"\"\"\n\n\tId_Result_Ejercicio = np.nan\n\tBalanceResultados = \"R\"\n\tDicResultadosOrdenado = ['Id_Info_Financiera', 'Fecha_Efecto', 'RESULTADO DEL EJERCICIO', 'RESULTADO ANTES DE IMPUESTOS/R', 'RESULTADOS OPERACIONALES/R', 'TOTAL GASTOS/R', 'COSTOS Y GASTOS OPERACIONALES/R', 'GASTOS DE ADMINISTRACION/R', 'GASTOS DE VENTAS/R','GASTOS DE DISTRIBUCIÓN/R', 'GASTOS POR BENEFICIOS A LOS EMPLEADOS/R', 'OTROS GASTOS OPERATIVOS/R', 'COSTO DE VENTAS/R', 'NO OPERACIONALES/R', 'GASTOS FINANCIEROS/R', 'TOTAL INGRESOS/R', 'INGRESOS OPERACIONALES/R', 'VENTAS/R', 'OTROS INGRESOS OPERACIONALES/R', 'INGRESOS NO OPERACIONALES/R', 'INGRESOS EXTRAORDINARIOS/R', 'INGRESOS FINANCIEROS/R', 'RESULTADO NO OPERACIONAL/R', 'RESULTADO FINANCIERO/R', 'RESULTADO DE IMPUESTOS/R', 'AJUSTES POR INFLACIàN/R', 'IMPUESTO DE RENTA Y COMPLEMENTARIOS/R', 'Fecha_Captura']\n\tdfResultados = Financiero_Resultados(tree,Id_Cliente,Fecha_Captura,PathBalances, BalanceResultados, Id_Result_Ejercicio, Id_Info_Financiera)\n\ttbl_F_Resultados_Ejercicio = Validar_Formato_Tabla(dfResultados, DicResultadosOrdenado)\n\tDicResultadosOrdenado = ['Id_Info_Financiera','Fecha_Efecto','Resultado_Ejercicio','Resultado_Antes_Impto','Resultado_Op','Total_Gastos','Costos_Gastos_Op','Gastos_Op_Admin','Gastos_Op_Venta','Gastos_Dist','Gastos_Beneficio_Empl','Otros_Gastos_Op','Costos_Venta','Gastos_No_Op','Gastos_Fro','Total_Ingresos','Ingresos_Operacional','Ventas','Otros_Ingresos_Op','Ingresos_No_Op','Ingresos_Extraordinarios','Ingresos_Fro','Resultados_No_Op','Resultados_Fro','Resultados_Impuesto','Ajuste_Inflacion','Impto_Renta','Fecha_Captura']\n\ttbl_F_Resultados_Ejercicio.columns = DicResultadosOrdenado\n\n\ttry:\n\t\tDicResultadosOrdenadoActual = ['Id_Info_Financiera', 'Fecha_Efecto', 'RESULTADO DEL EJERCICIO', 'RESULTADO ANTES DE IMPUESTOS/R', 'RESULTADOS OPERACIONALES/R', 'TOTAL GASTOS/R', 'COSTOS Y GASTOS OPERACIONALES/R', 'GASTOS DE ADMINISTRACION/R', 'GASTOS DE VENTAS/R','GASTOS DE DISTRIBUCIÓN/R', 'GASTOS POR BENEFICIOS A LOS EMPLEADOS/R', 'OTROS GASTOS OPERATIVOS/R', 'COSTO DE VENTAS/R', 'NO OPERACIONALES/R', 'GASTOS FINANCIEROS/R', 'TOTAL INGRESOS/R', 'INGRESOS OPERACIONALES/R', 'VENTAS/R', 'OTROS INGRESOS OPERACIONALES/R', 'INGRESOS NO OPERACIONALES/R', 'INGRESOS EXTRAORDINARIOS/R', 'INGRESOS FINANCIEROS/R', 'RESULTADO NO OPERACIONAL/R', 'RESULTADO FINANCIERO/R', 'RESULTADO DE IMPUESTOS/R', 'AJUSTES POR INFLACIàN/R', 'IMPUESTO DE RENTA Y COMPLEMENTARIOS/R', 'Fecha_Captura']\n\t\tdfResultadosActual = Financiero_Resultados(tree,Id_Cliente,Fecha_Captura,PathBalancesActual, BalanceResultados, Id_Result_Ejercicio, Id_Info_Financiera)\n\t\ttbl_F_Resultados_EjercicioActual = Validar_Formato_Tabla(dfResultadosActual, DicResultadosOrdenadoActual)\n\t\tDicResultadosOrdenadoActual = 
['Id_Info_Financiera','Fecha_Efecto','Resultado_Ejercicio','Resultado_Antes_Impto','Resultado_Op','Total_Gastos','Costos_Gastos_Op','Gastos_Op_Admin','Gastos_Op_Venta','Gastos_Dist','Gastos_Beneficio_Empl','Otros_Gastos_Op','Costos_Venta','Gastos_No_Op','Gastos_Fro','Total_Ingresos','Ingresos_Operacional','Ventas','Otros_Ingresos_Op','Ingresos_No_Op','Ingresos_Extraordinarios','Ingresos_Fro','Resultados_No_Op','Resultados_Fro','Resultados_Impuesto','Ajuste_Inflacion','Impto_Renta','Fecha_Captura']\n\t\ttbl_F_Resultados_EjercicioActual.columns = DicResultadosOrdenadoActual\n\t\ttbl_F_Resultados_Ejercicio = Append(df1=tbl_F_Resultados_EjercicioActual,df2=tbl_F_Resultados_Ejercicio)\n\texcept:\n\t\tprint(\"Sin informacion actual\")\n\n\tDicResultadosOrdenado = ['Id_Info_Financiera','Nit_Cliente','Fecha_Efecto','Resultado_Ejercicio','Resultado_Antes_Impto','Resultado_Op','Total_Gastos','Costos_Gastos_Op','Gastos_Op_Admin','Gastos_Op_Venta','Gastos_Dist','Gastos_Beneficio_Empl','Otros_Gastos_Op','Costos_Venta','Gastos_No_Op','Gastos_Fro','Total_Ingresos','Ingresos_Operacional','Ventas','Otros_Ingresos_Op','Ingresos_No_Op','Ingresos_Extraordinarios','Ingresos_Fro','Resultados_No_Op','Resultados_Fro','Resultados_Impuesto','Ajuste_Inflacion','Impto_Renta','Fecha_Captura']\n\ttbl_F_Resultados_Ejercicio['Nit_Cliente'] = int(NIT)\n\ttbl_F_Resultados_Ejercicio = Validar_Formato_Tabla(tbl_F_Resultados_Ejercicio, DicResultadosOrdenado)\n\n\ttbl_F_Resultados_Ejercicio=tbl_F_Resultados_Ejercicio.sort_values('Fecha_Efecto',ascending=False)\n\n\t#Guardar_csv(tbl_F_Resultados_Ejercicio, PathCarpetaResultados, f\"{NIT}_tbl_F_Resultados_Ejercicio.csv\")\n\ting_tbl_F_Resultados_Ejercicio(conn,cursor,tbl_F_Resultados_Ejercicio)\n\n\t\"\"\"###tbl_F_Participantes\"\"\"\n\n\tId_Participante = np.nan\n\tPathInfoCorporativa_Partic = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/VINCFINAN/PARTICIPACIONES/PARTICIPACION\"\n\tdfInfoCorporativa_Partic = Extraer_Dataframe(Directorio,tree,PathInfoCorporativa_Partic)\n\tdicInfoCorporativa_Partic = ['RAZONSOCIAL', 'FEC_EFECTO', 'PORCENTAJE', 'IDENT_EMPRESA']\n\tdfInfoCorporativa_Partic = Validar_Formato_Tabla(dfInfoCorporativa_Partic, dicInfoCorporativa_Partic)\n\tdicInfoCorporativa_Partic = ['Nombre_Participante', 'Fecha_Efecto', 'Porcentaje', 'Doc_Participante']\n\tdfInfoCorporativa_Partic.columns = dicInfoCorporativa_Partic\n\tdfInfoCorporativa_Partic[\"Nit_Cliente\"] = NIT\n\tdfInfoCorporativa_Partic[\"Fecha_Captura\"] = Fecha_Captura\n\tdicInfoCorporativa_Partic = ['Nit_Cliente', 'Nombre_Participante', 'Doc_Participante', 'Porcentaje', 'Fecha_Efecto', 'Fecha_Captura']\n\ttbl_F_Participantes = Validar_Formato_Tabla(dfInfoCorporativa_Partic, dicInfoCorporativa_Partic)\n\n\t#Guardar_csv(tbl_F_Participantes, PathCarpetaResultados, f\"{NIT}_tbl_F_Participantes.csv\")\n\ting_tbl_F_Participantes(conn,cursor,tbl_F_Participantes)\n\n\t\"\"\"###tbl_F_Accionistas\"\"\"\n\n\tId_Accionista = np.nan\n\tPathInfoCorporativa_Accion = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/VINCFINAN/ACCIONISTAS/ACCIONISTA\"\n\tdfInfoCorporativa_Accion = Extraer_Dataframe(Directorio,tree,PathInfoCorporativa_Accion)\n\tdicInfoCorporativa_Accion = ['RAZONSOCIAL', 'FEC_EFECTO', 'ID_FISCAL']\n\tdfInfoCorporativa_Accion = Validar_Formato_Tabla(dfInfoCorporativa_Accion, dicInfoCorporativa_Accion)\n\tdicInfoCorporativa_Accion = ['Nombre_Accionista', 'Fecha_Efecto', 'Doc_Accionista']\n\tdfInfoCorporativa_Accion.columns = 
dicInfoCorporativa_Accion\n\tdfInfoCorporativa_Accion['Nit_Cliente'] = int(NIT)\n\tdfInfoCorporativa_Accion['Fecha_Captura'] = Fecha_Captura\n\tdicInfoCorporativa_Accion = ['Nit_Cliente', 'Doc_Accionista', 'Nombre_Accionista', 'Fecha_Efecto', 'Fecha_Captura']\n\ttbl_F_Accionistas = Validar_Formato_Tabla(dfInfoCorporativa_Accion, dicInfoCorporativa_Accion)\n\t#Guardar_csv(tbl_F_Accionistas, PathCarpetaResultados, f\"{NIT}_tbl_F_Accionistas.csv\")\n\ting_tbl_F_Accionistas(conn,cursor,tbl_F_Accionistas)\n\n\t\"\"\"###tbl_F_Administradores\"\"\"\n\n\tPathInformacionComercial_Fec_Actualiz = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ADMINISTRADORES/FEC_ACTUALIZACION\"\n\ttry:\n\t\tFecha_Actualizacion = ConsultaElemento(root,PathInformacionComercial_Fec_Actualiz)\n\texcept:\n\t\tFecha_Actualizacion = np.nan\n\n\tId_Administrador = np.nan\n\n\tPathInformacionComercial_Admin = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ADMINISTRADORES/ADMINISTRADOR\"\n\tdicInformacionComercial_Admin = [\"DESC_CARGO\",\"NOMBRE\",\"APELLIDO1\", \"APELLIDO2\", \"ID_VALOR\", \"FEC_EFECTO\"]\n\tdfInformacionComercial_Admin = Extraer_Dataframe_Dic(Directorio,tree,root,PathInformacionComercial_Admin, dicInformacionComercial_Admin)\n\tdicInformacionComercial_Admin = ['DESC_CARGO/ADMINISTRADOR', 'NOMBRE/ADMINISTRADOR', 'APELLIDO1/ADMINISTRADOR', 'APELLIDO2/ADMINISTRADOR', 'ID_VALOR/ADMINISTRADOR', 'FEC_EFECTO/ADMINISTRADOR']\n\tdfInformacionComercial_Admin = Validar_Formato_Tabla(dfInformacionComercial_Admin, dicInformacionComercial_Admin)\n\n\tdicNombre = [\"NOMBRE/ADMINISTRADOR\", \"APELLIDO1/ADMINISTRADOR\", \"APELLIDO2/ADMINISTRADOR\"]\n\tdfNombre = dfInformacionComercial_Admin[dicNombre]\n\tdfNombre = dfNombre.replace({pd.np.nan: \"\"})\n\tdfNombre = Combinar_Registros(dfNombre, PathInformacionComercial_Admin)\n\tdicNombre = Validar_Formato_Tabla(dfNombre, dicNombre)\n\tdfInformacionComercial_Admin = dfInformacionComercial_Admin.drop(dicNombre, axis=1)\n\tdfInformacionComercial_Admin = Concatenar(df1=dfInformacionComercial_Admin, df2=dfNombre)\n\n\tdicInformacionComercial_Admin = ['Cargo_Administrador', 'Doc_Administrador', 'Fecha_Efecto', 'Nombre_Administrador']\n\tdfInformacionComercial_Admin.columns = dicInformacionComercial_Admin\n\tdfInformacionComercial_Admin[\"Nit_Cliente\"] = int(NIT)\n\tdfInformacionComercial_Admin[\"Fecha_Captura\"] = Fecha_Captura\n\tdfInformacionComercial_Admin[\"Fecha_Actualizacion\"] = Fecha_Actualizacion\n\tdicInformacionComercial_Admin=['Nit_Cliente', 'Fecha_Actualizacion', 'Doc_Administrador', 'Nombre_Administrador', 'Cargo_Administrador', 'Fecha_Efecto', 'Fecha_Captura']\n\ttbl_F_Administradores = Validar_Formato_Tabla(dfInformacionComercial_Admin,dicInformacionComercial_Admin)\n\t#Guardar_csv(tbl_F_Administradores, PathCarpetaResultados, f\"{NIT}_tbl_F_Administradores.csv\")\n\ting_tbl_F_Administradores(conn,cursor,tbl_F_Administradores)\n\n\n\t\"\"\"###tbl_F_Establecimientos\"\"\"\n\n\tPathEstalecimientos = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/SUCURSALES/SUCURSAL\"\n\ttry:\n\t\tdicEstablecimientos = [\"ROTULO\",\"DIR_DES_PROVINCIA\",\"DES_TIPO_EXPLOTACION\",\"FECHA_SITUACION\"]\n\t\tdfEstablecimientos = Extraer_Dataframe_Dic(Directorio,tree,root,PathEstalecimientos, dicEstablecimientos)\n\t\tdicEstablecimientos = [\"Nombre_Establecimiento\", \"Departamento\",\"Tipo_Explotacion\",\"Fecha_Efecto\"]\n\t\tdfEstablecimientos.columns = 
dicEstablecimientos\n\t\tdfEstablecimientos[\"Nit_Cliente\"] = NIT\n\t\tdfEstablecimientos[\"Fecha_Captura\"] = Fecha_Captura\n\texcept:\n\t\tdfEstablecimientos = pd.DataFrame()\n\n\tdicEstablecimientos = ['Nit_Cliente', 'Nombre_Establecimiento', 'Tipo_Explotacion', 'Departamento', 'Fecha_Efecto', 'Fecha_Captura']\n\ttbl_F_Establecimientos = Validar_Formato_Tabla(dfEstablecimientos, dicEstablecimientos)\n\t#Guardar_csv(tbl_F_Establecimientos, PathCarpetaResultados, f\"{NIT}_tbl_F_Establecimientos.csv\")\n\ting_tbl_F_Establecimientos(conn,cursor,tbl_F_Establecimientos)\n\n\t\"\"\"###tbl_F_Incidencias\"\"\"\n\n\tdicIncidencias = [\"FEC_EFECTO\",\"DESC_MUNICIPIO\",\"COD_INCIDENCIA\",\"DES_INCIDENCIA\",\"TEXTO\",\"DEMANDANTE_RAZONSOCIAL\"]\n\ttry:\n\t\tPathIncidenciasVigentes = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/INCIDENCIAS/JUDICIALES/VIGENTES/INCIDENCIA\"\n\t\tdfIncidenciasVigentes = Extraer_Dataframe_Dic(Directorio,tree,root,PathIncidenciasVigentes, dicIncidencias)\n\t\tdicIncidenciasVigentes = [\"FEC_EFECTO/INCIDENCIA\",\"DESC_MUNICIPIO/INCIDENCIA\",\"COD_INCIDENCIA/INCIDENCIA\",\"DES_INCIDENCIA/INCIDENCIA\",\"TEXTO/INCIDENCIA\",\"DEMANDANTE_RAZONSOCIAL/INCIDENCIA\"]\n\t\tdfIncidenciasVigentes = Validar_Formato_Tabla(dfIncidenciasVigentes, dicIncidenciasVigentes)\n\t\tdfIncidenciasVigentes[\"Estado_Incidencia\"] = \"VIGENTE\"\n\texcept:\n\t\t#print(\"Sin incidencias vigentes\")\n\t\tdfIncidenciasVigentes = pd.DataFrame()\n\n\ttry:\n\t\tPathIncidenciasFin = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/INCIDENCIAS/JUDICIALES/FINALIZADAS/INCIDENCIA\"\n\t\tdfIncidenciasFin = Extraer_Dataframe_Dic(Directorio,tree,root,PathIncidenciasFin, dicIncidencias)\n\t\tdicIncidenciasFin = [\"FEC_EFECTO/INCIDENCIA\",\"DESC_MUNICIPIO/INCIDENCIA\",\"COD_INCIDENCIA/INCIDENCIA\",\"DES_INCIDENCIA/INCIDENCIA\",\"TEXTO/INCIDENCIA\",\"DEMANDANTE_RAZONSOCIAL/INCIDENCIA\"]\n\t\tdfIncidenciasFin = Validar_Formato_Tabla(dfIncidenciasFin, dicIncidenciasFin)\n\t\tdfIncidenciasFin['Estado_Incidencia'] = 'FINALIZADO'\n\texcept:\n\t\t#print(\"Sin incidencias finalizadas\")\n\t\tdfIncidenciasFin = pd.DataFrame()\n\n\ttbl_F_Incidencias = Append(df1=dfIncidenciasFin,df2=dfIncidenciasVigentes)\n\tdicIncidencias = [\"Fecha_Efecto\",\"Municipio\",\"Cod_Incidencia\",\"Tipo_Incidencia\",\"Descripcion_Incidencia\",\"Demandante\", \"Estado_Incidencia\"]\n\ttbl_F_Incidencias.columns = dicIncidencias\n\n\ttry:\n\t\tPathIncidenciasTotal = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/INCIDENCIAS/JUDICIALES/VIGENTES/NUMTOTAL\"\n\t\tIncidenciasTotal = ConsultaElemento(root,PathIncidenciasTotal)\n\t\ttbl_F_Incidencias[\"Total_Incidencias\"] = IncidenciasTotal\n\texcept:\n\t\ttbl_F_Incidencias[\"Total_Incidencias\"] = np.nan\n\n\ttbl_F_Incidencias[\"Fecha_Captura\"] = Fecha_Captura\n\ttbl_F_Incidencias[\"Nit_Cliente\"] = NIT\n\n\tdicIncidencias = [\"Nit_Cliente\",\"Fecha_Efecto\",\"Estado_Incidencia\",\"Municipio\",\"Cod_Incidencia\",\"Tipo_Incidencia\",\"Descripcion_Incidencia\",\"Demandante\",\"Total_Incidencias\",\"Fecha_Captura\"]\n\ttbl_F_Incidencias = Validar_Formato_Tabla(tbl_F_Incidencias,dicIncidencias)\n\t#Guardar_csv(tbl_F_Incidencias, PathCarpetaResultados, f\"{NIT}_tbl_F_Incidencias.csv\")\n\ting_tbl_F_Incidencias(conn,cursor,tbl_F_Incidencias)\n\n\t\"\"\"###Actividad Comercial\"\"\"\n\n\tPathActividades_codigo = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ACTIVIDADES/CODIGO\"\n\tdicActividades = 
[\"CODIGO\",\"DESC_FORMATO_LOCAL\"]\n\tdfActividades = Extraer_Dataframe_Actividades(tree,PathActividades_codigo, dicActividades)\n\tdicActividades = [\"Cod_Actividad\",\"Descripcion_Actividad\", \"Tipo_Actividad\"]\n\tdfActividades.columns = dicActividades\n\tdfActividades[\"Nit_Cliente\"]=NIT\n\tdfActividades[\"Fecha_Captura\"]=Fecha_Captura\n\tdicActividades = [\"Nit_Cliente\",\"Tipo_Actividad\",\"Cod_Actividad\",\"Descripcion_Actividad\",\"Fecha_Captura\"]\n\ttbl_F_Actividades = Validar_Formato_Tabla(dfActividades,dicActividades)\n\ting_tbl_F_Actividades(conn,cursor,tbl_F_Actividades)\n\n\t\"\"\"###Actividad Comercial Exterior\"\"\"\n\n\tActividadImportacion = \"IMPORTA\"\n\tActividadExportacion = \"EXPORTA\"\n\tdicActividadExterior = [\"Anno\",\"Fecha_Efecto\",\"Producto\",\"Valor\",\"Divisa\", \"Pais\"]\n\n\tPathActividadActual = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ACTEXTERNA/ACTUAL\"\n\ttry:\n\t\tdfActividadImportacionActual = Actividad_Exterior(tree,PathActividadActual, ActividadImportacion)\n\t\tdfActividadExportacionActual = Actividad_Exterior(tree,PathActividadActual, ActividadExportacion)\n\texcept:\n\t\tdfActividadImportacionActual = pd.DataFrame()\n\t\tdfActividadExportacionActual = pd.DataFrame()\n\t\t#print(\"Sin Actividad Actual\")\n\n\tPathActividadAnterior = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/ACTEXTERNA/ANTERIOR\"\n\ttry:\n\t\tdfActividadImportacionAnterior = Actividad_Exterior(tree,PathActividadAnterior, ActividadImportacion)\n\t\tdfActividadExportacionAnterior = Actividad_Exterior(tree,PathActividadAnterior, ActividadExportacion)\n\texcept:\n\t\tdfActividadImportacionAnterior = pd.DataFrame()\n\t\tdfActividadExportacionAnterior = pd.DataFrame()\n\t\t#print(\"Sin Actividad Actual\")\n\n\ttbl_F_Importaciones = Append(df1=dfActividadImportacionActual,df2=dfActividadImportacionAnterior)\n\tif tbl_F_Importaciones.empty:\n\t\ttbl_F_Importaciones = Validar_Formato_Tabla(tbl_F_Importaciones,dicActividadExterior)\n\telse:\n\t\ttbl_F_Importaciones.columns = dicActividadExterior\n\n\ttbl_F_Exportaciones = Append(df1=dfActividadExportacionActual,df2=dfActividadExportacionAnterior)\n\tif tbl_F_Exportaciones.empty:\n\t\ttbl_F_Exportaciones = Validar_Formato_Tabla(tbl_F_Exportaciones, dicActividadExterior)\n\telse:\n\t\ttbl_F_Exportaciones.columns = dicActividadExterior\n\n\ttbl_F_Importaciones[\"Fecha_Captura\"] = Fecha_Captura\n\ttbl_F_Exportaciones[\"Fecha_Captura\"] = Fecha_Captura\n\ttbl_F_Importaciones[\"Nit_Cliente\"] = NIT\n\ttbl_F_Exportaciones[\"Nit_Cliente\"] = NIT\n\n\tdicActividadExterior = [\"Nit_Cliente\",\"Anno\", \"Fecha_Efecto\",\"Producto\",\"Pais\",\"Valor\",\"Divisa\", \"Fecha_Captura\"]\n\ttbl_F_Importaciones = Validar_Formato_Tabla(tbl_F_Importaciones, dicActividadExterior)\n\ttbl_F_Exportaciones = Validar_Formato_Tabla(tbl_F_Exportaciones, dicActividadExterior)\n\t#Guardar_csv(tbl_F_Importaciones, PathCarpetaResultados, f\"{NIT}_tbl_F_Importaciones.csv\")\n\t#Guardar_csv(tbl_F_Exportaciones, PathCarpetaResultados, f\"{NIT}_tbl_F_Exportaciones.csv\")\n\ting_tbl_F_Importaciones(conn,cursor,tbl_F_Importaciones)\n\ting_tbl_F_Exportaciones(conn,cursor,tbl_F_Exportaciones)\n\n\t\"\"\"###Obligaciones\"\"\"\n\n\tPathObligaciones = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/OBLIGACIONES\"\n\tdicObligaciones = [\"Periodo_Obligacion\",\"Fecha_Ejecucion_Obligacion\",\"Fuente_Obligacion\",\"Situacion_Obligacion\",\"Tipo_Obligacion\"]\n\tdfObligaciones = 
Extraer_Dataframe_Obligaciones(tree,PathObligaciones,\"DES_SITU\",\"DES_TIPO\")\n\tdfObligaciones.columns = dicObligaciones\n\tdfObligaciones[\"Nit_Cliente\"] = NIT\n\tdfObligaciones[\"Fecha_Captura\"] = Fecha_Captura\n\tdicObligaciones = [\"Nit_Cliente\",\"Tipo_Obligacion\",\"Periodo_Obligacion\",\"Situacion_Obligacion\",\"Fecha_Ejecucion_Obligacion\",\"Fuente_Obligacion\",\"Fecha_Captura\"]\n\ttbl_F_Obligaciones = Validar_Formato_Tabla(dfObligaciones,dicObligaciones)\n\ting_tbl_F_Obligaciones(conn,cursor,tbl_F_Obligaciones)\n\n\t\"\"\"###Politica Comercial\"\"\"\n\n\tdicPolitica = [\"Nit_Cliente\",\"Tipo_Pol_Ccial\",\"Producto_Pol_Ccial\",\"Politica_Pol_CCial\",\"Fecha_Efecto_Pol_Ccial\",\"Porc_Nacional_Pol_Ccial\",\"Porc_Internacional_Pol_Ccial\",\"Fecha_Captura\"]\n\n\ttry:\n\t\tPathPoliticaVentas = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/POLVENTAS/VENTAS\"\n\t\tdfPoliticaVentas = Extraer_Dataframe_Politica_Ccial(NIT,Fecha_Captura,Directorio,tree,PathPoliticaVentas)\n\t\tdfPoliticaVentas = Validar_Formato_Tabla(dfPoliticaVentas,dicPolitica)\n\t\t#print(dfPoliticaVentas)\n\texcept:\n\t\tPathPoliticaVentas = pd.DataFrame(columns=dicPolitica)\n\n\ttry:\n\t\tPathPoliticaCompra = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/POLVENTAS/COMPRAS\"\n\t\tdfPoliticaCompra = Extraer_Dataframe_Politica_Ccial(NIT,Fecha_Captura,Directorio,tree,PathPoliticaCompra)\n\t\tdfPoliticaCompra = Validar_Formato_Tabla(dfPoliticaCompra,dicPolitica)\n\t\t#Print(dfPoliticaCompra)\n\texcept:\n\t\tPathPoliticaCompra = pd.DataFrame(columns=dicPolitica)\n\n\ttry:\n\t\ttbl_F_Politica_Comercial = Append(df1=dfPoliticaCompra,df2=dfPoliticaVentas)\n\texcept:\n\t\ttbl_F_Politica_Comercial = pd.DataFrame(columns=dicPolitica)\n\n\n\ting_tbl_F_Politica_Comercial(conn,cursor,tbl_F_Politica_Comercial)\n\n\t\"\"\"###Publicaciones de Prensa\"\"\"\n\n\ttry:\n\t\tPathPublicaciones_Prensa = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/PRENSA/ARTICULO\"\n\t\tdicPublicaciones_Prensa = [\"FEC_ARTICULO\",\"FUENTE_ARTICULO_LOCAL\",\"TEXTO_LOCAL\"]\n\t\tdfPublicaciones_Prensa = Extraer_Dataframe_Dic(Directorio,tree,root,PathPublicaciones_Prensa, dicPublicaciones_Prensa)\n\t\tdicPublicaciones_Prensa = [\"Fecha_Publicacion\",\"Fuente\",\"Resumen_Publicacion\"]\n\t\tdfPublicaciones_Prensa.columns = dicPublicaciones_Prensa\n\t\tdfPublicaciones_Prensa[\"Nit_Cliente\"]=NIT\n\t\tdfPublicaciones_Prensa[\"Tipo_Articulo\"]=\"Prensa\"\n\t\tdfPublicaciones_Prensa[\"Fecha_Captura\"]=Fecha_Captura\n\t\tdicPublicaciones_Prensa = [\"Nit_Cliente\",\"Fecha_Publicacion\",\"Fuente\",\"Tipo_Articulo\",\"Resumen_Publicacion\",\"Fecha_Captura\"]\n\t\ttbl_F_Publicaciones_Prensa = Validar_Formato_Tabla(dfPublicaciones_Prensa,dicPublicaciones_Prensa)\n\texcept:\n\t\ttbl_F_Publicaciones_Prensa = pd.DataFrame()\n\n\t#Guardar_csv(tbl_F_Publicaciones_Prensa, PathCarpetaResultados, f\"{NIT}_tbl_F_Publicaciones_Prensa.csv\")\n\ting_tbl_F_Publicaciones_Prensa(conn,cursor,tbl_F_Publicaciones_Prensa)\n\n\t\"\"\"###Publicaciones Legales\"\"\"\n\n\ttry:\n\t\tPathPublicaciones_Legales = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/PBLC_LEGAL/PUBLICACION\"\n\t\tdicPublicaciones_Legales = [\"FEC_EFECTO\",\"TIPO_ACTO\",\"DES_TIPO_ACTO\",\"FUENTE\",\"DES_FUENTE\"]\n\t\tdfPublicaciones_Legales = Extraer_Dataframe_Dic(Directorio,tree,root,PathPublicaciones_Legales, dicPublicaciones_Legales)\n\t\tdicPublicaciones_Legales = 
[\"Fecha_Acto\",\"Tipo_Acto\",\"Referencia\",\"Fuente\",\"Lugar_Publicacion\"]\n\t\tdfPublicaciones_Legales.columns = dicPublicaciones_Legales\n\t\tdfPublicaciones_Legales[\"Nit_Cliente\"]=NIT\n\t\tdfPublicaciones_Legales[\"Fecha_Captura\"]=Fecha_Captura\n\t\tdicPublicaciones_Legales = [\"Nit_Cliente\",\"Tipo_Acto\",\"Fecha_Acto\",\"Referencia\",\"Fuente\",\"Lugar_Publicacion\",\"Fecha_Captura\"]\n\t\ttbl_F_Publicaciones_Legales = Validar_Formato_Tabla(dfPublicaciones_Legales,dicPublicaciones_Legales)\n\texcept:\n\t\ttbl_F_Publicaciones_Legales = pd.DataFrame()\n\n\t#Guardar_csv(tbl_F_Publicaciones_Legales, PathCarpetaResultados, f\"{NIT}_tbl_F_Publicaciones_Legales.csv\")\n\ting_tbl_F_Publicaciones_Legales(conn,cursor,tbl_F_Publicaciones_Legales)\n\n\t\"\"\"###Relaciones Terceros\"\"\"\n\n\tdicRelaciones_Terceros = [\"RAZONSOCIAL\", \"IDENT_EMPRESA\"]\n\tdicRelaciones_Terceros2 = [\"Razon_Social\",\"Nit_Razon_Social\"]\n\n\ttry:\n\t\tPathRelaciones_Terceros_Bancos = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/RELTERCEROS/BANCOS/BANCO\"\n\t\tdfRelaciones_Terceros_Bancos = Extraer_Dataframe_Dic(Directorio,tree,root,PathRelaciones_Terceros_Bancos,dicRelaciones_Terceros)\n\t\tdfRelaciones_Terceros_Bancos.columns = dicRelaciones_Terceros2\n\t\tdfRelaciones_Terceros_Bancos[\"Tipo_Relacion\"]=\"BANCO\"\n\texcept:\n\t\tdfRelaciones_Terceros_Bancos = pd.DataFrame()\n\n\ttry:\n\t\tPathRelaciones_Terceros_Clientes = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/RELTERCEROS/CLIENTES/CLIENTE\"\n\t\tdfRelaciones_Terceros_Clientes = Extraer_Dataframe_Dic(Directorio,tree,root,PathRelaciones_Terceros_Clientes,dicRelaciones_Terceros)\n\t\tdfRelaciones_Terceros_Clientes.columns = dicRelaciones_Terceros2\n\t\tdfRelaciones_Terceros_Clientes[\"Tipo_Relacion\"]=\"CLIENTE\"\n\texcept:\n\t\tdfRelaciones_Terceros_Clientes = pd.DataFrame()\n\n\ttry:\n\t\tPathRelaciones_Terceros_Proveedores = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/RELTERCEROS/PROVEEDORES/PROVEEDOR\"\n\t\tdfRelaciones_Terceros_Proveedores = Extraer_Dataframe_Dic(Directorio,tree,root,PathRelaciones_Terceros_Proveedores,dicRelaciones_Terceros)\n\t\tdfRelaciones_Terceros_Proveedores.columns = dicRelaciones_Terceros2\n\t\tdfRelaciones_Terceros_Proveedores[\"Tipo_Relacion\"]=\"PROVEEDOR\"\n\texcept:\n\t\tdfRelaciones_Terceros_Proveedores = pd.DataFrame()\n\n\ttry:\n\t\tPathRelaciones_Terceros_Aseguradoras = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/RELTERCEROS/ASEGURADORAS/ASEGURADORA\"\n\t\tdfRelaciones_Terceros_Aseguradoras = Extraer_Dataframe_Dic(Directorio,tree,root,PathRelaciones_Terceros_Aseguradoras,dicRelaciones_Terceros)\n\t\tdfRelaciones_Terceros_Aseguradoras.columns = dicRelaciones_Terceros2\n\t\tdfRelaciones_Terceros_Aseguradoras[\"Tipo_Relacion\"]=\"ASEGURADORA\"\n\texcept:\n\t\tdfRelaciones_Terceros_Aseguradoras = pd.DataFrame()\n\n\ttbl_F_Relaciones_Terceros = Append(df1=dfRelaciones_Terceros_Bancos,df2=dfRelaciones_Terceros_Clientes,df3=dfRelaciones_Terceros_Proveedores,df4=dfRelaciones_Terceros_Aseguradoras)\n\ttbl_F_Relaciones_Terceros[\"Nit_Cliente\"]=NIT\n\ttbl_F_Relaciones_Terceros[\"Fecha_Captura\"]=Fecha_Captura\n\tdicRelaciones_Terceros = [\"Nit_Cliente\",\"Tipo_Relacion\",\"Razon_Social\",\"Nit_Razon_Social\",\"Fecha_Captura\"]\n\ttbl_F_Relaciones_Terceros = 
Validar_Formato_Tabla(tbl_F_Relaciones_Terceros,dicRelaciones_Terceros)\n\ting_tbl_F_Relaciones_Terceros(conn,cursor,tbl_F_Relaciones_Terceros)\n\n\ttry:\n\t\tdicIndicadores = ['Fecha_Efecto_Indicador_Fro','VENTAS/EVOLUCION','RESULTADOS/EVOLUCION','RENTABILIDAD/RENTABILIDAD','OPERACIONAL/RENTABILIDAD','PATRIMONIO/RENTABILIDAD','ACTIVO/RENTABILIDAD','COBERTURA/RENTABILIDAD','EBIT/RENTABILIDAD','EBITDA/RENTABILIDAD','ENDEUDAMIENTO/ENDEUDAMIENTO','CORTO_PLAZO/ENDEUDAMIENTO','SIN_VALORIZACION/ENDEUDAMIENTO','APALANCAMIENTO/ENDEUDAMIENTO','CARGA_FINANCIERA/ENDEUDAMIENTO','CAPITAL_TRABAJO/LIQUIDEZ','RAZON_CORRIENTE/LIQUIDEZ','PRUEBA_ACIDA/LIQUIDEZ','ROTACION_INVENTARIO/EFICIENCIA','CICLO_OPERACIONAL/EFICIENCIA','ROTACION_ACTIVOS/EFICIENCIA']\n\t\tdicIndicadoresFinancieros = ['Fecha_Efecto_Indicador_Fro','Evolucion_Ventas','Evolucion_Utilidad_Neta','Rentabilidad','Rentabilidad_Operacional','Rentabilidad_Patrimonio','Rentabilidad_Activo_Total','Cobertura_Gastos_Fro','EBIT','EBITDA','Endeudamiento','Concentracion_Corto_Plazo','Endeudamiento_Sin_Valorizacion','Apalancamiento_Fro','Carga_Fra','Capital_Trabajo','Razon_Cte','Prueba_Acida','Dias_Rotacion_Inventario','Dias_Ciclo_Operacional','Rotacion_Activos']\n\t\tPathIndicadoresFinancieros = \"./PRODUCTO_DEVUELTO/DATOS_PROD_DEVUELTO/INFORME_FINANCIERO_INTERNACIONAL/BALANCES/RATIOS/EJERCICIO\"\n\t\t#print(\"EL PATH FRO ES....\")\n\t\t#print(PathIndicadoresFinancieros)\n\t\tdfIndicadoresFinancieros = Financiero_Indicadores(tree,PathIndicadoresFinancieros, dicIndicadores)\n\t\tdfIndicadoresFinancieros.columns = dicIndicadoresFinancieros\n\t\tdfIndicadoresFinancieros[\"Nit_Cliente\"]=NIT\n\t\tdfIndicadoresFinancieros[\"Fecha_Captura\"]=Fecha_Captura\n\t\tdicIndicadoresFinancieros = ['Nit_Cliente','Fecha_Efecto_Indicador_Fro','Evolucion_Ventas','Evolucion_Utilidad_Neta','Rentabilidad','Rentabilidad_Operacional','Rentabilidad_Patrimonio','Rentabilidad_Activo_Total','Cobertura_Gastos_Fro','EBIT','EBITDA','Endeudamiento','Concentracion_Corto_Plazo','Endeudamiento_Sin_Valorizacion','Apalancamiento_Fro','Carga_Fra','Capital_Trabajo','Razon_Cte','Prueba_Acida','Dias_Rotacion_Inventario','Dias_Ciclo_Operacional','Rotacion_Activos','Fecha_Captura']\n\t\ttbl_F_Indicadores_Financieros = Validar_Formato_Tabla(dfIndicadoresFinancieros,dicIndicadoresFinancieros)\n\texcept:\n\t\ttbl_F_Indicadores_Financieros = pd.DataFrame(columns=dicIndicadoresFinancieros)\n\t\tprint(\"Error en el Indicador Financiero, no tiene\")\n\n\t#print(\"LOS VALORES SON\")\n\t#print(tbl_F_Indicadores_Financieros)\n\t\n\n\ting_tbl_F_Indicadores_Financieros(conn,cursor,tbl_F_Indicadores_Financieros)\n\n\tUptate_Tbls_Financieras(conn,cursor)\n\n\tcursor.close()\n\tconn.close()\n\n\n\n\n\n" ]
[ [ "pandas.concat", "pandas.notnull", "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
YuxinZou/mmclassification
[ "2037260ea6c98a3b115e97727e1151a1c2c32f7a", "2037260ea6c98a3b115e97727e1151a1c2c32f7a", "2037260ea6c98a3b115e97727e1151a1c2c32f7a", "2037260ea6c98a3b115e97727e1151a1c2c32f7a", "2037260ea6c98a3b115e97727e1151a1c2c32f7a" ]
[ "mmcls/core/export/test.py", "mmcls/datasets/base_dataset.py", "tools/visualizations/vis_pipeline.py", "mmcls/models/utils/embed.py", "mmcls/models/losses/asymmetric_loss.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport onnxruntime as ort\nimport torch\n\nfrom mmcls.models.classifiers import BaseClassifier\n\n\nclass ONNXRuntimeClassifier(BaseClassifier):\n \"\"\"Wrapper for classifier's inference with ONNXRuntime.\"\"\"\n\n def __init__(self, onnx_file, class_names, device_id):\n super(ONNXRuntimeClassifier, self).__init__()\n sess = ort.InferenceSession(onnx_file)\n\n providers = ['CPUExecutionProvider']\n options = [{}]\n is_cuda_available = ort.get_device() == 'GPU'\n if is_cuda_available:\n providers.insert(0, 'CUDAExecutionProvider')\n options.insert(0, {'device_id': device_id})\n sess.set_providers(providers, options)\n\n self.sess = sess\n self.CLASSES = class_names\n self.device_id = device_id\n self.io_binding = sess.io_binding()\n self.output_names = [_.name for _ in sess.get_outputs()]\n self.is_cuda_available = is_cuda_available\n\n def simple_test(self, img, img_metas, **kwargs):\n raise NotImplementedError('This method is not implemented.')\n\n def extract_feat(self, imgs):\n raise NotImplementedError('This method is not implemented.')\n\n def forward_train(self, imgs, **kwargs):\n raise NotImplementedError('This method is not implemented.')\n\n def forward_test(self, imgs, img_metas, **kwargs):\n input_data = imgs\n # set io binding for inputs/outputs\n device_type = 'cuda' if self.is_cuda_available else 'cpu'\n if not self.is_cuda_available:\n input_data = input_data.cpu()\n self.io_binding.bind_input(\n name='input',\n device_type=device_type,\n device_id=self.device_id,\n element_type=np.float32,\n shape=input_data.shape,\n buffer_ptr=input_data.data_ptr())\n\n for name in self.output_names:\n self.io_binding.bind_output(name)\n # run session to get outputs\n self.sess.run_with_iobinding(self.io_binding)\n results = self.io_binding.copy_outputs_to_cpu()[0]\n return list(results)\n\n\nclass TensorRTClassifier(BaseClassifier):\n\n def __init__(self, trt_file, class_names, device_id):\n super(TensorRTClassifier, self).__init__()\n from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin\n try:\n load_tensorrt_plugin()\n except (ImportError, ModuleNotFoundError):\n warnings.warn('If input model has custom op from mmcv, \\\n you may have to build mmcv with TensorRT from source.')\n model = TRTWraper(\n trt_file, input_names=['input'], output_names=['probs'])\n\n self.model = model\n self.device_id = device_id\n self.CLASSES = class_names\n\n def simple_test(self, img, img_metas, **kwargs):\n raise NotImplementedError('This method is not implemented.')\n\n def extract_feat(self, imgs):\n raise NotImplementedError('This method is not implemented.')\n\n def forward_train(self, imgs, **kwargs):\n raise NotImplementedError('This method is not implemented.')\n\n def forward_test(self, imgs, img_metas, **kwargs):\n input_data = imgs\n with torch.cuda.device(self.device_id), torch.no_grad():\n results = self.model({'input': input_data})['probs']\n results = results.detach().cpu().numpy()\n\n return list(results)\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport copy\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List\n\nimport mmcv\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmcls.core.evaluation import precision_recall_f1, support\nfrom mmcls.models.losses import accuracy\nfrom .pipelines import Compose\n\n\nclass BaseDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Base dataset.\n\n Args:\n data_prefix (str): the prefix of data path\n pipeline (list): a list of dict, where each element represents\n a operation defined in `mmcls.datasets.pipelines`\n ann_file (str | None): the annotation file. When ann_file is str,\n the subclass is expected to read from the ann_file. When ann_file\n is None, the subclass is expected to read according to data_prefix\n test_mode (bool): in train mode or test mode\n \"\"\"\n\n CLASSES = None\n\n def __init__(self,\n data_prefix,\n pipeline,\n classes=None,\n ann_file=None,\n test_mode=False):\n super(BaseDataset, self).__init__()\n self.ann_file = ann_file\n self.data_prefix = data_prefix\n self.test_mode = test_mode\n self.pipeline = Compose(pipeline)\n self.CLASSES = self.get_classes(classes)\n self.data_infos = self.load_annotations()\n\n @abstractmethod\n def load_annotations(self):\n pass\n\n @property\n def class_to_idx(self):\n \"\"\"Map mapping class name to class index.\n\n Returns:\n dict: mapping from class name to class index.\n \"\"\"\n\n return {_class: i for i, _class in enumerate(self.CLASSES)}\n\n def get_gt_labels(self):\n \"\"\"Get all ground-truth labels (categories).\n\n Returns:\n np.ndarray: categories for all images.\n \"\"\"\n\n gt_labels = np.array([data['gt_label'] for data in self.data_infos])\n return gt_labels\n\n def get_cat_ids(self, idx: int) -> List[int]:\n \"\"\"Get category id by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n cat_ids (List[int]): Image category of specified index.\n \"\"\"\n\n return [int(self.data_infos[idx]['gt_label'])]\n\n def prepare_data(self, idx):\n results = copy.deepcopy(self.data_infos[idx])\n return self.pipeline(results)\n\n def __len__(self):\n return len(self.data_infos)\n\n def __getitem__(self, idx):\n return self.prepare_data(idx)\n\n @classmethod\n def get_classes(cls, classes=None):\n \"\"\"Get class names of current dataset.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n\n Returns:\n tuple[str] or list[str]: Names of categories of the dataset.\n \"\"\"\n if classes is None:\n return cls.CLASSES\n\n if isinstance(classes, str):\n # take it as a file path\n class_names = mmcv.list_from_file(classes)\n elif isinstance(classes, (tuple, list)):\n class_names = classes\n else:\n raise ValueError(f'Unsupported type {type(classes)} of classes.')\n\n return class_names\n\n def evaluate(self,\n results,\n metric='accuracy',\n metric_options=None,\n indices=None,\n logger=None):\n \"\"\"Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n Default value is `accuracy`.\n metric_options (dict, optional): Options for calculating metrics.\n Allowed keys are 'topk', 'thrs' and 'average_mode'.\n Defaults to None.\n indices (list, optional): The indices of samples corresponding to\n the results. 
Defaults to None.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. Defaults to None.\n Returns:\n dict: evaluation results\n \"\"\"\n if metric_options is None:\n metric_options = {'topk': (1, 5)}\n if isinstance(metric, str):\n metrics = [metric]\n else:\n metrics = metric\n allowed_metrics = [\n 'accuracy', 'precision', 'recall', 'f1_score', 'support'\n ]\n eval_results = {}\n results = np.vstack(results)\n gt_labels = self.get_gt_labels()\n if indices is not None:\n gt_labels = gt_labels[indices]\n num_imgs = len(results)\n assert len(gt_labels) == num_imgs, 'dataset testing results should '\\\n 'be of the same length as gt_labels.'\n\n invalid_metrics = set(metrics) - set(allowed_metrics)\n if len(invalid_metrics) != 0:\n raise ValueError(f'metric {invalid_metrics} is not supported.')\n\n topk = metric_options.get('topk', (1, 5))\n thrs = metric_options.get('thrs')\n average_mode = metric_options.get('average_mode', 'macro')\n\n if 'accuracy' in metrics:\n if thrs is not None:\n acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)\n else:\n acc = accuracy(results, gt_labels, topk=topk)\n if isinstance(topk, tuple):\n eval_results_ = {\n f'accuracy_top-{k}': a\n for k, a in zip(topk, acc)\n }\n else:\n eval_results_ = {'accuracy': acc}\n if isinstance(thrs, tuple):\n for key, values in eval_results_.items():\n eval_results.update({\n f'{key}_thr_{thr:.2f}': value.item()\n for thr, value in zip(thrs, values)\n })\n else:\n eval_results.update(\n {k: v.item()\n for k, v in eval_results_.items()})\n\n if 'support' in metrics:\n support_value = support(\n results, gt_labels, average_mode=average_mode)\n eval_results['support'] = support_value\n\n precision_recall_f1_keys = ['precision', 'recall', 'f1_score']\n if len(set(metrics) & set(precision_recall_f1_keys)) != 0:\n if thrs is not None:\n precision_recall_f1_values = precision_recall_f1(\n results, gt_labels, average_mode=average_mode, thrs=thrs)\n else:\n precision_recall_f1_values = precision_recall_f1(\n results, gt_labels, average_mode=average_mode)\n for key, values in zip(precision_recall_f1_keys,\n precision_recall_f1_values):\n if key in metrics:\n if isinstance(thrs, tuple):\n eval_results.update({\n f'{key}_thr_{thr:.2f}': value\n for thr, value in zip(thrs, values)\n })\n else:\n eval_results[key] = values\n\n return eval_results\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport argparse\nimport copy\nimport itertools\nimport os\nimport re\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nimport cv2\nimport mmcv\nimport numpy as np\nfrom mmcv import Config, DictAction, ProgressBar\n\nfrom mmcls.core import visualization as vis\nfrom mmcls.datasets.builder import PIPELINES, build_dataset, build_from_cfg\nfrom mmcls.models.utils import to_2tuple\n\n# text style\nbright_style, reset_style = '\\x1b[1m', '\\x1b[0m'\nred_text, blue_text = '\\x1b[31m', '\\x1b[34m'\nwhite_background = '\\x1b[107m'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Visualize a Dataset Pipeline')\n parser.add_argument('config', help='config file path')\n parser.add_argument(\n '--skip-type',\n type=str,\n nargs='*',\n default=['ToTensor', 'Normalize', 'ImageToTensor', 'Collect'],\n help='the pipelines to skip when visualizing')\n parser.add_argument(\n '--output-dir',\n default='',\n type=str,\n help='folder to save output pictures, if not set, do not save.')\n parser.add_argument(\n '--phase',\n default='train',\n type=str,\n choices=['train', 'test', 'val'],\n help='phase of dataset to visualize, accept \"train\" \"test\" and \"val\".'\n ' Default train.')\n parser.add_argument(\n '--number',\n type=int,\n default=sys.maxsize,\n help='number of images selected to visualize, must bigger than 0. if '\n 'the number is bigger than length of dataset, show all the images in '\n 'dataset; default \"sys.maxsize\", show all images in dataset')\n parser.add_argument(\n '--mode',\n default='concat',\n type=str,\n choices=['original', 'transformed', 'concat', 'pipeline'],\n help='display mode; display original pictures or transformed pictures'\n ' or comparison pictures. \"original\" means show images load from disk'\n '; \"transformed\" means to show images after transformed; \"concat\" '\n 'means show images stitched by \"original\" and \"output\" images. '\n '\"pipeline\" means show all the intermediate images. Default concat.')\n parser.add_argument(\n '--show',\n default=False,\n action='store_true',\n help='whether to display images in pop-up window. Default False.')\n parser.add_argument(\n '--adaptive',\n default=False,\n action='store_true',\n help='whether to automatically adjust the visualization image size')\n parser.add_argument(\n '--min-edge-length',\n default=200,\n type=int,\n help='the min edge length when visualizing images, used when '\n '\"--adaptive\" is true. Default 200.')\n parser.add_argument(\n '--max-edge-length',\n default=800,\n type=int,\n help='the max edge length when visualizing images, used when '\n '\"--adaptive\" is true. Default 1000.')\n parser.add_argument(\n '--bgr2rgb',\n default=False,\n action='store_true',\n help='flip the color channel order of images')\n parser.add_argument(\n '--window-size',\n default='12*7',\n help='size of the window to display images, in format of \"$W*$H\".')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. If the value to '\n 'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n 'It also allows nested list/tuple values, e.g. key=\"[(a,b),(c,d)]\" '\n 'Note that the quotation marks are necessary and that no white space '\n 'is allowed.')\n parser.add_argument(\n '--show-options',\n nargs='+',\n action=DictAction,\n help='custom options for display. key-value pair in xxx=yyy. 
options '\n 'in `mmcls.core.visualization.ImshowInfosContextManager.put_img_infos`'\n )\n args = parser.parse_args()\n\n assert args.number > 0, \"'args.number' must be larger than zero.\"\n if args.window_size != '':\n assert re.match(r'\\d+\\*\\d+', args.window_size), \\\n \"'window-size' must be in format 'W*H'.\"\n if args.output_dir == '' and not args.show:\n raise ValueError(\"if '--output-dir' and '--show' are not set, \"\n 'nothing will happen when the program running.')\n\n if args.show_options is None:\n args.show_options = {}\n return args\n\n\ndef retrieve_data_cfg(config_path, skip_type, cfg_options, phase):\n cfg = Config.fromfile(config_path)\n if cfg_options is not None:\n cfg.merge_from_dict(cfg_options)\n data_cfg = cfg.data[phase]\n while 'dataset' in data_cfg:\n data_cfg = data_cfg['dataset']\n data_cfg['pipeline'] = [\n x for x in data_cfg.pipeline if x['type'] not in skip_type\n ]\n\n return cfg\n\n\ndef build_dataset_pipelines(cfg, phase):\n \"\"\"build dataset and pipeline from config.\n\n Separate the pipeline except 'LoadImageFromFile' step if\n 'LoadImageFromFile' in the pipeline.\n \"\"\"\n data_cfg = cfg.data[phase]\n loadimage_pipeline = []\n if len(data_cfg.pipeline\n ) != 0 and data_cfg.pipeline[0]['type'] == 'LoadImageFromFile':\n loadimage_pipeline.append(data_cfg.pipeline.pop(0))\n origin_pipeline = data_cfg.pipeline\n data_cfg.pipeline = loadimage_pipeline\n dataset = build_dataset(data_cfg)\n pipelines = {\n pipeline_cfg['type']: build_from_cfg(pipeline_cfg, PIPELINES)\n for pipeline_cfg in origin_pipeline\n }\n\n return dataset, pipelines\n\n\ndef prepare_imgs(args, imgs: List[np.ndarray], steps=None):\n \"\"\"prepare the showing picture.\"\"\"\n ori_shapes = [img.shape for img in imgs]\n # adaptive adjustment to rescale pictures\n if args.adaptive:\n for i, img in enumerate(imgs):\n imgs[i] = adaptive_size(img, args.min_edge_length,\n args.max_edge_length)\n else:\n # if src image is too large or too small,\n # warning a \"--adaptive\" message.\n for ori_h, ori_w, _ in ori_shapes:\n if (args.min_edge_length > ori_h or args.min_edge_length > ori_w\n or args.max_edge_length < ori_h\n or args.max_edge_length < ori_w):\n msg = red_text\n msg += 'The visualization picture is too small or too large to'\n msg += ' put text information on it, please add '\n msg += bright_style + red_text + white_background\n msg += '\"--adaptive\"'\n msg += reset_style + red_text\n msg += ' to adaptively rescale the showing pictures'\n msg += reset_style\n warnings.warn(msg)\n\n if len(imgs) == 1:\n return imgs[0]\n else:\n return concat_imgs(imgs, steps, ori_shapes)\n\n\ndef concat_imgs(imgs, steps, ori_shapes):\n \"\"\"Concat list of pictures into a single big picture, align height here.\"\"\"\n show_shapes = [img.shape for img in imgs]\n show_heights = [shape[0] for shape in show_shapes]\n show_widths = [shape[1] for shape in show_shapes]\n\n max_height = max(show_heights)\n text_height = 20\n font_size = 0.5\n pic_horizontal_gap = min(show_widths) // 10\n for i, img in enumerate(imgs):\n cur_height = show_heights[i]\n pad_height = max_height - cur_height\n pad_top, pad_bottom = to_2tuple(pad_height // 2)\n # handle instance that the pad_height is an odd number\n if pad_height % 2 == 1:\n pad_top = pad_top + 1\n pad_bottom += text_height * 3 # keep pxs to put step information text\n pad_left, pad_right = to_2tuple(pic_horizontal_gap)\n # make border\n img = cv2.copyMakeBorder(\n img,\n pad_top,\n pad_bottom,\n pad_left,\n pad_right,\n cv2.BORDER_CONSTANT,\n value=(255, 
255, 255))\n # put transform phase information in the bottom\n imgs[i] = cv2.putText(\n img=img,\n text=steps[i],\n org=(pic_horizontal_gap, max_height + text_height // 2),\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\n fontScale=font_size,\n color=(255, 0, 0),\n lineType=1)\n # put image size information in the bottom\n imgs[i] = cv2.putText(\n img=img,\n text=str(ori_shapes[i]),\n org=(pic_horizontal_gap, max_height + int(text_height * 1.5)),\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\n fontScale=font_size,\n color=(255, 0, 0),\n lineType=1)\n\n # Height alignment for concatenating\n board = np.concatenate(imgs, axis=1)\n return board\n\n\ndef adaptive_size(image, min_edge_length, max_edge_length, src_shape=None):\n \"\"\"rescale image if image is too small to put text like cifar.\"\"\"\n assert min_edge_length >= 0 and max_edge_length >= 0\n assert max_edge_length >= min_edge_length\n src_shape = image.shape if src_shape is None else src_shape\n image_h, image_w, _ = src_shape\n\n if image_h < min_edge_length or image_w < min_edge_length:\n image = mmcv.imrescale(\n image, min(min_edge_length / image_h, min_edge_length / image_h))\n if image_h > max_edge_length or image_w > max_edge_length:\n image = mmcv.imrescale(\n image, max(max_edge_length / image_h, max_edge_length / image_w))\n return image\n\n\ndef get_display_img(args, item, pipelines):\n \"\"\"get image to display.\"\"\"\n # srcs picture could be in RGB or BGR order due to different backends.\n if args.bgr2rgb:\n item['img'] = mmcv.bgr2rgb(item['img'])\n src_image = item['img'].copy()\n pipeline_images = [src_image]\n\n # get intermediate images through pipelines\n if args.mode in ['transformed', 'concat', 'pipeline']:\n for pipeline in pipelines.values():\n item = pipeline(item)\n trans_image = copy.deepcopy(item['img'])\n trans_image = np.ascontiguousarray(trans_image, dtype=np.uint8)\n pipeline_images.append(trans_image)\n\n # concatenate images to be showed according to mode\n if args.mode == 'original':\n image = prepare_imgs(args, [src_image], ['src'])\n elif args.mode == 'transformed':\n image = prepare_imgs(args, [pipeline_images[-1]], ['transformed'])\n elif args.mode == 'concat':\n steps = ['src', 'transformed']\n image = prepare_imgs(args, [pipeline_images[0], pipeline_images[-1]],\n steps)\n elif args.mode == 'pipeline':\n steps = ['src'] + list(pipelines.keys())\n image = prepare_imgs(args, pipeline_images, steps)\n\n return image\n\n\ndef main():\n args = parse_args()\n wind_w, wind_h = args.window_size.split('*')\n wind_w, wind_h = int(wind_w), int(wind_h) # showing windows size\n cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options,\n args.phase)\n\n dataset, pipelines = build_dataset_pipelines(cfg, args.phase)\n CLASSES = dataset.CLASSES\n display_number = min(args.number, len(dataset))\n progressBar = ProgressBar(display_number)\n\n with vis.ImshowInfosContextManager(fig_size=(wind_w, wind_h)) as manager:\n for i, item in enumerate(itertools.islice(dataset, display_number)):\n image = get_display_img(args, item, pipelines)\n\n # dist_path is None as default, means not saving pictures\n dist_path = None\n if args.output_dir:\n # some datasets don't have filenames, such as cifar\n src_path = item.get('filename', '{}.jpg'.format(i))\n dist_path = os.path.join(args.output_dir, Path(src_path).name)\n\n infos = dict(label=CLASSES[item['gt_label']])\n\n ret, _ = manager.put_img_infos(\n image,\n infos,\n font_size=20,\n out_file=dist_path,\n show=args.show,\n **args.show_options)\n\n progressBar.update()\n\n if 
ret == 1:\n print('\\nMannualy interrupted.')\n break\n\n\nif __name__ == '__main__':\n main()\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner.base_module import BaseModule\n\nfrom .helpers import to_2tuple\n\n\ndef resize_pos_embed(pos_embed,\n src_shape,\n dst_shape,\n mode='bicubic',\n num_extra_tokens=1):\n \"\"\"Resize pos_embed weights.\n\n Args:\n pos_embed (torch.Tensor): Position embedding weights with shape\n [1, L, C].\n src_shape (tuple): The resolution of downsampled origin training\n image, in format (H, W).\n dst_shape (tuple): The resolution of downsampled new training\n image, in format (H, W).\n mode (str): Algorithm used for upsampling. Choose one from 'nearest',\n 'linear', 'bilinear', 'bicubic' and 'trilinear'.\n Defaults to 'bicubic'.\n num_extra_tokens (int): The number of extra tokens, such as cls_token.\n Defaults to 1.\n\n Returns:\n torch.Tensor: The resized pos_embed of shape [1, L_new, C]\n \"\"\"\n if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]:\n return pos_embed\n assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]'\n _, L, C = pos_embed.shape\n src_h, src_w = src_shape\n assert L == src_h * src_w + num_extra_tokens, \\\n f\"The length of `pos_embed` ({L}) doesn't match the expected \" \\\n f'shape ({src_h}*{src_w}+{num_extra_tokens}). Please check the' \\\n '`img_size` argument.'\n extra_tokens = pos_embed[:, :num_extra_tokens]\n\n src_weight = pos_embed[:, num_extra_tokens:]\n src_weight = src_weight.reshape(1, src_h, src_w, C).permute(0, 3, 1, 2)\n\n dst_weight = F.interpolate(\n src_weight, size=dst_shape, align_corners=False, mode=mode)\n dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2)\n\n return torch.cat((extra_tokens, dst_weight), dim=1)\n\n\nclass PatchEmbed(BaseModule):\n \"\"\"Image to Patch Embedding.\n\n We use a conv layer to implement PatchEmbed.\n\n Args:\n img_size (int | tuple): The size of input image. Default: 224\n in_channels (int): The num of input channels. Default: 3\n embed_dims (int): The dimensions of embedding. Default: 768\n norm_cfg (dict, optional): Config dict for normalization layer.\n Default: None\n conv_cfg (dict, optional): The config dict for conv layers.\n Default: None\n init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.\n Default: None\n \"\"\"\n\n def __init__(self,\n img_size=224,\n in_channels=3,\n embed_dims=768,\n norm_cfg=None,\n conv_cfg=None,\n init_cfg=None):\n super(PatchEmbed, self).__init__(init_cfg)\n warnings.warn('The `PatchEmbed` in mmcls will be deprecated. '\n 'Please use `mmcv.cnn.bricks.transformer.PatchEmbed`. 
'\n \"It's more general and supports dynamic input shape\")\n\n if isinstance(img_size, int):\n img_size = to_2tuple(img_size)\n elif isinstance(img_size, tuple):\n if len(img_size) == 1:\n img_size = to_2tuple(img_size[0])\n assert len(img_size) == 2, \\\n f'The size of image should have length 1 or 2, ' \\\n f'but got {len(img_size)}'\n\n self.img_size = img_size\n self.embed_dims = embed_dims\n\n # Use conv layer to embed\n conv_cfg = conv_cfg or dict()\n _conv_cfg = dict(\n type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1)\n _conv_cfg.update(conv_cfg)\n self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims)\n\n # Calculate how many patches a input image is splited to.\n h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] -\n self.projection.dilation[i] *\n (self.projection.kernel_size[i] - 1) - 1) //\n self.projection.stride[i] + 1 for i in range(2)]\n\n self.patches_resolution = (h_out, w_out)\n self.num_patches = h_out * w_out\n\n if norm_cfg is not None:\n self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n else:\n self.norm = None\n\n def forward(self, x):\n B, C, H, W = x.shape\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't \" \\\n f'match model ({self.img_size[0]}*{self.img_size[1]}).'\n # The output size is (B, N, D), where N=H*W/P/P, D is embid_dim\n x = self.projection(x).flatten(2).transpose(1, 2)\n\n if self.norm is not None:\n x = self.norm(x)\n\n return x\n\n\n# Modified from pytorch-image-models\nclass HybridEmbed(BaseModule):\n \"\"\"CNN Feature Map Embedding.\n\n Extract feature map from CNN, flatten,\n project to embedding dim.\n\n Args:\n backbone (nn.Module): CNN backbone\n img_size (int | tuple): The size of input image. Default: 224\n feature_size (int | tuple, optional): Size of feature map extracted by\n CNN backbone. Default: None\n in_channels (int): The num of input channels. Default: 3\n embed_dims (int): The dimensions of embedding. 
Default: 768\n conv_cfg (dict, optional): The config dict for conv layers.\n Default: None.\n init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.\n Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n img_size=224,\n feature_size=None,\n in_channels=3,\n embed_dims=768,\n conv_cfg=None,\n init_cfg=None):\n super(HybridEmbed, self).__init__(init_cfg)\n assert isinstance(backbone, nn.Module)\n if isinstance(img_size, int):\n img_size = to_2tuple(img_size)\n elif isinstance(img_size, tuple):\n if len(img_size) == 1:\n img_size = to_2tuple(img_size[0])\n assert len(img_size) == 2, \\\n f'The size of image should have length 1 or 2, ' \\\n f'but got {len(img_size)}'\n\n self.img_size = img_size\n self.backbone = backbone\n if feature_size is None:\n with torch.no_grad():\n # FIXME this is hacky, but most reliable way of\n # determining the exact dim of the output feature\n # map for all networks, the feature metadata has\n # reliable channel and stride info, but using\n # stride to calc feature dim requires info about padding of\n # each stage that isn't captured.\n training = backbone.training\n if training:\n backbone.eval()\n o = self.backbone(\n torch.zeros(1, in_channels, img_size[0], img_size[1]))\n if isinstance(o, (list, tuple)):\n # last feature if backbone outputs list/tuple of features\n o = o[-1]\n feature_size = o.shape[-2:]\n feature_dim = o.shape[1]\n backbone.train(training)\n else:\n feature_size = to_2tuple(feature_size)\n if hasattr(self.backbone, 'feature_info'):\n feature_dim = self.backbone.feature_info.channels()[-1]\n else:\n feature_dim = self.backbone.num_features\n self.num_patches = feature_size[0] * feature_size[1]\n\n # Use conv layer to embed\n conv_cfg = conv_cfg or dict()\n _conv_cfg = dict(\n type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1)\n _conv_cfg.update(conv_cfg)\n self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims)\n\n def forward(self, x):\n x = self.backbone(x)\n if isinstance(x, (list, tuple)):\n # last feature if backbone outputs list/tuple of features\n x = x[-1]\n x = self.projection(x).flatten(2).transpose(1, 2)\n return x\n\n\nclass PatchMerging(BaseModule):\n \"\"\"Merge patch feature map.\n\n This layer use nn.Unfold to group feature map by kernel_size, and use norm\n and linear layer to embed grouped feature map.\n\n Args:\n input_resolution (tuple): The size of input patch resolution.\n in_channels (int): The num of input channels.\n expansion_ratio (Number): Expansion ratio of output channels. The num\n of output channels is equal to int(expansion_ratio * in_channels).\n kernel_size (int | tuple, optional): the kernel size in the unfold\n layer. Defaults to 2.\n stride (int | tuple, optional): the stride of the sliding blocks in the\n unfold layer. Defaults to be equal with kernel_size.\n padding (int | tuple, optional): zero padding width in the unfold\n layer. Defaults to 0.\n dilation (int | tuple, optional): dilation parameter in the unfold\n layer. 
Defaults to 1.\n bias (bool, optional): Whether to add bias in linear layer or not.\n Defaults to False.\n norm_cfg (dict, optional): Config dict for normalization layer.\n Defaults to dict(type='LN').\n init_cfg (dict, optional): The extra config for initialization.\n Defaults to None.\n \"\"\"\n\n def __init__(self,\n input_resolution,\n in_channels,\n expansion_ratio,\n kernel_size=2,\n stride=None,\n padding=0,\n dilation=1,\n bias=False,\n norm_cfg=dict(type='LN'),\n init_cfg=None):\n super().__init__(init_cfg)\n warnings.warn('The `PatchMerging` in mmcls will be deprecated. '\n 'Please use `mmcv.cnn.bricks.transformer.PatchMerging`. '\n \"It's more general and supports dynamic input shape\")\n\n H, W = input_resolution\n self.input_resolution = input_resolution\n self.in_channels = in_channels\n self.out_channels = int(expansion_ratio * in_channels)\n\n if stride is None:\n stride = kernel_size\n kernel_size = to_2tuple(kernel_size)\n stride = to_2tuple(stride)\n padding = to_2tuple(padding)\n dilation = to_2tuple(dilation)\n self.sampler = nn.Unfold(kernel_size, dilation, padding, stride)\n\n sample_dim = kernel_size[0] * kernel_size[1] * in_channels\n\n if norm_cfg is not None:\n self.norm = build_norm_layer(norm_cfg, sample_dim)[1]\n else:\n self.norm = None\n\n self.reduction = nn.Linear(sample_dim, self.out_channels, bias=bias)\n\n # See https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n H_out = (H + 2 * padding[0] - dilation[0] *\n (kernel_size[0] - 1) - 1) // stride[0] + 1\n W_out = (W + 2 * padding[1] - dilation[1] *\n (kernel_size[1] - 1) - 1) // stride[1] + 1\n self.output_resolution = (H_out, W_out)\n\n def forward(self, x):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, 'input feature has wrong size'\n\n x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W\n\n # Use nn.Unfold to merge patch. About 25% faster than original method,\n # but need to modify pretrained model for compatibility\n x = self.sampler(x) # B, 4*C, H/2*W/2\n x = x.transpose(1, 2) # B, H/2*W/2, 4*C\n\n x = self.norm(x) if self.norm else x\n x = self.reduction(x)\n\n return x\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import LOSSES\nfrom .utils import convert_to_one_hot, weight_reduce_loss\n\n\ndef asymmetric_loss(pred,\n target,\n weight=None,\n gamma_pos=1.0,\n gamma_neg=4.0,\n clip=0.05,\n reduction='mean',\n avg_factor=None,\n use_sigmoid=True,\n eps=1e-8):\n r\"\"\"asymmetric loss.\n\n Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for\n details.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction with\n shape (N, \\*).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, ). Defaults to None.\n gamma_pos (float): positive focusing parameter. Defaults to 0.0.\n gamma_neg (float): Negative focusing parameter. We usually set\n gamma_neg > gamma_pos. Defaults to 4.0.\n clip (float, optional): Probability margin. Defaults to 0.05.\n reduction (str): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\". If reduction is 'none' , loss\n is same shape as pred and label. Defaults to 'mean'.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n use_sigmoid (bool): Whether the prediction uses sigmoid instead\n of softmax. 
Defaults to True.\n eps (float): The minimum value of the argument of logarithm. Defaults\n to 1e-8.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert pred.shape == \\\n target.shape, 'pred and target should be in the same shape.'\n\n if use_sigmoid:\n pred_sigmoid = pred.sigmoid()\n else:\n pred_sigmoid = nn.functional.softmax(pred, dim=-1)\n\n target = target.type_as(pred)\n\n if clip and clip > 0:\n pt = (1 - pred_sigmoid +\n clip).clamp(max=1) * (1 - target) + pred_sigmoid * target\n else:\n pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target\n asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *\n (1 - target))\n loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight\n if weight is not None:\n assert weight.dim() == 1\n weight = weight.float()\n if pred.dim() > 1:\n weight = weight.reshape(-1, 1)\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n return loss\n\n\[email protected]_module()\nclass AsymmetricLoss(nn.Module):\n \"\"\"asymmetric loss.\n\n Args:\n gamma_pos (float): positive focusing parameter.\n Defaults to 0.0.\n gamma_neg (float): Negative focusing parameter. We\n usually set gamma_neg > gamma_pos. Defaults to 4.0.\n clip (float, optional): Probability margin. Defaults to 0.05.\n reduction (str): The method used to reduce the loss into\n a scalar.\n loss_weight (float): Weight of loss. Defaults to 1.0.\n use_sigmoid (bool): Whether the prediction uses sigmoid instead\n of softmax. Defaults to True.\n eps (float): The minimum value of the argument of logarithm. Defaults\n to 1e-8.\n \"\"\"\n\n def __init__(self,\n gamma_pos=0.0,\n gamma_neg=4.0,\n clip=0.05,\n reduction='mean',\n loss_weight=1.0,\n use_sigmoid=True,\n eps=1e-8):\n super(AsymmetricLoss, self).__init__()\n self.gamma_pos = gamma_pos\n self.gamma_neg = gamma_neg\n self.clip = clip\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.use_sigmoid = use_sigmoid\n self.eps = eps\n\n def forward(self,\n pred,\n target,\n weight=None,\n avg_factor=None,\n reduction_override=None):\n r\"\"\"asymmetric loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, \\*).\n target (torch.Tensor): The ground truth label of the prediction\n with shape (N, \\*), N or (N,1).\n weight (torch.Tensor, optional): Sample-wise loss weight with shape\n (N, \\*). Defaults to None.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n reduction_override (str, optional): The method used to reduce the\n loss into a scalar. Options are \"none\", \"mean\" and \"sum\".\n Defaults to None.\n\n Returns:\n torch.Tensor: Loss.\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1):\n target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])\n loss_cls = self.loss_weight * asymmetric_loss(\n pred,\n target,\n weight,\n gamma_pos=self.gamma_pos,\n gamma_neg=self.gamma_neg,\n clip=self.clip,\n reduction=reduction,\n avg_factor=avg_factor,\n use_sigmoid=self.use_sigmoid,\n eps=self.eps)\n return loss_cls\n" ]
[ [ "torch.cuda.device", "torch.no_grad" ], [ "numpy.array", "numpy.vstack" ], [ "numpy.concatenate", "numpy.ascontiguousarray" ], [ "torch.cat", "torch.zeros", "torch.nn.Linear", "torch.nn.Unfold", "torch.no_grad", "torch.nn.functional.interpolate", "torch.flatten" ], [ "torch.nn.functional.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
volpatto/fluids
[ "693268d0fd7c9e2f276ebdf14353cb9d7dbac195", "693268d0fd7c9e2f276ebdf14353cb9d7dbac195", "693268d0fd7c9e2f276ebdf14353cb9d7dbac195" ]
[ "tests/test_design_climate.py", "tests/test_control_valve.py", "tests/test_jet_pump.py" ]
[ "# -*- coding: utf-8 -*-\n'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.\nCopyright (C) 2016, 2017 Caleb Bell <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\nfrom numpy.testing import assert_allclose\nimport pytest\nimport numpy as np\nfrom fluids.design_climate import *\nfrom fluids.design_climate import _latlongs, stations\n\n\ndef test_heating_degree_days():\n assert_allclose(heating_degree_days(273, truncate=False), -18.483333333333292)\n assert 0 == heating_degree_days(273, truncate=True)\n assert 0 == heating_degree_days(273)\n assert_allclose(heating_degree_days(273, T_base = 250), 23)\n assert_allclose(heating_degree_days(279, T_base=300, truncate=False), -21)\n \ndef test_cooling_degree_days():\n \n assert_allclose( cooling_degree_days(250), 33.15)\n assert 0 == cooling_degree_days(300)\n assert_allclose(cooling_degree_days(300, truncate=False), -16.85)\n assert_allclose(cooling_degree_days(250, T_base=300), 50)\n\n\[email protected]\[email protected]\ndef test_month_average_temperature():\n station = get_closest_station(38.8572, -77.0369)\n station_data = StationDataGSOD(station)\n Ts_calc = station_data.month_average_temperature(1990, 2000, include_yearly=False, minimum_days=23)\n Ts_expect = [276.1599380905833, 277.5375516246206, 281.1881231671554, 286.7367003367004, 291.8689638318671, 296.79545454545456, 299.51868686868687, 298.2097914630174, 294.4116161616162, 288.25883023786247, 282.3188552188553, 277.8282339524275]\n assert_allclose(Ts_calc, Ts_expect, rtol=1E-3)\n \n assert station_data.warmest_month(1990, 2000) == 6\n assert station_data.coldest_month(1990, 2000) == 0\n \n\n \ndef test_IntegratedSurfaceDatabaseStation():\n \n # Information confirmed elsewhere i.e. 
https://geographic.org/global_weather/not_specified_canada/calgary_intl_cs_713930_99999.html\n values = [713930.0, 99999.0, 'CALGARY INTL CS', 'CA', None, None, 51.1, -114.0, 1081.0, 20040921.0, 20150831.0]\n test_station = IntegratedSurfaceDatabaseStation(*values)\n for value, attr in zip(values, test_station.__slots__):\n assert value == getattr(test_station, attr)\n \ndef test_data():\n assert _latlongs.shape[0] >= 27591\n for station in stations:\n assert abs(station.LAT) <= 90\n assert abs(station.LON) <= 180\n\n\ndef test_get_closest_station():\n s = get_closest_station(51.02532675, -114.049868485806, 20150000)\n assert s.NAME == 'CALGARY INTL CS'\n \n with pytest.raises(Exception):\n get_closest_station(51.02532675, -114.049868485806, 90150000)\n \n \nsample_data_random_station_1999 = '''STN--- WBAN YEARMODA TEMP DEWP SLP STP VISIB WDSP MXSPD GUST MAX MIN PRCP SNDP FRSHTT\n712650 99999 19990101 12.3 24 3.1 24 1022.8 24 1013.0 24 8.4 24 15.0 24 22.9 29.9 23.7 4.1 0.00G 999.9 001000\n712650 99999 19990102 10.1 24 -0.6 24 1034.1 24 1024.1 24 6.8 24 12.7 24 29.9 35.0 23.0* 5.0* 0.00G 999.9 001000\n712650 99999 19990103 28.1 24 24.9 24 1007.5 24 997.9 24 3.6 24 24.2 24 36.9 43.9 35.6* 21.2* 0.71G 999.9 001000\n712650 99999 19990104 17.8 24 10.8 24 1011.0 24 1001.3 24 8.8 24 26.1 24 30.9 42.0 36.7 13.3 0.04G 999.9 001000\n712650 99999 19990105 16.3 22 9.3 22 1022.9 22 1013.1 22 8.0 22 15.4 22 22.9 38.1 20.1 13.3 0.00G 999.9 001000\n712650 99999 19990106 19.3 24 14.4 24 1016.3 24 1006.6 24 6.2 24 19.7 24 28.0 34.0 28.4* 10.4* 0.00G 999.9 001000\n712650 99999 19990107 17.4 23 8.6 23 1019.5 23 1009.8 23 8.9 23 20.9 23 23.9 35.0 29.1 10.9 0.02G 999.9 001000\n712650 99999 19990108 15.5 24 9.8 24 1026.3 24 1016.5 24 7.0 24 9.3 24 20.0 26.0 21.2* 10.4* 0.00G 999.9 001000\n712650 99999 19990109 20.8 24 16.2 24 1012.1 24 1002.4 24 3.8 24 9.3 24 18.1 23.9 24.8* 19.4* 0.04G 999.9 001000\n712650 99999 19990111 12.2 24 4.7 24 1016.8 24 1007.0 24 7.7 24 14.4 24 27.0 38.1 20.7 7.5 0.02G 999.9 000000\n712650 99999 19990112 20.8 24 15.8 24 1011.8 24 1002.1 24 4.6 24 7.8 24 15.9 19.0 24.8 7.5 0.00G 999.9 001000\n712650 99999 19990113 10.3 24 2.7 24 1026.1 24 1016.3 24 3.9 24 11.7 24 19.0 23.9 22.5* -0.4* 0.20G 999.9 001000\n712650 99999 19990114 -0.7 24 -9.1 24 1034.7 24 1024.5 24 2.1 24 20.1 24 34.0 42.0 15.8* -7.6* 0.00G 999.9 001000\n712650 99999 19990115 13.4 22 9.2 22 1016.2 22 1006.4 22 3.2 22 13.5 22 20.0 42.0 19.4* 8.6* 0.55G 999.9 001000\n712650 99999 19990116 25.4 24 21.7 24 1009.2 24 999.7 24 5.6 24 18.8 24 26.0 32.1 35.6* 12.2* 0.02G 999.9 001000\n712650 99999 19990117 33.3 24 28.0 24 1017.6 24 1008.1 24 6.1 24 8.9 24 19.0 32.1 37.4* 29.8* 0.16G 999.9 000000\n712650 99999 19990118 36.6 24 33.9 24 1006.8 24 997.5 24 6.8 24 13.7 24 18.1 21.0 41.4 27.7 0.26G 999.9 010000\n712650 99999 19990119 34.2 24 28.5 24 1006.3 24 996.9 24 8.7 24 22.6 24 30.9 36.9 41.4 32.5 0.02G 999.9 000000\n712650 99999 19990120 32.3 24 25.8 24 1017.6 24 1008.2 24 8.5 24 12.1 24 20.0 38.1 33.8* 30.2* 0.00G 999.9 000000\n712650 99999 19990121 32.0 24 27.5 24 1018.7 24 1009.3 24 7.2 24 4.6 24 11.1 21.0 37.4* 26.6* 0.00G 999.9 000000\n712650 99999 19990122 34.0 19 31.6 19 1020.7 19 1011.2 19 6.7 19 21.5 19 28.9 37.9 36.7 26.1 0.02G 999.9 010000\n712650 99999 19990123 40.1 24 39.1 24 1013.3 24 1003.9 24 4.8 24 9.9 24 21.0 39.0 48.2* 36.3* 0.33G 999.9 010000\n712650 99999 19990124 38.9 24 34.0 24 1009.7 23 1000.3 23 7.7 24 14.4 24 22.9 28.9 48.2* 33.8* 0.12G 999.9 011000\n712650 99999 19990125 31.7 23 24.9 23 1021.4 23 1011.8 
23 6.3 23 8.1 23 18.1 31.1 41.7 29.3 0.00G 999.9 001000\n712650 99999 19990126 31.3 24 23.7 24 1028.3 24 1018.6 24 9.0 24 11.2 24 18.1 25.1 32.7 27.3 0.00I 999.9 000000\n712650 99999 19990127 30.5 23 25.8 23 1021.1 23 1011.5 23 6.0 23 11.3 23 23.9 32.1 34.3 22.1 0.00G 999.9 010000\n712650 99999 19990128 28.2 23 25.9 23 1015.2 23 1005.7 23 4.4 23 15.0 23 23.9 32.1 34.3 22.1 0.08G 999.9 011000\n712650 99999 19990129 21.9 24 16.4 24 1028.6 24 1018.8 24 7.8 24 4.1 24 9.9 21.0 31.3 18.3 0.02G 999.9 001000\n712650 99999 19990130 30.1 23 22.0 23 1033.4 23 1023.7 23 7.9 23 9.1 23 14.0 18.1 33.8* 26.6* 0.02G 999.9 000000\n712650 99999 19990131 27.4 24 16.6 24 1042.4 24 1032.6 24 9.0 24 9.9 24 15.0 21.0 34.0 24.6 0.00G 999.9 000000\n712650 99999 19990201 28.0 24 24.4 24 1030.5 24 1022.4 4 7.4 24 8.5 24 15.9 18.1 33.8* 23.0* 0.00G 999.9 000000\n712650 99999 19990202 34.9 24 34.3 24 1012.5 22 1004.2 4 1.7 24 8.7 24 15.0 20.0 37.4* 33.6* 0.04G 999.9 010000\n712650 99999 19990203 36.9 24 32.0 24 1009.6 24 998.9 4 9.0 24 12.5 24 16.9 25.1 40.3 33.4 0.14G 999.9 000000\n712650 99999 19990204 37.2 24 32.1 24 1008.1 21 998.7 4 8.6 24 9.3 24 21.0 33.0 39.6* 33.8* 0.00G 999.9 010000\n712650 99999 19990205 25.7 23 14.5 23 1027.2 23 9999.9 0 9.0 23 9.5 23 23.9 35.9 41.4 21.2 0.06G 999.9 000000\n712650 99999 19990206 33.5 24 27.4 24 1014.9 21 1006.3 4 6.8 24 6.7 24 11.1 20.0 39.2* 27.9* 0.00G 999.9 001000\n712650 99999 19990207 33.7 24 25.9 24 1015.5 22 1006.2 4 8.4 24 10.0 24 25.1 28.0 37.4* 30.2* 0.02G 999.9 000000\n712650 99999 19990208 26.8 21 16.6 21 1016.7 21 9999.9 0 9.0 21 7.2 21 15.9 29.9 34.5 19.8 0.00G 999.9 000000\n712650 99999 19990209 38.2 23 30.3 23 1012.0 23 1003.0 4 8.5 23 13.1 23 22.9 28.9 48.2* 32.0* 0.00G 999.9 000000\n712650 99999 19990210 37.2 24 26.4 24 1020.4 24 1009.4 4 9.0 24 9.8 24 16.9 35.9 48.4 30.7 0.02G 999.9 000000\n712650 99999 19990211 35.6 24 32.6 24 1020.1 23 1011.9 4 8.0 24 9.2 24 15.0 24.1 41.5 30.2 0.00G 999.9 010000\n712650 99999 19990212 38.8 24 34.8 24 1006.6 24 997.3 4 7.5 24 13.7 24 25.1 40.0 50.0 31.3 0.02G 999.9 011000\n712650 99999 19990213 25.2 24 15.8 24 1013.8 21 1002.8 4 8.1 24 18.3 24 22.9 45.1 32.0* 20.8* 0.18G 999.9 001000\n712650 99999 19990214 23.0 24 7.9 24 1028.4 24 1018.2 4 9.0 24 7.6 24 15.0 32.1 32.0* 16.9* 0.00G 999.9 000000\n712650 99999 19990215 34.5 21 26.4 21 1021.5 18 1013.0 4 8.9 21 11.6 21 18.1 999.9 41.0* 30.2* 0.00I 999.9 000000\n712650 99999 19990216 36.6 24 32.6 24 1016.2 24 1007.4 4 7.6 24 7.6 24 14.0 20.0 42.3 28.9 0.00G 999.9 000000\n712650 99999 19990217 37.4 24 33.3 24 1010.6 20 1001.2 4 5.8 24 12.8 24 22.0 28.0 42.3 31.6 0.06G 999.9 010000\n712650 99999 19990218 33.2 24 24.0 24 1014.6 23 1004.7 4 8.3 24 10.1 24 15.9 24.1 41.2 29.8 0.02G 999.9 000000\n712650 99999 19990219 24.7 24 11.2 24 1019.0 24 1009.2 4 9.0 24 8.5 24 12.0 19.0 34.2 19.0 0.00G 999.9 000000\n712650 99999 19990220 23.6 24 11.2 24 1020.3 23 1010.4 4 9.0 24 8.8 24 13.0 16.9 30.2* 18.9* 0.00I 999.9 000000\n712650 99999 19990221 21.0 24 2.7 24 1023.8 24 1013.6 4 9.0 24 12.6 24 15.9 26.0 30.6 15.4 0.00G 999.9 000000\n712650 99999 19990222 13.1 23 -9.4 23 1031.8 23 1021.4 4 8.9 23 8.2 23 15.9 25.1 26.2 4.5 0.00G 999.9 000000\n712650 99999 19990223 18.9 24 1.1 24 1032.5 24 1022.8 4 8.9 24 10.1 24 18.1 21.0 25.7 4.5 0.02G 999.9 000000\n712650 99999 19990224 26.5 23 13.4 23 1028.9 22 1019.8 4 8.9 23 13.4 23 18.1 23.9 32.0* 21.2* 0.00G 999.9 000000\n712650 99999 19990225 33.4 24 22.1 24 1021.3 24 1012.3 4 9.0 24 11.0 24 20.0 999.9 35.6* 32.0* 0.00I 999.9 000000\n712650 99999 
19990226 33.5 24 20.7 24 1021.9 24 1012.3 4 9.0 24 7.5 24 12.0 24.1 42.6 26.1 0.00G 999.9 000000\n712650 99999 19990227 32.8 24 26.0 24 1017.3 24 1008.7 4 4.7 24 7.1 24 16.9 999.9 42.6 25.9 0.00I 999.9 000000\n712650 99999 19990228 37.7 24 35.8 24 998.6 23 990.9 4 4.3 24 11.6 24 16.9 22.0 41.9 25.9 0.37G 999.9 010000\n712650 99999 19990301 36.3 24 32.1 24 994.7 23 985.0 4 6.8 24 10.2 24 22.0 35.0 41.0* 32.0* 0.10G 999.9 010000\n712650 99999 19990302 33.2 24 19.5 24 1005.5 23 9999.9 0 9.0 24 13.2 24 21.0 30.9 41.0* 24.8* 0.00G 999.9 001000\n712650 99999 19990303 34.6 24 31.5 24 1004.4 24 996.3 4 5.6 24 11.8 24 21.0 29.9 42.3 25.0 0.02G 999.9 010000\n712650 99999 19990304 28.0 24 18.7 24 1003.4 24 991.9 4 9.0 24 21.1 24 29.9 39.0 36.7 24.8 0.33G 999.9 001000\n712650 99999 19990305 23.1 24 10.9 24 1026.0 24 1015.0 4 9.0 24 9.2 24 19.0 42.0 31.8 17.6 0.02G 999.9 000000\n712650 99999 19990306 20.1 24 13.9 24 1023.4 21 1014.4 4 2.2 24 19.0 24 27.0 35.0 27.0 15.3 0.06G 999.9 001000\n712650 99999 19990307 13.8 24 -2.9 24 1034.1 24 1022.7 4 8.7 24 13.2 24 22.0 36.9 25.3 6.6 0.04G 999.9 001000\n712650 99999 19990308 18.6 24 3.4 24 1040.4 24 1030.7 4 9.0 24 6.2 24 9.9 28.9 26.6 6.6 0.00G 999.9 000000\n712650 99999 19990309 24.0 24 10.8 24 1029.5 23 1021.2 4 9.0 24 12.3 24 25.1 29.9 27.5 10.9 0.00G 999.9 001000\n712650 99999 19990310 24.4 24 5.7 24 1020.0 24 1010.5 4 9.0 24 10.9 24 15.9 32.1 28.4* 18.5* 0.06G 999.9 000000\n712650 99999 19990311 28.9 24 14.6 24 1020.3 24 1010.8 4 9.0 24 11.0 24 19.0 28.0 33.8* 25.9* 0.02G 999.9 000000\n712650 99999 19990312 28.2 24 13.8 24 1022.6 24 1012.6 4 9.0 24 13.5 24 19.0 28.9 34.0 22.8 0.00G 999.9 000000\n712650 99999 19990313 29.5 24 12.7 24 1027.6 24 1017.9 4 9.0 24 7.9 24 14.0 28.9 35.4 22.8 0.00G 999.9 000000\n712650 99999 19990314 32.4 24 17.6 24 1022.5 24 1013.8 4 9.0 24 9.8 24 15.0 22.0 35.4 23.5 0.00G 999.9 000000\n712650 99999 19990315 34.1 24 15.5 24 1016.4 24 1007.3 4 9.0 24 7.5 24 15.9 21.0 42.8* 28.4* 0.00G 999.9 000000\n712650 99999 19990316 37.0 24 22.3 24 1011.8 24 1003.3 4 9.0 24 11.1 24 26.0 999.9 42.8* 32.0* 0.00G 999.9 000000\n712650 99999 19990317 39.8 24 33.0 24 1007.6 24 998.9 4 6.1 24 7.2 24 15.0 32.1 50.0* 34.9* 0.00G 999.9 000000\n712650 99999 19990318 43.5 24 32.8 24 1004.3 24 994.1 4 8.9 24 17.5 24 34.0 41.0 52.5 33.6 0.00G 999.9 000000\n712650 99999 19990319 35.3 24 22.5 24 1021.6 24 1010.9 4 9.0 24 12.8 24 18.1 41.0 47.5 29.3 0.02G 999.9 000000\n712650 99999 19990320 32.9 24 23.4 24 1025.4 24 1016.4 4 9.0 24 6.8 24 13.0 22.0 43.3 27.0 0.00G 999.9 000000\n712650 99999 19990321 38.0 24 31.7 24 1012.5 24 1004.6 4 8.9 24 11.0 24 18.1 999.9 42.8* 35.6* 99.99 999.9 010000\n712650 99999 19990322 35.9 24 29.4 24 1005.4 23 996.1 4 7.8 24 15.6 24 27.0 34.0 43.9 32.9 99.99 999.9 011000\n712650 99999 19990323 35.3 24 23.0 24 1015.7 24 1005.3 4 9.0 24 15.4 24 22.9 36.9 43.0 29.7 0.02G 999.9 000000\n712650 99999 19990324 39.1 24 26.8 24 1014.2 24 1004.8 4 8.9 24 9.6 24 15.9 26.0 46.4* 34.9* 0.00G 999.9 000000\n712650 99999 19990325 33.0 24 18.2 24 1022.4 22 9999.9 0 9.0 24 9.2 24 15.0 22.0 49.1 27.7 0.00G 999.9 001000\n712650 99999 19990326 33.3 24 22.8 24 1026.8 24 1017.1 4 9.0 24 7.6 24 13.0 21.0 39.2* 26.6* 0.00G 999.9 001000\n712650 99999 19990327 37.3 24 25.6 24 1026.6 24 1017.2 4 9.0 24 2.5 24 7.0 999.9 45.5 27.0 0.00I 999.9 000000\n712650 99999 19990328 41.9 24 24.2 24 1021.7 24 1013.0 4 9.0 24 3.6 24 8.0 999.9 50.5 29.1 0.00I 999.9 000000\n712650 99999 19990329 47.4 24 29.6 24 1016.6 24 1007.4 4 8.9 24 11.1 24 21.0 30.9 59.9 32.5 0.00I 
999.9 000000\n712650 99999 19990330 44.7 24 28.6 24 1022.6 24 1012.8 4 9.0 24 10.5 24 21.0 33.0 59.9 36.3 0.02G 999.9 000000\n712650 99999 19990331 46.0 24 32.9 24 1018.4 24 1009.9 4 9.0 24 8.6 24 15.0 17.1 55.6 37.4 0.00G 999.9 000000\n712650 99999 19990401 46.9 24 41.6 24 1013.6 23 9999.9 0 5.3 24 3.6 24 14.0 999.9 56.3 37.9 0.00I 999.9 000000\n712650 99999 19990402 42.7 24 40.9 24 1017.8 22 1007.9 4 3.2 24 9.6 24 16.9 20.0 52.3 38.8 0.00I 999.9 000000\n712650 99999 19990403 43.0 24 33.9 24 1016.1 24 1007.3 4 9.0 24 9.0 24 15.0 24.1 48.0 38.8 0.00G 999.9 000000\n712650 99999 19990404 42.7 24 35.4 24 1016.1 23 1006.1 4 8.1 24 10.0 24 18.1 999.9 57.0 38.7 99.99 999.9 010000\n712650 99999 19990405 38.8 23 23.5 23 1026.1 23 1016.1 4 8.9 23 13.3 23 19.0 27.0 57.0 33.8 0.06G 999.9 000000\n712650 99999 19990406 42.6 24 35.7 24 1016.0 24 1009.1 4 8.9 24 14.5 24 21.0 27.0 50.0* 39.2* 0.00G 999.9 010000\n712650 99999 19990407 48.8 24 36.6 24 1015.0 24 1004.7 4 9.0 24 16.1 24 25.1 36.9 57.0 38.8 0.12G 999.9 000000\n712650 99999 19990408 51.4 23 37.9 23 1010.0 23 9999.9 0 8.4 23 10.9 23 22.9 34.0 64.4* 39.2* 0.00G 999.9 010000\n712650 99999 19990409 44.9 24 31.2 24 1009.2 24 1000.5 4 9.0 24 14.4 24 20.0 33.0 64.6 39.0 0.06G 999.9 000000\n712650 99999 19990410 42.3 24 23.7 24 1017.0 24 1006.3 4 9.0 24 8.6 24 14.0 27.0 48.7 36.3 0.00G 999.9 000000\n712650 99999 19990411 38.0 24 30.0 24 1015.3 24 1007.0 4 8.1 24 20.3 24 35.0 42.0 41.2* 33.8* 0.00G 999.9 010000\n712650 99999 19990412 43.0 23 25.8 23 1014.4 22 1003.8 4 8.9 23 12.4 23 19.0 42.9 52.2 32.9 0.12G 999.9 011000\n712650 99999 19990413 45.6 24 18.7 24 1017.8 24 1008.8 4 9.0 24 12.2 24 16.9 25.1 56.5 36.0 0.02G 999.9 000000\n712650 99999 19990414 50.2 23 22.0 23 1012.4 23 1003.5 4 8.9 23 10.5 23 16.9 28.9 64.4 36.0 0.00G 999.9 000000\n712650 99999 19990415 47.2 24 28.2 24 1008.4 24 1000.0 4 9.0 24 6.5 24 12.0 29.9 65.7 38.1 0.00G 999.9 000000\n712650 99999 19990416 43.0 24 32.4 24 998.6 24 990.0 4 8.6 24 15.3 24 25.1 30.9 51.1 38.8 0.00G 999.9 010000\n712650 99999 19990417 43.2 24 38.8 24 1002.0 22 9999.9 0 8.5 24 6.9 24 12.0 33.0 51.8* 40.5* 0.14G 999.9 000000\n712650 99999 19990418 45.0 24 36.6 24 1010.7 22 1000.7 4 9.0 24 7.1 24 15.9 19.0 51.8* 37.4* 0.00G 999.9 010000\n712650 99999 19990419 44.2 24 39.3 24 1015.5 23 1006.1 4 8.4 24 6.4 24 13.0 22.0 51.6 37.9 0.02G 999.9 010000\n712650 99999 19990420 45.7 23 32.9 23 1015.5 23 1006.0 4 8.9 23 6.8 22 9.9 15.0 55.0 40.1 0.00G 999.9 010000\n712650 99999 19990421 45.8 24 35.8 24 1015.9 24 1006.9 4 8.7 24 5.0 24 11.1 17.1 55.0 38.1 0.02G 999.9 000000\n712650 99999 19990422 46.9 24 39.8 24 1014.5 21 1004.6 4 7.4 24 7.8 24 14.0 999.9 55.8 38.1 0.00G 999.9 010000\n712650 99999 19990423 45.1 24 35.6 24 1018.4 24 1008.6 4 7.1 24 10.7 24 18.1 22.0 50.0 42.4 0.75G 999.9 010000\n712650 99999 19990424 43.6 24 17.3 24 1027.5 24 1017.2 4 9.0 24 9.6 24 15.0 27.0 53.6* 33.8* 0.06G 999.9 000000\n712650 99999 19990425 45.4 24 24.3 24 1023.8 24 1015.5 4 9.0 24 11.1 24 21.0 26.0 62.6* 37.4* 0.02G 999.9 000000\n712650 99999 19990426 53.5 24 32.5 24 1012.2 24 1003.3 4 9.0 24 11.6 24 18.1 26.0 65.1 36.7 0.00G 999.9 000000\n712650 99999 19990427 46.4 24 28.6 24 1019.9 24 1009.4 4 9.0 24 8.9 24 13.0 29.9 65.1 41.0 0.00G 999.9 000000\n712650 99999 19990428 48.8 24 33.4 24 1025.6 24 9999.9 0 9.0 24 12.3 24 21.0 26.0 51.8* 46.4* 0.00G 999.9 000000\n712650 99999 19990429 51.7 24 30.7 24 1026.9 24 9999.9 0 9.0 24 7.3 24 12.0 18.1 55.9 45.5 0.00I 999.9 000000\n712650 99999 19990430 54.4 24 28.9 24 1027.0 24 1017.7 4 9.0 24 5.5 
24 11.1 18.1 59.4 46.6 0.00G 999.9 000000\n712650 99999 19990501 55.9 24 30.6 24 1025.1 24 1016.0 4 9.0 24 4.2 24 8.0 999.9 62.6* 50.0* 0.00I 999.9 000000\n712650 99999 19990502 57.4 24 37.8 24 1022.9 24 1013.8 4 9.0 24 3.6 24 8.0 999.9 68.7 45.7 0.00I 999.9 000000\n712650 99999 19990503 59.9 24 39.3 24 1020.3 24 1011.3 4 9.0 24 6.9 24 11.1 999.9 68.9 45.7 0.00I 999.9 000000\n712650 99999 19990504 59.5 24 40.8 24 1016.0 24 1007.0 4 9.0 24 4.2 24 11.1 999.9 67.6 50.0 0.00I 999.9 000000\n712650 99999 19990505 59.7 23 49.5 23 1012.7 23 9999.9 0 9.0 23 11.4 23 19.0 25.1 67.6 50.0 0.00I 999.9 000000\n712650 99999 19990506 59.0 24 52.7 24 1009.0 24 1000.4 4 8.2 24 13.0 24 20.0 26.0 65.5 48.2 0.00G 999.9 000000\n712650 99999 19990507 57.8 24 53.0 24 1009.6 23 1000.0 4 7.7 24 6.9 24 12.0 27.0 64.2 48.2 0.00G 999.9 010000\n712650 99999 19990508 55.2 24 53.4 24 1007.8 24 999.0 4 4.9 24 7.7 24 13.0 21.0 62.2 49.3 99.99 999.9 010000\n712650 99999 19990509 55.0 24 47.3 24 1012.3 24 9999.9 0 7.5 24 10.3 24 19.0 22.0 62.1 49.3 0.20G 999.9 010000\n712650 99999 19990510 49.6 24 36.8 24 1020.9 24 9999.9 0 9.0 24 6.3 24 13.0 24.1 61.3 40.5 0.06G 999.9 000000\n712650 99999 19990511 52.2 24 37.1 24 1025.2 24 1015.6 4 9.0 24 10.0 24 18.1 999.9 57.9 40.5 0.00G 999.9 000000\n712650 99999 19990512 51.5 24 32.4 24 1021.5 24 1012.9 4 9.0 24 5.6 24 15.0 26.0 57.6 42.4 0.00G 999.9 000000\n712650 99999 19990513 50.1 22 29.9 22 1017.5 22 1008.1 4 8.9 22 9.6 22 16.9 20.0 57.6 42.4 0.00G 999.9 000000\n712650 99999 19990514 54.0 22 36.7 22 1022.2 19 9999.9 0 9.0 22 13.6 22 20.0 25.1 57.2* 50.0* 0.00I 999.9 000000\n712650 99999 19990515 57.2 24 41.9 24 1024.7 24 1015.3 4 9.0 24 7.2 24 11.1 26.0 60.8 49.5 0.00G 999.9 000000\n712650 99999 19990516 57.9 24 45.2 24 1023.1 24 1014.0 4 9.0 24 8.6 24 12.0 999.9 62.8 48.6 0.00I 999.9 000000\n712650 99999 19990517 57.7 24 48.1 24 1018.9 24 1010.2 4 9.0 24 8.9 24 14.0 17.1 64.6 48.6 0.00G 999.9 000000\n712650 99999 19990518 61.1 24 53.6 24 1013.8 24 1005.2 4 8.8 24 7.6 24 14.0 19.0 66.2* 57.2* 0.00G 999.9 010000\n712650 99999 19990519 58.8 24 51.6 24 1016.1 22 9999.9 0 7.2 24 8.9 24 14.0 22.0 68.0* 51.4* 0.26G 999.9 010000\n712650 99999 19990520 55.2 24 44.1 24 1023.3 24 1013.8 4 9.0 24 6.2 24 11.1 25.1 68.2 45.1 0.18G 999.9 000000\n712650 99999 19990521 57.5 24 50.0 24 1020.4 24 1011.6 4 8.2 24 4.0 24 11.1 999.9 67.1 45.1 0.00G 999.9 000000\n712650 99999 19990522 60.2 24 50.6 24 1014.7 24 1005.9 4 8.9 24 5.5 24 12.0 999.9 67.1 50.0 0.00I 999.9 000000\n712650 99999 19990523 62.4 22 52.7 22 1012.3 22 9999.9 0 8.9 22 5.6 22 11.1 999.9 69.8 55.8 0.00I 999.9 000000\n712650 99999 19990524 57.2 24 53.4 24 999.1 22 9999.9 0 7.1 24 6.6 24 15.0 22.0 60.8* 53.6* 99.99 999.9 010000\n712650 99999 19990525 50.4 22 44.3 22 999.2 22 989.7 4 8.8 22 18.1 22 22.9 29.9 55.0* 48.2* 0.41G 999.9 010000\n712650 99999 19990526 51.7 24 46.7 24 1005.2 22 995.1 4 9.0 24 11.5 24 20.0 35.9 57.2* 47.7* 0.20G 999.9 010000\n712650 99999 19990527 56.0 24 42.7 24 1013.1 24 1003.6 4 9.0 24 8.0 24 15.0 20.0 75.2* 42.8* 0.02G 999.9 000000\n712650 99999 19990528 57.1 23 46.4 23 1014.9 23 1005.4 4 8.9 23 6.8 23 13.0 15.9 74.3 42.4 0.00G 999.9 000000\n712650 99999 19990529 65.9 24 48.9 24 1018.7 23 1009.1 4 8.9 23 4.6 24 8.9 999.9 79.2 46.4 0.00I 999.9 000000\n712650 99999 19990530 64.0 23 53.0 23 1021.1 23 1011.8 4 6.9 22 3.8 23 12.0 999.9 79.3 52.9 0.00I 999.9 000000\n712650 99999 19990531 64.7 24 55.2 24 1017.1 24 1008.4 4 8.1 24 2.8 24 7.0 999.9 80.2 52.9 99.99 999.9 010000\n712650 99999 19990601 61.3 24 59.3 24 1012.0 
22 1003.2 4 2.9 24 5.5 24 12.0 999.9 73.8 57.6 99.99 999.9 010000\n712650 99999 19990602 63.5 24 62.4 24 1010.4 15 9999.9 0 2.1 24 3.4 24 8.0 999.9 67.6 57.7 99.99 999.9 010000\n712650 99999 19990603 61.4 24 55.6 24 1012.6 21 1001.5 4 7.4 24 11.3 24 16.9 21.0 67.8 57.9 0.00I 999.9 000000\n712650 99999 19990604 59.4 23 46.2 23 1021.8 23 1012.1 4 8.9 23 6.4 23 9.9 25.1 65.5 52.5 0.02G 999.9 000000\n712650 99999 19990605 58.6 24 45.9 24 1020.7 23 1011.6 4 9.0 24 8.0 24 13.0 999.9 66.6 52.5 0.00G 999.9 010000\n712650 99999 19990606 63.2 24 56.3 24 1017.2 24 1008.4 4 7.2 24 6.2 24 15.0 999.9 72.0 52.5 0.00I 999.9 000000\n712650 99999 19990607 66.6 24 62.2 24 1014.8 23 1005.9 4 4.4 24 4.7 24 22.9 41.0 77.7 52.5 0.00G 999.9 010000\n712650 99999 19990608 72.4 24 59.7 24 1010.9 24 9999.9 0 8.8 24 11.1 24 25.1 41.0 84.2* 57.2* 0.33G 999.9 000000\n712650 99999 19990609 68.0 23 58.2 23 1015.3 19 1006.0 4 8.9 23 7.0 23 16.9 31.1 84.2 57.0 0.02G 999.9 000000\n712650 99999 19990610 65.0 23 59.5 23 1020.2 21 1010.6 4 8.8 23 13.9 23 18.1 21.0 72.1 60.8 0.00G 999.9 000000\n712650 99999 19990611 65.9 24 61.1 24 1021.1 24 1011.8 4 6.3 24 9.9 24 15.9 24.1 71.6* 60.8* 0.00G 999.9 000000\n712650 99999 19990612 68.4 24 62.2 24 1021.1 24 1012.0 4 6.6 24 7.6 24 13.0 18.1 73.6 60.3 0.00G 999.9 010000\n712650 99999 19990613 69.0 24 60.3 24 1019.1 24 1010.4 4 9.0 24 5.8 24 11.1 999.9 74.5 60.6 0.00I 999.9 000000\n712650 99999 19990614 68.4 24 62.2 24 1011.5 19 9999.9 0 7.9 24 9.6 24 22.9 27.0 73.6 61.2 99.99 999.9 010000\n712650 99999 19990615 58.0 24 43.5 24 1018.2 24 1007.8 4 9.0 24 11.8 24 15.9 28.0 73.4 49.1 0.22G 999.9 000000\n712650 99999 19990616 56.5 24 41.5 24 1022.4 24 1013.2 4 9.0 24 3.8 24 8.0 22.0 63.3 48.7 0.00G 999.9 000000\n712650 99999 19990617 59.0 24 46.8 24 1020.8 24 1011.2 4 9.0 24 4.0 24 8.9 999.9 64.6 48.7 0.00I 999.9 000000\n712650 99999 19990618 58.9 24 49.6 24 1024.7 24 1015.2 4 9.0 24 8.1 24 14.0 15.0 66.2* 50.0* 0.00I 999.9 000000\n712650 99999 19990619 62.9 24 53.5 24 1027.8 24 1018.2 4 9.0 24 2.4 24 7.0 999.9 71.6* 53.6* 0.00I 999.9 000000\n712650 99999 19990620 66.2 24 56.4 24 1026.6 24 1017.7 4 9.0 24 6.0 24 8.0 999.9 73.4* 62.6* 0.00I 999.9 000000\n712650 99999 19990621 68.0 23 55.6 23 1025.0 23 1015.8 4 8.9 23 4.5 23 7.0 999.9 75.2* 60.8* 0.00I 999.9 000000\n712650 99999 19990622 69.3 23 54.3 23 1022.8 23 9999.9 0 9.0 23 4.5 23 8.9 999.9 75.9 59.2 0.00I 999.9 000000\n712650 99999 19990623 71.0 23 60.8 23 1018.1 23 1009.6 4 8.9 23 5.8 23 8.0 999.9 82.4* 66.2* 0.00I 999.9 000000\n712650 99999 19990624 72.0 23 63.1 23 1013.3 22 1004.5 4 8.7 23 5.3 23 8.9 999.9 82.0 65.5 99.99 999.9 010000\n712650 99999 19990625 72.3 24 67.7 24 1011.4 22 1002.2 4 6.9 24 6.6 24 15.9 999.9 78.3 66.9 99.99 999.9 010000\n712650 99999 19990626 74.1 21 61.7 21 1013.6 21 9999.9 0 9.0 21 5.1 21 9.9 999.9 83.1* 66.2* 99.99 999.9 010000\n712650 99999 19990627 73.3 24 65.5 24 1011.3 22 1002.7 4 8.3 24 6.4 24 13.0 999.9 84.2 66.4 99.99 999.9 010000\n712650 99999 19990628 74.8 23 70.1 23 1005.2 22 996.7 4 5.4 23 5.7 23 9.9 17.1 80.6* 71.6* 0.06G 999.9 000000\n712650 99999 19990629 72.4 23 63.5 23 1001.1 23 991.5 4 6.8 23 12.2 23 23.9 29.9 82.8 68.7 0.00G 999.9 010000\n712650 99999 19990630 61.3 23 50.0 23 1014.4 23 1004.5 4 8.9 23 7.1 23 16.9 32.1 68.0* 51.8* 0.04G 999.9 000000\n712650 99999 19990701 65.6 23 60.5 23 1012.3 23 1004.6 4 8.1 23 8.6 23 16.9 999.9 73.4* 60.8* 0.00G 999.9 010000\n712650 99999 19990702 71.5 22 62.0 22 1010.5 22 1000.5 4 7.5 22 11.1 22 19.0 21.0 82.4* 62.6* 0.08G 999.9 010000\n712650 99999 
19990703 71.8 22 64.0 22 1017.3 22 1008.4 4 8.4 22 5.0 22 14.0 32.1 84.9 62.6 0.00G 999.9 010000\n712650 99999 19990704 75.5 22 71.1 22 1014.8 20 9999.9 0 5.9 22 6.2 22 13.0 21.0 93.2* 69.8* 99.99 999.9 010000\n712650 99999 19990705 80.2 24 71.9 24 1013.3 24 1004.6 4 8.7 24 10.0 24 15.9 20.0 93.4 67.8 0.00I 999.9 000000\n712650 99999 19990706 77.9 22 68.1 22 1009.4 22 1000.4 4 6.7 22 12.8 22 21.0 33.0 92.7 66.6 0.00G 999.9 000000\n712650 99999 19990707 71.8 23 53.8 23 1012.5 23 1003.4 4 9.0 21 12.5 23 21.0 28.9 89.8 53.4 0.00I 999.9 000000\n712650 99999 19990708 67.4 19 51.2 19 1014.2 19 9999.9 0 9.0 19 10.8 19 16.9 21.0 82.8 53.4 0.00I 999.9 000000\n712650 99999 19990709 63.4 24 55.3 24 1010.7 23 1002.7 4 9.0 24 8.8 24 20.0 25.1 75.9 55.8 99.99 999.9 010000\n712650 99999 19990710 67.3 23 53.7 23 1009.5 23 998.7 4 8.9 23 12.6 23 15.9 26.0 77.2 55.8 0.00G 999.9 000000\n712650 99999 19990711 63.2 22 49.5 22 1021.3 22 9999.9 0 9.0 22 7.7 22 14.0 20.0 74.3 55.2 0.00I 999.9 000000\n712650 99999 19990712 62.8 23 54.6 23 1023.8 23 1014.5 4 8.9 23 5.0 23 11.1 999.9 73.9 54.0 0.00I 999.9 000000\n712650 99999 19990713 67.1 22 55.8 22 1021.2 20 9999.9 0 9.0 22 6.3 22 9.9 999.9 75.2* 59.0* 0.00I 999.9 000000\n712650 99999 19990714 69.0 23 61.4 23 1019.4 21 9999.9 0 8.6 23 6.1 23 8.9 999.9 75.2* 64.4* 0.00I 999.9 000000\n712650 99999 19990715 69.9 20 60.6 20 1017.3 20 9999.9 0 6.6 20 5.7 20 15.0 999.9 78.8* 64.4* 0.00I 999.9 000000\n712650 99999 19990716 72.2 23 64.0 23 1017.9 23 9999.9 0 2.5 15 6.4 23 15.9 999.9 80.6* 64.4* 0.00I 999.9 000000\n712650 99999 19990717 72.6 22 65.4 22 1017.4 22 9999.9 0 4.0 21 6.9 22 22.9 999.9 84.2* 66.2* 99.99 999.9 010000\n712650 99999 19990718 73.4 23 67.0 23 1018.4 23 9999.9 0 5.3 23 4.8 23 15.0 40.0 87.8* 66.2* 0.02G 999.9 000000\n712650 99999 19990719 71.8 22 62.4 22 1016.7 21 9999.9 0 8.7 22 4.7 22 8.0 999.9 77.0* 68.0* 0.00G 999.9 010000\n712650 99999 19990720 70.1 22 60.1 22 1019.2 22 9999.9 0 8.6 22 7.3 22 12.0 15.9 73.4* 64.4* 0.00I 999.9 000000\n712650 99999 19990721 70.7 23 61.1 23 1019.3 23 9999.9 0 9.0 23 6.7 23 12.0 999.9 75.2* 66.2* 0.00I 999.9 000000\n712650 99999 19990722 73.3 22 60.6 22 1015.1 22 9999.9 0 9.0 22 3.4 22 8.9 999.9 82.4* 66.2* 0.00I 999.9 000000\n712650 99999 19990723 77.0 22 66.5 22 1013.8 22 9999.9 0 8.7 22 7.0 22 13.0 999.9 84.2* 69.8* 0.00I 999.9 000000\n712650 99999 19990724 77.9 24 68.9 24 1008.2 24 9999.9 0 8.3 24 9.6 24 14.0 999.9 87.8* 71.6* 0.00I 999.9 000000\n712650 99999 19990725 77.4 24 62.3 24 1007.7 23 9999.9 0 9.0 24 9.8 24 23.9 33.0 89.2 67.1 99.99 999.9 010000\n712650 99999 19990726 77.2 24 64.2 24 1009.8 24 9999.9 0 9.0 24 9.6 24 16.9 22.0 86.0* 69.8* 0.00I 999.9 000000\n712650 99999 19990727 80.6 24 60.3 24 1011.2 24 9999.9 0 9.0 24 9.6 24 21.0 28.0 91.4* 71.6* 0.00G 999.9 000000\n712650 99999 19990728 73.2 21 54.5 21 1011.6 21 9999.9 0 9.0 21 5.8 21 14.0 20.0 82.4* 62.6* 0.00I 999.9 000000\n712650 99999 19990729 75.7 21 66.0 21 1005.5 19 9999.9 0 9.0 21 6.7 21 11.1 999.9 87.6 68.9 0.00I 999.9 000000\n712650 99999 19990730 78.0 24 68.5 24 1003.5 24 994.5 4 9.0 24 5.3 24 12.0 999.9 88.5 68.9 0.00I 999.9 000000\n712650 99999 19990731 75.1 21 70.2 21 1005.6 18 9999.9 0 7.2 21 5.4 21 16.9 26.0 87.8 69.6 99.99 999.9 010000\n712650 99999 19990801 75.4 24 61.8 24 1009.3 24 9999.9 0 8.6 24 7.9 24 14.0 15.9 82.4* 68.0* 0.00I 999.9 000000\n712650 99999 19990802 69.9 24 56.6 24 1017.6 24 9999.9 0 9.0 24 7.1 24 13.0 16.9 78.8* 60.8* 0.00I 999.9 000000\n712650 99999 19990803 69.7 23 54.5 23 1019.9 23 9999.9 0 9.0 23 8.1 23 15.0 
19.0 79.2 60.8 0.00I 999.9 000000\n712650 99999 19990804 66.9 23 63.0 23 1014.2 21 9999.9 0 7.7 23 5.4 23 8.9 21.0 79.2 60.8 99.99 999.9 010000\n712650 99999 19990805 67.5 24 59.5 24 1010.2 24 9999.9 0 8.3 24 4.4 24 11.1 999.9 77.0* 60.8* 0.00I 999.9 000000\n712650 99999 19990806 66.8 20 54.0 20 1011.6 20 9999.9 0 8.8 20 9.2 20 18.1 23.9 78.8* 59.0* 99.99 999.9 010000\n712650 99999 19990807 64.4 23 55.7 23 1015.9 23 9999.9 0 9.0 23 5.5 23 11.1 15.9 71.6* 57.2* 0.00I 999.9 000000\n712650 99999 19990808 68.9 23 62.3 23 1007.8 23 9999.9 0 6.6 23 9.5 23 22.0 29.9 78.8* 64.4* 99.99 999.9 010000\n712650 99999 19990809 62.9 22 50.7 22 1013.9 22 9999.9 0 9.0 22 6.6 22 15.0 21.0 69.8* 55.4* 0.00I 999.9 000000\n712650 99999 19990810 62.6 23 54.8 23 1010.4 23 9999.9 0 8.7 23 6.9 23 18.1 999.9 74.1 60.8 99.99 999.9 010000\n712650 99999 19990811 66.0 23 63.5 23 1008.8 23 9999.9 0 4.0 23 3.8 23 9.9 999.9 71.6* 62.6* 0.00I 999.9 000000\n712650 99999 19990812 68.8 21 62.3 21 1013.7 21 9999.9 0 7.6 21 4.3 21 9.9 999.9 78.8* 62.6* 0.00I 999.9 000000\n712650 99999 19990813 70.8 21 67.4 21 1007.6 17 9999.9 0 4.9 21 6.0 21 11.1 999.9 75.2* 68.0* 99.99 999.9 010000\n712650 99999 19990814 66.3 21 60.6 21 1008.7 21 998.7 4 8.5 21 9.2 21 16.9 22.0 75.9 61.7 99.99 999.9 010000\n712650 99999 19990815 66.1 22 55.1 22 1019.5 22 1009.6 4 8.9 22 5.0 22 12.0 999.9 71.8* 60.8* 0.00I 999.9 000000\n712650 99999 19990816 67.3 23 58.3 23 1022.2 22 1013.0 4 8.9 23 3.9 23 9.9 999.9 75.2* 62.6* 0.00I 999.9 000000\n712650 99999 19990817 73.3 22 63.9 22 1013.4 21 1004.8 4 7.6 22 9.0 22 15.0 21.0 84.7* 66.2* 0.00I 999.9 000000\n712650 99999 19990818 68.4 23 59.5 23 1014.0 23 1004.2 4 8.9 23 7.3 23 12.0 15.0 86.0 61.7 0.00I 999.9 000000\n712650 99999 19990819 67.5 23 57.1 23 1016.5 23 1007.1 4 8.9 23 6.9 23 11.1 999.9 74.8 61.7 0.00I 999.9 000000\n712650 99999 19990820 67.1 24 59.6 24 1016.8 24 1007.8 4 8.8 24 6.3 24 14.0 18.1 71.6 63.0 99.99 999.9 010000\n712650 99999 19990821 67.8 23 62.3 23 1017.6 23 1008.2 4 8.9 23 9.0 23 14.0 18.1 72.1 63.0 0.24G 999.9 010000\n712650 99999 19990822 68.3 23 60.0 23 1017.8 23 9999.9 0 8.9 23 4.9 23 8.9 999.9 74.1 61.2 0.00G 999.9 000000\n712650 99999 19990823 70.8 23 61.3 23 1016.9 23 1007.9 4 8.6 23 3.8 23 7.0 999.9 79.0 61.2 0.00I 999.9 000000\n712650 99999 19990824 72.5 23 66.3 23 1014.8 22 1005.9 4 8.4 23 10.3 23 15.0 999.9 79.9 62.6 99.99 999.9 010000\n712650 99999 19990825 72.1 23 67.7 23 1014.4 22 1005.3 4 5.7 23 5.2 23 13.0 15.9 76.1 68.9 99.99 999.9 010000\n712650 99999 19990826 72.4 23 67.7 23 1012.7 23 1003.8 4 8.6 23 9.3 23 15.0 18.1 75.7 68.9 99.99 999.9 010000\n712650 99999 19990827 72.0 24 67.0 24 1011.6 24 1002.4 4 8.7 24 7.2 24 12.0 999.9 75.2* 68.0* 99.99 999.9 010000\n712650 99999 19990828 76.7 24 66.3 24 1010.7 24 1001.9 4 6.7 24 8.5 24 16.9 25.1 89.6* 69.8* 99.99 999.9 010000\n712650 99999 19990829 67.7 17 47.7 17 1015.5 15 1005.2 4 8.9 17 12.3 17 18.1 27.0 80.6* 62.6* 0.00I 999.9 000000\n712650 99999 19990830 60.4 20 48.2 20 1026.3 15 9999.9 0 9.0 20 9.7 20 15.9 21.0 64.4* 53.6* 0.00I 999.9 000000\n712650 99999 19990831 64.1 23 55.9 23 1026.3 23 1017.1 4 8.9 23 4.6 23 8.0 999.9 70.2 57.6 0.00I 999.9 000000\n712650 99999 19990901 66.9 24 58.9 24 1023.0 22 1014.1 4 8.9 23 3.2 24 6.0 999.9 73.4 56.5 99.99 999.9 010000\n712650 99999 19990902 69.7 24 59.6 24 1020.3 24 1011.4 4 8.5 24 2.9 24 8.0 999.9 82.4* 60.8* 0.00I 999.9 000000\n712650 99999 19990903 72.2 23 60.2 23 1018.8 23 9999.9 0 8.5 23 3.2 23 8.0 999.9 82.4* 62.6* 0.00I 999.9 000000\n712650 99999 19990904 73.4 23 62.7 23 
1018.6 23 1009.5 4 7.9 23 3.4 23 9.9 999.9 81.9 62.1 0.00I 999.9 000000\n712650 99999 19990905 74.8 23 67.1 23 1018.0 22 1009.2 4 8.0 23 8.7 23 14.0 999.9 80.2 66.4 99.99 999.9 010000\n712650 99999 19990906 75.4 23 70.0 23 1012.5 21 9999.9 0 9.0 23 7.2 23 11.1 999.9 80.6* 71.6* 99.99 999.9 010000\n712650 99999 19990907 72.0 24 68.1 24 1009.4 21 1000.4 4 7.4 24 7.1 24 14.0 15.9 77.0* 68.0* 99.99 999.9 010000\n712650 99999 19990908 72.8 24 66.7 24 1009.0 23 1000.2 4 5.8 24 6.0 24 13.0 21.0 78.8* 68.0* 0.10G 999.9 010000\n712650 99999 19990909 72.0 23 67.7 23 1007.3 21 9999.9 0 6.2 23 9.2 23 15.9 999.9 78.6 68.0 0.00G 999.9 010000\n712650 99999 19990910 66.2 22 54.2 22 1008.6 20 9999.9 0 9.0 22 8.5 22 15.0 19.0 73.4* 60.8* 0.00I 999.9 000000\n712650 99999 19990911 66.6 24 51.5 24 1012.2 24 1002.3 4 9.0 24 14.2 24 19.0 22.0 72.0 59.7 0.00G 999.9 010000\n712650 99999 19990912 63.4 21 55.6 21 1019.0 21 1010.0 4 8.9 21 6.4 21 14.0 26.0 73.8 53.8 0.00G 999.9 000000\n712650 99999 19990913 68.8 21 65.2 21 1015.0 15 9999.9 0 7.8 21 11.2 21 20.0 999.9 72.0 66.9 99.99 999.9 010000\n712650 99999 19990914 66.1 23 59.1 23 1016.0 23 9999.9 0 7.0 23 9.0 23 15.9 999.9 72.0 57.9 0.00H 999.9 010000\n712650 99999 19990915 61.6 24 52.0 24 1019.2 24 1009.7 4 9.0 24 5.4 24 13.0 35.0 71.8 53.4 0.00G 999.9 000000\n712650 99999 19990916 63.4 24 51.7 24 1017.0 24 1008.2 4 9.0 24 10.9 24 19.0 21.0 71.1 53.4 0.00G 999.9 000000\n712650 99999 19990917 61.2 22 46.6 22 1018.0 22 9999.9 0 9.0 22 11.3 22 15.9 24.1 69.8* 53.6* 0.00G 999.9 000000\n712650 99999 19990918 60.8 24 50.2 24 1020.3 24 1011.3 4 9.0 24 5.8 24 9.9 15.0 70.5 51.4 0.00I 999.9 000000\n712650 99999 19990919 62.9 22 57.8 22 1017.4 22 1008.3 4 8.6 22 5.0 22 11.1 999.9 69.8* 55.4* 0.00I 999.9 000000\n712650 99999 19990920 64.0 23 58.7 23 1010.8 22 9999.9 0 8.2 23 7.9 23 14.0 21.0 70.2 55.0 99.99 999.9 010000\n712650 99999 19990921 52.0 24 39.8 24 1017.7 24 1007.9 4 9.0 24 9.6 24 15.0 23.9 68.2 45.5 0.00I 999.9 000000\n712650 99999 19990922 51.5 23 39.4 23 1015.8 23 1007.4 4 8.9 23 7.3 23 11.1 999.9 64.4* 41.0* 0.00I 999.9 000000\n712650 99999 19990923 60.6 24 49.7 24 1008.0 23 999.3 4 9.0 24 10.5 24 21.0 23.9 69.8* 53.6* 99.99 999.9 010000\n712650 99999 19990924 61.8 24 53.3 24 1007.1 24 997.5 4 8.9 24 7.7 24 15.9 27.0 69.6 53.4 0.00G 999.9 010000\n712650 99999 19990925 56.6 24 46.5 24 1016.2 24 1006.1 4 9.0 24 6.2 24 8.9 19.0 67.6 51.3 0.08G 999.9 000000\n712650 99999 19990926 59.3 24 52.2 24 1020.4 24 1011.2 4 9.0 24 9.7 24 20.0 21.0 64.4* 53.6* 0.00G 999.9 000000\n712650 99999 19990927 63.9 20 60.6 20 1020.2 20 1010.8 4 6.1 20 7.8 20 14.0 24.1 70.2 55.6 0.00G 999.9 000000\n712650 99999 19990928 66.7 4 64.6 4 1020.2 4 1011.0 4 999.9 0 6.8 4 11.1 999.9 70.9 60.3 0.00D 999.9 000000\n712650 99999 19990930 57.2 4 51.0 4 1004.7 4 995.6 4 7.3 4 13.7 4 20.0 36.9 69.6 51.3 1.87G 999.9 000000\n712650 99999 19991001 58.5 8 42.9 8 1012.8 8 1001.3 4 8.8 8 12.9 8 18.1 32.1 62.6 51.3 0.06G 3.1 000000\n712650 99999 19991002 52.7 4 45.8 4 1017.7 4 1008.4 4 8.7 4 4.0 4 7.0 24.1 63.3 46.0 0.00G 3.5 000000\n712650 99999 19991004 46.6 9 36.3 9 1024.1 9 1015.1 4 8.7 9 8.4 9 12.0 22.0 54.5 38.7 0.28G 2.8 000000\n712650 99999 19991005 47.0 24 38.9 24 1022.1 24 9999.9 0 9.0 24 5.6 24 11.1 25.1 55.4* 40.6* 0.10G 0.4 000000\n712650 99999 19991006 49.7 23 37.8 23 1018.7 21 1008.2 4 8.9 23 14.0 23 18.1 25.1 55.8 38.3 0.00G 0.4 010000\n712650 99999 19991007 39.0 22 24.8 22 1030.5 22 1020.4 4 8.9 22 7.3 22 9.9 999.9 53.2 34.3 0.00D 1.2 000000\n712650 99999 19991008 51.5 21 45.0 21 1022.3 
19 1014.6 4 8.7 21 6.6 21 15.0 999.9 59.0* 42.4* 0.02D 2.0 010000\n712650 99999 19991009 55.9 19 54.4 19 1018.5 13 9999.9 0 3.5 19 9.2 19 14.0 999.9 61.3 45.0 0.22C 2.0 010000\n712650 99999 19991010 56.6 22 56.0 22 1016.1 15 1007.4 4 2.7 22 2.4 22 6.0 999.9 62.6* 51.8* 0.00D 2.0 000000\n712650 99999 19991011 57.4 21 45.9 21 1019.3 21 1009.2 4 8.3 21 9.5 21 15.9 23.9 64.4* 49.5* 0.02D 999.9 010000\n712650 99999 19991012 49.0 20 40.5 20 1025.9 18 1017.2 4 8.6 20 5.3 20 11.1 999.9 55.6* 42.8* 0.00D 999.9 000000\n712650 99999 19991013 55.6 22 52.8 22 1009.6 22 9999.9 0 7.0 22 6.8 22 15.0 999.9 62.1 41.9 0.00H 0.4 010000\n712650 99999 19991014 44.2 22 35.8 22 1015.6 20 1004.4 4 8.3 22 13.0 22 22.9 35.0 52.0* 37.4* 1.59D 999.9 010000\n712650 99999 19991015 49.7 24 40.7 24 1020.9 24 1011.8 4 9.0 24 6.1 24 12.0 15.0 57.2* 44.6* 0.02D 999.9 000000\n712650 99999 19991016 55.7 23 51.6 23 1015.1 23 1006.6 4 8.8 23 6.6 23 15.0 999.9 62.6 42.6 0.00D 2.0 000000\n712650 99999 19991017 55.5 24 51.0 24 1014.3 22 1005.0 4 8.3 24 7.4 24 15.9 21.0 64.2 49.1 0.00D 2.4 010000\n712650 99999 19991018 47.2 24 39.5 24 1017.8 23 9999.9 0 9.0 24 7.2 24 12.0 15.9 51.8* 42.8* 0.00C 999.9 000000\n712650 99999 19991019 46.1 23 37.5 23 1024.8 23 1015.5 4 8.9 23 8.6 23 14.0 20.0 51.3 39.0 0.00G 2.0 000000\n712650 99999 19991020 50.3 23 41.1 23 1020.2 22 1011.2 4 8.9 23 9.7 23 21.0 28.9 55.9 40.8 0.00G 999.9 000000\n712650 99999 19991021 47.7 24 37.1 24 1018.0 24 1009.5 4 9.0 24 9.0 24 18.1 999.9 56.1 39.4 0.00D 2.0 000000\n712650 99999 19991022 52.5 24 44.9 24 999.1 22 990.9 4 9.0 24 15.0 24 22.9 28.0 55.8 39.4 0.00G 2.0 010000\n712650 99999 19991023 45.6 24 39.0 24 999.4 24 989.7 4 9.0 24 11.8 24 18.1 29.9 55.8 42.1 0.08G 2.0 000000\n712650 99999 19991024 44.0 24 34.6 24 1012.1 24 1001.3 4 9.0 24 12.1 24 18.1 27.0 49.6 39.4 0.00G 999.9 000000\n712650 99999 19991025 45.4 23 36.9 23 1017.6 23 1008.3 4 8.9 23 13.0 23 18.1 29.9 53.8 39.4 0.00G 2.4 000000\n712650 99999 19991026 51.7 24 39.2 24 1011.6 23 1002.1 4 9.0 24 13.4 24 20.0 28.0 59.2 40.8 0.02G 3.1 000000\n712650 99999 19991027 44.4 23 34.1 23 1023.9 23 1013.3 4 8.9 23 8.3 23 14.0 16.9 49.5* 41.0* 0.00G 2.0 000000\n712650 99999 19991028 46.6 23 41.1 23 1024.4 23 1016.1 4 8.9 23 6.1 23 11.1 999.9 55.6 40.3 0.00D 0.4 000000\n712650 99999 19991029 48.7 23 44.7 23 1024.5 23 1014.2 4 7.4 23 7.0 23 13.0 999.9 57.2 40.3 0.00D 999.9 000000\n712650 99999 19991030 51.2 21 47.9 21 1024.6 21 1015.5 4 7.7 21 7.3 21 12.0 999.9 57.6* 48.7* 0.00D 999.9 000000\n712650 99999 19991031 55.7 24 48.5 24 1020.6 21 1011.2 4 5.5 24 7.6 24 15.0 25.1 64.4* 48.2* 0.04D 999.9 010000\n712650 99999 19991101 47.1 24 43.6 24 1020.6 24 1012.0 4 7.8 24 4.5 24 12.0 999.9 65.8 39.6 0.00D 999.9 000000\n712650 99999 19991102 49.3 24 47.8 24 1005.6 22 998.0 4 4.3 24 11.0 24 25.1 35.9 55.8 39.6 0.31D 999.9 010000\n712650 99999 19991103 37.1 24 34.0 24 994.6 20 985.3 4 7.5 24 22.1 24 30.9 42.9 40.1* 35.1* 1.46G 999.9 010000\n712650 99999 19991104 40.7 24 31.3 24 1015.3 22 1004.0 4 9.0 24 18.0 24 26.0 33.0 46.4* 36.9* 0.06G 999.9 010000\n712650 99999 19991105 47.0 23 41.7 23 1018.8 23 1010.1 4 8.9 23 12.2 23 25.1 30.9 55.4* 40.5* 0.00G 999.9 000000\n712650 99999 19991106 46.3 22 34.5 22 1021.9 22 9999.9 0 9.0 22 11.4 22 16.9 31.1 56.3 39.7 0.00G 999.9 000000\n712650 99999 19991107 39.3 23 27.0 23 1027.0 23 1017.1 4 8.9 23 7.7 23 15.0 19.0 49.6 33.4 0.00D 999.9 000000\n712650 99999 19991108 40.3 22 30.1 22 1024.2 22 1015.6 4 8.9 22 5.3 22 11.1 999.9 46.4* 35.6* 0.00D 999.9 000000\n712650 99999 19991109 47.0 21 
43.1 21 1013.6 21 9999.9 0 7.6 21 6.8 21 16.9 22.0 55.4* 41.0* 0.00C 999.9 000000\n712650 99999 19991110 50.6 24 47.7 24 1009.4 23 999.7 4 6.0 24 7.7 24 15.9 19.0 56.3 39.9 0.02D 999.9 010000\n712650 99999 19991111 35.2 23 23.5 23 1027.1 23 1015.7 4 8.9 23 12.0 23 18.1 25.1 57.6 30.0 0.14G 999.9 000000\n712650 99999 19991112 39.0 22 31.0 22 1024.7 22 1017.2 4 8.7 22 6.2 22 13.0 999.9 44.6* 34.5* 0.00G 999.9 000000\n712650 99999 19991113 44.4 22 41.5 22 1017.8 22 1009.0 4 7.5 22 7.3 22 11.1 999.9 48.2* 42.8* 0.02D 999.9 000000\n712650 99999 19991114 47.2 24 38.3 24 1005.8 23 996.5 4 5.9 24 13.9 24 22.9 35.0 55.4* 41.0* 0.00D 999.9 000000\n712650 99999 19991115 38.7 23 26.2 23 1013.1 21 9999.9 0 8.8 23 12.6 23 19.0 38.1 42.8* 35.6* 0.00G 999.9 000000\n712650 99999 19991116 30.4 22 18.3 22 1011.8 22 1001.9 4 8.9 22 13.1 22 22.9 29.9 39.6 26.1 0.00D 999.9 000000\n712650 99999 19991117 31.0 22 20.2 22 1019.4 22 1009.5 4 8.9 22 8.7 22 15.0 15.9 39.2* 25.2* 0.00D 999.9 000000\n712650 99999 19991118 39.5 22 29.2 22 1020.8 20 9999.9 0 9.0 22 8.5 22 12.0 999.9 44.6* 35.6* 0.00H 999.9 010000\n712650 99999 19991119 46.4 21 38.2 21 1018.7 21 1010.2 4 8.2 21 6.0 21 14.0 999.9 51.8* 41.0* 0.00D 999.9 000000\n712650 99999 19991120 46.6 24 42.6 24 1015.2 22 1005.4 4 6.2 24 7.0 24 13.0 999.9 53.2 40.8 0.18D 999.9 010000\n712650 99999 19991121 44.1 21 43.3 21 1020.5 19 9999.9 0 4.4 21 4.4 21 12.0 999.9 50.9 42.3 0.00B 999.9 000000\n712650 99999 19991122 47.7 24 46.4 24 1019.8 21 1010.3 4 3.8 24 7.1 24 13.0 999.9 51.1 43.0 0.02D 999.9 010000\n712650 99999 19991123 49.4 24 48.3 24 1023.0 23 1013.5 4 2.4 24 5.8 24 16.9 999.9 55.2 43.2 0.02D 999.9 000000\n712650 99999 19991124 48.5 24 44.4 24 1015.8 24 1006.1 4 6.3 24 13.8 24 23.9 29.9 55.2 43.2 0.00D 999.9 010000\n712650 99999 19991125 42.3 24 35.0 24 1024.9 24 1015.1 4 9.0 24 6.7 24 19.0 32.1 52.7 38.1 0.00G 999.9 000000\n712650 99999 19991126 42.2 24 41.3 24 1013.8 19 1005.4 4 6.1 24 12.2 24 21.0 27.0 45.1 38.1 0.00G 999.9 010000\n712650 99999 19991127 43.6 23 39.5 23 1010.4 22 1000.2 4 7.8 23 11.1 23 16.9 21.0 48.2* 41.0* 0.26D 999.9 010000\n712650 99999 19991128 41.1 22 30.1 22 1020.2 22 1009.3 4 8.9 22 14.4 22 21.0 28.9 48.7 37.2 0.00D 999.9 000000\n712650 99999 19991129 35.4 23 26.6 23 1026.5 21 1016.4 4 8.9 23 11.2 23 16.9 23.9 38.7* 32.0* 0.00D 999.9 001000\n712650 99999 19991130 27.9 23 15.6 23 1037.0 23 1026.5 4 8.9 23 7.5 23 12.0 27.0 32.0* 23.0* 0.00G 999.9 000000\n712650 99999 19991201 28.0 23 16.0 23 1036.0 23 1026.9 4 8.9 23 5.2 23 8.0 999.9 33.8* 23.0* 0.00D 999.9 001000\n712650 99999 19991202 37.9 21 30.9 21 1025.1 21 1016.1 4 8.9 21 10.2 21 16.9 999.9 41.2 22.8 0.00D 999.9 000000\n712650 99999 19991203 43.8 23 39.5 23 1017.6 19 1008.4 4 8.9 23 4.6 23 8.9 999.9 48.9 40.5 0.02D 999.9 010000\n712650 99999 19991204 46.2 24 44.7 24 1016.4 21 9999.9 0 6.3 24 7.1 24 15.9 999.9 55.4* 42.8* 0.02B 999.9 010000\n712650 99999 19991205 47.0 24 46.7 24 1013.0 21 1005.5 4 3.4 24 5.4 24 13.0 999.9 49.6 44.4 0.08C 999.9 010000\n712650 99999 19991206 42.9 24 40.1 24 1010.4 17 9999.9 0 7.7 24 8.1 24 12.0 18.1 48.2* 37.4* 0.36C 999.9 010000\n712650 99999 19991207 36.3 24 27.5 24 1022.9 24 1012.6 4 8.9 23 9.2 24 15.9 18.1 37.4* 33.8* 0.00D 999.9 000000\n712650 99999 19991208 37.4 23 32.7 23 1026.1 23 1016.5 4 7.9 22 5.1 23 14.0 999.9 43.2 30.7 0.00D 999.9 000000\n712650 99999 19991209 37.9 24 36.9 24 1025.2 20 1016.4 4 2.9 24 3.0 24 8.9 999.9 43.5 31.8 0.00D 999.9 000000\n712650 99999 19991210 42.7 23 38.7 23 1010.8 21 1002.3 4 5.9 23 9.0 23 22.9 35.9 45.5 31.8 
0.14D 999.9 010000\n712650 99999 19991211 34.2 21 22.8 21 1020.9 21 1010.4 4 8.9 21 15.5 21 23.9 35.9 47.1 31.6 0.00D 999.9 000000\n712650 99999 19991212 31.6 22 25.5 22 1022.2 20 1012.7 4 8.9 22 5.8 22 15.0 999.9 37.4* 27.3* 0.00G 999.9 000000\n712650 99999 19991213 36.5 24 32.2 24 1018.2 23 9999.9 0 7.5 24 6.2 24 15.0 15.9 41.0* 33.8* 0.02C 999.9 010000\n712650 99999 19991214 35.9 22 30.6 22 1018.9 22 1010.1 4 8.5 22 22.7 22 35.0 40.0 41.2 34.0 0.02D 999.9 010000\n712650 99999 19991215 40.8 23 38.2 23 1010.8 18 1002.5 4 7.1 23 9.7 23 26.0 32.1 46.4 34.0 0.18D 999.9 010000\n712650 99999 19991216 39.8 24 32.1 24 1006.2 22 9999.9 0 8.9 24 20.9 24 28.9 36.9 46.6 37.2 0.06D 999.9 011000\n712650 99999 19991217 31.6 23 16.3 23 1016.6 23 1006.0 4 8.9 23 16.9 23 22.9 35.9 41.4 26.6 0.00G 999.9 000000\n712650 99999 19991218 27.5 22 17.5 22 1026.6 22 9999.9 0 9.0 22 5.3 22 12.0 999.9 33.6 23.5 0.00C 999.9 000000\n712650 99999 19991219 31.0 23 21.3 23 1028.5 23 1019.0 4 8.9 23 10.4 23 22.0 27.0 35.6* 25.0* 0.00D 999.9 000000\n712650 99999 19991220 40.4 23 35.3 23 1016.4 22 1008.5 4 8.6 23 10.9 23 23.9 30.9 44.6* 36.7* 0.02D 999.9 010000\n712650 99999 19991221 28.2 18 14.5 18 1019.3 17 9999.9 0 9.0 18 19.2 18 29.9 36.9 45.1 24.3 0.02C 999.9 000000\n712650 99999 19991222 21.5 23 10.1 23 1024.4 23 9999.9 0 9.0 23 14.9 23 20.0 31.1 28.2 18.7 0.00G 999.9 000000\n712650 99999 19991223 22.1 22 10.3 22 1022.4 21 9999.9 0 8.6 22 15.8 22 22.0 26.0 25.0 18.7 0.00G 999.9 001000\n712650 99999 19991224 18.5 24 7.7 24 1023.5 24 1013.4 4 9.0 24 8.4 24 15.9 27.0 24.8* 14.0* 0.00G 999.9 000000\n712650 99999 19991225 20.8 24 11.4 24 1025.5 24 1016.7 4 9.0 24 11.2 24 30.9 36.9 30.2* 15.8* 0.00G 999.9 001000\n712650 99999 19991226 31.1 23 19.8 23 1002.6 20 9999.9 0 8.6 23 26.2 23 36.9 43.9 33.8* 26.6* 0.00G 999.9 001000\n712650 99999 19991227 21.9 22 12.4 22 1008.7 22 998.3 4 8.3 22 9.6 22 16.9 999.9 26.6* 17.6* 0.00D 999.9 001000\n712650 99999 19991228 18.9 24 11.0 24 1005.5 24 997.5 4 8.3 24 13.9 24 26.0 30.9 27.0* 10.4* 0.00G 999.9 001000\n712650 99999 19991229 23.1 24 14.9 24 1004.7 23 994.4 4 8.1 24 13.1 24 29.9 35.9 32.0* 14.0* 0.00G 999.9 001000\n712650 99999 19991230 34.5 24 24.7 24 1003.9 24 992.8 4 8.7 24 17.9 24 35.0 45.1 42.3 12.6 0.00G 999.9 001000\n712650 99999 19991231 22.9 24 13.1 24 1018.2 24 1008.2 4 9.0 24 6.2 24 11.1 29.9 42.3 16.3 0.02G 999.9 000000\n'''\n\n\[email protected]\[email protected]\ndef test_get_station_year_text():\n downloaded_data = get_station_year_text(712650, 99999, 1999)\n try:\n downloaded_data = downloaded_data.decode('utf-8')\n except:\n pass\n assert downloaded_data == sample_data_random_station_1999\n \n with pytest.raises(Exception):\n get_station_year_text(712650, 99999, 19999999999)\n\n\[email protected]\[email protected]\ndef test_geocode():\n latlon = geocode('Fredericton, NB')\n assert_allclose(latlon, (45.966425, -66.645813), rtol=1e-4)", "# -*- coding: utf-8 -*-\n'''Chemical Engineering Design Library (ChEDL). 
Utilities for process modeling.\nCopyright (C) 2016, 2017 Caleb Bell <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\nfrom fluids import *\nfrom numpy.testing import assert_allclose\nimport numpy as np\nimport pytest\nfrom scipy.interpolate import splrep\n\ndef test_control_valve():\n from fluids.control_valve import cavitation_index, FF_critical_pressure_ratio_l, is_choked_turbulent_l, is_choked_turbulent_g, Reynolds_valve, loss_coefficient_piping, Reynolds_factor\n CI = cavitation_index(1E6, 8E5, 2E5)\n assert_allclose(CI, 4.0)\n\n FF = FF_critical_pressure_ratio_l(70100.0, 22120000.0)\n assert_allclose(FF, 0.9442375225233299)\n\n F = is_choked_turbulent_l(460.0, 680.0, 70.1, 0.9442375225233299, 0.9)\n T = is_choked_turbulent_l(460.0, 680.0, 70.1, 0.9442375225233299, 0.6)\n assert_allclose([False, True], [F, T])\n\n with pytest.raises(Exception):\n is_choked_turbulent_l(460.0, 680.0, 70.1, 0.9442375225233299)\n\n # Example 4, compressible flow - small flow trim sized for gas flow:\n assert False == is_choked_turbulent_g(0.536, 1.193, 0.8)\n # Custom example\n assert True == is_choked_turbulent_g(0.9, 1.193, 0.7)\n\n with pytest.raises(Exception):\n is_choked_turbulent_g(0.544, 0.929)\n\n Rev = Reynolds_valve(3.26e-07, 360, 100.0, 0.6, 0.98, 238.05817216710483)\n assert_allclose(Rev, 6596953.826574914)\n\n Rev = Reynolds_valve(3.26e-07, 360, 150.0, 0.9, 0.46, 164.9954763704956)\n assert_allclose(Rev, 2967024.346783506)\n\n K = loss_coefficient_piping(0.05, 0.08, 0.1)\n assert_allclose(K, 0.6580810546875)\n\n ### Reynolds factor (laminar)\n # In Example 4, compressible flow with small flow trim sized for gas flow\n # (Cv in the problem was converted to Kv here to make FR match with N32, N2):\n f = Reynolds_factor(FL=0.98, C=0.015483, d=15., Rev=1202., full_trim=False)\n assert_allclose(f, 0.7148753122302025)\n\n # Custom, same as above but with full trim:\n f = Reynolds_factor(FL=0.98, C=0.015483, d=15., Rev=1202., full_trim=True)\n assert_allclose(f, 0.9875328782172637)\n\n # Example 4 with Rev < 10:\n f = Reynolds_factor(FL=0.98, C=0.015483, d=15., Rev=8., full_trim=False)\n assert_allclose(f, 0.08339546213461975)\n\n # Same, with full_trim\n f = Reynolds_factor(FL=0.98, C=0.015483, d=15., Rev=8., full_trim=True)\n assert_allclose(f, 43.619397389803986)\n\ndef test_control_valve_size_l():\n ### Control valve liquid\n # From [1]_, matching example 1 for a globe, parabolic plug,\n # flow-to-open valve.\n\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, 
P1=680E3, P2=220E3, Q=0.1, D1=0.15, D2=0.15, d=0.15, FL=0.9, Fd=0.46)\n assert_allclose(Kv, 164.9954763704956)\n\n # Same as above - diameters removed\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1)\n assert_allclose(Kv, 164.9954763704956)\n\n # From [1]_, matching example 2 for a ball, segmented ball,\n # flow-to-open valve.\n\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1, D1=0.1, D2=0.1, d=0.1, FL=0.6, Fd=0.98)\n assert_allclose(Kv, 238.05817216710483)\n\n # Modified example 1 with non-choked flow, with reducer and expander\n\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1, D1=0.1, D2=0.09, d=0.08, FL=0.9, Fd=0.46)\n assert_allclose(Kv, 177.44417090966715)\n\n # Modified example 2 with non-choked flow, with reducer and expander\n\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1, D1=0.1, D2=0.1, d=0.95, FL=0.6, Fd=0.98)\n assert_allclose(Kv, 230.1734424266345)\n \n # Same, test intermediate values\n ans = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1, D1=0.1, D2=0.1, d=0.95, FL=0.6, Fd=0.98, full_output=True)\n del ans['choked']\n del ans['FR']\n ans_expect = {'Kv': 230.1734424266345,\n 'FF': 0.9442375225233299,\n 'FLP': 0.620553360954273,\n 'FP': 0.8112169324177585,\n 'Rev': 6596962.21111206}\n for k in ans_expect.keys():\n assert_allclose(ans[k], ans_expect[k])\n\n # Modified example 2 with laminar flow at 100x viscosity, 100th flow rate, and 1/10th diameters:\n\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-2, P1=680E3, P2=220E3, Q=0.001, D1=0.01, D2=0.01, d=0.01, FL=0.6, Fd=0.98)\n assert_allclose(Kv, 3.0947562381723626)\n\n # Last test, laminar full trim\n Kv = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-2, P1=680E3, P2=220E3, Q=0.001, D1=0.01, D2=0.01, d=0.02, FL=0.6, Fd=0.98)\n assert_allclose(Kv, 3.0947562381723626)\n\n # TODO: find a test where the following is tested, or remove it as unnecessary.\n # if C/FR >= Ci:\n # Ci = iterate_piping_laminar(Ci)\n # Efforts to make this happen have been unsuccessful.\n\n\n\n # Test the ignore choked option\n ans = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=220E3, Q=0.1, D1=0.1, D2=0.1, d=0.1, FL=0.6, Fd=0.98, allow_choked=False, full_output=True)\n assert_allclose(ans['Kv'], 164.9954763704956)\n assert_allclose(ans['Rev'], 7805019.992655547)\n assert ans['choked'] == True # Still true even though the choke is ignored\n assert ans['FF']\n assert ans['FLP'] is None\n assert ans['FP'] is None\n assert ans['FR'] is None\n \n # Test the laminar switch\n for Kv, boolean in zip((0.014547698964079439, 0.011190537664676491), (True, False)):\n ans = size_control_valve_l(rho=965.4, Psat=70.1E3, Pc=22120E3, mu=3.1472E-4, P1=680E3, P2=670E3, Q=0.000001, D1=0.1, D2=0.1, d=0.1, FL=0.6, Fd=0.98, allow_laminar=boolean, full_output=True)\n assert_allclose(ans['Kv'], Kv)\n\n\ndef test_control_valve_size_g():\n # From [1]_, matching example 3 for non-choked gas flow with attached\n # fittings and a rotary, eccentric plug, flow-to-open control valve:\n\n Kv = size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30, Z=0.988, P1=680E3, P2=310E3, Q=38/36., D1=0.08, D2=0.1, d=0.05, FL=0.85, Fd=0.42, xT=0.60)\n assert_allclose(Kv, 72.58664545391052)\n\n # From [1]_, roughly matching example 4 
for a small flow trim sized tapered\n # needle plug valve. Difference is 3% and explained by the difference in\n # algorithms used.\n\n Kv = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1.3E5, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98, Fd=0.07, xT=0.8)\n assert_allclose(Kv, 0.016498765335995726)\n \n # Diameters removed\n Kv = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1.3E5, Q=0.46/3600., xT=0.8)\n assert_allclose(Kv, 0.012691357950765944)\n ans = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1.3E5, Q=0.46/3600., xT=0.8, full_output=True)\n assert ans['laminar'] == False\n assert ans['choked'] == False\n assert ans['FP'] is None\n assert ans['FR'] is None\n assert ans['xTP'] is None\n assert ans['Rev'] is None\n\n # Choked custom example\n Kv = size_control_valve_g(T=433., MW=44.01, mu=1.4665E-4, gamma=1.30, Z=0.988, P1=680E3, P2=30E3, Q=38/36., D1=0.08, D2=0.1, d=0.05, FL=0.85, Fd=0.42, xT=0.60)\n assert_allclose(Kv, 70.67468803987839)\n \n\n # Laminar custom example\n Kv = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1.3E5, Q=0.46/3600., D1=0.015, D2=0.015, d=0.001, FL=0.98, Fd=0.07, xT=0.8)\n assert_allclose(Kv, 0.016498765335995726)\n\n # Laminar custom example with iteration\n Kv = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=2.7E5, Q=0.1/3600., D1=0.015, D2=0.015, d=0.001, FL=0.98, Fd=0.07, xT=0.8)\n assert_allclose(Kv, 0.989125783445497)\n \n # test not allowing chokes\n ans_choked = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1e4, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98, Fd=0.07, xT=0.8, full_output=True, allow_choked=True)\n ans = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1e4, Q=0.46/3600., D1=0.015, D2=0.015, d=0.015, FL=0.98, Fd=0.07, xT=0.8, full_output=True, allow_choked=False)\n assert not np.isclose(ans_choked['Kv'], ans['Kv'], rtol=1E-4)\n \n # Test not allowing laminar\n for Kv, boolean in zip((0.001179609179354541, 0.00090739167642657), (True, False)):\n ans = size_control_valve_g(T=320., MW=39.95, mu=5.625E-5, gamma=1.67, Z=1.0, P1=2.8E5, P2=1e4, Q=1e-5, D1=0.015, D2=0.015, d=0.015, FL=0.98, Fd=0.07, xT=0.8, full_output=True, allow_laminar=boolean)\n assert_allclose(Kv, ans['Kv'])\n \n assert ans['choked'] # Still true even though the choke is ignored\n assert ans['xTP'] is None\n assert ans['Y'] \n assert ans['FP'] is None\n assert ans['FR'] is None\n assert ans['Rev']\n \n # Test a warning is issued and a solution is still returned when in an unending loop\n # Ends with C ratio converged to 0.907207790871228\n \n \n args = {'P1': 680000.0, 'full_output': True, 'allow_choked': True, \n 'Q': 0.24873053149856303, 'T': 433.0, 'Z': 0.9908749375670418, \n 'FL': 0.85, 'allow_laminar': True, 'd': 0.05, 'mu': 2.119519588834806e-05,\n 'MW': 44.0095, 'Fd': 0.42, 'gamma': 1.2431389717945152, 'D2': 0.1, \n 'xT': 0.6, 'D1': 0.08}\n ans = size_control_valve_g(P2=678000., **args)\n assert ans['warning']\n \n # Test Kv does not reach infinity\n kwargs = {'P2': 310000.0028935982, 'P1': 680000.0, 'full_output': True, 'allow_choked': True, 'T': 433.0, 'Z': 0.9896087377962123, 'FL': 0.85, 'allow_laminar': True, 'd': 0.05, 'mu': 2.119519588834806e-05, 'MW': 44.0095, 'Fd': 0.42, 'gamma': 1.2431389717945152, 'D2': 0.1, 'xT': 0.6, 'D1': 0.08}\n size_control_valve_g(Q=1000000000.0, **kwargs)\n\n\ndef 
test_control_valve_choke_P_l():\n P2 = control_valve_choke_P_l(69682.89291024722, 22048320.0, 0.6, 680000.0)\n assert_allclose(P2, 458887.5306077305)\n P1 = control_valve_choke_P_l(69682.89291024722, 22048320.0, 0.6, P2=P2)\n assert_allclose(P1, 680000.0)\n\ndef test_control_valve_choke_P_g():\n P2 = control_valve_choke_P_g(1, 1.3, 1E5)\n assert_allclose(P2, 7142.857142857143)\n P1 = control_valve_choke_P_g(1, 1.3, P2=P2)\n assert_allclose(P1, 100000.0)\n\n\ndef test_control_valve_noise_l_2015():\n m = 30 # kg/s\n P1 = 1E6\n P2 = 8E5\n \n Psat = 2.32E3\n rho = 997.0\n c = 1400.0\n Kv = Cv_to_Kv(90)\n d = .1\n Di =.1071\n FL = 0.92\n Fd = 0.42\n rho_air = 1.293\n c_air = 343.0\n t_pipe = .0036\n \n # Example 1\n noise = control_valve_noise_l_2015(m, P1, P2, Psat, rho, c, Kv, d, Di, FL, Fd,\n t_pipe, rho_pipe=7800.0, c_pipe=5000.0, \n rho_air=rho_air, c_air=343.0, xFz=None, An=-4.6)\n assert_allclose(noise, 65.47210071692108)\n \n # Example 2\n m = 40 # kg/s\n P1 = 1E6\n P2 = 6.5E5\n noise = control_valve_noise_l_2015(m, P1, P2, Psat, rho, c, Kv, d, Di, FL, Fd,\n t_pipe, rho_pipe=7800.0, c_pipe=5000.0, \n rho_air=rho_air, c_air=343.0, xFz=None, An=-4.6)\n assert_allclose(noise, 81.58199982219298)\n\n # Example 3\n m = 40 # kg/s\n P1 = 1E6\n P2 = 6.5E5\n noise = control_valve_noise_l_2015(m, P1, P2, Psat, rho, c, Kv, d, Di, FL, Fd,\n t_pipe, rho_pipe=7800.0, c_pipe=5000.0, \n rho_air=rho_air, c_air=343.0, xFz=0.254340899267+0.1, An=-4.6)\n assert_allclose(noise, 69.93930269695811)\n\ndef test_control_valve_noise_g_2011():\n \n ans = control_valve_noise_g_2011(m=2.22, P1=1E6, P2=7.2E5, T1=450, rho=5.3, \n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(90.0), \n d=0.1, Di=0.2031, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n assert_allclose(ans, 91.67631681476502)\n \n ans = control_valve_noise_g_2011(m=2.29, P1=1E6, P2=6.9E5, T1=450, rho=5.3,\n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(90.0), \n d=0.1, Di=0.2031, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n assert_allclose(ans, 92.80027236454005)\n \n ans = control_valve_noise_g_2011(m=2.59, P1=1E6, P2=4.8E5, T1=450, rho=5.3,\n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(90.0), \n d=0.1, Di=0.2031, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n assert_allclose(97.65988432967984, ans)\n \n ans = control_valve_noise_g_2011(m=1.18, P1=1E6, P2=4.2E5, T1=450, rho=5.3, \n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(40.0), \n d=0.2031, Di=0.2031, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n assert_allclose(94.16189978031449, ans)# should be 94\n \n ans = control_valve_noise_g_2011(m=1.19, P1=1E6, P2=5E4, T1=450, rho=5.3,\n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(40.0), \n d=0.2031, Di=0.2031, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n assert_allclose(ans, 97.48317214321824)\n \n ans = control_valve_noise_g_2011(m=0.89, P1=1E6, P2=5E4, T1=450, rho=5.3,\n gamma=1.22, MW=19.8, Kv=Cv_to_Kv(30.0), \n d=0.1, Di=0.15, FL=None, FLP=0.792, FP=0.98,\n Fd=0.2959450058448346,\n t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, \n rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)\n 
assert_allclose(ans, 93.38835049261132)\n \n\ndef test_opening_quick_data():\n from fluids.control_valve import opening_quick_tck, opening_quick, frac_CV_quick\n tck_recalc = splrep(opening_quick, frac_CV_quick, k=3, s=0)\n [assert_allclose(i, j) for i, j in zip(opening_quick_tck, tck_recalc)]\n\ndef test_opening_equal_data():\n from fluids.control_valve import opening_equal, frac_CV_equal, opening_equal_tck\n tck_recalc = splrep(opening_equal, frac_CV_equal, k=3, s=0)\n [assert_allclose(i, j) for i, j in zip(opening_equal_tck, tck_recalc)]\n", "# -*- coding: utf-8 -*-\n'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.\nCopyright (C) 2018 Caleb Bell <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\nfrom __future__ import division\nfrom fluids import *\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\n\ndef test_liquid_jet_pump_ancillary():\n # This equation has been checked from theory 2018-05-08 - it is\n # confirmed to be correct more than the large one!!!\n rhop=998.\n rhos=1098.\n Ks=0.11\n Kp=.04\n \n solution_vars = {'P1': 426256.1597041593,\n 'P2': 133600,\n 'Qp': 0.01,\n 'Qs': 0.01,\n 'd_mixing': 0.045,\n 'd_nozzle': 0.022382858811037732}\n \n for key, value in solution_vars.items():\n kwargs = dict(solution_vars)\n del kwargs[key]\n new_value = liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, **kwargs)\n assert_allclose(new_value, value)\n\n\[email protected]\[email protected]\ndef test_liquid_jet_pump_ancillary_rhos_Ks_Ps():\n for rhop in [998., 1050, 1150, 1250, 800]:\n for rhos in [1098., 1100, 1200, 1600, 4000, 100]:\n for Ks in [1E-9, 1E-3, 0.11, .5, 1, 5, 10, 100, 1000]:\n for Kp in [1E-9, 1E-3, 0.11, .5, 1, 5, 10, 100, 1000]:\n for P_mult in [0.1, 0.5, 1, 2, 10]:\n solution_vars = {'P1': 426256.1597041593,\n 'P2': 133600,\n 'Qp': 0.01,\n 'd_mixing': 0.045,\n 'd_nozzle': 0.022382858811037732}\n solution_vars['P1'] *= P_mult\n if solution_vars['P1'] < solution_vars['P2']:\n continue\n \n # Finish calculating good known values\n solution_vars['Qs'] = liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, **solution_vars)\n if solution_vars['Qs'].imag:\n # Do not keep testing if obtained an imaginary flow rate\n continue\n # Try each variable with the solver\n for key, value in solution_vars.items():\n kwargs = dict(solution_vars)\n del kwargs[key]\n new_value = liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, **kwargs)\n assert_allclose(new_value, value)\n \[email protected] \[email 
protected]\ndef test_liquid_jet_pump_ancillary_d_mixing():\n rhop=998.\n rhos=1098.\n Ks=0.11\n Kp=.04\n \n \n for rhos in [1098., 1100, 1200, 1600, 4000, 100]:\n for Ks in [1E-9, 1E-3, 0.11, .5, 1, 5, 10, 100, 1000]:\n for D_mult in np.linspace(0.1, 10, 100).tolist():\n solution_vars = {'P1': 426256.1597041593,\n 'P2': 133600,\n 'Qp': 0.01,\n 'd_mixing': 0.045,\n 'd_nozzle': 0.022382858811037732}\n solution_vars['d_mixing'] *= D_mult\n if solution_vars['d_mixing'] < solution_vars['d_nozzle']*1.43:\n continue\n \n # Finish calculating good known values\n solution_vars['Qs'] = liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, **solution_vars)\n if solution_vars['Qs'].imag:\n # Do not keep testing if obtained an imaginary flow rate\n continue\n # Try each variable with the solver\n for key, value in solution_vars.items():\n kwargs = dict(solution_vars)\n del kwargs[key]\n # print(solution_vars, key)\n \n new_value = liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, **kwargs)\n assert_allclose(new_value, value)\n \n \ndef validate_liquid_jet_pump(rhop, rhos, Ks, Kp, Km, Kd, nozzle_retracted, \n solution_vars, d_diffuser=None, full=False):\n '''Helper function for testing `liquid_jet_pump`.\n Returns the number of solutions where the return values are the same as \n thosegiven in `solution_vars`, and the number of cases where it is not.\n \n There is nothing wrong with getting a different answer; there are multiple\n solutions in the case of many variable sets.\n \n Raises an exception if a solution cannot be found.\n '''\n if full:\n all_solution_vars = dict(solution_vars)\n solution_vars = dict(solution_vars)\n del solution_vars['M']\n del solution_vars['N']\n del solution_vars['R']\n del solution_vars['alpha']\n del solution_vars['d_diffuser']\n del solution_vars['efficiency']\n \n \n same, different = 0, 0\n done = {}\n for i in solution_vars.keys():\n for j in solution_vars.keys():\n if i == j:\n continue\n elif frozenset([i, j]) in done:\n continue\n # Skip tests with alreardy tested variables; and where the two variables are the same\n \n kwargs = dict(solution_vars)\n del kwargs[i]\n del kwargs[j]\n# print('SOLVING FOR', i, j, kwargs) # \n ans = liquid_jet_pump(rhop=rhop, rhos=rhos, Ks=Ks, Kp=Kp, Km=Km, d_diffuser=d_diffuser, Kd=Kd, max_variations=10000, nozzle_retracted=nozzle_retracted, **kwargs)\n# print(i, j, ans[i], ans[j])\n# print('SOLVED, STARTING NEXT')\n try:\n for key, value in solution_vars.items():\n assert_allclose(value, abs(ans[key]))\n same += 1\n # Since it matched, check the other parameters as well\n if full:\n for key, value in all_solution_vars.items():\n assert_allclose(value, abs(ans[key]))\n except:\n for key, value in ans.items():\n # Had some issues with under zero values\n assert value > 0\n\n different += 1\n done[frozenset([i, j])] = True\n return same, different\n\n\[email protected]\ndef test_liquid_jet_pump_examples_round_robin():\n\n # Example one and two variants\n solution_vars = {'P1': 426256.1597041593,\n 'P2': 133600,\n 'P5': 200000.0,\n 'Qp': 0.01,\n 'Qs': 0.01,\n 'd_mixing': 0.045,\n 'd_nozzle': 0.022382858811037732}\n validate_liquid_jet_pump(rhop=998., rhos=1098., Ks=0.11, Kp=0.04, Km=.186, Kd=0.12, nozzle_retracted=False, solution_vars=solution_vars)\n \n solution_vars = {\n 'P1': 468726.56966322445,\n 'P2': 133600,\n 'P5': 200000.0,\n 'Qp': 0.01,\n 'Qs': 0.001,\n 'd_mixing': 0.0665377148831667,\n 'd_nozzle': 0.022382858811037732}\n validate_liquid_jet_pump(rhop=998., rhos=1098., Ks=0.11, Kp=0.04, Km=.186, Kd=0.12, 
nozzle_retracted=False, solution_vars=solution_vars)\n solution_vars = {\n 'P1': 426256.1597041593,\n 'P2': 133600,\n 'P5': 200000.0,\n 'Qp': 0.1,\n 'Qs': 0.0201,\n 'd_mixing': 0.19926717348339726,\n 'd_nozzle': 0.07320212423451278}\n validate_liquid_jet_pump(rhop=998., rhos=1098., Ks=0.11, Kp=0.04, Km=.186, Kd=0.12, nozzle_retracted=False, solution_vars=solution_vars)\n \n \n # Example 2\n solution_vars = {'P1': 550000.0,\n 'P2': 170000.0,\n 'P5': 192362.72123108635,\n 'Qp': 0.0005588580085548165,\n 'Qs': 0.0018975332068311196,\n 'd_mixing': 0.024,\n 'd_nozzle': 0.0048}\n \n validate_liquid_jet_pump(rhop=790.5, rhos=790.5, Km=.1, Kd=0.1, Ks=0.1, Kp=0.03, nozzle_retracted=False, solution_vars=solution_vars)\n\n\[email protected]\ndef test_liquid_jet_pump_examples_round_robin_Ex3():\n # Example 3\n rhop=765.0\n rhos=765.0\n Km=.15\n Kd=0.12\n Ks=0.38\n Kp=0.05\n nozzle_retracted=True\n d_diffuser=0.0318\n\n # point 5\n solution_vars = {\n 'P1': 1000000.0,\n 'P2': 47500.0,\n 'P5': 109500.0,\n 'Qp': 0.0005587193619566122,\n 'Qs': 0.001400084261324908,\n 'd_mixing': 0.017,\n 'd_nozzle': 0.0038}\n same, different = validate_liquid_jet_pump(nozzle_retracted=nozzle_retracted, d_diffuser=d_diffuser,rhop=rhop, rhos=rhos, Km=Km, Kd=Kd, Ks=Ks, Kp=Kp, solution_vars=solution_vars)\n assert same > 10\n # Point 4\n solution_vars = {\n 'P1': 800000.0,\n 'P2': 46020.0,\n 'P5': 95000.0,\n 'Qp': 0.0004971366273938245,\n 'Qs': 0.0012500084707104235,\n 'd_mixing': 0.017,\n 'd_nozzle': 0.0038}\n same, different = validate_liquid_jet_pump(nozzle_retracted=nozzle_retracted, d_diffuser=d_diffuser,rhop=rhop, rhos=rhos, Km=Km, Kd=Kd, Ks=Ks, Kp=Kp, solution_vars=solution_vars)\n assert same > 10\n\n # Custom point with full validation\n expected = {'M': 2.633280822186772,\n 'N': 0.06818823529411765,\n 'P1': 500000.0,\n 'P2': 46020.0,\n 'P5': 75000.0,\n 'Qp': 0.0003864114714478578,\n 'Qs': 0.0010175299172366153,\n 'R': 0.05097107567130409,\n 'alpha': 0.28014905021125414,\n 'd_diffuser': 0.0318,\n 'd_mixing': 0.016831456429424897,\n 'd_nozzle': 0.0038,\n 'efficiency': 0.1795587722987592}\n same, different = validate_liquid_jet_pump(nozzle_retracted=nozzle_retracted, d_diffuser=d_diffuser,rhop=rhop, rhos=rhos, Km=Km, Kd=Kd, Ks=Ks, Kp=Kp, solution_vars=expected, full=True)\n assert same > 15" ]
[ [ "numpy.testing.assert_allclose" ], [ "numpy.isclose", "scipy.interpolate.splrep", "numpy.testing.assert_allclose" ], [ "numpy.linspace", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
src-d/formatml
[ "f052313391a5a2ca47af7859520f968c423024f1", "f052313391a5a2ca47af7859520f968c423024f1" ]
[ "formatml/models/codrep_model.py", "formatml/pipelines/codrep/run.py" ]
[ "from bisect import bisect_right\nfrom logging import getLogger\nfrom typing import Any, Dict, List, Optional\n\nfrom dgl import unbatch\nfrom torch.nn import LogSoftmax, Module, NLLLoss\n\nfrom formatml.models.model import Model\nfrom formatml.modules.graph_encoders.graph_encoder import GraphEncoder\nfrom formatml.modules.misc.graph_embedding import GraphEmbedding\nfrom formatml.modules.misc.selector import Selector\n\n\nclass CodRepModel(Model):\n \"\"\"\n CodRep Model.\n\n Uses a graph encoder and a projection decoder with an optional RNN.\n \"\"\"\n\n _logger = getLogger(__name__)\n\n def __init__(\n self,\n graph_embedder: GraphEmbedding,\n graph_encoder: GraphEncoder,\n class_projection: Module,\n graph_field_name: str,\n feature_field_names: List[str],\n indexes_field_name: str,\n label_field_name: str,\n ) -> None:\n \"\"\"Construct a complete model.\"\"\"\n super().__init__()\n self.graph_embedder = graph_embedder\n self.graph_encoder = graph_encoder\n self.selector = Selector()\n self.class_projection = class_projection\n self.graph_field_name = graph_field_name\n self.feature_field_names = feature_field_names\n self.indexes_field_name = indexes_field_name\n self.label_field_name = label_field_name\n self.softmax = LogSoftmax(dim=1)\n\n def forward(self, sample: Dict[str, Any]) -> Dict[str, Any]: # type: ignore\n \"\"\"Forward pass of an embedder, encoder and decoder.\"\"\"\n if \"forward\" in sample:\n raise RuntimeError(\"Forward already computed.\")\n if \"loss\" in sample:\n raise RuntimeError(\"Loss already computed.\")\n graph, etypes = sample[self.graph_field_name]\n features = [sample[field_name] for field_name in self.feature_field_names]\n formatting_indexes = sample[self.indexes_field_name].indexes\n graph = self.graph_embedder(graph=graph, features=features)\n encodings = self.graph_encoder(\n graph=graph, feat=graph.ndata[\"x\"], etypes=etypes\n )\n label_encodings = self.selector(tensor=encodings, indexes=formatting_indexes)\n projections = self.class_projection(label_encodings)\n softmaxed = self.softmax(projections)\n labels = sample[self.label_field_name]\n sample[\"forward\"] = softmaxed\n if labels is not None:\n sample[\"loss\"] = NLLLoss(\n weight=softmaxed.new(\n [graph.batch_size, formatting_indexes.numel() - graph.batch_size]\n )\n )(softmaxed, labels)\n return sample\n\n def decode(\n self,\n *,\n sample: Dict[str, Any],\n prefix: str = \"\",\n metadata: Optional[Dict[str, Any]] = None\n ) -> None:\n batched_graph = sample[\"typed_dgl_graph\"].graph\n graphs = unbatch(batched_graph)\n start = 0\n total_number_of_nodes = 0\n bounds = []\n numpy_indexes = sample[\"indexes\"].indexes.cpu().numpy()\n for graph in graphs:\n total_number_of_nodes += graph.number_of_nodes()\n end = bisect_right(numpy_indexes, total_number_of_nodes - 1)\n bounds.append((start, end))\n start = end\n for (start, end), path in zip(bounds, sample[\"metadata\"]):\n path_probas = sample[\"forward\"][start:end, 1]\n path_indexes = sample[\"indexes\"].offsets[start:end]\n predictions = path_indexes[path_probas.argsort(descending=True)]\n if metadata is not None and \"metadata\" in metadata:\n metadata[\"metadata\"][path] = {\n index: [\"%.8f\" % (2 ** proba)]\n for index, proba in zip(path_indexes.tolist(), path_probas.tolist())\n }\n predictions += 1\n print(\"%s%s %s\" % (prefix, path, \" \".join(map(str, predictions.numpy()))))\n\n def build_metadata(self) -> Dict[str, Any]:\n return dict(columns=[\"Probability\"], metadata={})\n", "from argparse import ArgumentParser\nfrom bz2 
import open as bz2_open\nfrom json import dump as json_dump, load as json_load\nfrom pathlib import Path\nfrom pickle import load as pickle_load\nfrom typing import Optional\n\nfrom torch import load as torch_load, no_grad\nfrom torch.utils.data import DataLoader\n\nfrom formatml.datasets.codrep_dataset import CodRepDataset\nfrom formatml.pipelines.codrep.cli_builder import CLIBuilder\nfrom formatml.pipelines.codrep.parse import parse\nfrom formatml.pipelines.codrep.tensorize import tensorize\nfrom formatml.pipelines.codrep.train import build_model\nfrom formatml.pipelines.pipeline import register_step\nfrom formatml.utils.config import Config\nfrom formatml.utils.helpers import setup_logging\n\n\ndef add_arguments_to_parser(parser: ArgumentParser) -> None:\n cli_builder = CLIBuilder(parser)\n cli_builder.add_raw_dir()\n cli_builder.add_uasts_dir()\n cli_builder.add_instance_file()\n cli_builder.add_tensors_dir()\n parser.add_argument(\n \"--checkpoint-file\", required=True, help=\"Path to the model checkpoint.\"\n )\n cli_builder.add_configs_dir()\n parser.add_argument(\n \"--training-configs-dir\",\n required=True,\n help=\"Path to the configs used for training.\",\n )\n parser.add_argument(\n \"--prefix\", required=True, help=\"Path prefixing the output paths.\"\n )\n parser.add_argument(\"--metadata-dir\", help=\"Path to the metadata output directory.\")\n\n\n@register_step(pipeline_name=\"codrep\", parser_definer=add_arguments_to_parser)\ndef run(\n *,\n raw_dir: str,\n uasts_dir: str,\n instance_file: str,\n tensors_dir: str,\n checkpoint_file: str,\n configs_dir: str,\n training_configs_dir: str,\n prefix: str,\n metadata_dir: Optional[str],\n log_level: str,\n) -> None:\n \"\"\"Run the model and output CodRep predictions.\"\"\"\n arguments = locals()\n configs_dir_path = Path(configs_dir).expanduser().resolve()\n configs_dir_path.mkdir(parents=True, exist_ok=True)\n training_configs_dir_path = Path(training_configs_dir).expanduser().resolve()\n tensors_dir_path = Path(tensors_dir).expanduser().resolve()\n Config.from_arguments(\n arguments, [\"instance_file\", \"checkpoint_file\"], \"configs_dir\"\n ).save(configs_dir_path / \"train.json\")\n logger = setup_logging(__name__, log_level)\n\n training_configs = {}\n for step in [\"parse\", \"tensorize\", \"train\"]:\n with (training_configs_dir_path / step).with_suffix(\".json\").open(\n \"r\", encoding=\"utf8\"\n ) as fh:\n training_configs[step] = json_load(fh)\n\n parse(\n raw_dir=raw_dir,\n uasts_dir=uasts_dir,\n configs_dir=configs_dir,\n log_level=log_level,\n )\n\n tensorize(\n uasts_dir=uasts_dir,\n instance_file=instance_file,\n tensors_dir=tensors_dir,\n configs_dir=configs_dir,\n n_workers=training_configs[\"tensorize\"][\"options\"][\"n_workers\"],\n pickle_protocol=training_configs[\"tensorize\"][\"options\"][\"pickle_protocol\"],\n log_level=log_level,\n )\n\n dataset = CodRepDataset(input_dir=tensors_dir_path)\n logger.info(f\"Dataset of size {len(dataset)}\")\n\n with bz2_open(instance_file, \"rb\") as fh:\n instance = pickle_load(fh)\n\n model = build_model(\n instance=instance,\n model_decoder_type=training_configs[\"train\"][\"options\"][\"model_decoder_type\"],\n model_encoder_iterations=training_configs[\"train\"][\"options\"][\n \"model_encoder_iterations\"\n ],\n model_encoder_output_dim=training_configs[\"train\"][\"options\"][\n \"model_encoder_output_dim\"\n ],\n model_encoder_message_dim=training_configs[\"train\"][\"options\"][\n \"model_encoder_message_dim\"\n ],\n )\n # The model needs a forward to be 
completely initialized.\n model(instance.collate([dataset[0]]))\n logger.info(f\"Configured model {model}\")\n\n model.load_state_dict(\n torch_load(checkpoint_file, map_location=\"cpu\")[\"model_state_dict\"]\n )\n model.eval()\n logger.info(f\"Loaded model parameters from %s\", checkpoint_file)\n\n dataloader = DataLoader(\n dataset,\n shuffle=False,\n collate_fn=instance.collate,\n batch_size=10,\n num_workers=1,\n )\n\n metadata = None if metadata_dir is None else model.build_metadata()\n metadata_output = (\n None if metadata_dir is None else Path(metadata_dir) / \"metadata.json\"\n )\n\n with no_grad():\n for sample in dataloader:\n sample = model(sample)\n model.decode(sample=sample, prefix=prefix, metadata=metadata)\n\n if metadata_output is not None:\n with metadata_output.open(\"w\", encoding=\"utf8\") as fh:\n json_dump(metadata, fh)\n" ]
[ [ "torch.nn.LogSoftmax" ], [ "torch.no_grad", "torch.utils.data.DataLoader", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
linyiyue/ray
[ "90d2456ec70270a1f894ec3ef6f3004533859e03", "90d2456ec70270a1f894ec3ef6f3004533859e03", "90d2456ec70270a1f894ec3ef6f3004533859e03", "c18caa4db36d466718bdbcb2229aa0b2dc03da1f", "90d2456ec70270a1f894ec3ef6f3004533859e03", "c18caa4db36d466718bdbcb2229aa0b2dc03da1f" ]
[ "python/ray/tests/test_reference_counting.py", "python/ray/util/sgd/v2/tests/test_trainer.py", "python/ray/tune/tests/test_tune_restore.py", "rllib/tests/test_external_multi_agent_env.py", "python/ray/data/datasource/numpy_datasource.py", "rllib/examples/custom_observation_filters.py" ]
[ "# coding: utf-8\nimport copy\nimport logging\nimport os\nimport sys\nimport time\n\nimport numpy as np\n\nimport pytest\n\nimport ray\nimport ray.cluster_utils\nimport ray._private.gcs_utils as gcs_utils\nfrom ray._private.test_utils import (\n SignalActor, kill_actor_and_wait_for_failure, put_object,\n wait_for_condition, new_scheduler_enabled)\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef one_worker_100MiB(request):\n # It has lots of tests that don't require object spilling.\n config = {\n \"task_retry_delay_ms\": 0,\n \"automatic_object_spilling_enabled\": False\n }\n yield ray.init(\n num_cpus=1,\n object_store_memory=100 * 1024 * 1024,\n _system_config=config)\n ray.shutdown()\n\n\ndef _fill_object_store_and_get(obj, succeed=True, object_MiB=20,\n num_objects=5):\n for _ in range(num_objects):\n ray.put(np.zeros(object_MiB * 1024 * 1024, dtype=np.uint8))\n\n if type(obj) is bytes:\n obj = ray.ObjectRef(obj)\n\n if succeed:\n wait_for_condition(\n lambda: ray.worker.global_worker.core_worker.object_exists(obj))\n else:\n wait_for_condition(\n lambda: not ray.worker.global_worker.core_worker.object_exists(obj)\n )\n\n\ndef _check_refcounts(expected):\n actual = ray.worker.global_worker.core_worker.get_all_reference_counts()\n assert len(expected) == len(actual)\n for object_ref, (local, submitted) in expected.items():\n hex_id = object_ref.hex().encode(\"ascii\")\n assert hex_id in actual\n assert local == actual[hex_id][\"local\"]\n assert submitted == actual[hex_id][\"submitted\"]\n\n\ndef check_refcounts(expected, timeout=10):\n start = time.time()\n while True:\n try:\n _check_refcounts(expected)\n break\n except AssertionError as e:\n if time.time() - start > timeout:\n raise e\n else:\n time.sleep(0.1)\n\n\ndef test_local_refcounts(ray_start_regular):\n obj_ref1 = ray.put(None)\n check_refcounts({obj_ref1: (1, 0)})\n obj_ref1_copy = copy.copy(obj_ref1)\n check_refcounts({obj_ref1: (2, 0)})\n del obj_ref1\n check_refcounts({obj_ref1_copy: (1, 0)})\n del obj_ref1_copy\n check_refcounts({})\n\n\ndef test_dependency_refcounts(ray_start_regular):\n @ray.remote\n def one_dep(dep, signal=None, fail=False):\n if signal is not None:\n ray.get(signal.wait.remote())\n if fail:\n raise Exception(\"failed on purpose\")\n\n @ray.remote\n def one_dep_large(dep, signal=None):\n if signal is not None:\n ray.get(signal.wait.remote())\n # This will be spilled to plasma.\n return np.zeros(10 * 1024 * 1024, dtype=np.uint8)\n\n # Test that regular plasma dependency refcounts are decremented once the\n # task finishes.\n signal = SignalActor.remote()\n large_dep = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n result = one_dep.remote(large_dep, signal=signal)\n check_refcounts({large_dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed once the task finishes.\n check_refcounts({large_dep: (1, 0), result: (1, 0)})\n del large_dep, result\n check_refcounts({})\n\n # Test that inlined dependency refcounts are decremented once they are\n # inlined.\n signal = SignalActor.remote()\n dep = one_dep.remote(None, signal=signal)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed as soon as the dependency is inlined.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n # Test that spilled plasma dependency refcounts are decremented once\n # the task finishes.\n 
signal1, signal2 = SignalActor.remote(), SignalActor.remote()\n dep = one_dep_large.remote(None, signal=signal1)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep, signal=signal2)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal1.send.remote())\n ray.get(dep, timeout=10)\n # Reference count should remain because the dependency is in plasma.\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal2.send.remote())\n # Reference count should be removed because the task finished.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n # Test that regular plasma dependency refcounts are decremented if a task\n # fails.\n signal = SignalActor.remote()\n large_dep = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n result = one_dep.remote(large_dep, signal=signal, fail=True)\n check_refcounts({large_dep: (1, 1), result: (1, 0)})\n ray.get(signal.send.remote())\n # Reference count should be removed once the task finishes.\n check_refcounts({large_dep: (1, 0), result: (1, 0)})\n del large_dep, result\n check_refcounts({})\n\n # Test that spilled plasma dependency refcounts are decremented if a task\n # fails.\n signal1, signal2 = SignalActor.remote(), SignalActor.remote()\n dep = one_dep_large.remote(None, signal=signal1)\n check_refcounts({dep: (1, 0)})\n result = one_dep.remote(dep, signal=signal2, fail=True)\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal1.send.remote())\n ray.get(dep, timeout=10)\n # Reference count should remain because the dependency is in plasma.\n check_refcounts({dep: (1, 1), result: (1, 0)})\n ray.get(signal2.send.remote())\n # Reference count should be removed because the task finished.\n check_refcounts({dep: (1, 0), result: (1, 0)})\n del dep, result\n check_refcounts({})\n\n\[email protected](new_scheduler_enabled(), reason=\"dynamic res todo\")\ndef test_actor_creation_task(ray_start_regular):\n @ray.remote\n def large_object():\n # This will be spilled to plasma.\n return np.zeros(10 * 1024 * 1024, dtype=np.uint8)\n\n @ray.remote(resources={\"init\": 1})\n class Actor:\n def __init__(self, dependency):\n return\n\n def ping(self):\n return\n\n a = Actor.remote(large_object.remote())\n ping = a.ping.remote()\n ready, unready = ray.wait([ping], timeout=1)\n assert not ready\n\n ray.experimental.set_resource(\"init\", 1)\n ray.get(ping)\n\n\ndef test_basic_pinning(one_worker_100MiB):\n @ray.remote\n def f(array):\n return np.sum(array)\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n # Hold a long-lived reference to a ray.put object's ID. The object\n # should not be garbage collected while the actor is alive because\n # the object is pinned by the raylet.\n self.large_object = ray.put(\n np.zeros(25 * 1024 * 1024, dtype=np.uint8))\n\n def get_large_object(self):\n return ray.get(self.large_object)\n\n actor = Actor.remote()\n\n # Fill up the object store with short-lived objects. 
These should be\n # evicted before the long-lived object whose reference is held by\n # the actor.\n for batch in range(10):\n intermediate_result = f.remote(\n np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n ray.get(intermediate_result)\n\n # The ray.get below would fail with only LRU eviction, as the object\n # that was ray.put by the actor would have been evicted.\n ray.get(actor.get_large_object.remote())\n\n\ndef test_pending_task_dependency_pinning(one_worker_100MiB):\n @ray.remote\n def pending(input1, input2):\n return\n\n # The object that is ray.put here will go out of scope immediately, so if\n # pending task dependencies aren't considered, it will be evicted before\n # the ray.get below due to the subsequent ray.puts that fill up the object\n # store.\n np_array = np.zeros(20 * 1024 * 1024, dtype=np.uint8)\n signal = SignalActor.remote()\n obj_ref = pending.remote(np_array, signal.wait.remote())\n\n for _ in range(2):\n ray.put(np.zeros(20 * 1024 * 1024, dtype=np.uint8))\n\n ray.get(signal.send.remote())\n ray.get(obj_ref)\n\n\ndef test_feature_flag(shutdown_only):\n ray.init(object_store_memory=100 * 1024 * 1024)\n\n @ray.remote\n def f(array):\n return np.sum(array)\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n self.large_object = ray.put(\n np.zeros(25 * 1024 * 1024, dtype=np.uint8))\n\n def wait_for_actor_to_start(self):\n pass\n\n def get_large_object(self):\n return ray.get(self.large_object)\n\n actor = Actor.remote()\n ray.get(actor.wait_for_actor_to_start.remote())\n\n # The ray.get below fails with only LRU eviction, as the object\n # that was ray.put by the actor should have been evicted.\n ref = actor.get_large_object.remote()\n ray.get(ref)\n\n # Keep refs in scope so that they don't get GCed immediately.\n for _ in range(5):\n put_ref = ray.put(np.zeros(40 * 1024 * 1024, dtype=np.uint8))\n del put_ref\n\n wait_for_condition(\n lambda: not ray.worker.global_worker.core_worker.object_exists(ref))\n\n\ndef test_out_of_band_serialized_object_ref(one_worker_100MiB):\n assert len(\n ray.worker.global_worker.core_worker.get_all_reference_counts()) == 0\n obj_ref = ray.put(\"hello\")\n _check_refcounts({obj_ref: (1, 0)})\n obj_ref_str = ray.cloudpickle.dumps(obj_ref)\n _check_refcounts({obj_ref: (2, 0)})\n del obj_ref\n assert len(\n ray.worker.global_worker.core_worker.get_all_reference_counts()) == 1\n assert ray.get(ray.cloudpickle.loads(obj_ref_str)) == \"hello\"\n\n\ndef test_captured_object_ref(one_worker_100MiB):\n captured_id = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n\n @ray.remote\n def f(signal):\n ray.get(signal.wait.remote())\n ray.get(captured_id) # noqa: F821\n\n signal = SignalActor.remote()\n obj_ref = f.remote(signal)\n\n # Delete local references.\n del f\n del captured_id\n\n # Test that the captured object ref is pinned despite having no local\n # references.\n ray.get(signal.send.remote())\n _fill_object_store_and_get(obj_ref)\n\n captured_id = ray.put(np.zeros(10 * 1024 * 1024, dtype=np.uint8))\n\n @ray.remote\n class Actor:\n def get(self, signal):\n ray.get(signal.wait.remote())\n ray.get(captured_id) # noqa: F821\n\n signal = SignalActor.remote()\n actor = Actor.remote()\n obj_ref = actor.get.remote(signal)\n\n # Delete local references.\n del Actor\n del captured_id\n\n # Test that the captured object ref is pinned despite having no local\n # references.\n ray.get(signal.send.remote())\n _fill_object_store_and_get(obj_ref)\n\n\n# Remote function takes serialized reference and doesn't hold onto it after\n# finishing. 
Referenced object shouldn't be evicted while the task is pending\n# and should be evicted after it returns.\[email protected](\"use_ray_put,failure\", [(False, False), (False, True),\n (True, False), (True, True)])\ndef test_basic_serialized_reference(one_worker_100MiB, use_ray_put, failure):\n @ray.remote(max_retries=1)\n def pending(ref, dep):\n ray.get(ref[0])\n if failure:\n os._exit(0)\n\n array_oid = put_object(\n np.zeros(20 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n signal = SignalActor.remote()\n obj_ref = pending.remote([array_oid], signal.wait.remote())\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Fulfill the dependency, causing the task to finish.\n ray.get(signal.send.remote())\n try:\n ray.get(obj_ref)\n assert not failure\n except ray.exceptions.WorkerCrashedError:\n assert failure\n\n # Reference should be gone, check that array gets evicted.\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Call a recursive chain of tasks that pass a serialized reference to the end\n# of the chain. The reference should still exist while the final task in the\n# chain is running and should be removed once it finishes.\[email protected](\"use_ray_put,failure\", [(False, False), (False, True),\n (True, False), (True, True)])\ndef test_recursive_serialized_reference(one_worker_100MiB, use_ray_put,\n failure):\n @ray.remote(max_retries=1)\n def recursive(ref, signal, max_depth, depth=0):\n ray.get(ref[0])\n if depth == max_depth:\n ray.get(signal.wait.remote())\n if failure:\n os._exit(0)\n return\n else:\n return recursive.remote(ref, signal, max_depth, depth + 1)\n\n signal = SignalActor.remote()\n\n max_depth = 5\n array_oid = put_object(\n np.zeros(20 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n head_oid = recursive.remote([array_oid], signal, max_depth)\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n tail_oid = head_oid\n for _ in range(max_depth):\n tail_oid = ray.get(tail_oid)\n\n # Check that the remote reference pins the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Fulfill the dependency, causing the tail task to finish.\n ray.get(signal.send.remote())\n try:\n assert ray.get(tail_oid) is None\n assert not failure\n except ray.exceptions.OwnerDiedError:\n # There is only 1 core, so the same worker will execute all `recursive`\n # tasks. Therefore, if we kill the worker during the last task, its\n # owner (the worker that executed the second-to-last task) will also\n # have died.\n assert failure\n\n # Reference should be gone, check that array gets evicted.\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that a passed reference held by an actor after the method finishes\n# is kept until the reference is removed from the actor. 
Also tests giving\n# the actor a duplicate reference to the same object ref.\[email protected](\"use_ray_put,failure\", [(False, False), (False, True),\n (True, False), (True, True)])\ndef test_actor_holding_serialized_reference(one_worker_100MiB, use_ray_put,\n failure):\n @ray.remote\n class GreedyActor(object):\n def __init__(self):\n pass\n\n def set_ref1(self, ref):\n self.ref1 = ref\n\n def add_ref2(self, new_ref):\n self.ref2 = new_ref\n\n def delete_ref1(self):\n self.ref1 = None\n\n def delete_ref2(self):\n self.ref2 = None\n\n # Test that the reference held by the actor isn't evicted.\n array_oid = put_object(\n np.zeros(20 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n actor = GreedyActor.remote()\n actor.set_ref1.remote([array_oid])\n\n # Test that giving the same actor a duplicate reference works.\n ray.get(actor.add_ref2.remote([array_oid]))\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Test that the remote references still pin the object.\n _fill_object_store_and_get(array_oid_bytes)\n\n # Test that removing only the first reference doesn't unpin the object.\n ray.get(actor.delete_ref1.remote())\n _fill_object_store_and_get(array_oid_bytes)\n\n if failure:\n # Test that the actor exiting stops the reference from being pinned.\n # Kill the actor and wait for the actor to exit.\n kill_actor_and_wait_for_failure(actor)\n with pytest.raises(ray.exceptions.RayActorError):\n ray.get(actor.delete_ref1.remote())\n else:\n # Test that deleting the second reference stops it from being pinned.\n ray.get(actor.delete_ref2.remote())\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that a passed reference held by an actor after a task finishes\n# is kept until the reference is removed from the worker. 
Also tests giving\n# the worker a duplicate reference to the same object ref.\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\[email protected](\"use_ray_put,failure\", [(False, False), (False, True),\n (True, False), (True, True)])\ndef test_worker_holding_serialized_reference(one_worker_100MiB, use_ray_put,\n failure):\n @ray.remote(max_retries=1)\n def child(dep1, dep2):\n if failure:\n os._exit(0)\n return\n\n @ray.remote\n class Submitter:\n def __init__(self):\n pass\n\n def launch_pending_task(self, ref, signal):\n return child.remote(ref[0], signal.wait.remote())\n\n signal = SignalActor.remote()\n\n # Test that the reference held by the actor isn't evicted.\n array_oid = put_object(\n np.zeros(20 * 1024 * 1024, dtype=np.uint8), use_ray_put)\n s = Submitter.remote()\n child_return_id = ray.get(\n s.launch_pending_task.remote([array_oid], signal))\n\n # Remove the local reference.\n array_oid_bytes = array_oid.binary()\n del array_oid\n\n # Test that the reference prevents the object from being evicted.\n _fill_object_store_and_get(array_oid_bytes)\n\n ray.get(signal.send.remote())\n try:\n ray.get(child_return_id)\n assert not failure\n except ray.exceptions.WorkerCrashedError:\n assert failure\n del child_return_id\n\n _fill_object_store_and_get(array_oid_bytes, succeed=False)\n\n\n# Test that an object containing object refs within it pins the inner IDs.\ndef test_basic_nested_ids(one_worker_100MiB):\n inner_oid = ray.put(np.zeros(20 * 1024 * 1024, dtype=np.uint8))\n outer_oid = ray.put([inner_oid])\n\n # Remove the local reference to the inner object.\n inner_oid_bytes = inner_oid.binary()\n del inner_oid\n\n # Check that the outer reference pins the inner object.\n _fill_object_store_and_get(inner_oid_bytes)\n\n # Remove the outer reference and check that the inner object gets evicted.\n del outer_oid\n _fill_object_store_and_get(inner_oid_bytes, succeed=False)\n\n\ndef _all_actors_dead():\n return all(actor[\"State\"] == gcs_utils.ActorTableData.DEAD\n for actor in list(ray.state.actors().values()))\n\n\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\ndef test_kill_actor_immediately_after_creation(ray_start_regular):\n @ray.remote\n class A:\n pass\n\n a = A.remote()\n b = A.remote()\n\n ray.kill(a)\n ray.kill(b)\n wait_for_condition(_all_actors_dead, timeout=10)\n\n\ndef test_remove_actor_immediately_after_creation(ray_start_regular):\n @ray.remote\n class A:\n pass\n\n a = A.remote()\n b = A.remote()\n\n del a\n del b\n wait_for_condition(_all_actors_dead, timeout=10)\n\n\nif __name__ == \"__main__\":\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n", "import os\nimport time\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport horovod.torch as hvd_torch\nimport pytest\nimport ray\nimport ray.util.sgd.v2 as sgd\nimport tensorflow as tf\nimport torch\nfrom ray._private.test_utils import wait_for_condition\nfrom ray.util.sgd.v2 import Trainer, TorchConfig, TensorflowConfig, \\\n HorovodConfig\nfrom ray.util.sgd.v2.backends.backend import BackendConfig, Backend, \\\n BackendExecutor\nfrom ray.util.sgd.v2.callbacks.callback import SGDCallback\nfrom ray.util.sgd.v2.examples.tensorflow_mnist_example import train_func as \\\n tensorflow_mnist_train_func\nfrom ray.util.sgd.v2.examples.train_fashion_mnist_example import train_func \\\n as \\\n fashion_mnist_train_func\nfrom ray.util.sgd.v2.examples.train_linear_example import train_func as \\\n linear_train_func\n\nfrom 
ray.util.sgd.v2.examples.horovod.horovod_example import train_func as \\\n horovod_torch_train_func, HorovodTrainClass\nfrom ray.util.sgd.v2.worker_group import WorkerGroup\n\n\[email protected]\ndef ray_start_2_cpus():\n address_info = ray.init(num_cpus=2)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef ray_start_2_cpus_2_gpus():\n address_info = ray.init(num_cpus=2, num_gpus=2)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef ray_start_8_cpus():\n address_info = ray.init(num_cpus=8)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\[email protected]\ndef ray_start_4_cpus_4_gpus_4_extra():\n address_info = ray.init(num_cpus=4, num_gpus=4, resources={\"extra\": 4})\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n\n\nclass TestConfig(BackendConfig):\n @property\n def backend_cls(self):\n return TestBackend\n\n\nclass TestBackend(Backend):\n def on_start(self, worker_group: WorkerGroup, backend_config: TestConfig):\n pass\n\n def on_shutdown(self, worker_group: WorkerGroup,\n backend_config: TestConfig):\n pass\n\n\nclass TestCallback(SGDCallback):\n def __init__(self):\n self.result_list = []\n\n def handle_result(self, results):\n self.result_list.append(results)\n\n\ndef gen_execute_single_async_special(special_f):\n def execute_single_async_special(self, i, f, *args, **kwargs):\n assert len(self.workers) == 2\n if i == 0 and hasattr(self, \"should_fail\") and self.should_fail:\n kwargs[\"train_func\"] = special_f\n return self.workers[i].actor._BaseWorkerMixin__execute.remote(\n f, *args, **kwargs)\n\n return execute_single_async_special\n\n\ndef gen_new_backend_executor(special_f):\n \"\"\"Returns a BackendExecutor that runs special_f on worker 0 once.\"\"\"\n\n class TestBackendExecutor(BackendExecutor):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._has_failed = False\n\n def start_training(self, *args, **kwargs):\n special_execute = gen_execute_single_async_special(special_f)\n if not self._has_failed:\n self.worker_group.should_fail = True\n self._has_failed = True\n else:\n self.worker_group.should_fail = False\n with patch.object(WorkerGroup, \"execute_single_async\",\n special_execute):\n super().start_training(*args, **kwargs)\n\n return TestBackendExecutor\n\n\nclass KillCallback(SGDCallback):\n def __init__(self, fail_on, worker_group):\n self.counter = 0\n self.fail_on = fail_on\n self.worker_group = worker_group\n\n def handle_result(self, results):\n print(results)\n assert all(r[\"loss\"] == 1 for r in results)\n if self.counter == self.fail_on:\n ray.kill(self.worker_group.workers[0].actor)\n time.sleep(3)\n self.counter += 1\n\n\[email protected](\"num_workers\", [1, 2])\ndef test_start_shutdown(ray_start_2_cpus, num_workers):\n config = TestConfig()\n assert ray.available_resources()[\"CPU\"] == 2\n trainer = Trainer(config, num_workers=num_workers)\n trainer.start()\n time.sleep(1)\n\n remaining = 2 - num_workers\n if remaining == 0:\n assert \"CPU\" not in ray.available_resources()\n else:\n assert ray.available_resources()[\"CPU\"] == remaining\n\n trainer.shutdown()\n time.sleep(1)\n assert ray.available_resources()[\"CPU\"] == 2\n\n\ndef test_run(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n return 1\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n results = 
trainer.run(train_func)\n trainer.shutdown()\n\n assert len(results) == 2\n assert all(result == 1 for result in results)\n\n\ndef test_run_config(ray_start_2_cpus):\n backend_config = TestConfig()\n\n def train_func(config):\n return config[\"fruit\"]\n\n config = {\"fruit\": \"banana\"}\n\n trainer = Trainer(backend_config, num_workers=2)\n trainer.start()\n results = trainer.run(train_func, config)\n trainer.shutdown()\n\n assert len(results) == 2\n assert all(result == \"banana\" for result in results)\n\n\ndef test_report(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n for i in range(3):\n sgd.report(index=i)\n return 1\n\n callback = TestCallback()\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n results = trainer.run(train_func, callbacks=[callback])\n assert results == [1, 1]\n\n result_list = callback.result_list\n assert len(result_list) == 3\n for index in range(len(result_list)):\n intermediate_results = result_list[index]\n assert len(intermediate_results) == 2\n for worker_result in intermediate_results:\n assert worker_result[\"index\"] == index\n\n\ndef test_fast_slow(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n for i in range(2):\n sgd.save_checkpoint(epoch=i)\n sgd.report(index=i)\n\n def train_slow():\n for i in range(2):\n sgd.save_checkpoint(epoch=i)\n time.sleep(5)\n sgd.report(index=i)\n time.sleep(5)\n\n new_backend_executor_cls = gen_new_backend_executor(train_slow)\n callback = TestCallback()\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n trainer.run(train, callbacks=[callback])\n\n assert trainer.latest_checkpoint[\"epoch\"] == 1\n\n result_list = callback.result_list\n assert len(result_list) == 2\n for index in range(len(result_list)):\n intermediate_results = result_list[index]\n assert len(intermediate_results) == 2\n for worker_result in intermediate_results:\n assert worker_result[\"index\"] == index\n\n\ndef test_mismatch_report(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n for _ in range(2):\n sgd.report(loss=1)\n\n def train_mismatch():\n sgd.report(loss=1)\n\n new_backend_executor_cls = gen_new_backend_executor(train_mismatch)\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n with pytest.raises(RuntimeError):\n trainer.run(train)\n\n\ndef test_run_iterator(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n for i in range(3):\n sgd.report(index=i)\n return 1\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n iterator = trainer.run_iterator(train_func)\n\n count = 0\n for results in iterator:\n assert (value[\"index\"] == count for value in results)\n count += 1\n\n assert count == 3\n assert iterator.is_finished()\n assert iterator.get_final_results() == [1, 1]\n\n with pytest.raises(StopIteration):\n next(iterator)\n\n\ndef test_run_iterator_returns(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n for i in range(3):\n sgd.report(index=i)\n return 1\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n iterator = trainer.run_iterator(train_func)\n\n assert iterator.get_final_results() is None\n assert iterator.get_final_results(force=True) == [1, 1]\n\n with pytest.raises(StopIteration):\n next(iterator)\n\n\ndef test_run_iterator_error(ray_start_2_cpus):\n config = TestConfig()\n\n def 
fail_train():\n raise NotImplementedError\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n iterator = trainer.run_iterator(fail_train)\n\n with pytest.raises(NotImplementedError):\n next(iterator)\n\n assert iterator.get_final_results() is None\n assert iterator.is_finished()\n\n\ndef test_checkpoint(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n assert sgd.load_checkpoint() is None\n for i in range(3):\n sgd.save_checkpoint(epoch=i)\n return 1\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n trainer.run(train_func)\n checkpoint = trainer.latest_checkpoint\n\n assert checkpoint is not None\n assert checkpoint[\"epoch\"] == 2\n\n def train_func_checkpoint():\n checkpoint = sgd.load_checkpoint()\n assert checkpoint is not None\n assert checkpoint[\"epoch\"] == 2\n\n for i in range(checkpoint[\"epoch\"], 5):\n sgd.save_checkpoint(epoch=i)\n return 1\n\n trainer.run(train_func_checkpoint, checkpoint=checkpoint)\n checkpoint = trainer.latest_checkpoint\n\n assert checkpoint is not None\n assert checkpoint[\"epoch\"] == 4\n\n\ndef test_mismatch_checkpoint(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n for i in range(2):\n sgd.save_checkpoint(epoch=i)\n\n def train_mismatch():\n sgd.save_checkpoint(epoch=0)\n\n new_backend_executor_cls = gen_new_backend_executor(train_mismatch)\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n with pytest.raises(RuntimeError):\n trainer.run(train)\n\n\ndef test_mismatch_checkpoint_report(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n for i in range(2):\n sgd.save_checkpoint(epoch=i)\n sgd.report(index=i)\n\n def train_mismatch():\n sgd.save_checkpoint(epoch=0)\n sgd.report(index=0)\n # skip checkpoint\n sgd.report(index=1)\n\n new_backend_executor_cls = gen_new_backend_executor(train_mismatch)\n callback = TestCallback()\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n with pytest.raises(RuntimeError):\n trainer.run(train, callbacks=[callback])\n # validate checkpoint\n assert trainer.latest_checkpoint[\"epoch\"] == 0\n # validate callback\n result_list = callback.result_list\n assert len(result_list) == 1 # 1 epoch succeeded\n intermediate_results = result_list[0]\n assert len(intermediate_results) == 2 # both workers reported\n for worker_result in intermediate_results:\n assert worker_result[\"index\"] == 0\n\n\ndef test_load_checkpoint(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func_checkpoint():\n checkpoint = sgd.load_checkpoint()\n assert checkpoint is not None\n assert checkpoint[\"epoch\"] == 3\n\n result = []\n for i in range(checkpoint[\"epoch\"], 5):\n result.append(i)\n return result\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n result = trainer.run(train_func_checkpoint, checkpoint={\"epoch\": 3})\n\n assert result is not None\n assert len(result) == 2\n assert result[0] == [3, 4]\n assert result[1] == [3, 4]\n\n\[email protected](\"logdir\", [\n None, \"/tmp/test/trainer/test_persisted_checkpoint\",\n \"~/tmp/test/trainer/test_persisted_checkpoint\"\n])\ndef test_persisted_checkpoint(ray_start_2_cpus, logdir):\n config = TestConfig()\n\n def train():\n for i in range(2):\n sgd.save_checkpoint(epoch=i)\n\n trainer = Trainer(config, num_workers=2, logdir=logdir)\n trainer.start()\n 
trainer.run(train)\n\n assert trainer.latest_checkpoint_path is not None\n if logdir is not None:\n assert trainer.logdir == Path(logdir).expanduser().resolve()\n assert trainer.latest_checkpoint_dir.is_dir()\n assert trainer.latest_checkpoint_path.is_file()\n assert trainer.latest_checkpoint_path.name == f\"checkpoint_{2:06d}\"\n assert trainer.latest_checkpoint_path.parent.name == \"checkpoints\"\n latest_checkpoint = trainer.latest_checkpoint\n\n def validate():\n checkpoint = sgd.load_checkpoint()\n assert checkpoint is not None\n assert checkpoint == latest_checkpoint\n\n trainer.run(validate, checkpoint=trainer.latest_checkpoint_path)\n\n\ndef test_world_rank(ray_start_2_cpus):\n config = TestConfig()\n\n def train_func():\n return sgd.world_rank()\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n results = trainer.run(train_func)\n\n assert set(results) == {0, 1}\n\n\ndef test_tensorflow_mnist(ray_start_2_cpus):\n num_workers = 2\n epochs = 3\n\n trainer = Trainer(\"tensorflow\", num_workers=num_workers)\n config = {\"lr\": 1e-3, \"batch_size\": 64, \"epochs\": epochs}\n trainer.start()\n results = trainer.run(tensorflow_mnist_train_func, config)\n trainer.shutdown()\n\n assert len(results) == num_workers\n result = results[0]\n\n loss = result[\"loss\"]\n assert len(loss) == epochs\n assert loss[-1] < loss[0]\n\n accuracy = result[\"accuracy\"]\n assert len(accuracy) == epochs\n assert accuracy[-1] > accuracy[0]\n\n\[email protected](\n len(tf.config.list_physical_devices(\"GPU\")) < 2,\n reason=\"Only run if multiple GPUs are available.\")\ndef test_tensorflow_mnist_gpu(ray_start_2_cpus_2_gpus):\n num_workers = 2\n epochs = 3\n\n trainer = Trainer(\"tensorflow\", num_workers=num_workers, use_gpu=True)\n config = {\"lr\": 1e-3, \"batch_size\": 64, \"epochs\": epochs}\n trainer.start()\n results = trainer.run(tensorflow_mnist_train_func, config)\n trainer.shutdown()\n\n assert len(results) == num_workers\n result = results[0]\n\n loss = result[\"loss\"]\n assert len(loss) == epochs\n assert loss[-1] < loss[0]\n\n accuracy = result[\"accuracy\"]\n assert len(accuracy) == epochs\n assert accuracy[-1] > accuracy[0]\n\n\ndef test_torch_linear(ray_start_2_cpus):\n num_workers = 2\n epochs = 3\n\n trainer = Trainer(\"torch\", num_workers=num_workers)\n config = {\"lr\": 1e-2, \"hidden_size\": 1, \"batch_size\": 4, \"epochs\": epochs}\n trainer.start()\n results = trainer.run(linear_train_func, config)\n trainer.shutdown()\n\n assert len(results) == num_workers\n\n for result in results:\n assert len(result) == epochs\n assert result[-1][\"loss\"] < result[0][\"loss\"]\n\n\ndef test_torch_fashion_mnist(ray_start_2_cpus):\n num_workers = 2\n epochs = 3\n\n trainer = Trainer(\"torch\", num_workers=num_workers)\n config = {\"lr\": 1e-3, \"batch_size\": 64, \"epochs\": epochs}\n trainer.start()\n results = trainer.run(fashion_mnist_train_func, config)\n trainer.shutdown()\n\n assert len(results) == num_workers\n\n for result in results:\n assert len(result) == epochs\n assert result[-1] < result[0]\n\n\[email protected](\n torch.cuda.device_count() < 2,\n reason=\"Only run if multiple GPUs are available.\")\ndef test_torch_fashion_mnist_gpu(ray_start_2_cpus_2_gpus):\n num_workers = 2\n epochs = 3\n\n trainer = Trainer(\"torch\", num_workers=num_workers, use_gpu=True)\n config = {\"lr\": 1e-3, \"batch_size\": 64, \"epochs\": epochs}\n trainer.start()\n results = trainer.run(fashion_mnist_train_func, config)\n trainer.shutdown()\n\n assert len(results) == num_workers\n\n for 
result in results:\n assert len(result) == epochs\n assert result[-1] < result[0]\n\n\ndef test_horovod_simple(ray_start_2_cpus):\n def simple_fn():\n hvd_torch.init()\n return hvd_torch.rank()\n\n num_workers = 2\n trainer = Trainer(\"horovod\", num_workers)\n trainer.start()\n result = trainer.run(simple_fn)\n trainer.shutdown()\n\n assert result == list(range(num_workers))\n\n\ndef test_horovod_torch_mnist(ray_start_2_cpus):\n num_workers = 2\n num_epochs = 2\n trainer = Trainer(\"horovod\", num_workers)\n trainer.start()\n results = trainer.run(\n horovod_torch_train_func,\n config={\n \"num_epochs\": num_epochs,\n \"lr\": 1e-3\n })\n trainer.shutdown()\n\n assert len(results) == num_workers\n for worker_result in results:\n assert len(worker_result) == num_epochs\n assert worker_result[num_epochs - 1] < worker_result[0]\n\n\[email protected](\n torch.cuda.device_count() < 2,\n reason=\"Only run if multiple GPUs are available.\")\ndef test_horovod_torch_mnist_gpu(ray_start_2_cpus_2_gpus):\n num_workers = 2\n num_epochs = 2\n trainer = Trainer(\"horovod\", num_workers, use_gpu=True)\n trainer.start()\n results = trainer.run(\n horovod_torch_train_func,\n config={\n \"num_epochs\": num_epochs,\n \"lr\": 1e-3\n })\n trainer.shutdown()\n\n assert len(results) == num_workers\n for worker_result in results:\n assert len(worker_result) == num_epochs\n assert worker_result[num_epochs - 1] < worker_result[0]\n\n\ndef test_horovod_torch_mnist_stateful(ray_start_2_cpus):\n num_workers = 2\n num_epochs = 2\n trainer = Trainer(\"horovod\", num_workers)\n workers = trainer.to_worker_group(\n HorovodTrainClass, config={\n \"num_epochs\": num_epochs,\n \"lr\": 1e-3\n })\n results = []\n for epoch in range(num_epochs):\n results.append(ray.get([w.train.remote(epoch=epoch) for w in workers]))\n trainer.shutdown()\n\n assert len(results) == num_epochs\n for i in range(num_workers):\n assert results[num_epochs - 1][i] < results[0][i]\n\n\ndef test_init_failure(ray_start_2_cpus):\n with pytest.raises(TypeError):\n Trainer(5)\n\n with pytest.raises(ValueError):\n Trainer(\"invalid\")\n\n\ndef test_start_failure(ray_start_2_cpus):\n with pytest.raises(ValueError):\n trainer = Trainer(\"torch\", num_workers=0)\n trainer.start()\n\n\ndef test_run_failure(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train_invalid_signature(a, b):\n pass\n\n trainer = Trainer(test_config, num_workers=2)\n\n # Raise RuntimeError when trainer has not been started yet.\n with pytest.raises(RuntimeError):\n trainer.run(lambda: 1)\n\n trainer.start()\n\n with pytest.raises(ValueError):\n trainer.run(train_invalid_signature)\n\n trainer.shutdown()\n\n\ndef test_user_error(ray_start_2_cpus):\n \"\"\"Tests if user training function raises an error\"\"\"\n\n config = TestConfig()\n\n def fail_train_1():\n raise NotImplementedError\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n\n with pytest.raises(NotImplementedError):\n trainer.run(fail_train_1)\n\n def fail_train_2():\n for _ in range(2):\n sgd.report(loss=1)\n raise NotImplementedError\n\n with pytest.raises(NotImplementedError):\n trainer.run(fail_train_2)\n\n\ndef test_worker_failure_1(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n return 1\n\n def train_actor_failure():\n import sys\n sys.exit(0)\n\n new_backend_executor_cls = gen_new_backend_executor(train_actor_failure)\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n 
results = trainer.run(train)\n assert results == [1, 1]\n\n\ndef test_worker_failure_2(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n for _ in range(2):\n sgd.report(loss=1)\n return 1\n\n def train_actor_failure():\n for _ in range(2):\n sgd.report(loss=1)\n import sys\n sys.exit(0)\n\n new_backend_executor_cls = gen_new_backend_executor(train_actor_failure)\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n results = trainer.run(train)\n assert results == [1, 1]\n\n\ndef test_worker_failure_local_rank(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n return sgd.local_rank()\n\n def train_actor_failure():\n import sys\n sys.exit(0)\n return sgd.local_rank()\n\n new_backend_executor_cls = gen_new_backend_executor(train_actor_failure)\n\n with patch.object(ray.util.sgd.v2.trainer, \"BackendExecutor\",\n new_backend_executor_cls):\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n results = trainer.run(train)\n assert set(results) == {0, 1}\n\n\ndef test_worker_start_failure(ray_start_2_cpus):\n test_config = TestConfig()\n\n trainer = Trainer(test_config, num_workers=2)\n\n restart = trainer._executor._restart\n\n def init_hook():\n pass\n\n def init_hook_fail():\n ray.actor.exit_actor()\n\n def restart_patched(self):\n self._initialization_hook = init_hook\n restart()\n\n with patch.object(BackendExecutor, \"_restart\", restart_patched):\n trainer.start(initialization_hook=init_hook_fail)\n assert len(trainer._executor.worker_group) == 2\n\n\ndef test_max_failures(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n import sys\n sys.exit(0)\n\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n iterator = trainer.run_iterator(train)\n with pytest.raises(RuntimeError):\n iterator.get_final_results(force=True)\n assert iterator._executor._num_failures == 3\n\n\ndef test_start_max_failures(ray_start_2_cpus):\n test_config = TestConfig()\n\n trainer = Trainer(test_config, num_workers=2)\n\n def init_hook_fail():\n import sys\n sys.exit(0)\n\n with pytest.raises(RuntimeError):\n trainer.start(initialization_hook=init_hook_fail)\n\n\[email protected](\"backend\", [\"test\", \"torch\", \"tf\", \"horovod\"])\ndef test_worker_kill(ray_start_2_cpus, backend):\n if backend == \"test\":\n test_config = TestConfig()\n elif backend == \"torch\":\n test_config = TorchConfig()\n elif backend == \"tf\":\n test_config = TensorflowConfig()\n elif backend == \"horovod\":\n test_config = HorovodConfig()\n\n trainer = Trainer(test_config, num_workers=2)\n\n def train_func():\n for i in range(2):\n sgd.report(loss=1, iter=i)\n\n trainer.start()\n kill_callback = KillCallback(\n fail_on=0, worker_group=trainer._executor.worker_group)\n trainer.run(train_func, callbacks=[kill_callback])\n # Run 1: iter=0, counter=1, Successful\n # Run 2: iter=1, counter=1, Unsuccessful, starts training from beginning\n # Run 3: iter=0, counter=2, Successful\n # Run 4: iter=1, counter=3, Successful\n assert kill_callback.counter == 3\n\n trainer.shutdown()\n trainer.start()\n\n kill_callback = KillCallback(\n fail_on=1, worker_group=trainer._executor.worker_group)\n trainer.run(train_func, callbacks=[kill_callback])\n # Run 1: iter=0, counter=1, Successful\n # Run 2: iter=1, counter=2, Successful\n # Run 3: None, counter=2, Unsuccessful, starts training from beginning.\n # Run 4: iter=0, counter=3, Successful\n # Run 5: iter=1, counter=4, 
Successful\n assert kill_callback.counter == 4\n\n def train():\n return 1\n\n # Make sure Trainer is usable even after failure handling.\n trainer.run(train)\n\n\ndef test_worker_kill_checkpoint(ray_start_2_cpus):\n test_config = TestConfig()\n\n def train():\n checkpoint = sgd.load_checkpoint()\n if checkpoint:\n epoch = checkpoint[\"epoch\"]\n else:\n epoch = 0\n print(\"Epoch: \", epoch)\n for i in range(epoch, 2):\n sgd.report(loss=1, iter=i)\n sgd.save_checkpoint(epoch=i + 1)\n\n trainer = Trainer(test_config, num_workers=2)\n trainer.start()\n kill_callback = KillCallback(\n fail_on=0, worker_group=trainer._executor.worker_group)\n\n trainer.run(train, callbacks=[kill_callback])\n\n # Run 1: epoch=0, counter=1, Successful\n # *Checkpoint is saved.*\n # *Worker is killed*\n # *Getting checkpoint fails. Workers are restarted from beginning*\n # Run 2: epoch=0, counter=2, Successful\n # Run 3: epoch=1, counter=3, Successful\n assert kill_callback.counter == 3\n assert trainer.latest_checkpoint[\"epoch\"] == 2\n\n trainer.shutdown()\n trainer.start()\n\n kill_callback = KillCallback(\n fail_on=1, worker_group=trainer._executor.worker_group)\n trainer.run(train, callbacks=[kill_callback])\n # Run 1: epoch=0, counter=1, Successful\n # *Checkpoint saved*\n # *Latest checkpoint updated, epoch=1\n # Run 2: epoch=1, counter=2, Successful\n # *Checkpoint saved*\n # *Worker is killed*\n # *Getting checkpoint fails. Workers are restarted from last checkpoint.*\n # Run 3: epoch=1, counter=3, Successful.\n assert kill_callback.counter == 3\n assert trainer.latest_checkpoint[\"epoch\"] == 2\n\n def train():\n return 1\n\n # Make sure Trainer is usable even after failure handling.\n trainer.run(train)\n\n\ndef test_multiple_run(ray_start_2_cpus):\n config = TestConfig()\n\n def train_1():\n return 1\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n\n output_1 = trainer.run(train_1)\n assert output_1 == [1, 1]\n\n def train_2():\n return 2\n\n output_2 = trainer.run(train_2)\n assert output_2 == [2, 2]\n\n\ndef test_run_after_user_error(ray_start_2_cpus):\n config = TestConfig()\n\n def fail_train():\n raise NotImplementedError\n\n trainer = Trainer(config, num_workers=2)\n trainer.start()\n with pytest.raises(NotImplementedError):\n trainer.run(fail_train)\n\n def train():\n return 1\n\n output = trainer.run(train)\n assert output == [1, 1]\n\n\[email protected](\"resource\", [\"CPU\", \"GPU\", \"extra\"])\[email protected](\"num_requested\", [0.5, 1, 2])\ndef test_resources(ray_start_4_cpus_4_gpus_4_extra, resource, num_requested):\n num_workers = 2\n config = TestConfig()\n original = ray.available_resources().get(resource)\n resources_per_worker = {resource: num_requested}\n use_gpu = resource == \"GPU\"\n trainer = Trainer(\n config,\n num_workers=num_workers,\n use_gpu=use_gpu,\n resources_per_worker=resources_per_worker)\n\n trainer.start()\n expected = original - num_workers * num_requested\n wait_for_condition(\n lambda: ray.available_resources().get(resource, 0) == expected)\n\n trainer.shutdown()\n wait_for_condition(\n lambda: ray.available_resources().get(resource, 0) == original)\n\n\ndef test_gpu_requests(ray_start_4_cpus_4_gpus_4_extra):\n\n # GPUs should not be requested if `use_gpu` is False.\n with pytest.raises(ValueError):\n Trainer(\n TestConfig(),\n num_workers=2,\n use_gpu=False,\n resources_per_worker={\"GPU\": 1})\n\n # GPUs should not be set to 0 if `use_gpu` is True.\n with pytest.raises(ValueError):\n Trainer(\n TestConfig(),\n num_workers=2,\n 
use_gpu=True,\n resources_per_worker={\"GPU\": 0})\n\n def get_resources():\n return os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n # 0 GPUs will be requested and should not raise an error.\n trainer = Trainer(TestConfig(), num_workers=2, use_gpu=False)\n trainer.start()\n result = trainer.run(get_resources)\n assert result == [\"\", \"\"]\n trainer.shutdown()\n\n # 1 GPU will be requested and should not raise an error.\n trainer = Trainer(TestConfig(), num_workers=2, use_gpu=True)\n trainer.start()\n result = trainer.run(get_resources)\n assert result == [\"0,1\", \"0,1\"]\n trainer.shutdown()\n\n # Partial GPUs should not raise an error.\n trainer = Trainer(\n TestConfig(),\n num_workers=2,\n use_gpu=True,\n resources_per_worker={\"GPU\": 0.1})\n trainer.start()\n result = trainer.run(get_resources)\n assert result == [\"0\", \"0\"]\n trainer.shutdown()\n\n # Multiple GPUs should not raise an error.\n trainer = Trainer(\n TestConfig(),\n num_workers=2,\n use_gpu=True,\n resources_per_worker={\"GPU\": 2})\n trainer.start()\n result = trainer.run(get_resources)\n assert result == [\"0,1,2,3\", \"0,1,2,3\"]\n trainer.shutdown()\n\n\ndef test_to_worker_group(ray_start_2_cpus):\n config = TestConfig()\n trainer = Trainer(config, num_workers=2)\n\n class Incrementer:\n def __init__(self, starting=0):\n self.count = starting\n\n def increment(self):\n self.count += 1\n\n def get_count(self):\n return self.count\n\n workers = trainer.to_worker_group(Incrementer, starting=2)\n assert ray.get([w.get_count.remote() for w in workers]) == [2, 2]\n\n ray.get([w.increment.remote() for w in workers])\n assert ray.get([w.get_count.remote() for w in workers]) == [3, 3]\n\n ray.get(workers[0].increment.remote())\n assert ray.get([w.get_count.remote() for w in workers]) == [4, 3]\n\n ray.get(workers[1].increment.remote())\n assert ray.get([w.get_count.remote() for w in workers]) == [4, 4]\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n", "# coding: utf-8\nimport signal\nfrom collections import Counter\nimport os\nimport shutil\nimport tempfile\nimport time\nfrom typing import List\nimport unittest\n\nimport skopt\nimport numpy as np\nfrom hyperopt import hp\nfrom nevergrad.optimization import optimizerlib\nfrom zoopt import ValueType\nfrom hebo.design_space.design_space import DesignSpace as HEBODesignSpace\n\nimport ray\nfrom ray import tune\nfrom ray._private.test_utils import recursive_fnmatch\nfrom ray.rllib import _register_all\nfrom ray.tune.callback import Callback\nfrom ray.tune.suggest.basic_variant import BasicVariantGenerator\nfrom ray.tune.suggest import ConcurrencyLimiter, Searcher\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\nfrom ray.tune.suggest.dragonfly import DragonflySearch\nfrom ray.tune.suggest.bayesopt import BayesOptSearch\nfrom ray.tune.suggest.flaml import CFO, BlendSearch\nfrom ray.tune.suggest.skopt import SkOptSearch\nfrom ray.tune.suggest.nevergrad import NevergradSearch\nfrom ray.tune.suggest.optuna import OptunaSearch\nfrom ray.tune.suggest.sigopt import SigOptSearch\nfrom ray.tune.suggest.zoopt import ZOOptSearch\nfrom ray.tune.suggest.hebo import HEBOSearch\nfrom ray.tune.suggest.ax import AxSearch\nfrom ray.tune.suggest.bohb import TuneBOHB\nfrom ray.tune.schedulers.hb_bohb import HyperBandForBOHB\nfrom ray.tune.trial import Trial\nfrom ray.tune.utils import validate_save_restore\nfrom ray.tune.utils._mock_trainable import MyTrainableClass\n\n\nclass TuneRestoreTest(unittest.TestCase):\n def 
setUp(self):\n ray.init(num_cpus=1, num_gpus=0, local_mode=True)\n tmpdir = tempfile.mkdtemp()\n test_name = \"TuneRestoreTest\"\n tune.run(\n \"PG\",\n name=test_name,\n stop={\"training_iteration\": 1},\n checkpoint_freq=1,\n local_dir=tmpdir,\n config={\n \"env\": \"CartPole-v0\",\n \"framework\": \"tf\",\n },\n )\n\n logdir = os.path.expanduser(os.path.join(tmpdir, test_name))\n self.logdir = logdir\n self.checkpoint_path = recursive_fnmatch(logdir, \"checkpoint-1\")[0]\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n ray.shutdown()\n _register_all()\n\n def testTuneRestore(self):\n self.assertTrue(os.path.isfile(self.checkpoint_path))\n tune.run(\n \"PG\",\n name=\"TuneRestoreTest\",\n stop={\"training_iteration\": 2}, # train one more iteration.\n checkpoint_freq=1,\n restore=self.checkpoint_path, # Restore the checkpoint\n config={\n \"env\": \"CartPole-v0\",\n \"framework\": \"tf\",\n },\n )\n\n def testPostRestoreCheckpointExistence(self):\n \"\"\"Tests that checkpoint restored from is not deleted post-restore.\"\"\"\n self.assertTrue(os.path.isfile(self.checkpoint_path))\n tune.run(\n \"PG\",\n name=\"TuneRestoreTest\",\n stop={\"training_iteration\": 2},\n checkpoint_freq=1,\n keep_checkpoints_num=1,\n restore=self.checkpoint_path,\n config={\n \"env\": \"CartPole-v0\",\n \"framework\": \"tf\",\n },\n )\n self.assertTrue(os.path.isfile(self.checkpoint_path))\n\n\nclass TuneInterruptionTest(unittest.TestCase):\n def setUp(self) -> None:\n # Wait up to five seconds for placement groups when starting a trial\n os.environ[\"TUNE_PLACEMENT_GROUP_WAIT_S\"] = \"5\"\n # Block for results even when placement groups are pending\n os.environ[\"TUNE_TRIAL_STARTUP_GRACE_PERIOD\"] = \"0\"\n os.environ[\"TUNE_TRIAL_RESULT_WAIT_TIME_S\"] = \"99999\"\n\n def testExperimentInterrupted(self):\n import multiprocessing\n\n trainer_semaphore = multiprocessing.Semaphore()\n driver_semaphore = multiprocessing.Semaphore()\n\n class SteppingCallback(Callback):\n def on_step_end(self, iteration, trials, **info):\n driver_semaphore.release() # Driver should continue\n trainer_semaphore.acquire() # Wait until released\n\n def _run(local_dir):\n def _train(config):\n for i in range(7):\n tune.report(val=i)\n\n tune.run(\n _train,\n local_dir=local_dir,\n name=\"interrupt\",\n callbacks=[SteppingCallback()])\n\n local_dir = tempfile.mkdtemp()\n process = multiprocessing.Process(target=_run, args=(local_dir, ))\n process.daemon = False\n process.start()\n\n exp_dir = os.path.join(local_dir, \"interrupt\")\n\n # Skip first five steps\n for i in range(5):\n driver_semaphore.acquire() # Wait for callback\n trainer_semaphore.release() # Continue training\n\n driver_semaphore.acquire()\n\n experiment_state_file = None\n for file in os.listdir(exp_dir):\n if file.startswith(\"experiment_state\"):\n experiment_state_file = os.path.join(exp_dir, file)\n break\n\n self.assertTrue(experiment_state_file)\n last_mtime = os.path.getmtime(experiment_state_file)\n\n # Now send kill signal\n os.kill(process.pid, signal.SIGINT)\n # Release trainer. 
It should handle the signal and try to\n # checkpoint the experiment\n trainer_semaphore.release()\n\n time.sleep(2) # Wait for checkpoint\n new_mtime = os.path.getmtime(experiment_state_file)\n\n self.assertNotEqual(last_mtime, new_mtime)\n\n shutil.rmtree(local_dir)\n\n\nclass TuneFailResumeGridTest(unittest.TestCase):\n class FailureInjectorCallback(Callback):\n \"\"\"Adds random failure injection to the TrialExecutor.\"\"\"\n\n def __init__(self, steps=20):\n self._step = 0\n self.steps = steps\n\n def on_trial_start(self, trials, **info):\n self._step += 1\n if self._step >= self.steps:\n print(f\"Failing after step {self._step} with \"\n f\"{len(trials)} trials\")\n raise RuntimeError\n\n class CheckStateCallback(Callback):\n \"\"\"Checks state for the experiment initialization.\"\"\"\n\n def __init__(self, expected_trials=20):\n self.expected_trials = expected_trials\n self._checked = False\n\n def on_step_begin(self, iteration, trials, **kwargs):\n if not self._checked:\n assert len(trials) == self.expected_trials\n self._checked = True\n\n class CheckTrialResourcesCallback(Callback):\n \"\"\"Checks if pending trials are requesting the right amount of\n resources.\n\n The check happens exactly once after `check_after` number of calls\n to on_step_begin(). Note, we deliberately delay the check to after\n `check_after` number of steps. This is because when we start a\n tuning job from fresh (rather than restored), trial list is still\n empty - any check now would be trivial and thus wasted.\n \"\"\"\n\n def __init__(self, expected_cpu: int, check_after: int = 1):\n self._expected_cpu = expected_cpu\n self._checked = False\n self._check_after = check_after\n\n def on_step_begin(self, iteration: int, trials: List[\"Trial\"], **info):\n if not self._checked and iteration >= self._check_after:\n for trial in trials:\n if trial.status == Trial.PENDING:\n assert trial.resources.cpu == self._expected_cpu\n self._checked = True\n\n def setUp(self):\n self.logdir = tempfile.mkdtemp()\n os.environ[\"TUNE_GLOBAL_CHECKPOINT_S\"] = \"0\"\n # Wait up to 1.5 seconds for placement groups when starting a trial\n os.environ[\"TUNE_PLACEMENT_GROUP_WAIT_S\"] = \"1.5\"\n # Block for results even when placement groups are pending\n os.environ[\"TUNE_TRIAL_STARTUP_GRACE_PERIOD\"] = \"0\"\n os.environ[\"TUNE_TRIAL_RESULT_WAIT_TIME_S\"] = \"99999\"\n\n # Change back to local_mode=True after this is resolved:\n # https://github.com/ray-project/ray/issues/13932\n ray.init(local_mode=False, num_cpus=2)\n\n from ray.tune import register_trainable\n register_trainable(\"trainable\", MyTrainableClass)\n\n def tearDown(self):\n os.environ.pop(\"TUNE_GLOBAL_CHECKPOINT_S\")\n shutil.rmtree(self.logdir)\n ray.shutdown()\n\n def testFailResumeGridSearch(self):\n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"1\"\n\n config = dict(\n num_samples=3,\n fail_fast=True,\n config={\n \"test\": tune.grid_search([1, 2, 3]),\n \"test2\": tune.grid_search([1, 2, 3]),\n },\n stop={\"training_iteration\": 2},\n local_dir=self.logdir,\n verbose=1)\n\n with self.assertRaises(RuntimeError):\n tune.run(\n \"trainable\",\n callbacks=[self.FailureInjectorCallback()],\n **config)\n\n analysis = tune.run(\n \"trainable\",\n resume=True,\n callbacks=[self.CheckStateCallback()],\n **config)\n assert len(analysis.trials) == 27\n test_counter = Counter([t.config[\"test\"] for t in analysis.trials])\n assert all(v == 9 for v in test_counter.values())\n test2_counter = Counter([t.config[\"test2\"] for t in analysis.trials])\n assert all(v == 9 
for v in test2_counter.values())\n\n # Unfinished trials' resources should be updated.\n def testResourceUpdateInResume(self):\n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"1\"\n\n config = dict(\n num_samples=3,\n fail_fast=True,\n config={\n \"test\": tune.grid_search([1, 2, 3]),\n \"test2\": tune.grid_search([1, 2, 3]),\n },\n stop={\"training_iteration\": 2},\n local_dir=self.logdir,\n verbose=1)\n\n with self.assertRaises(RuntimeError):\n tune.run(\n \"trainable\",\n callbacks=[\n self.FailureInjectorCallback(),\n self.CheckTrialResourcesCallback(1)\n ],\n **config)\n\n analysis = tune.run(\n \"trainable\",\n resume=True,\n resources_per_trial={\"cpu\": 2},\n callbacks=[self.CheckTrialResourcesCallback(2)],\n **config)\n assert len(analysis.trials) == 27\n\n def testFailResumeWithPreset(self):\n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"1\"\n\n search_alg = BasicVariantGenerator(points_to_evaluate=[{\n \"test\": -1,\n \"test2\": -1\n }, {\n \"test\": -1\n }, {\n \"test2\": -1\n }])\n\n config = dict(\n num_samples=3 + 3, # 3 preset, 3 samples\n fail_fast=True,\n config={\n \"test\": tune.grid_search([1, 2, 3]),\n \"test2\": tune.grid_search([1, 2, 3]),\n },\n stop={\"training_iteration\": 2},\n local_dir=self.logdir,\n verbose=1)\n with self.assertRaises(RuntimeError):\n tune.run(\n \"trainable\",\n callbacks=[self.FailureInjectorCallback(5)],\n search_alg=search_alg,\n **config)\n\n analysis = tune.run(\n \"trainable\",\n resume=True,\n callbacks=[self.CheckStateCallback(expected_trials=5)],\n search_alg=search_alg,\n **config)\n assert len(analysis.trials) == 34\n test_counter = Counter([t.config[\"test\"] for t in analysis.trials])\n assert test_counter.pop(-1) == 4\n assert all(v == 10 for v in test_counter.values())\n test2_counter = Counter([t.config[\"test2\"] for t in analysis.trials])\n assert test2_counter.pop(-1) == 4\n assert all(v == 10 for v in test2_counter.values())\n\n def testFailResumeAfterPreset(self):\n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"1\"\n\n search_alg = BasicVariantGenerator(points_to_evaluate=[{\n \"test\": -1,\n \"test2\": -1\n }, {\n \"test\": -1\n }, {\n \"test2\": -1\n }])\n\n config = dict(\n num_samples=3 + 3, # 3 preset, 3 samples\n fail_fast=True,\n config={\n \"test\": tune.grid_search([1, 2, 3]),\n \"test2\": tune.grid_search([1, 2, 3]),\n },\n stop={\"training_iteration\": 2},\n local_dir=self.logdir,\n verbose=1)\n\n with self.assertRaises(RuntimeError):\n tune.run(\n \"trainable\",\n callbacks=[self.FailureInjectorCallback(15)],\n search_alg=search_alg,\n **config)\n\n analysis = tune.run(\n \"trainable\",\n resume=True,\n callbacks=[self.CheckStateCallback(expected_trials=15)],\n search_alg=search_alg,\n **config)\n assert len(analysis.trials) == 34\n test_counter = Counter([t.config[\"test\"] for t in analysis.trials])\n assert test_counter.pop(-1) == 4\n assert all(v == 10 for v in test_counter.values())\n test2_counter = Counter([t.config[\"test2\"] for t in analysis.trials])\n assert test2_counter.pop(-1) == 4\n assert all(v == 10 for v in test2_counter.values())\n\n def testMultiExperimentFail(self):\n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"1\"\n\n experiments = []\n for i in range(3):\n experiments.append(\n tune.Experiment(\n run=MyTrainableClass,\n name=\"trainable\",\n num_samples=2,\n config={\n \"test\": tune.grid_search([1, 2, 3]),\n },\n stop={\"training_iteration\": 1},\n local_dir=self.logdir))\n\n with self.assertRaises(RuntimeError):\n tune.run(\n experiments,\n 
callbacks=[self.FailureInjectorCallback(10)],\n fail_fast=True)\n\n analysis = tune.run(\n experiments,\n resume=True,\n callbacks=[self.CheckStateCallback(expected_trials=10)],\n fail_fast=True)\n assert len(analysis.trials) == 18\n\n def testWarningLargeGrid(self):\n config = dict(\n num_samples=3,\n fail_fast=True,\n config={\n \"test\": tune.grid_search(list(range(20))),\n \"test2\": tune.grid_search(list(range(20))),\n \"test3\": tune.grid_search(list(range(20))),\n \"test4\": tune.grid_search(list(range(20))),\n \"test5\": tune.grid_search(list(range(20))),\n },\n stop={\"training_iteration\": 2},\n local_dir=self.logdir,\n verbose=1)\n with self.assertWarnsRegex(UserWarning,\n \"exceeds the serialization threshold\"):\n with self.assertRaises(RuntimeError):\n tune.run(\n \"trainable\",\n callbacks=[self.FailureInjectorCallback(10)],\n **config)\n\n\nclass TuneExampleTest(unittest.TestCase):\n def setUp(self):\n ray.init(num_cpus=2)\n\n def tearDown(self):\n ray.shutdown()\n _register_all()\n\n def testPBTKeras(self):\n from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model\n from tensorflow.python.keras.datasets import cifar10\n cifar10.load_data()\n validate_save_restore(Cifar10Model)\n validate_save_restore(Cifar10Model, use_object_store=True)\n\n def testPyTorchMNIST(self):\n from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST\n from torchvision import datasets\n datasets.MNIST(\"~/data\", train=True, download=True)\n validate_save_restore(TrainMNIST)\n validate_save_restore(TrainMNIST, use_object_store=True)\n\n def testHyperbandExample(self):\n from ray.tune.examples.hyperband_example import MyTrainableClass\n validate_save_restore(MyTrainableClass)\n validate_save_restore(MyTrainableClass, use_object_store=True)\n\n def testAsyncHyperbandExample(self):\n from ray.tune.utils.mock import MyTrainableClass\n validate_save_restore(MyTrainableClass)\n validate_save_restore(MyTrainableClass, use_object_store=True)\n\n\nclass AutoInitTest(unittest.TestCase):\n def testTuneRestore(self):\n self.assertFalse(ray.is_initialized())\n tune.run(\"__fake\", name=\"TestAutoInit\", stop={\"training_iteration\": 1})\n self.assertTrue(ray.is_initialized())\n\n def tearDown(self):\n ray.shutdown()\n _register_all()\n\n\nclass AbstractWarmStartTest:\n def setUp(self):\n ray.init(num_cpus=1, local_mode=True)\n self.tmpdir = tempfile.mkdtemp()\n self.experiment_name = \"results\"\n\n def tearDown(self):\n shutil.rmtree(self.tmpdir)\n ray.shutdown()\n _register_all()\n\n def set_basic_conf(self):\n raise NotImplementedError()\n\n def get_scheduler(self):\n return None\n\n def treat_trial_config(self, trial_config):\n return trial_config\n\n def run_part_from_scratch(self):\n np.random.seed(162)\n search_alg, cost = self.set_basic_conf()\n search_alg = ConcurrencyLimiter(search_alg, 1)\n results_exp_1 = tune.run(\n cost,\n num_samples=5,\n search_alg=search_alg,\n scheduler=self.get_scheduler(),\n verbose=0,\n name=self.experiment_name,\n local_dir=self.tmpdir)\n checkpoint_path = os.path.join(self.tmpdir, \"warmStartTest.pkl\")\n search_alg.save(checkpoint_path)\n return results_exp_1, np.random.get_state(), checkpoint_path\n\n def run_from_experiment_restore(self, random_state):\n search_alg, cost = self.set_basic_conf()\n search_alg = ConcurrencyLimiter(search_alg, 1)\n search_alg.restore_from_dir(\n os.path.join(self.tmpdir, self.experiment_name))\n results = tune.run(\n cost,\n num_samples=5,\n search_alg=search_alg,\n scheduler=self.get_scheduler(),\n verbose=0,\n 
name=self.experiment_name,\n local_dir=self.tmpdir)\n return results\n\n def run_explicit_restore(self, random_state, checkpoint_path):\n np.random.set_state(random_state)\n search_alg2, cost = self.set_basic_conf()\n search_alg2 = ConcurrencyLimiter(search_alg2, 1)\n search_alg2.restore(checkpoint_path)\n return tune.run(\n cost,\n num_samples=5,\n search_alg=search_alg2,\n scheduler=self.get_scheduler(),\n verbose=0)\n\n def run_full(self):\n np.random.seed(162)\n search_alg3, cost = self.set_basic_conf()\n search_alg3 = ConcurrencyLimiter(search_alg3, 1)\n return tune.run(\n cost,\n num_samples=10,\n search_alg=search_alg3,\n scheduler=self.get_scheduler(),\n verbose=0)\n\n def testWarmStart(self):\n results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()\n results_exp_2 = self.run_explicit_restore(r_state, checkpoint_path)\n results_exp_3 = self.run_full()\n trials_1_config = self.treat_trial_config(\n [trial.config for trial in results_exp_1.trials])\n trials_2_config = self.treat_trial_config(\n [trial.config for trial in results_exp_2.trials])\n trials_3_config = self.treat_trial_config(\n [trial.config for trial in results_exp_3.trials])\n self.assertEqual(trials_1_config + trials_2_config, trials_3_config)\n\n def testRestore(self):\n results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()\n results_exp_2 = self.run_from_experiment_restore(r_state)\n results_exp_3 = self.run_full()\n\n trials_1_config = self.treat_trial_config(\n [trial.config for trial in results_exp_1.trials])\n trials_2_config = self.treat_trial_config(\n [trial.config for trial in results_exp_2.trials])\n trials_3_config = self.treat_trial_config(\n [trial.config for trial in results_exp_3.trials])\n self.assertEqual(trials_1_config + trials_2_config, trials_3_config)\n\n\nclass HyperoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space = {\n \"x\": hp.uniform(\"x\", 0, 10),\n \"y\": hp.uniform(\"y\", -10, 10),\n \"z\": hp.uniform(\"z\", -10, 0)\n }\n\n def cost(space, reporter):\n loss = space[\"x\"]**2 + space[\"y\"]**2 + space[\"z\"]**2\n reporter(loss=loss)\n\n search_alg = HyperOptSearch(\n space,\n metric=\"loss\",\n mode=\"min\",\n random_state_seed=5,\n n_initial_points=1,\n )\n search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1000)\n return search_alg, cost\n\n\nclass BayesoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self, analysis=None):\n space = {\"width\": (0, 20), \"height\": (-100, 100)}\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"] - 14)**2 - abs(space[\"width\"] - 3))\n\n search_alg = BayesOptSearch(\n space, metric=\"loss\", mode=\"min\", analysis=analysis)\n return search_alg, cost\n\n def testBootStrapAnalysis(self):\n analysis = self.run_full()\n search_alg3, cost = self.set_basic_conf(analysis)\n search_alg3 = ConcurrencyLimiter(search_alg3, 1)\n tune.run(cost, num_samples=10, search_alg=search_alg3, verbose=0)\n\n\nclass CFOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space = {\n \"height\": tune.uniform(-100, 100),\n \"width\": tune.randint(0, 100),\n }\n\n def cost(param, reporter):\n reporter(loss=(param[\"height\"] - 14)**2 - abs(param[\"width\"] - 3))\n\n search_alg = CFO(\n space=space,\n metric=\"loss\",\n mode=\"min\",\n seed=20,\n )\n\n return search_alg, cost\n\n\nclass BlendSearchWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space = {\n \"height\": 
tune.uniform(-100, 100),\n \"width\": tune.randint(0, 100),\n \"time_budget_s\": 10,\n }\n\n def cost(param, reporter):\n reporter(loss=(param[\"height\"] - 14)**2 - abs(param[\"width\"] - 3))\n\n search_alg = BlendSearch(\n space=space,\n metric=\"loss\",\n mode=\"min\",\n seed=20,\n )\n\n return search_alg, cost\n\n\nclass SkoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n optimizer = skopt.Optimizer([(0, 20), (-100, 100)])\n previously_run_params = [[10, 0], [15, -20]]\n known_rewards = [-189, -1144]\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"]**2 + space[\"width\"]**2))\n\n search_alg = SkOptSearch(\n optimizer, [\"width\", \"height\"],\n metric=\"loss\",\n mode=\"min\",\n points_to_evaluate=previously_run_params,\n evaluated_rewards=known_rewards)\n search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1000)\n return search_alg, cost\n\n\nclass NevergradWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n instrumentation = 2\n parameter_names = [\"height\", \"width\"]\n optimizer = optimizerlib.OnePlusOne(instrumentation)\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"] - 14)**2 - abs(space[\"width\"] - 3))\n\n search_alg = NevergradSearch(\n optimizer,\n parameter_names,\n metric=\"loss\",\n mode=\"min\",\n )\n search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1000)\n return search_alg, cost\n\n\nclass OptunaWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n from optuna.samplers import TPESampler\n space = OptunaSearch.convert_search_space({\n \"width\": tune.uniform(0, 20),\n \"height\": tune.uniform(-100, 100)\n })\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"] - 14)**2 - abs(space[\"width\"] - 3))\n\n search_alg = OptunaSearch(\n space, sampler=TPESampler(seed=10), metric=\"loss\", mode=\"min\")\n return search_alg, cost\n\n\nclass DragonflyWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n from dragonfly.opt.gp_bandit import EuclideanGPBandit\n from dragonfly.exd.experiment_caller import EuclideanFunctionCaller\n from dragonfly import load_config\n\n def cost(space, reporter):\n height, width = space[\"point\"]\n reporter(loss=(height - 14)**2 - abs(width - 3))\n\n domain_vars = [{\n \"name\": \"height\",\n \"type\": \"float\",\n \"min\": -10,\n \"max\": 10\n }, {\n \"name\": \"width\",\n \"type\": \"float\",\n \"min\": 0,\n \"max\": 20\n }]\n\n domain_config = load_config({\"domain\": domain_vars})\n\n func_caller = EuclideanFunctionCaller(\n None, domain_config.domain.list_of_domains[0])\n optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)\n search_alg = DragonflySearch(\n optimizer, metric=\"loss\", mode=\"min\", random_state_seed=162)\n search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1000)\n return search_alg, cost\n\n def treat_trial_config(self, trial_config):\n return [list(x[\"point\"]) for x in trial_config]\n\n\nclass SigOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space = [\n {\n \"name\": \"width\",\n \"type\": \"int\",\n \"bounds\": {\n \"min\": 0,\n \"max\": 20\n },\n },\n {\n \"name\": \"height\",\n \"type\": \"int\",\n \"bounds\": {\n \"min\": -100,\n \"max\": 100\n },\n },\n ]\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"] - 14)**2 - abs(space[\"width\"] - 3))\n\n # Unfortunately, SigOpt doesn't allow setting of random state. 
Thus,\n # we always end up with different suggestions, which is unsuitable\n # for the warm start test. Here we make do with points_to_evaluate,\n # and ensure that state is preserved over checkpoints and restarts.\n points = [\n {\n \"width\": 5,\n \"height\": 20\n },\n {\n \"width\": 10,\n \"height\": -20\n },\n {\n \"width\": 15,\n \"height\": 30\n },\n {\n \"width\": 5,\n \"height\": -30\n },\n {\n \"width\": 10,\n \"height\": 40\n },\n {\n \"width\": 15,\n \"height\": -40\n },\n {\n \"width\": 5,\n \"height\": 50\n },\n {\n \"width\": 10,\n \"height\": -50\n },\n {\n \"width\": 15,\n \"height\": 60\n },\n {\n \"width\": 12,\n \"height\": -60\n },\n ]\n\n search_alg = SigOptSearch(\n space,\n name=\"SigOpt Example Experiment\",\n metric=\"loss\",\n mode=\"min\",\n points_to_evaluate=points)\n search_alg = ConcurrencyLimiter(search_alg, max_concurrent=1)\n return search_alg, cost\n\n def testWarmStart(self):\n if \"SIGOPT_KEY\" not in os.environ:\n self.skipTest(\"No SigOpt API key found in environment.\")\n return\n\n super().testWarmStart()\n\n def testRestore(self):\n if \"SIGOPT_KEY\" not in os.environ:\n self.skipTest(\"No SigOpt API key found in environment.\")\n return\n super().testRestore()\n\n\nclass ZOOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n dim_dict = {\n \"height\": (ValueType.CONTINUOUS, [-100, 100], 1e-2),\n \"width\": (ValueType.DISCRETE, [0, 20], False)\n }\n\n def cost(param, reporter):\n reporter(loss=(param[\"height\"] - 14)**2 - abs(param[\"width\"] - 3))\n\n search_alg = ZOOptSearch(\n algo=\"Asracos\", # only support ASRacos currently\n budget=200,\n dim_dict=dim_dict,\n metric=\"loss\",\n mode=\"min\")\n\n return search_alg, cost\n\n\nclass HEBOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space_config = [\n {\n \"name\": \"width\",\n \"type\": \"num\",\n \"lb\": 0,\n \"ub\": 20\n },\n {\n \"name\": \"height\",\n \"type\": \"num\",\n \"lb\": -100,\n \"ub\": 100\n },\n ]\n space = HEBODesignSpace().parse(space_config)\n\n def cost(param, reporter):\n reporter(loss=(param[\"height\"] - 14)**2 - abs(param[\"width\"] - 3))\n\n search_alg = HEBOSearch(\n space=space, metric=\"loss\", mode=\"min\", random_state_seed=5)\n\n return search_alg, cost\n\n\nclass AxWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n from ax.service.ax_client import AxClient\n space = AxSearch.convert_search_space({\n \"width\": tune.uniform(0, 20),\n \"height\": tune.uniform(-100, 100)\n })\n\n from ax.modelbridge.generation_strategy import (GenerationStep,\n GenerationStrategy)\n from ax.modelbridge.registry import Models\n\n # set generation strategy to sobol to ensure reproductibility\n try:\n # ax-platform>=0.2.0\n gs = GenerationStrategy(steps=[\n GenerationStep(\n model=Models.SOBOL,\n num_trials=-1,\n model_kwargs={\"seed\": 4321},\n ),\n ])\n except TypeError:\n # ax-platform<0.2.0\n gs = GenerationStrategy(steps=[\n GenerationStep(\n model=Models.SOBOL,\n num_arms=-1,\n model_kwargs={\"seed\": 4321},\n ),\n ])\n\n client = AxClient(random_seed=4321, generation_strategy=gs)\n client.create_experiment(\n parameters=space, objective_name=\"loss\", minimize=True)\n\n def cost(space, reporter):\n reporter(loss=(space[\"height\"] - 14)**2 - abs(space[\"width\"] - 3))\n\n search_alg = AxSearch(ax_client=client)\n return search_alg, cost\n\n\nclass BOHBWarmStartTest(AbstractWarmStartTest, unittest.TestCase):\n def set_basic_conf(self):\n space = {\n \"width\": 
tune.uniform(0, 20),\n \"height\": tune.uniform(-100, 100)\n }\n\n def cost(space, reporter):\n for i in range(10):\n reporter(\n loss=(space[\"height\"] - 14)**2 -\n abs(space[\"width\"] - 3 - i))\n\n search_alg = TuneBOHB(space=space, metric=\"loss\", mode=\"min\", seed=1)\n\n return search_alg, cost\n\n def get_scheduler(self):\n return HyperBandForBOHB(max_t=10, metric=\"loss\", mode=\"min\")\n\n\nclass SearcherTest(unittest.TestCase):\n class MockSearcher(Searcher):\n def __init__(self, data):\n self.data = data\n\n def save(self, path):\n with open(path, \"w\") as f:\n f.write(self.data)\n\n def restore(self, path):\n with open(path, \"r\") as f:\n self.data = f.read()\n\n def testSaveRestoreDir(self):\n tmpdir = tempfile.mkdtemp()\n original_data = \"hello-its-me\"\n searcher = self.MockSearcher(original_data)\n searcher.save_to_dir(tmpdir)\n searcher_2 = self.MockSearcher(\"no-its-not-me\")\n searcher_2.restore_from_dir(tmpdir)\n assert searcher_2.data == original_data\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__] + sys.argv[1:]))\n", "import gym\nimport numpy as np\nimport unittest\n\nimport ray\nfrom ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv\nfrom ray.rllib.evaluation.rollout_worker import RolloutWorker\nfrom ray.rllib.evaluation.tests.test_rollout_worker import MockPolicy\nfrom ray.rllib.examples.env.multi_agent import BasicMultiAgent\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.tests.test_external_env import make_simple_serving\n\nSimpleMultiServing = make_simple_serving(True, ExternalMultiAgentEnv)\n\n\nclass TestExternalMultiAgentEnv(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n ray.init()\n\n @classmethod\n def tearDownClass(cls) -> None:\n ray.shutdown()\n\n def test_external_multi_agent_env_complete_episodes(self):\n agents = 4\n ev = RolloutWorker(\n env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),\n policy_spec=MockPolicy,\n rollout_fragment_length=40,\n batch_mode=\"complete_episodes\")\n for _ in range(3):\n batch = ev.sample()\n self.assertEqual(batch.count, 40)\n self.assertEqual(\n len(np.unique(batch[SampleBatch.AGENT_INDEX])), agents)\n\n def test_external_multi_agent_env_truncate_episodes(self):\n agents = 4\n ev = RolloutWorker(\n env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),\n policy_spec=MockPolicy,\n rollout_fragment_length=40,\n batch_mode=\"truncate_episodes\")\n for _ in range(3):\n batch = ev.sample()\n self.assertEqual(batch.count, 160)\n self.assertEqual(\n len(np.unique(batch[SampleBatch.AGENT_INDEX])), agents)\n\n def test_external_multi_agent_env_sample(self):\n agents = 2\n act_space = gym.spaces.Discrete(2)\n obs_space = gym.spaces.Discrete(2)\n ev = RolloutWorker(\n env_creator=lambda _: SimpleMultiServing(BasicMultiAgent(agents)),\n policy_spec={\n \"p0\": (MockPolicy, obs_space, act_space, {}),\n \"p1\": (MockPolicy, obs_space, act_space, {}),\n },\n policy_mapping_fn=lambda aid, **kwargs: \"p{}\".format(aid % 2),\n rollout_fragment_length=50)\n batch = ev.sample()\n self.assertEqual(batch.count, 50)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n", "from io import BytesIO\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n import pyarrow\n\nfrom ray.data.block import BlockAccessor\nfrom ray.data.datasource.file_based_datasource import (FileBasedDatasource)\n\n\nclass 
NumpyDatasource(FileBasedDatasource):\n \"\"\"Numpy datasource, for reading and writing Numpy files.\n\n Examples:\n >>> source = NumpyDatasource()\n >>> ray.data.read_datasource(source, paths=\"/path/to/dir\").take()\n ... [array([0., 1., 2.]), ...]\n\n \"\"\"\n\n def _read_file(self, f: \"pyarrow.NativeFile\", path: str, **reader_args):\n # TODO(ekl) Ideally numpy can read directly from the file, but it\n # seems like it requires the file to be seekable.\n buf = BytesIO()\n data = f.readall()\n buf.write(data)\n buf.seek(0)\n return np.load(buf)\n\n def _write_block(self, f: \"pyarrow.NativeFile\", block: BlockAccessor,\n **writer_args):\n np.save(f, block.to_arrow())\n\n def _file_format(self):\n return \"npy\"\n", "\"\"\"Example of a custom observation filter\n\nThis example shows:\n - using a custom observation filter\n\n\"\"\"\nimport argparse\n\nimport numpy as np\nimport ray\nfrom ray import tune\nfrom ray.rllib.utils.filter import Filter\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf1, tf, tfv = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--run\",\n type=str,\n default=\"PPO\",\n help=\"The RLlib-registered algorithm to use.\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\n\n\nclass SimpleRollingStat:\n def __init__(self, n=0, m=0, s=0):\n self._n = n\n self._m = m\n self._s = s\n\n def copy(self):\n return SimpleRollingStat(self._n, self._m, self._s)\n\n def push(self, x):\n self._n += 1\n delta = x - self._m\n self._m += delta / self._n\n self._s += delta * delta * (self._n - 1) / self._n\n\n def update(self, other):\n n1 = self._n\n n2 = other._n\n n = n1 + n2\n if n == 0:\n return\n\n delta = self._m - other._m\n delta2 = delta * delta\n\n self._n = n\n self._m = (n1 * self._m + n2 * other._m) / n\n self._s = self._s + other._s + delta2 * n1 * n2 / n\n\n @property\n def n(self):\n return self._n\n\n @property\n def mean(self):\n return self._m\n\n @property\n def var(self):\n return self._s / (self._n - 1) if self._n > 1 else np.square(self._m)\n\n @property\n def std(self):\n return np.sqrt(self.var)\n\n\nclass CustomFilter(Filter):\n \"\"\"\n Filter that normalizes by using a single mean\n and std sampled from all obs inputs\n \"\"\"\n is_concurrent = False\n\n def __init__(self, shape):\n self.rs = SimpleRollingStat()\n self.buffer = SimpleRollingStat()\n self.shape = shape\n\n def clear_buffer(self):\n self.buffer = SimpleRollingStat(self.shape)\n\n def apply_changes(self, other, with_buffer=False):\n self.rs.update(other.buffer)\n if with_buffer:\n self.buffer = other.buffer.copy()\n\n def copy(self):\n other = CustomFilter(self.shape)\n other.sync(self)\n return other\n\n def as_serializable(self):\n return self.copy()\n\n def sync(self, other):\n assert other.shape == self.shape, \"Shapes don't match!\"\n self.rs = other.rs.copy()\n self.buffer = other.buffer.copy()\n\n def __call__(self, x, update=True):\n x = np.asarray(x)\n if update:\n if len(x.shape) == len(self.shape) + 1:\n # The vectorized case.\n for i in range(x.shape[0]):\n self.push_stats(x[i], (self.rs, self.buffer))\n else:\n # The unvectorized case.\n self.push_stats(x, (self.rs, self.buffer))\n x = x - self.rs.mean\n x = x / (self.rs.std + 1e-8)\n return x\n\n @staticmethod\n def push_stats(vector, buffers):\n for x in vector:\n for buffer in buffers:\n buffer.push(x)\n\n def __repr__(self):\n return f\"CustomFilter({self.shape}, {self.rs}, {self.buffer})\"\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n ray.init()\n\n 
config = {\n \"env\": \"CartPole-v0\",\n \"observation_filter\": lambda size: CustomFilter(size),\n \"num_workers\": 0,\n }\n\n results = tune.run(\n args.run, config=config, stop={\"training_iteration\": args.stop_iters})\n\n ray.shutdown()\n" ]
[ [ "numpy.zeros", "numpy.sum" ], [ "torch.cuda.device_count", "tensorflow.config.list_physical_devices" ], [ "numpy.random.set_state", "tensorflow.python.keras.datasets.cifar10.load_data", "numpy.random.get_state", "numpy.random.seed" ], [ "numpy.unique" ], [ "numpy.load" ], [ "numpy.asarray", "numpy.square", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "1.4", "2.2", "1.13", "2.3", "2.4", "1.5", "1.7", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PterosDiacos/jiant
[ "5aca4c5c54c4385708d3bda2d53420224ddf5dc3" ]
[ "scripts/winograd/preprocess_winograd.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nThis file will preprocess the SuperGLUE Winograd Schema Challenge data, aligning the span indices\nto the tokenizaer of choice, and saving as a JSON file.\n\nAn example of the span index transformation is below:\n[Mr., Porter, is, nice] with span indices [0, 2] -> [Mr, ., Por, ter, is, nice ]\nwith span indices [0, 3].\n\nUsage:\n Run the below command from the root directory\n python -m scripts.winograd.preprocess_winograd\n -t {tokenizer_name} --data_dir {path/to/directory}\n\nThe input file should be in jsonl form, with text and tags columns. The output will be\nin JSON form. See realign_spans for more details.\n\n\"\"\"\n\nfrom typing import Tuple, List, Text\nfrom src.utils import tokenizers\nfrom src.utils import retokenize\nfrom src.utils import utils\nimport argparse\nimport functools\nimport json\nimport os\nimport re\nimport sys\nimport multiprocessing\nfrom tqdm import tqdm\nimport pandas as pd\nimport logging as log\n\nlog.basicConfig(format=\"%(asctime)s: %(message)s\", datefmt=\"%m/%d %I:%M:%S %p\", level=log.INFO)\n\n\ndef realign_spans(record, tokenizer_name):\n \"\"\"\n Builds the indices alignment while also tokenizing the input\n piece by piece.\n\n Parameters\n -----------------------\n record: dict with the below fields\n text: str\n targets: list of dictionaries\n label: bool\n span1_index: int, start index of first span\n span1_text: str, text of first span\n span2_index: int, start index of second span\n span2_text: str, text of second span\n tokenizer_name: str\n\n Returns\n ------------------------\n record: dict with the below fields:\n text: str in tokenized form\n targets: dictionary with the below fields\n -label: bool\n -span_1: (int, int) of token indices\n -span1_text: str, the string\n -span2: (int, int) of token indices\n -span2_text: str, the string\n \"\"\"\n\n # find span indices and text\n text = record[\"text\"].split()\n span1 = record[\"targets\"][0][\"span1_index\"]\n span1_text = record[\"targets\"][0][\"span1_text\"]\n span2 = record[\"targets\"][0][\"span2_index\"]\n span2_text = record[\"targets\"][0][\"span2_text\"]\n\n # construct end spans given span text space-tokenized length\n span1 = [span1, span1 + len(span1_text.strip().split())]\n span2 = [span2, span2 + len(span2_text.strip().split())]\n indices = [span1, span2]\n\n sorted_indices = sorted(indices, key=lambda x: x[0])\n current_tokenization = []\n span_mapping = {}\n\n # align first span to tokenized text\n aligner_fn = retokenize.get_aligner_fn(tokenizer_name)\n _, new_tokens = aligner_fn(\" \".join(text[: sorted_indices[0][0]]))\n current_tokenization.extend(new_tokens)\n new_span1start = len(current_tokenization)\n _, span_tokens = aligner_fn(\" \".join(text[sorted_indices[0][0] : sorted_indices[0][1]]))\n current_tokenization.extend(span_tokens)\n new_span1end = len(current_tokenization)\n span_mapping[sorted_indices[0][0]] = [new_span1start, new_span1end]\n\n # re-indexing second span\n _, new_tokens = aligner_fn(\" \".join(text[sorted_indices[0][1] : sorted_indices[1][0]]))\n current_tokenization.extend(new_tokens)\n new_span2start = len(current_tokenization)\n _, span_tokens = aligner_fn(\" \".join(text[sorted_indices[1][0] : sorted_indices[1][1]]))\n current_tokenization.extend(span_tokens)\n new_span2end = len(current_tokenization)\n span_mapping[sorted_indices[1][0]] = [new_span2start, new_span2end]\n\n # save back into record\n _, all_text = aligner_fn(\" \".join(text))\n record[\"targets\"][0][\"span1\"] = 
span_mapping[record[\"targets\"][0][\"span1_index\"]]\n record[\"targets\"][0][\"span2\"] = span_mapping[record[\"targets\"][0][\"span2_index\"]]\n record[\"text\"] = \" \".join(all_text)\n return record\n\n\ndef _map_fn(record, tokenizer_name):\n new_record = realign_spans(record, tokenizer_name)\n return json.dumps(new_record)\n\n\ndef preprocess_winograd(fname, tokenizer_name, worker_pool):\n new_name = fname + \".retokenized.\" + tokenizer_name\n log.info(\"Processing file: %s\", fname)\n # decompress into list of dictionaries\n inputs = list(pd.read_json(fname, lines=True).T.to_dict().values())\n log.info(\" saving to %s\", new_name)\n map_fn = functools.partial(_map_fn, tokenizer_name=tokenizer_name)\n with open(new_name, \"w\") as fd:\n for line in tqdm(worker_pool.imap(map_fn, inputs, chunksize=500), total=len(inputs)):\n fd.write(line)\n fd.write(\"\\n\")\n\n\ndef main(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", dest=\"tokenizer_name\", type=str, help=\"Tokenizer name.\")\n parser.add_argument(\"--data_dir\", type=str, help=\"Path to data directory.\")\n args = parser.parse_args(args)\n worker_pool = multiprocessing.Pool(2)\n for fname in [\"train.jsonl\", \"val.jsonl\", \"test_with_labels.jsonl\"]:\n fname = args.data_dir + fname\n preprocess_winograd(fname, args.tokenizer_name, worker_pool=worker_pool)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n sys.exit(0)\n" ]
[ [ "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
SLAMPAI/large-scale-pretraining-transfer
[ "730c1f25e56bbe5c70e5933f845824f98c015876" ]
[ "transfer_learning/datasets/padchest.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\nimport albumentations as A\nimport torch\nfrom torch.utils.data import Dataset\nimport random\n# import cv2\nfrom skimage.io import imread\nSEED = 42\nTRAIN_RATIO = 0.9\n\nclass PadChest(Dataset):\n \"\"\"\n PadChest dataset\n Hospital San Juan de Alicante - University of Alicante\n \n PadChest: A large chest x-ray image dataset with multi-label annotated reports.\n Aurelia Bustos, Antonio Pertusa, Jose-Maria Salinas, and Maria de la Iglesia-Vayá. \n arXiv preprint, 2019. https://arxiv.org/abs/1901.07441\n \n Dataset website:\n http://bimcv.cipf.es/bimcv-projects/padchest/\n \n Download full size images here:\n https://academictorrents.com/details/dec12db21d57e158f78621f06dcbe78248d14850\n \n Download resized (224x224) images here (recropped):\n https://academictorrents.com/details/96ebb4f92b85929eadfb16761f310a6d04105797\n \"\"\"\n def __init__(self, path, train=True, aug=None, transform=None, views=[\"AP\", \"PA\"], unique_patients=False):\n # def __init__(self, imgpath, \n # csvpath=os.path.join(thispath, \"PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv.gz\"), \n # views=[\"PA\"],\n # transform=None, \n # data_aug=None,\n # flat_dir=True, \n # seed=0, \n # unique_patients=True):\n super().__init__()\n # super(PC_Dataset, self).__init__()\n # np.random.seed(seed) # Reset the seed so all runs are the same.\n csvpath = os.path.join(path, \"PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv.gz\")\n data_aug = aug\n\n self.pathologies = [\"Atelectasis\", \"Consolidation\", \"Infiltration\",\n \"Pneumothorax\", \"Edema\", \"Emphysema\", \"Fibrosis\",\n \"Effusion\", \"Pneumonia\", \"Pleural_Thickening\",\n \"Cardiomegaly\", \"Nodule\", \"Mass\", \"Hernia\",\"Fracture\", \n \"Granuloma\", \"Flattened Diaphragm\", \"Bronchiectasis\",\n \"Aortic Elongation\", \"Scoliosis\", \n \"Hilar Enlargement\", \"Support Devices\" , \"Tuberculosis\",\n \"Air Trapping\", \"Costophrenic Angle Blunting\", \"Aortic Atheromatosis\",\n \"Hemidiaphragm Elevation\"]\n \n self.pathologies = sorted(self.pathologies)\n \n mapping = dict()\n \n mapping[\"Infiltration\"] = [\"infiltrates\",\n \"interstitial pattern\", \n \"ground glass pattern\",\n \"reticular interstitial pattern\",\n \"reticulonodular interstitial pattern\",\n \"alveolar pattern\",\n \"consolidation\",\n \"air bronchogram\"]\n mapping[\"Pleural_Thickening\"] = [\"pleural thickening\"]\n mapping[\"Consolidation\"] = [\"air bronchogram\"]\n mapping[\"Hilar Enlargement\"] = [\"adenopathy\",\n \"pulmonary artery enlargement\"]\n mapping[\"Support Devices\"] = [\"device\",\n \"pacemaker\"]\n \n self.imgpath = path\n self.transform = transform\n self.data_aug = data_aug\n # self.flat_dir = flat_dir\n self.csvpath = csvpath\n \n # self.check_paths_exist()\n self.csv = pd.read_csv(self.csvpath, low_memory=False)\n # self.MAXVAL = 65535\n\n # standardize view names\n self.csv.loc[self.csv[\"Projection\"].isin([\"AP_horizontal\"]),\"Projection\"] = \"AP Supine\"\n \n # Keep only the specified views\n if type(views) is not list:\n views = [views]\n self.views = views\n \n self.csv[\"view\"] = self.csv['Projection']\n # print(self.csv.view.unique())\n self.csv = self.csv[self.csv[\"view\"].isin(self.views)]\n\n # remove null stuff\n self.csv = self.csv[~self.csv[\"Labels\"].isnull()]\n \n # remove missing files\n missing = [\"216840111366964012819207061112010307142602253_04-014-084.png\",\n \"216840111366964012989926673512011074122523403_00-163-058.png\",\n 
\"216840111366964012959786098432011033083840143_00-176-115.png\",\n \"216840111366964012558082906712009327122220177_00-102-064.png\",\n \"216840111366964012339356563862009072111404053_00-043-192.png\",\n \"216840111366964013076187734852011291090445391_00-196-188.png\",\n \"216840111366964012373310883942009117084022290_00-064-025.png\",\n \"216840111366964012283393834152009033102258826_00-059-087.png\",\n \"216840111366964012373310883942009170084120009_00-097-074.png\",\n \"216840111366964012819207061112010315104455352_04-024-184.png\"]\n missing.extend([\n\t # \"216840111366964012283393834152009033102258826_00-059-087.png\",\n\t # \"216840111366964012339356563862009068084200743_00-045-105.png\",\n\t # \"216840111366964012339356563862009072111404053_00-043-192.png\",\n\t # \"216840111366964012373310883942009117084022290_00-064-025.png\",\n\t # \"216840111366964012373310883942009170084120009_00-097-074.png\",\n\t # \"216840111366964012558082906712009300162151055_00-078-079.png\",\n\t # \"216840111366964012558082906712009327122220177_00-102-064.png\",\n\t # \"216840111366964012819207061112010306085429121_04-020-102.png\",\n\t # \"216840111366964012819207061112010307142602253_04-014-084.png\",\n\t # \"216840111366964012819207061112010315104455352_04-024-184.png\",\n\t # \"216840111366964012959786098432011033083840143_00-176-115.png\",\n\t # \"216840111366964012989926673512011074122523403_00-163-058.png\",\n\t # \"216840111366964012989926673512011101154138555_00-191-086.png\",\n\t # \"216840111366964012989926673512011132200139442_00-157-099.png\",\n\t # \"216840111366964013076187734852011178154626671_00-145-086.png\",\n\t # \"216840111366964013076187734852011291090445391_00-196-188.png\",\n #wrong\n \"216840111366964013829543166512013353113303615_02-092-190.png\",\n \"216840111366964012904401302362010337093236130_03-198-079.png\",\n \"216840111366964012904401302362010336141343749_03-198-010.png\",\n \"216840111366964012989926673512011151082430686_00-157-045.png\",\n \"216840111366964012989926673512011083134050913_00-168-009.png\",\n \"216840111366964012373310883942009077082646386_00-047-124.png\",\n \"216840111366964013686042548532013208193054515_02-026-007.png\",\n \"216840111366964013962490064942014134093945580_01-178-104.png\",\n \"216840111366964012819207061112010281134410801_00-129-131.png\",\n \"216840111366964013590140476722013043111952381_02-065-198.png\",\n \"216840111366964012283393834152009027091819347_00-007-136.png\",\n \"216840111366964012373310883942009152114636712_00-102-045.png\",\n \"216840111366964012283393834152009033140208626_00-059-118.png\",\n \"216840111366964013590140476722013058110301622_02-056-111.png\",\n \"216840111366964012487858717522009280135853083_00-075-001.png\",\n \"216840111366964013590140476722013049100117076_02-063-097.png\",\n \"216840111366964013649110343042013092101343018_02-075-146.png\",\n \"216840111366964012487858717522009280135853083_00-075-001.png\",\n \"216840111366964012819207061112010306085429121_04-020-102.png\",\n \"269300710246070740096540277379121868595_e7zsan.png\",\n \"216840111366964012373310883942009180082307973_00-097-011.png\",\n ])\n self.csv = self.csv[~self.csv[\"ImageID\"].isin(missing)]\n \n if unique_patients:\n self.csv = self.csv.groupby(\"PatientID\").first().reset_index()\n \n # Get our classes.\n self.labels = []\n for pathology in self.pathologies:\n mask = self.csv[\"Labels\"].str.contains(pathology.lower())\n if pathology in mapping:\n for syn in mapping[pathology]:\n #print(\"mapping\", syn)\n mask |= 
self.csv[\"Labels\"].str.contains(syn.lower())\n self.labels.append(mask.values)\n self.labels = np.asarray(self.labels).T\n self.labels = self.labels.astype(np.float32)\n \n ########## add consistent csv values\n \n # offset_day_int\n dt = pd.to_datetime(self.csv[\"StudyDate_DICOM\"], format=\"%Y%m%d\")\n self.csv[\"offset_day_int\"] = dt.astype(np.int)// 10**9 // 86400\n \n # patientid\n self.csv[\"patientid\"] = self.csv[\"PatientID\"].astype(str)\n\n\n inds = np.arange(len(self.csv))\n rng = np.random.RandomState(SEED)\n rng.shuffle(inds)\n # print(\"Padchest size full\" , len(self.csv))\n nb_train = int(len(inds) * TRAIN_RATIO)\n if train:\n inds = inds[0:nb_train]\n else:\n inds = inds[nb_train:]\n self.csv = self.csv.iloc[inds]\n self.labels = self.labels[inds]\n # print(\"Padchest size\" , len(self.csv))\n\n def string(self):\n return self.__class__.__name__ + \" num_samples={} views={} data_aug={}\".format(len(self), self.views, self.data_aug)\n \n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n\n imgid = self.csv['ImageID'].iloc[idx]\n img_path = os.path.join(self.imgpath,imgid)\n # try:\n img = imread(img_path)\n # except Exception:\n # print('<<',img_path,'>>')\n # return torch.zeros((3,224,224)).float(),torch.zeros(27).float()\n img = img / 65535\n # print(img.min(), img.max())\n # Check that images are 2D arrays\n if len(img.shape) > 2:\n img = img[:, :, 0]\n if len(img.shape) < 2:\n print(\"error, dimension lower than 2 for image\")\n\n # Add color channel\n img = img[None, :, :] \n \n if self.transform is not None:\n img = self.transform(img)\n \n if self.data_aug is not None:\n img = self.data_aug(img)\n\n img = img * np.ones((3,1,1), dtype=\"float32\") # use 3 channels\n img = torch.from_numpy(img).float()\n target = torch.from_numpy(self.labels[idx]).float()\n return img, target\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "numpy.asarray", "torch.from_numpy", "numpy.ones", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
adamzabek/Drug-Repurposing
[ "9dbe94305b17db1cb5dba2e9a27c38d07a97dad3" ]
[ "Code_ppi/Code/graphviz_plotting.py" ]
[ "import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\nimport graphviz\nimport matplotlib\nimport matplotlib.cm as cm\n\n\n# define which genes are targets of drugs\nrectangles = ['ACVR2A', 'AURKC', 'BRSK1', 'CDK17', 'EGFR', 'FGFR1', 'FGFR3', 'HDAC1', 'HSP90AA1', 'IRAK1', 'PAK1', 'PDE4B', 'RIPK1', 'RIPK2', 'STK3']\n\n\ndef plot_graphviz_style_graph(g, fname):\n\tagraph = nx.nx_agraph.to_agraph(g)\n\tagraph.graph_attr.update(size=\"21.26,35.08!\")\n\tagraph.node_attr.update(style='filled', color = '#DCDCDC')\n\t# make certain nodes rectangles (targets of drugs)\n\tfor node in g.nodes():\n\t\tif node in rectangles:\n\t\t\tn = agraph.get_node(node)\n\t\t\tn.attr['shape']='box'\n\t# color nodes according to log2 fold change of infection\n\tlog2fc = pd.read_csv('../Save/blancoA_l2fc.csv', index_col=0)\n\tlog2fc.index = [name.upper() for name in log2fc.index.values]\n\tlog2fc_ppi = log2fc.loc[log2fc.index.intersection(list(g.nodes())), :]\n\tlog2fc_ppi_pos = log2fc_ppi.loc[log2fc_ppi['0'] > 0, :]\n\tlog2fc_ppi_neg = log2fc_ppi.loc[log2fc_ppi['0'] < 0, :]\n\n\t# map log2fc to colors\n\tupdate_node_colors(agraph, log2fc_ppi_pos, 'Reds', minima=0, maxima=3)\n\tupdate_node_colors(agraph, log2fc_ppi_neg, 'Blues', minima=-3, maxima=0)\n\t\n\n\tagraph.layout(prog='dot') # use dot\n\tagraph.draw(fname, format='png')\n\treturn agraph\n\n\ndef update_node_colors(agraph, log2fc_ppi_pos, cmap, minima=None, maxima=None):\n\tlst = log2fc_ppi_pos.values.flatten()\n\tif minima is None and maxima is None:\n\t\tminima = min(lst)\n\t\tmaxima = max(lst)\n\n\tnorm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)\n\tmapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n\n\tfor v, name in zip(lst, log2fc_ppi_pos.index.values):\n\t\tcol = matplotlib.colors.rgb2hex(mapper.to_rgba(v))\n\t\tn = agraph.get_node(name)\n\t\tn.attr['color']=col\n\t\tn.attr['fillcolor']=col" ]
[ [ "pandas.read_csv", "matplotlib.cm.ScalarMappable", "matplotlib.colors.Normalize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
marcelsan/OpenBot
[ "44b52d7155c3ad216231a6548361cafdaae5d5b3" ]
[ "policy/openbot/tfrecord_utils.py" ]
[ "\"\"\"\nCreated by Marcel Santos - Intel Intelligent Systems Lab - 2021\nThis script implements several utility routines for manipulating tensorflow records.\n\"\"\"\nimport tensorflow as tf\n\n\ndef image_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()])\n )\n\n\ndef bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode()]))\n\n\ndef float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef float_feature_list(value):\n \"\"\"Returns a list of float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef parse_tfrecord_fn(example):\n \"\"\" Parse the input `tf.train.Example` proto.\"\"\"\n\n # Create a description of the features.\n feature_description = {\n \"image\": tf.io.FixedLenFeature([], tf.string),\n \"path\": tf.io.FixedLenFeature([], tf.string),\n \"left\": tf.io.FixedLenFeature([], tf.float32),\n \"right\": tf.io.FixedLenFeature([], tf.float32),\n \"cmd\": tf.io.FixedLenFeature([], tf.float32),\n }\n \n example = tf.io.parse_single_example(example, feature_description)\n img = tf.io.decode_jpeg(example[\"image\"], channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n example[\"image\"] = img\n return example\n\n\ndef create_example(image, path, ctrl_cmd):\n \"\"\" Converts the train features into a `tf.train.Example` eady to be written to a tfrecord file.\"\"\"\n\n # Create a dictionary mapping the feature name to the tf.train.Example-compatible data type.\n feature = {\n \"image\": image_feature(image),\n \"path\": bytes_feature(path),\n \"left\": float_feature(float(ctrl_cmd[0])/255.0),\n \"right\": float_feature(float(ctrl_cmd[1])/255.0),\n \"cmd\": float_feature(float(ctrl_cmd[2])),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))" ]
[ [ "tensorflow.io.decode_jpeg", "tensorflow.io.parse_single_example", "tensorflow.io.FixedLenFeature", "tensorflow.train.FloatList", "tensorflow.image.convert_image_dtype", "tensorflow.train.Features", "tensorflow.io.encode_jpeg", "tensorflow.train.Int64List" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skristimorris/grocery-spending_skmorris
[ "88e441d258370263ba3920d30fafd14f8d8ac7ca" ]
[ "app.py" ]
[ "\"\"\"Track and analyze grocery spending at an item level.\n\nThis app allows a user to input a grocery item.\n\n\"\"\"\n\n# app.py\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nimport plotly.express as px\nimport pandas as pd\nimport dash_table\nfrom dash_table import FormatTemplate\nfrom datetime import date\n\ndf = pd.read_csv('data/items.csv') # read items.csv file into df\ndf_category = pd.read_csv('data/category.csv') # read category.csv file into df\npd.options.display.float_format = '{:.2f}'.format # set pandas format to 2 decimals\n\ndf['total'] = df['price'] * df['quantity'] # add 'total' column to df\n\ndf['month_year'] = pd.to_datetime(df['date']).dt.strftime('%B %Y') # add 'month_year' column to df and convert to 'month year' str format\ndf = df.sort_values(by='date').reset_index(drop=True) # sort df by date and reset and drop index\n\ndf_table = df[['name', 'price', 'quantity', 'date']] # create df to display table in layout\n\ndf_date = df.sort_values(by='date', ascending=False) # sort df by date in descending order and set to variable\ndf_date = df_date.head(1) # select top row of df\ndf_date = df_date.month_year.item() # select value from 'month_year' column to use as default in date dropdown\n\n# Ref: https://dash.plotly.com/layout\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\napp.config.suppress_callback_exceptions = True\n\n# Ref: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/form/\ndef InputItem():\n \"\"\"Create a form with inputs to add a new item.\n \n Args:\n none\n \n Returns:\n item name: input text field to enter item name\n category: dropdown to select category\n price: input numeric field to enter price\n quantity: slider to select quantity\n date: date picker to select date of purchase\n output: text to show exceptions\n \"\"\"\n input_addItem = dbc.Form(\n [\n dbc.FormGroup(\n [\n dbc.Label('Item Name'),\n dcc.Input(\n id='name',\n placeholder='Enter grocery item',\n style={'width': '100%'}\n ),\n ]\n ),\n dbc.FormGroup(\n [\n html.Label('Category'),\n dcc.Dropdown(\n id='category',\n options=[\n {'label': i, 'value': i} for i in sorted(df_category.Category)\n ],\n )\n ]\n ),\n dbc.FormGroup(\n [\n html.Label('Price'),\n dcc.Input(\n id='price',\n type='number',\n placeholder='Enter price of item',\n style={'width': '100%'}\n )\n ]\n ),\n dbc.FormGroup(\n [\n html.Label('Quantity'),\n dcc.Slider(\n id='quantity',\n min=0,\n max=10,\n step=1,\n marks={\n i: '{}'.format(i)\n if i == 1\n else str(i)\n for i in range(1,11)\n },\n value=1,\n ),\n html.Br(),\n html.Label('Date of Purchase'),\n html.Br(),\n dcc.DatePickerSingle(\n id='date',\n month_format='MMM Do, YY',\n date=date.today()\n )\n ]\n ),\n html.Div(id='output-add-item',\n style={'color': 'red'})\n ]\n )\n return input_addItem\n\nbutton_item = html.Div([\ndbc.Row(\n [\n dbc.Col(\n dbc.Button(\n \"New Item\", id='button-new-item', color=\"primary\", className=\"ml-2\", n_clicks=0, block=True\n ),\n width=\"auto\",\n \n ), \n dbc.Modal(\n [\n dbc.ModalHeader('Add New Item'),\n dbc.ModalBody(InputItem()),\n dbc.ModalFooter(\n [\n dbc.Button('Submit', id='submit-new-item', className='ml-auto', n_clicks=0, color='primary'),\n dbc.Button('Cancel', id='cancel', className='ml-auto', n_clicks=0, color='primary')\n ]\n )\n ],\n id='modal',\n is_open=False,\n ) \n ],\n no_gutters=True,\n className=\"ml-auto flex-nowrap mt-3 
mt-md-0\",\n align=\"center\"\n),\n])\n\n# Ref: https://dash.plotly.com/dash-core-components/graph\ndashboard = html.Div(\n [\n html.Br(),\n html.Br(),\n html.H5('Spending Dashboard', style={'textAlign': 'left'}),\n html.Hr(),\n html.Div([\n dash_table.DataTable(\n id='table-item',\n data=df.to_dict('records'),\n columns=[\n {\n 'name': i, 'id': i\n }\n for i in (df.columns)\n ],\n ) \n ],\n style={'display': 'none'},\n ),\n html.P('Select a date:'),\n html.Div([\n dcc.Dropdown(\n id='dash-monthyear',\n options=[\n {'label': i, 'value': i} for i in df.month_year.unique()\n ],\n value=df_date,\n clearable=False\n )], \n style={\n 'width': '20%',\n 'display': 'inline-block'\n },\n ),\n dbc.Row(\n dcc.Graph(id='graph-spending-all')\n ),\n html.Hr(),\n html.P('Select a category:'),\n html.Div([\n dcc.Dropdown(id='dash-category',\n clearable=False\n )\n ],\n style={\n 'width': '20%',\n 'display': 'inline-block'\n },\n ),\n html.Br(),\n dbc.Row([\n dbc.Col(\n dcc.Graph(id='graph-item')\n ),\n dbc.Col([\n dash_table.DataTable(\n id='table-item-display',\n data=df.to_dict('records'),\n columns=[\n {'name': 'Name', 'id': 'name'},\n {'name': 'Price', 'id': 'price', 'type': 'numeric', 'format': FormatTemplate.money(2)},\n {'name': 'Quantity', 'id': 'quantity'},\n {'name': 'Date', 'id': 'date'},\n ],\n page_action='native',\n page_current=0,\n page_size=10,\n sort_action='native',\n sort_mode='single',\n sort_by=[{'column_id': 'date', 'direction': 'desc'}],\n style_cell={'textAlign': 'left', 'font-family': 'sans-serif'},\n selected_columns=[],\n selected_rows=[],\n style_as_list_view=True,\n )\n ])\n ]),\n dbc.Row(\n dcc.Graph(id='graph-trend')\n ),\n ],\n style={\n 'margin-left': '15%',\n 'margin-right': '5%',\n 'padding': '20px 10px'\n }\n)\n\n# Ref: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/navbar/#\nnavbar = dbc.Navbar(\n [\n html.A(\n dbc.Row(\n [\n dbc.Col(dbc.NavbarBrand(\"Grocery Spending Tracker\", className=\"ml-2\")),\n ],\n align=\"left\",\n no_gutters=True,\n ),\n ),\n ],\n color=\"dark\",\n dark=True,\n fixed='top'\n)\n\n# Ref: https://dash-bootstrap-components.opensource.faculty.ai/examples/simple-sidebar/\nsidebar = html.Div(\n [\n html.Br(),\n html.Br(),\n button_item,\n ],\n style={\n 'position': 'fixed',\n 'top': 0,\n 'left': 0,\n 'bottom': 0,\n 'width': '10%',\n 'padding': '20px 10px',\n 'background-color': '#f8f9fa'\n }\n)\n\[email protected](\n Output('modal', 'is_open'),\n Output('output-add-item', 'children'),\n [Input('button-new-item', 'n_clicks'), \n Input('cancel', 'n_clicks'),\n Input('submit-new-item', 'n_clicks')],\n [State('modal', 'is_open'),\n State('name', 'value'),\n State('category', 'value'),\n State('price', 'value')\n ]\n)\ndef toggle_modal(n1, n2, n3, is_open, name, category, price):\n \"\"\"Callback to toggle modal.\n \n Args:\n n1: number of times new item button is clicked\n n2: number of times cancel button is clicked\n n3: number of times submit button is clicked\n is_open: passes open state of modal\n name: passes state of item name value\n category: passes state of category value\n price: passes state of price value\n \n Returns:\n enables modal to be toggled between open and closed when the buttons are clicked,\n if submit button is clicked and name, category or price is empty (quantity & date have default values):\n modal does not close and string displays with missing input fields \n \"\"\"\n ctx = dash.callback_context\n input_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if input_id == 'button-new-item':\n return 
not is_open, None\n elif input_id == 'cancel':\n return not is_open, None\n elif input_id == 'submit-new-item':\n if name == None:\n return dash.no_update, 'Please enter an item name.'\n if category == None:\n return dash.no_update, 'Please select a category.'\n if price == None:\n return dash.no_update, 'Please enter a price.'\n return not is_open, None\n return is_open, None\n\[email protected](\n [Output('name', 'value'),\n Output('category', 'value'),\n Output('price', 'value'),\n Output('quantity', 'value'),\n Output('date', 'date')],\n [Input('modal', 'is_open')]\n)\ndef clear_input(is_open):\n \"\"\"Callback to clear input values when modal is opened.\n \n Args:\n is_open: open state of modal\n \n Returns:\n None for name, category, and price inputs and resets quantity slider to 1 and date to today's date\n \"\"\"\n return (None,None,None,1,date.today())\n\n# Ref: https://dash.plotly.com/basic-callbacks\[email protected](\n Output('dash-category', 'options'),\n [Input('table-item', 'data'),\n Input('dash-monthyear', 'value')]\n)\ndef set_cat_option(data, month_year):\n \"\"\"Callback to set category dropdown options based on the month selected.\n \n Args:\n data: dataframe\n month_year: selected date from dropdown\n \n Returns:\n list of categories into category dropdown from dataframe for selected date\n \"\"\"\n dff = pd.DataFrame.from_dict(data)\n dff = dff.query('month_year == @month_year')\n return [{'label': i, 'value': i} for i in sorted(dff.category.unique())]\n\[email protected](\n Output('dash-category', 'value'),\n [Input('dash-category', 'options')]\n)\ndef set_cat_default(available_options):\n \"\"\"Callback to set category dropdown default value.\n \n Args:\n available_options: list of categories from dropdown\n \n Returns:\n first value from category dropdown to set as default dropdown value\n \"\"\"\n return available_options[0]['value']\n\n\n# Ref: https://plotly.com/python/pie-charts/\[email protected](\n Output('graph-spending-all', 'figure'),\n [Input('table-item', 'data'),\n Input('dash-monthyear', 'value')]\n)\ndef generate_graph_all_cat(data, month_year):\n \"\"\"Callback to generate graph to show spending in all categories for the selected month.\n \n Args:\n data: dataframe\n month_year: selected date from dropdown\n \n Returns:\n pie chart dispalying amounts spent per category and total amount spent for selected month\n \"\"\"\n dff = pd.DataFrame.from_dict(data)\n dff = dff.query('month_year == @month_year')\n dff_total = dff['total'].sum()\n total_format = '{:.2f}'.format(dff_total)\n\n fig = px.pie(dff, \n values='total', \n names='category', \n title= 'Spending for All Categories in {}'.format(month_year),\n hole= .5)\n fig.update_traces(\n hoverinfo='label+percent', \n texttemplate='%{value:$.2f}',\n textposition='inside'\n )\n fig.update_layout(\n annotations= [\n dict(text= 'Total Amount <br> ${}'.format(total_format), x=0.5, y=0.5, font_size=15, showarrow=False),\n ],\n legend_title='<b> Category </b>'\n )\n return fig \n\n# Ref: https://plotly.com/python/pie-charts/\[email protected](\n Output('graph-item', 'figure'),\n [Input('table-item', 'data'),\n Input('dash-monthyear', 'value'),\n Input('dash-category', 'value')]\n)\ndef update_graph_item(data, month_year, category):\n \"\"\"Callback to generate graph to show amounts spent per item for the selected month and category.\n \n Args:\n data: dataframe\n month_year: selected date from dropdown\n category: selected category from dropdown\n \n Returns:\n pie chart dispalying amounts spent per item 
and total amount spent for selected month and category \n \"\"\"\n dff = pd.DataFrame.from_dict(data)\n dff = dff.query('month_year == @month_year and category ==@category')\n dff_total = dff['total'].sum()\n total_format = '{:.2f}'.format(dff_total)\n\n fig = px.pie(dff, \n values='total', \n names='name', \n title= 'Spending for {} in {}'.format(category, month_year),\n hole= .5,\n )\n fig.update_traces(\n hoverinfo='label+percent', \n texttemplate='%{value:$}',\n textinfo='value'\n )\n fig.update_layout(\n annotations= [\n dict(text= 'Total Amount <br> ${}'.format(total_format), x=0.5, y=0.5, font_size=15, showarrow=False),\n ],\n legend_title='<b> Item </b>'\n )\n return fig\n\n# Ref: https://plotly.com/python/pie-charts/\[email protected](\n Output('graph-trend', 'figure'),\n [Input('table-item', 'data'),\n Input('dash-category', 'value')]\n)\ndef update_graph_trend(data, category):\n \"\"\"Callback to generate graph to show amounts spent in all months for the selected category.\n \n Args:\n data: dataframe\n category: selected category from dropdown\n \n Returns:\n bar chart dispalying amounts spent per month for selected category \n \"\"\"\n dff = pd.DataFrame.from_dict(data)\n\n fig = px.bar(dff.query('category == @category'), x='month_year', y='price', color='category', barmode='group', \n title= 'Spending History for {}'.format(category),\n labels={\n 'category': 'Category', 'price': 'Total Amount', 'month_year': 'Month of Purchase'\n }\n )\n fig.update_traces(\n texttemplate='%{value:$}',\n textposition='outside'\n )\n return fig\n\n# Ref: https://dash.plotly.com/advanced-callbacks\[email protected](\n Output('table-item', 'data'),\n [Input('submit-new-item', 'n_clicks')],\n [State('name', 'value'),\n State('category', 'value'),\n State('price', 'value'),\n State('quantity', 'value'),\n State('date', 'date'),\n ]\n)\ndef update_table(n, name, category, price, quantity, date):\n \"\"\"Callback to add new item to dataframe and write to csv file.\n \n Args:\n n: number of times submit button is clicked\n name: passes state of item name value\n category: passes state of category value\n price: passes state of price value\n quantity: passes state of quantity value\n date: passes state of date value\n \n Returns:\n dataframe with appended new row to hidden datatable in dashboard layout\n\n Raises:\n no update to dataframe if name, category, price, quantity, or date is empty \n \"\"\"\n df = pd.read_csv(\"data/items.csv\")\n ctx = dash.callback_context\n input_id = ctx.triggered[0]['prop_id'].split('.')[0]\n\n if input_id == 'submit-new-item':\n if None in [name, category, price]:\n return dash.no_update\n else:\n new_row = {'name': name, 'category': category, 'price': price, 'quantity': quantity, 'date': date}\n df = df.append(new_row, ignore_index=True)\n df.to_csv(\"data/items.csv\", index=False)\n df['total'] = df['price'] * df['quantity']\n df['month_year'] = pd.to_datetime(df['date']).dt.strftime('%B %Y')\n df = df.sort_values(by='date').reset_index(drop=True)\n return df.to_dict('records')\n else:\n df['total'] = df['price'] * df['quantity']\n df['month_year'] = pd.to_datetime(df['date']).dt.strftime('%B %Y')\n df = df.sort_values(by='date').reset_index(drop=True)\n return df.to_dict('records')\n\[email protected](\n Output('table-item-display', 'data'),\n [Input('table-item', 'data'),\n Input('dash-monthyear', 'value'),\n Input('dash-category', 'value')]\n)\ndef update_table_display(data, month_year, category):\n \"\"\"Callback to update datatable based on date and category 
dropdown selection.\n \n Args:\n data: dataframe\n month_year: selected date from dropdown\n category: selected category from dropdown\n \n Returns:\n datatable dispalying items for selected month and category \n \"\"\"\n dff = pd.DataFrame.from_dict(data)\n df_table = pd.DataFrame(dff.query('month_year == @month_year and category == @category'))\n return df_table.to_dict('records')\n \napp.layout = html.Div([navbar, sidebar, dashboard])\n\nif __name__ == '__main__':\n app.run_server(debug=True)" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
drozzy/autonomous-learning-library
[ "67b27aa71e6689e3447f1b342296b4360419ac38", "67b27aa71e6689e3447f1b342296b4360419ac38", "67b27aa71e6689e3447f1b342296b4360419ac38" ]
[ "all/agents/dqn.py", "all/bodies/vision.py", "all/presets/continuous/ddpg.py" ]
[ "import numpy as np\nimport torch\nfrom torch.nn.functional import mse_loss\nfrom ._agent import Agent\n\n\nclass DQN(Agent):\n '''\n Deep Q-Network (DQN).\n DQN was one of the original deep reinforcement learning algorithms.\n It extends the ideas behind Q-learning to work well with modern convolution networks.\n The core innovation is the use of a replay buffer, which allows the use of batch-style\n updates with decorrelated samples. It also uses a \"target\" network in order to\n improve the stability of updates.\n https://www.nature.com/articles/nature14236\n\n Args:\n q (QNetwork): An Approximation of the Q function.\n policy (GreedyPolicy): A policy derived from the Q-function.\n replay_buffer (ReplayBuffer): The experience replay buffer.\n discount_factor (float): Discount factor for future rewards.\n exploration (float): The probability of choosing a random action.\n loss (function): The weighted loss function to use.\n minibatch_size (int): The number of experiences to sample in each training update.\n n_actions (int): The number of available actions.\n replay_start_size (int): Number of experiences in replay buffer when training begins.\n update_frequency (int): Number of timesteps per training update.\n '''\n\n def __init__(self,\n q,\n policy,\n replay_buffer,\n discount_factor=0.99,\n loss=mse_loss,\n minibatch_size=32,\n replay_start_size=5000,\n update_frequency=1,\n ):\n # objects\n self.q = q\n self.policy = policy\n self.replay_buffer = replay_buffer\n self.loss = loss\n # hyperparameters\n self.discount_factor = discount_factor\n self.minibatch_size = minibatch_size\n self.replay_start_size = replay_start_size\n self.update_frequency = update_frequency\n # private\n self._state = None\n self._action = None\n self._frames_seen = 0\n\n def act(self, state):\n self.replay_buffer.store(self._state, self._action, state)\n self._train()\n self._state = state\n self._action = self.policy.no_grad(state)\n return self._action\n\n def eval(self, state):\n return self.policy.eval(state)\n\n def _train(self):\n if self._should_train():\n # sample transitions from buffer\n (states, actions, rewards, next_states, _) = self.replay_buffer.sample(self.minibatch_size)\n # forward pass\n values = self.q(states, actions)\n # compute targets\n targets = rewards + self.discount_factor * torch.max(self.q.target(next_states), dim=1)[0]\n # compute loss\n loss = self.loss(values, targets)\n # backward pass\n self.q.reinforce(loss)\n\n def _should_train(self):\n self._frames_seen += 1\n return (self._frames_seen > self.replay_start_size and self._frames_seen % self.update_frequency == 0)\n\n\nclass DQNTestAgent(Agent):\n def __init__(self, q, n_actions, exploration=0.):\n self.q = q\n self.n_actions = n_actions\n self.exploration = 0.001\n\n def act(self, state):\n if np.random.rand() < self.exploration:\n return np.random.randint(0, self.n_actions)\n return torch.argmax(self.q.eval(state)).item()\n", "import torch\nfrom all.core import State, StateArray\nfrom ._body import Body\n\n\nclass FrameStack(Body):\n def __init__(self, agent, size=4, lazy=False):\n super().__init__(agent)\n self._frames = []\n self._size = size\n self._lazy = lazy\n self._to_cache = TensorDeviceCache()\n\n def process_state(self, state):\n if not self._frames:\n self._frames = [state.observation] * self._size\n else:\n self._frames = self._frames[1:] + [state.observation]\n if self._lazy:\n return LazyState.from_state(state, self._frames, self._to_cache)\n if isinstance(state, StateArray):\n return 
state.update('observation', torch.cat(self._frames, dim=1))\n return state.update('observation', torch.cat(self._frames, dim=0))\n\n\nclass TensorDeviceCache:\n '''\n To efficiently implement device trasfer of lazy states, this class\n caches the transfered tensor so that it is not copied multiple times.\n '''\n\n def __init__(self, max_size=16):\n self.max_size = max_size\n self.cache_data = []\n\n def convert(self, value, device):\n cached = None\n for el in self.cache_data:\n if el[0] is value:\n cached = el[1]\n break\n if cached is not None and cached.device == torch.device(device):\n new_v = cached\n else:\n new_v = value.to(device)\n self.cache_data.append((value, new_v))\n if len(self.cache_data) > self.max_size:\n self.cache_data.pop(0)\n return new_v\n\n\nclass LazyState(State):\n @classmethod\n def from_state(cls, state, frames, to_cache):\n state = LazyState(state, device=frames[0].device)\n state.to_cache = to_cache\n state['observation'] = frames\n return state\n\n def __getitem__(self, key):\n if key == 'observation':\n v = dict.__getitem__(self, key)\n if torch.is_tensor(v):\n return v\n return torch.cat(dict.__getitem__(self, key), dim=0)\n return super().__getitem__(key)\n\n def update(self, key, value):\n x = {}\n for k in self.keys():\n if not k == key:\n x[k] = super().__getitem__(k)\n x[key] = value\n state = LazyState(x, device=self.device)\n state.to_cache = self.to_cache\n return state\n\n def to(self, device):\n if device == self.device:\n return self\n x = {}\n for key, value in self.items():\n if key == 'observation':\n x[key] = [self.to_cache.convert(v, device) for v in value]\n # x[key] = [v.to(device) for v in value]#torch.cat(value,axis=0).to(device)\n elif torch.is_tensor(value):\n x[key] = value.to(device)\n else:\n x[key] = value\n state = LazyState.from_state(x, x['observation'], self.to_cache)\n return state\n", "import copy\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom all.agents import DDPG, DDPGTestAgent\nfrom all.approximation import QContinuous, PolyakTarget\nfrom all.bodies import TimeFeature\nfrom all.logging import DummyWriter\nfrom all.policies import DeterministicPolicy\nfrom all.memory import ExperienceReplayBuffer\nfrom all.presets.builder import PresetBuilder\nfrom all.presets.preset import Preset\nfrom all.presets.continuous.models import fc_q, fc_deterministic_policy\n\n\ndefault_hyperparameters = {\n # Common settings\n \"discount_factor\": 0.98,\n # Adam optimizer settings\n \"lr_q\": 1e-3,\n \"lr_pi\": 1e-3,\n # Training settings\n \"minibatch_size\": 100,\n \"update_frequency\": 1,\n \"polyak_rate\": 0.005,\n # Replay Buffer settings\n \"replay_start_size\": 5000,\n \"replay_buffer_size\": 1e6,\n # Exploration settings\n \"noise\": 0.1,\n # Model construction\n \"q_model_constructor\": fc_q,\n \"policy_model_constructor\": fc_deterministic_policy\n}\n\n\nclass DDPGContinuousPreset(Preset):\n \"\"\"\n DDPG continuous control preset.\n\n Args:\n env (all.environments.AtariEnvironment): The environment for which to construct the agent.\n name (str): A human-readable name for the preset.\n device (torch.device): The device on which to load the agent.\n\n Keyword Args:\n discount_factor (float): Discount factor for future rewards.\n lr_q (float): Learning rate for the Q network.\n lr_pi (float): Learning rate for the policy network.\n minibatch_size (int): Number of experiences to sample in each training update.\n update_frequency (int): Number of timesteps per training update.\n polyak_rate 
(float): Speed with which to update the target network towards the online network.\n replay_start_size (int): Number of experiences in replay buffer when training begins.\n replay_buffer_size (int): Maximum number of experiences to store in the replay buffer.\n noise (float): The amount of exploration noise to add.\n q_model_constructor (function): The function used to construct the neural q model.\n policy_model_constructor (function): The function used to construct the neural policy model.\n \"\"\"\n\n def __init__(self, env, name, device, **hyperparameters):\n super().__init__(name, device, hyperparameters)\n self.q_model = hyperparameters[\"q_model_constructor\"](env).to(device)\n self.policy_model = hyperparameters[\"policy_model_constructor\"](env).to(device)\n self.action_space = env.action_space\n\n def agent(self, writer=DummyWriter(), train_steps=float('inf')):\n n_updates = (train_steps - self.hyperparameters[\"replay_start_size\"]) / self.hyperparameters[\"update_frequency\"]\n\n q_optimizer = Adam(self.q_model.parameters(), lr=self.hyperparameters[\"lr_q\"])\n\n q = QContinuous(\n self.q_model,\n q_optimizer,\n target=PolyakTarget(self.hyperparameters[\"polyak_rate\"]),\n scheduler=CosineAnnealingLR(\n q_optimizer,\n n_updates\n ),\n writer=writer\n )\n\n policy_optimizer = Adam(self.policy_model.parameters(), lr=self.hyperparameters[\"lr_pi\"])\n policy = DeterministicPolicy(\n self.policy_model,\n policy_optimizer,\n self.action_space,\n target=PolyakTarget(self.hyperparameters[\"polyak_rate\"]),\n scheduler=CosineAnnealingLR(\n policy_optimizer,\n n_updates\n ),\n writer=writer\n )\n\n replay_buffer = ExperienceReplayBuffer(\n self.hyperparameters[\"replay_buffer_size\"],\n device=self.device\n )\n\n return TimeFeature(DDPG(\n q,\n policy,\n replay_buffer,\n self.action_space,\n noise=self.hyperparameters[\"noise\"],\n replay_start_size=self.hyperparameters[\"replay_start_size\"],\n discount_factor=self.hyperparameters[\"discount_factor\"],\n update_frequency=self.hyperparameters[\"update_frequency\"],\n minibatch_size=self.hyperparameters[\"minibatch_size\"],\n ))\n\n def test_agent(self):\n policy = DeterministicPolicy(\n copy.deepcopy(self.policy_model),\n None,\n self.action_space,\n )\n return TimeFeature(DDPGTestAgent(policy))\n\n\nddpg = PresetBuilder('ddpg', default_hyperparameters, DDPGContinuousPreset)\n" ]
[ [ "numpy.random.rand", "numpy.random.randint" ], [ "torch.device", "torch.is_tensor", "torch.cat" ], [ "torch.optim.lr_scheduler.CosineAnnealingLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skriegman/evometamaterials
[ "5975f132e31ff826fc18c025c61ed29545a7b8da" ]
[ "HomProp2D.py" ]
[ "import numpy as np\r\nimport scipy as sp\r\nfrom numpy import linalg as LA\r\nfrom scipy.sparse import coo_matrix, csc_matrix\r\nfrom scipy.sparse.linalg import spsolve\r\nimport cvxopt \r\nimport cvxopt.cholmod\r\n\r\n\r\ndef GetHomProp2D_PlaneStress(MetaDesign,E1,nu1,E2,nu2,Amat=np.eye(2)):\r\n# Get unit cell full stiffness matrix Kuc - assume plane Strain, thickness = 1\r\n# 1 for stiff material; 0 for soft material\r\n nelx = MetaDesign.shape[1]\r\n nely = MetaDesign.shape[0]\r\n ndof = 2*(nelx+1)*(nely+1)\r\n\r\n KA = np.array([[12., 3., -6., -3., -6., -3., 0., 3.],\r\n [ 3., 12., 3., 0., -3., -6., -3., -6.],\r\n [-6., 3., 12., -3., 0., -3., -6., 3.],\r\n [-3., 0., -3., 12., 3., -6., 3., -6.],\r\n [-6., -3., 0., 3., 12., 3., -6., -3.],\r\n [-3., -6., -3., -6., 3., 12., 3., 0.],\r\n [ 0., -3., -6., 3., -6., 3., 12., -3.],\r\n [ 3., -6., 3., -6., -3., 0., -3., 12.]])\r\n KB = np.array([[-4., 3., -2., 9., 2., -3., 4., -9.],\r\n [ 3., -4., -9., 4., -3., 2., 9., -2.],\r\n [-2., -9., -4., -3., 4., 9., 2., 3.],\r\n [ 9., 4., -3., -4., -9., -2., 3., 2.],\r\n [ 2., -3., 4., -9., -4., 3., -2., 9.],\r\n [-3., 2., 9., -2., 3., -4., -9., 4.],\r\n [ 4., 9., 2., 3., -2., -9., -4., -3.],\r\n [-9., -2., 3., 2., 9., 4., -3., -4.]])\r\n\r\n KE1 = E1/(1-nu1**2)/24*(KA+nu1*KB)\r\n KE2 = E2/(1-nu2**2)/24*(KA+nu2*KB)\r\n\r\n # FE: Build the index vectors for the for coo matrix format.\r\n edofMat=np.zeros((nelx*nely,8),dtype=np.int)\r\n for elx in range(nelx):\r\n for ely in range(nely):\r\n el = ely+elx*nely\r\n n1=(nely+1)*elx+ely\r\n n2=(nely+1)*(elx+1)+ely\r\n edofMat[el,:]=np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3,2*n2, 2*n2+1, 2*n1, 2*n1+1])\r\n\r\n # Construct the index pointers for the coo format\r\n iK = np.kron(edofMat,np.ones((8,1))).flatten()\r\n jK = np.kron(edofMat,np.ones((1,8))).flatten() \r\n sK=((KE1.flatten()[np.newaxis]).T * MetaDesign.flatten()).flatten('F') + ((KE2.flatten()[np.newaxis]).T * (1-MetaDesign).flatten()).flatten('F')\r\n Kuc = sp.sparse.coo_matrix((sK,(iK,jK)),shape=(ndof,ndof)).tocsr()\r\n# Kuc = 0.5 * (Kuc.T+Kuc)\r\n# Kuc = cvxopt.spmatrix(sK,iK,jK,(ndof,ndof))\r\n \r\n # Get unit cell periodic topology\r\n M = np.eye((nelx+1)*(nely+1))\r\n M[0,[nely,(nely+1)*nelx,(nelx+1)*(nely+1)-1]] = 1\r\n M[1:nely,range(1+(nely+1)*nelx,nely+(nely+1)*nelx)] = np.eye(nely-1)\r\n M[np.arange((nely+1),(nely+1)*nelx,(nely+1)),np.arange(2*nely+1,(nely+1)*nelx,(nely+1))] = 1\r\n M = M[np.sum(M,axis=0)<2,:].T\r\n # Compute homogenized elasticity tensor\r\n B0 = sp.sparse.kron(M,np.eye(2))\r\n# print(B0)\r\n Bep = np.array([[Amat[0,0], 0., Amat[1,0]/2],\r\n [0., Amat[1,0], Amat[0,0]/2],\r\n [Amat[0,1], 0., Amat[1,1]/2],\r\n [0., Amat[1,1], Amat[0,1]/2]])\r\n BaTop = np.zeros(((nelx+1)*(nely+1),2),dtype=np.single)\r\n BaTop[(nely+1)*nelx+np.arange(0,nely+1),0] = 1\r\n BaTop[np.arange(nely,(nely+1)*(nelx+1),(nely+1)),1] = -1\r\n Ba = np.kron(BaTop,np.eye(2,dtype=float))\r\n \r\n TikReg = sp.sparse.eye(B0.shape[1])*1e-8\r\n F = (Kuc.dot(B0)).T.dot(Ba)\r\n Kg = (Kuc.dot(B0)).T.dot(B0)+TikReg \r\n Kg = (0.5 * (Kg.T + Kg)).tocoo()\r\n# Kgc, lower = sp.linalg.cho_factor(0.5 * (Kg.T + Kg))\r\n# D0 = sp.linalg.cho_solve((Kgc,lower),F)\r\n# D0 = np.linalg.solve(0.5*(Kg.T + Kg),F)\r\n Ksp = cvxopt.spmatrix(Kg.data,Kg.row.astype(np.int),Kg.col.astype(np.int))\r\n Fsp = cvxopt.matrix(F)\r\n cvxopt.cholmod.linsolve(Ksp,Fsp)\r\n# D0 = sp.sparse.linalg.spsolve(0.5*(Kg.T + Kg), F)\r\n D0 = np.array(Fsp)\r\n Da = -B0.dot(D0)+Ba\r\n Kda = (Kuc.dot(Da)).T.dot(Da)\r\n Chom = (Kda.dot(Bep)).T.dot(Bep) / 
LA.det(Amat)\r\n Modes = Da.dot(Bep)\r\n\r\n # Chris said to replace the output with this:\r\n # Chom = sp.linalg.inv(Chom)\r\n # nueff = -0.5 * (Chom[1,0]/Chom[0,0] + Chom[0,1]/Chom[1,1])\r\n # Eeff = 0.5*(1/Chom[0,0]+1/Chom[1,1]) # Avg young mod\r\n\r\n return Chom # change this to nueff, Eeff and optimize both (very stiff?, very negative poisson)\r\n\r\nE1 = 1000\r\nnu1 = 0.33\r\nE2 = 1\r\nnu2 = 0.0 \r\n\r\ndesign = np.ones((64,64))\r\n# design is a (binary) matrix\r\nEhom = GetHomProp2D_PlaneStress(design,E1,nu1,E2,nu2)" ]
[ [ "scipy.sparse.coo_matrix", "scipy.sparse.eye", "numpy.arange", "numpy.eye", "numpy.ones", "numpy.linalg.det", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Yokohide0317/transformers
[ "1089c30a4a3c56dcf017e500ba4b44e5c39f68dd", "28e091430eea9e0d40839e56fd0d57aec262f5f9", "1089c30a4a3c56dcf017e500ba4b44e5c39f68dd" ]
[ "tests/test_trainer.py", "src/transformers/trainer_seq2seq.py", "src/transformers/models/ctrl/modeling_tf_ctrl.py" ]
[ "# coding=utf-8\n# Copyright 2018 the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport gc\nimport math\nimport os\nimport random\nimport re\nimport subprocess\nimport tempfile\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport numpy as np\n\nfrom huggingface_hub import Repository, delete_repo, login\nfrom parameterized import parameterized\nfrom requests.exceptions import HTTPError\nfrom transformers import (\n AutoTokenizer,\n IntervalStrategy,\n PretrainedConfig,\n TrainingArguments,\n is_torch_available,\n logging,\n)\nfrom transformers.file_utils import WEIGHTS_NAME, is_apex_available\nfrom transformers.testing_utils import (\n ENDPOINT_STAGING,\n PASS,\n USER,\n CaptureLogger,\n TestCasePlus,\n get_gpu_count,\n get_tests_dir,\n is_staging_test,\n require_optuna,\n require_ray,\n require_sentencepiece,\n require_sigopt,\n require_tokenizers,\n require_torch,\n require_torch_bf16,\n require_torch_gpu,\n require_torch_multi_gpu,\n require_torch_non_multi_gpu,\n require_torch_tf32,\n require_torch_up_to_2_gpus,\n slow,\n)\nfrom transformers.trainer_utils import PREFIX_CHECKPOINT_DIR\nfrom transformers.training_args import OptimizerNames\nfrom transformers.utils.hp_naming import TrialShortNamer\n\n\nif is_torch_available():\n import torch\n from torch import nn\n from torch.utils.data import IterableDataset\n\n import transformers.optimization\n from transformers import (\n AutoModelForSequenceClassification,\n EarlyStoppingCallback,\n GlueDataset,\n GlueDataTrainingArguments,\n GPT2Config,\n GPT2LMHeadModel,\n LineByLineTextDataset,\n PreTrainedModel,\n Trainer,\n TrainerState,\n )\n from transformers.modeling_utils import unwrap_model\n\n\nPATH_SAMPLE_TEXT = f\"{get_tests_dir()}/fixtures/sample_text.txt\"\n\n\nclass RegressionDataset:\n def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):\n np.random.seed(seed)\n self.label_names = [\"labels\"] if label_names is None else label_names\n self.length = length\n self.x = np.random.normal(size=(length,)).astype(np.float32)\n self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]\n self.ys = [y.astype(np.float32) for y in self.ys]\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n result = {name: y[i] for name, y in zip(self.label_names, self.ys)}\n result[\"input_x\"] = self.x[i]\n return result\n\n\[email protected]\nclass RegressionTrainingArguments(TrainingArguments):\n a: float = 0.0\n b: float = 0.0\n\n def __post_init__(self):\n super().__post_init__()\n # save resources not dealing with reporting (also avoids the warning when it's not set)\n self.report_to = []\n\n\nclass RepeatDataset:\n def __init__(self, x, length=64):\n self.x = x\n self.length = length\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n return {\"input_ids\": self.x, \"labels\": self.x}\n\n\nclass DynamicShapesDataset:\n def __init__(self, length=64, seed=42, 
batch_size=8):\n self.length = length\n np.random.seed(seed)\n sizes = np.random.randint(1, 20, (length // batch_size,))\n # For easy batching, we make every batch_size consecutive samples the same size.\n self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]\n self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, i):\n return {\"input_x\": self.xs[i], \"labels\": self.ys[i]}\n\n\nclass AlmostAccuracy:\n def __init__(self, thresh=0.25):\n self.thresh = thresh\n\n def __call__(self, eval_pred):\n predictions, labels = eval_pred\n true = np.abs(predictions - labels) <= self.thresh\n return {\"accuracy\": true.astype(np.float32).mean().item()}\n\n\nclass RegressionModelConfig(PretrainedConfig):\n def __init__(self, a=0, b=0, double_output=False, **kwargs):\n super().__init__(**kwargs)\n self.a = a\n self.b = b\n self.double_output = double_output\n self.hidden_size = 1\n\n\nif is_torch_available():\n\n class SampleIterableDataset(IterableDataset):\n def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):\n self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)\n\n def __iter__(self):\n for i in range(len(self.dataset)):\n yield self.dataset[i]\n\n class FiniteIterableDataset(SampleIterableDataset):\n def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):\n super().__init__(a, b, length, seed, label_names)\n self.current_sample = 0\n\n def __iter__(self):\n while self.current_sample < len(self.dataset):\n yield self.dataset[self.current_sample]\n self.current_sample += 1\n\n class RegressionModel(nn.Module):\n def __init__(self, a=0, b=0, double_output=False):\n super().__init__()\n self.a = nn.Parameter(torch.tensor(a).float())\n self.b = nn.Parameter(torch.tensor(b).float())\n self.double_output = double_output\n self.config = None\n\n def forward(self, input_x, labels=None, **kwargs):\n y = input_x * self.a + self.b\n if labels is None:\n return (y, y) if self.double_output else (y,)\n loss = nn.functional.mse_loss(y, labels)\n return (loss, y, y) if self.double_output else (loss, y)\n\n class RegressionDictModel(nn.Module):\n def __init__(self, a=0, b=0):\n super().__init__()\n self.a = nn.Parameter(torch.tensor(a).float())\n self.b = nn.Parameter(torch.tensor(b).float())\n self.config = None\n\n def forward(self, input_x, labels=None, **kwargs):\n y = input_x * self.a + self.b\n result = {\"output\": y}\n if labels is not None:\n result[\"loss\"] = nn.functional.mse_loss(y, labels)\n return result\n\n class RegressionPreTrainedModel(PreTrainedModel):\n config_class = RegressionModelConfig\n base_model_prefix = \"regression\"\n\n def __init__(self, config):\n super().__init__(config)\n self.a = nn.Parameter(torch.tensor(config.a).float())\n self.b = nn.Parameter(torch.tensor(config.b).float())\n self.double_output = config.double_output\n\n def forward(self, input_x, labels=None, **kwargs):\n y = input_x * self.a + self.b\n if labels is None:\n return (y, y) if self.double_output else (y,)\n loss = nn.functional.mse_loss(y, labels)\n return (loss, y, y) if self.double_output else (loss, y)\n\n class RegressionRandomPreTrainedModel(PreTrainedModel):\n config_class = RegressionModelConfig\n base_model_prefix = \"regression\"\n\n def __init__(self, config):\n super().__init__(config)\n self.a = nn.Parameter(torch.tensor(config.a).float())\n self.b = nn.Parameter(torch.tensor(config.b).float())\n\n def forward(self, 
input_x, labels=None, **kwargs):\n y = input_x * self.a + self.b\n torch_rand = torch.randn(1).squeeze()\n np_rand = np.random.rand()\n rand_rand = random.random()\n\n y += 0.05 * torch_rand + 0.05 * torch.tensor(np_rand + rand_rand)\n\n if labels is None:\n return (y,)\n loss = nn.functional.mse_loss(y, labels)\n return (loss, y)\n\n class TstLayer(nn.Module):\n def __init__(self, hidden_size):\n super().__init__()\n self.linear1 = nn.Linear(hidden_size, hidden_size)\n self.ln1 = nn.LayerNorm(hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.ln2 = nn.LayerNorm(hidden_size)\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n\n def forward(self, x):\n h = self.ln1(nn.functional.relu(self.linear1(x)))\n h = nn.functional.relu(self.linear2(x))\n return self.ln2(x + h + self.bias)\n\n def get_regression_trainer(a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, **kwargs):\n label_names = kwargs.get(\"label_names\", None)\n train_dataset = RegressionDataset(length=train_len, label_names=label_names)\n eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)\n\n model_init = kwargs.pop(\"model_init\", None)\n if model_init is not None:\n model = None\n else:\n if pretrained:\n config = RegressionModelConfig(a=a, b=b, double_output=double_output)\n model = RegressionPreTrainedModel(config)\n else:\n model = RegressionModel(a=a, b=b, double_output=double_output)\n\n compute_metrics = kwargs.pop(\"compute_metrics\", None)\n data_collator = kwargs.pop(\"data_collator\", None)\n optimizers = kwargs.pop(\"optimizers\", (None, None))\n output_dir = kwargs.pop(\"output_dir\", \"./regression\")\n\n args = RegressionTrainingArguments(output_dir, a=a, b=b, **kwargs)\n return Trainer(\n model,\n args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n optimizers=optimizers,\n model_init=model_init,\n )\n\n\nclass TrainerIntegrationCommon:\n def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True):\n file_list = [WEIGHTS_NAME, \"training_args.bin\", \"optimizer.pt\", \"scheduler.pt\", \"trainer_state.json\"]\n if is_pretrained:\n file_list.append(\"config.json\")\n for step in range(freq, total, freq):\n checkpoint = os.path.join(output_dir, f\"checkpoint-{step}\")\n self.assertTrue(os.path.isdir(checkpoint))\n for filename in file_list:\n self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))\n\n def check_best_model_has_been_loaded(\n self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True\n ):\n checkpoint = os.path.join(output_dir, f\"checkpoint-{(total // freq) * freq}\")\n log_history = TrainerState.load_from_json(os.path.join(checkpoint, \"trainer_state.json\")).log_history\n\n values = [d[metric] for d in log_history]\n best_value = max(values) if greater_is_better else min(values)\n best_checkpoint = (values.index(best_value) + 1) * freq\n checkpoint = os.path.join(output_dir, f\"checkpoint-{best_checkpoint}\")\n if is_pretrained:\n best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)\n best_model.to(trainer.args.device)\n else:\n best_model = RegressionModel()\n state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))\n best_model.load_state_dict(state_dict)\n best_model.to(trainer.args.device)\n self.assertTrue(torch.allclose(best_model.a, trainer.model.a))\n self.assertTrue(torch.allclose(best_model.b, trainer.model.b))\n\n metrics = trainer.evaluate()\n 
self.assertEqual(metrics[metric], best_value)\n\n def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):\n # We'll pop things so operate on copies.\n state = trainer_state.copy()\n state1 = trainer_state1.copy()\n # Log history main contain different logs for the time metrics (after resuming a training).\n log_history = state.pop(\"log_history\", None)\n log_history1 = state1.pop(\"log_history\", None)\n self.assertEqual(state, state1)\n skip_log_keys = [\"train_runtime\", \"train_samples_per_second\", \"train_steps_per_second\", \"train_loss\"]\n for log, log1 in zip(log_history, log_history1):\n for key in skip_log_keys:\n _ = log.pop(key, None)\n _ = log1.pop(key, None)\n self.assertEqual(log, log1)\n\n\n@require_torch\n@require_sentencepiece\n@require_tokenizers\nclass TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):\n \"\"\"\n Only tests that want to tap into the auto-pre-run 2 trainings:\n - self.default_trained_model\n - self.alternate_trained_model\n directly, or via check_trained_model\n \"\"\"\n\n def setUp(self):\n super().setUp()\n args = TrainingArguments(\".\")\n self.n_epochs = args.num_train_epochs\n self.batch_size = args.train_batch_size\n trainer = get_regression_trainer(learning_rate=0.1)\n trainer.train()\n self.default_trained_model = (trainer.model.a, trainer.model.b)\n\n trainer = get_regression_trainer(learning_rate=0.1, seed=314)\n trainer.train()\n self.alternate_trained_model = (trainer.model.a, trainer.model.b)\n\n def check_trained_model(self, model, alternate_seed=False):\n # Checks a training seeded with learning_rate = 0.1\n (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model\n self.assertTrue(torch.allclose(model.a, a))\n self.assertTrue(torch.allclose(model.b, b))\n\n def test_reproducible_training(self):\n # Checks that training worked, model trained and seed made a reproducible training.\n trainer = get_regression_trainer(learning_rate=0.1)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # Checks that a different seed gets different (reproducible) results.\n trainer = get_regression_trainer(learning_rate=0.1, seed=314)\n trainer.train()\n self.check_trained_model(trainer.model, alternate_seed=True)\n\n def test_trainer_with_datasets(self):\n import datasets\n\n np.random.seed(42)\n x = np.random.normal(size=(64,)).astype(np.float32)\n y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))\n train_dataset = datasets.Dataset.from_dict({\"input_x\": x, \"label\": y})\n\n # Base training. 
Should have the same results as test_reproducible_training\n model = RegressionModel()\n args = TrainingArguments(\"./regression\", learning_rate=0.1)\n trainer = Trainer(model, args, train_dataset=train_dataset)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # Can return tensors.\n train_dataset.set_format(type=\"torch\", dtype=torch.float32)\n model = RegressionModel()\n trainer = Trainer(model, args, train_dataset=train_dataset)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # Adding one column not used by the model should have no impact\n z = np.random.normal(size=(64,)).astype(np.float32)\n train_dataset = datasets.Dataset.from_dict({\"input_x\": x, \"label\": y, \"extra\": z})\n model = RegressionModel()\n trainer = Trainer(model, args, train_dataset=train_dataset)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n def test_model_init(self):\n train_dataset = RegressionDataset()\n args = TrainingArguments(\"./regression\", learning_rate=0.1)\n trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # Re-training should restart from scratch, thus lead the same results.\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # Re-training should restart from scratch, thus lead the same results and new seed should be used.\n trainer.args.seed = 314\n trainer.train()\n self.check_trained_model(trainer.model, alternate_seed=True)\n\n def test_gradient_accumulation(self):\n # Training with half the batch size but accumulation steps as 2 should give the same results.\n trainer = get_regression_trainer(\n gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1\n )\n trainer.train()\n self.check_trained_model(trainer.model)\n\n def test_training_loss(self):\n n_gpus = max(1, get_gpu_count())\n\n # With even logs\n trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus))\n trainer.train()\n log_history = trainer.state.log_history\n\n losses = [log[\"loss\"] for log in log_history if \"loss\" in log]\n train_loss = log_history[-1][\"train_loss\"]\n self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4)\n\n # With uneven logs\n trainer = get_regression_trainer(logging_steps=5)\n trainer.train()\n log_history = trainer.state.log_history\n\n # Training loss should be the same as before\n new_train_loss = log_history[-1][\"train_loss\"]\n self.assertAlmostEqual(train_loss, new_train_loss, places=4)\n\n def test_custom_optimizer(self):\n train_dataset = RegressionDataset()\n args = TrainingArguments(\"./regression\")\n model = RegressionModel()\n optimizer = torch.optim.SGD(model.parameters(), lr=1.0)\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)\n trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))\n trainer.train()\n\n (a, b) = self.default_trained_model\n self.assertFalse(torch.allclose(trainer.model.a, a))\n self.assertFalse(torch.allclose(trainer.model.b, b))\n self.assertEqual(trainer.optimizer.state_dict()[\"param_groups\"][0][\"lr\"], 1.0)\n\n def test_adafactor_lr_none(self):\n # test the special case where lr=None, since Trainer can't not have lr_scheduler\n\n from transformers.optimization import Adafactor, AdafactorSchedule\n\n train_dataset = RegressionDataset()\n args = TrainingArguments(\"./regression\")\n model = RegressionModel()\n optimizer = Adafactor(model.parameters(), 
scale_parameter=True, relative_step=True, warmup_init=True, lr=None)\n lr_scheduler = AdafactorSchedule(optimizer)\n trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))\n trainer.train()\n\n (a, b) = self.default_trained_model\n self.assertFalse(torch.allclose(trainer.model.a, a))\n self.assertFalse(torch.allclose(trainer.model.b, b))\n self.assertGreater(trainer.optimizer.state_dict()[\"param_groups\"][0][\"lr\"], 0)\n\n @require_torch_gpu\n @require_torch_bf16\n def test_mixed_bf16(self):\n\n # very basic test\n trainer = get_regression_trainer(learning_rate=0.1, bf16=True)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n # --bf16 --half_precision_backend apex can't be used together\n with self.assertRaises(ValueError):\n trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend=\"apex\")\n\n # will add more specific tests once there are some bugs to fix\n\n @require_torch_gpu\n @require_torch_tf32\n def test_tf32(self):\n\n # very basic test\n trainer = get_regression_trainer(learning_rate=0.1, tf32=True)\n trainer.train()\n self.check_trained_model(trainer.model)\n\n\n@require_torch\n@require_sentencepiece\n@require_tokenizers\nclass TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):\n def setUp(self):\n super().setUp()\n args = TrainingArguments(\".\")\n self.n_epochs = args.num_train_epochs\n self.batch_size = args.train_batch_size\n\n def test_trainer_works_with_dict(self):\n # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break\n # anything.\n train_dataset = RegressionDataset()\n eval_dataset = RegressionDataset()\n model = RegressionDictModel()\n args = TrainingArguments(\"./regression\")\n trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)\n trainer.train()\n _ = trainer.evaluate()\n _ = trainer.predict(eval_dataset)\n\n def test_evaluation_with_keys_to_drop(self):\n config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)\n tiny_gpt2 = GPT2LMHeadModel(config)\n x = torch.randint(0, 100, (128,))\n eval_dataset = RepeatDataset(x)\n args = TrainingArguments(\"./test\")\n trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)\n # By default the past_key_values are removed\n result = trainer.predict(eval_dataset)\n self.assertTrue(isinstance(result.predictions, np.ndarray))\n # We can still get them by setting ignore_keys to []\n result = trainer.predict(eval_dataset, ignore_keys=[])\n self.assertTrue(isinstance(result.predictions, tuple))\n self.assertEqual(len(result.predictions), 2)\n\n def test_training_arguments_are_left_untouched(self):\n trainer = get_regression_trainer()\n trainer.train()\n args = TrainingArguments(\"./regression\", report_to=[])\n dict1, dict2 = args.to_dict(), trainer.args.to_dict()\n for key in dict1.keys():\n # Logging dir can be slightly different as they default to something with the time.\n if key != \"logging_dir\":\n self.assertEqual(dict1[key], dict2[key])\n\n def test_number_of_steps_in_training(self):\n # Regular training has n_epochs * len(train_dl) steps\n trainer = get_regression_trainer(learning_rate=0.1)\n train_output = trainer.train()\n self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size)\n\n # Check passing num_train_epochs works (and a float version too):\n trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5)\n train_output = trainer.train()\n 
self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size))\n\n # If we pass a max_steps, num_train_epochs is ignored\n trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)\n train_output = trainer.train()\n self.assertEqual(train_output.global_step, 10)\n\n def test_logging_inf_nan_filter(self):\n config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)\n tiny_gpt2 = GPT2LMHeadModel(config)\n x = torch.randint(0, 100, (128,))\n train_dataset = RepeatDataset(x)\n\n # Trainer without inf/nan filter\n args = TrainingArguments(\"./test\", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False)\n trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)\n trainer.train()\n log_history_no_filter = trainer.state.log_history\n\n # Trainer with inf/nan filter\n args = TrainingArguments(\"./test\", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True)\n trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)\n trainer.train()\n log_history_filter = trainer.state.log_history\n\n def is_any_loss_nan_or_inf(log_history):\n losses = [l[\"loss\"] for l in log_history[:-1]]\n return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses)\n\n self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter))\n self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))\n\n def test_train_and_eval_dataloaders(self):\n n_gpu = max(1, torch.cuda.device_count())\n trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)\n self.assertEqual(trainer.get_train_dataloader().batch_size, 16 * n_gpu)\n trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)\n self.assertEqual(trainer.get_eval_dataloader().batch_size, 16 * n_gpu)\n\n # Check drop_last works\n trainer = get_regression_trainer(\n train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32\n )\n self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)\n self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)\n\n trainer = get_regression_trainer(\n train_len=66,\n eval_len=74,\n learning_rate=0.1,\n per_device_train_batch_size=16,\n per_device_eval_batch_size=32,\n dataloader_drop_last=True,\n )\n self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))\n self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))\n\n # Check passing a new dataset for evaluation works\n new_eval_dataset = RegressionDataset(length=128)\n self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))\n\n @require_torch_multi_gpu\n def test_data_is_not_parallelized_when_model_is_parallel(self):\n model = RegressionModel()\n # Make the Trainer believe it's a parallelized model\n model.is_parallelizable = True\n model.model_parallel = True\n args = TrainingArguments(\"./regression\", per_device_train_batch_size=16, per_device_eval_batch_size=16)\n trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset())\n # Check the Trainer was fooled\n self.assertTrue(trainer.is_model_parallel)\n self.assertEqual(trainer.args.n_gpu, 1)\n\n # The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu\n self.assertEqual(trainer.get_train_dataloader().batch_size, 16)\n self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16)\n self.assertEqual(trainer.get_eval_dataloader().batch_size, 16)\n 
self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16)\n\n def test_evaluate(self):\n trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())\n results = trainer.evaluate()\n\n x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]\n pred = 1.5 * x + 2.5\n expected_loss = ((pred - y) ** 2).mean()\n self.assertAlmostEqual(results[\"eval_loss\"], expected_loss)\n expected_acc = AlmostAccuracy()((pred, y))[\"accuracy\"]\n self.assertAlmostEqual(results[\"eval_accuracy\"], expected_acc)\n\n # With a number of elements not a round multiple of the batch size\n trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())\n results = trainer.evaluate()\n\n x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]\n pred = 1.5 * x + 2.5\n expected_loss = ((pred - y) ** 2).mean()\n self.assertAlmostEqual(results[\"eval_loss\"], expected_loss)\n expected_acc = AlmostAccuracy()((pred, y))[\"accuracy\"]\n self.assertAlmostEqual(results[\"eval_accuracy\"], expected_acc)\n\n def test_predict(self):\n trainer = get_regression_trainer(a=1.5, b=2.5)\n preds = trainer.predict(trainer.eval_dataset).predictions\n x = trainer.eval_dataset.x\n self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))\n\n # With a number of elements not a round multiple of the batch size\n trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66)\n preds = trainer.predict(trainer.eval_dataset).predictions\n x = trainer.eval_dataset.x\n self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))\n\n # With more than one output of the model\n trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True)\n preds = trainer.predict(trainer.eval_dataset).predictions\n x = trainer.eval_dataset.x\n self.assertTrue(len(preds), 2)\n self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))\n self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))\n\n # With more than one output/label of the model\n trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=[\"labels\", \"labels_2\"])\n outputs = trainer.predict(trainer.eval_dataset)\n preds = outputs.predictions\n labels = outputs.label_ids\n x = trainer.eval_dataset.x\n self.assertTrue(len(preds), 2)\n self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))\n self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))\n self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))\n self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))\n\n def test_dynamic_shapes(self):\n eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)\n model = RegressionModel(a=2, b=1)\n args = TrainingArguments(\"./regression\")\n trainer = Trainer(model, args, eval_dataset=eval_dataset)\n\n # Check evaluation can run to completion\n _ = trainer.evaluate()\n\n # Check predictions\n preds = trainer.predict(eval_dataset)\n for expected, seen in zip(eval_dataset.ys, preds.label_ids):\n self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))\n self.assertTrue(np.all(seen[expected.shape[0] :] == -100))\n\n for expected, seen in zip(eval_dataset.xs, preds.predictions):\n self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))\n self.assertTrue(np.all(seen[expected.shape[0] :] == -100))\n\n # Same tests with eval accumulation\n args = TrainingArguments(\"./regression\", eval_accumulation_steps=2)\n trainer = Trainer(model, args, eval_dataset=eval_dataset)\n\n # Check evaluation can run to completion\n _ = trainer.evaluate()\n\n # Check predictions\n preds = 
trainer.predict(eval_dataset)\n for expected, seen in zip(eval_dataset.ys, preds.label_ids):\n self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))\n self.assertTrue(np.all(seen[expected.shape[0] :] == -100))\n\n for expected, seen in zip(eval_dataset.xs, preds.predictions):\n self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))\n self.assertTrue(np.all(seen[expected.shape[0] :] == -100))\n\n def test_log_level(self):\n # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)\n logger = logging.get_logger()\n log_info_string = \"Running training\"\n\n # test with the default log_level - should be info and thus log on the main process\n with CaptureLogger(logger) as cl:\n trainer = get_regression_trainer()\n trainer.train()\n self.assertIn(log_info_string, cl.out)\n\n # test with low log_level - lower than info\n with CaptureLogger(logger) as cl:\n trainer = get_regression_trainer(log_level=\"debug\")\n trainer.train()\n self.assertIn(log_info_string, cl.out)\n\n # test with high log_level - should be quiet\n with CaptureLogger(logger) as cl:\n trainer = get_regression_trainer(log_level=\"error\")\n trainer.train()\n self.assertNotIn(log_info_string, cl.out)\n\n def test_save_checkpoints(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))\n\n # With a regular model that is not a PreTrainedModel\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)\n\n @require_torch_multi_gpu\n def test_run_seq2seq_double_train_wrap_once(self):\n # test that we don't wrap the model more than once\n # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for\n # example DataParallel(DataParallel(model))\n\n trainer = get_regression_trainer()\n trainer.train()\n model_wrapped_before = trainer.model_wrapped\n trainer.train()\n model_wrapped_after = trainer.model_wrapped\n self.assertIs(model_wrapped_before, model_wrapped_after, \"should be not wrapped twice\")\n\n @require_torch_up_to_2_gpus\n def test_can_resume_training(self):\n # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of\n # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model\n # won't be the same since the training dataloader is shuffled).\n\n with tempfile.TemporaryDirectory() as tmpdir:\n kwargs = dict(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)\n trainer = get_regression_trainer(**kwargs)\n trainer.train()\n (a, b) = trainer.model.a.item(), trainer.model.b.item()\n state = dataclasses.asdict(trainer.state)\n\n checkpoint = os.path.join(tmpdir, \"checkpoint-5\")\n\n # Reinitialize trainer\n trainer = get_regression_trainer(**kwargs)\n\n trainer.train(resume_from_checkpoint=checkpoint)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n # Now check with a later checkpoint that it also works when we span over one epoch\n checkpoint = os.path.join(tmpdir, \"checkpoint-15\")\n\n # Reinitialize trainer 
and load model\n trainer = get_regression_trainer(**kwargs)\n\n trainer.train(resume_from_checkpoint=checkpoint)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n # With a regular model that is not a PreTrainedModel\n with tempfile.TemporaryDirectory() as tmpdir:\n kwargs = dict(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False)\n\n trainer = get_regression_trainer(**kwargs)\n trainer.train()\n (a, b) = trainer.model.a.item(), trainer.model.b.item()\n state = dataclasses.asdict(trainer.state)\n\n checkpoint = os.path.join(tmpdir, \"checkpoint-5\")\n\n # Reinitialize trainer and load model\n trainer = get_regression_trainer(**kwargs)\n\n trainer.train(resume_from_checkpoint=checkpoint)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n # Now check with a later checkpoint that it also works when we span over one epoch\n checkpoint = os.path.join(tmpdir, \"checkpoint-15\")\n\n # Reinitialize trainer and load model\n trainer = get_regression_trainer(**kwargs)\n\n trainer.train(resume_from_checkpoint=checkpoint)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n # Now check failures\n\n # 1. fail to find a bogus checkpoint\n trainer = get_regression_trainer()\n with self.assertRaises(Exception) as context:\n trainer.train(resume_from_checkpoint=f\"{checkpoint}-bogus\")\n self.assertTrue(\"Can't find a valid checkpoint at\" in str(context.exception))\n\n # 2. 
fail to find any checkpoint - due a fresh output_dir\n output_dir2 = self.get_auto_remove_tmp_dir()\n trainer = get_regression_trainer(output_dir=output_dir2)\n with self.assertRaises(Exception) as context:\n trainer.train(resume_from_checkpoint=True)\n self.assertTrue(\"No valid checkpoint found in output directory\" in str(context.exception))\n\n @require_torch_non_multi_gpu\n def test_resume_training_with_randomness(self):\n # This test will fail flakily for more than 1 GPUs since the result will be slightly more different\n # TODO: investigate why it fails for 2 GPUs?\n\n if torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n train_dataset = RegressionDataset(length=128)\n eval_dataset = RegressionDataset()\n\n config = RegressionModelConfig(a=0, b=2)\n model = RegressionRandomPreTrainedModel(config)\n\n tmp_dir = self.get_auto_remove_tmp_dir()\n args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)\n trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)\n\n trainer.train()\n (a, b) = trainer.model.a.item(), trainer.model.b.item()\n\n model = RegressionRandomPreTrainedModel(config)\n trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)\n trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, \"checkpoint-15\"))\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n\n self.assertAlmostEqual(a, a1, delta=1e-8)\n self.assertAlmostEqual(b, b1, delta=1e-8)\n\n # regression for this issue: https://github.com/huggingface/transformers/issues/12970\n def test_training_with_resume_from_checkpoint_false(self):\n train_dataset = RegressionDataset(length=128)\n eval_dataset = RegressionDataset()\n\n config = RegressionModelConfig(a=0, b=2)\n model = RegressionRandomPreTrainedModel(config)\n\n tmp_dir = self.get_auto_remove_tmp_dir()\n args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)\n trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)\n\n trainer.train(resume_from_checkpoint=False)\n\n @require_torch_up_to_2_gpus\n def test_resume_training_with_gradient_accumulation(self):\n # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of\n # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model\n # won't be the same since the training dataloader is shuffled).\n\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n output_dir=tmpdir,\n train_len=128,\n gradient_accumulation_steps=2,\n per_device_train_batch_size=4,\n save_steps=5,\n learning_rate=0.1,\n )\n trainer.train()\n (a, b) = trainer.model.a.item(), trainer.model.b.item()\n state = dataclasses.asdict(trainer.state)\n\n checkpoint = os.path.join(tmpdir, \"checkpoint-5\")\n\n # Reinitialize trainer\n trainer = get_regression_trainer(\n output_dir=tmpdir,\n train_len=128,\n gradient_accumulation_steps=2,\n per_device_train_batch_size=4,\n save_steps=5,\n learning_rate=0.1,\n )\n\n trainer.train(resume_from_checkpoint=checkpoint)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n @require_torch_up_to_2_gpus\n def test_resume_training_with_frozen_params(self):\n # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of\n # save_steps, the 
checkpoint will resume training at epoch 2 or more (so the data seen by the model\n # won't be the same since the training dataloader is shuffled).\n\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n output_dir=tmpdir,\n train_len=128,\n per_device_train_batch_size=4,\n save_steps=5,\n learning_rate=0.1,\n )\n trainer.model.a.requires_grad_(False)\n trainer.train()\n (a, b) = trainer.model.a.item(), trainer.model.b.item()\n state = dataclasses.asdict(trainer.state)\n\n checkpoint = os.path.join(tmpdir, \"checkpoint-5\")\n\n # Reinitialize trainer\n trainer = get_regression_trainer(\n output_dir=tmpdir,\n train_len=128,\n per_device_train_batch_size=4,\n save_steps=5,\n learning_rate=0.1,\n )\n trainer.model.a.requires_grad_(False)\n\n trainer.train(resume_from_checkpoint=checkpoint)\n\n self.assertFalse(trainer.model.a.requires_grad)\n (a1, b1) = trainer.model.a.item(), trainer.model.b.item()\n state1 = dataclasses.asdict(trainer.state)\n self.assertEqual(a, a1)\n self.assertEqual(b, b1)\n self.check_trainer_state_are_the_same(state, state1)\n\n def test_load_best_model_at_end(self):\n total = int(self.n_epochs * 64 / self.batch_size)\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n a=1.5,\n b=2.5,\n output_dir=tmpdir,\n learning_rate=0.1,\n eval_steps=5,\n evaluation_strategy=\"steps\",\n save_steps=5,\n load_best_model_at_end=True,\n )\n self.assertFalse(trainer.args.greater_is_better)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 5, total)\n self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, \"eval_loss\")\n\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n a=1.5,\n b=2.5,\n output_dir=tmpdir,\n learning_rate=0.1,\n eval_steps=5,\n evaluation_strategy=\"steps\",\n save_steps=5,\n load_best_model_at_end=True,\n metric_for_best_model=\"accuracy\",\n compute_metrics=AlmostAccuracy(),\n )\n self.assertTrue(trainer.args.greater_is_better)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 5, total)\n self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, \"eval_accuracy\", greater_is_better=True)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n a=1.5,\n b=2.5,\n output_dir=tmpdir,\n learning_rate=0.1,\n evaluation_strategy=\"epoch\",\n save_strategy=\"epoch\",\n load_best_model_at_end=True,\n metric_for_best_model=\"accuracy\",\n compute_metrics=AlmostAccuracy(),\n )\n self.assertTrue(trainer.args.greater_is_better)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)\n self.check_best_model_has_been_loaded(\n tmpdir, 64 // self.batch_size, total, trainer, \"eval_accuracy\", greater_is_better=True\n )\n\n # Test this works with a non PreTrainedModel\n with tempfile.TemporaryDirectory() as tmpdir:\n trainer = get_regression_trainer(\n output_dir=tmpdir,\n learning_rate=0.1,\n eval_steps=5,\n evaluation_strategy=\"steps\",\n save_steps=5,\n load_best_model_at_end=True,\n pretrained=False,\n )\n self.assertFalse(trainer.args.greater_is_better)\n trainer.train()\n self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)\n self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, \"eval_loss\", is_pretrained=False)\n\n @slow\n def test_trainer_eval_mrpc(self):\n MODEL_ID = \"bert-base-cased-finetuned-mrpc\"\n tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)\n model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)\n data_args = 
GlueDataTrainingArguments(\n task_name=\"mrpc\", data_dir=f\"{get_tests_dir()}/fixtures/tests_samples/MRPC\", overwrite_cache=True\n )\n eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"dev\")\n\n training_args = TrainingArguments(output_dir=\"./examples\", no_cuda=True)\n trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)\n result = trainer.evaluate()\n self.assertLess(result[\"eval_loss\"], 0.2)\n\n @slow\n def test_trainer_eval_lm(self):\n MODEL_ID = \"distilroberta-base\"\n tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)\n dataset = LineByLineTextDataset(\n tokenizer=tokenizer,\n file_path=PATH_SAMPLE_TEXT,\n block_size=tokenizer.max_len_single_sentence,\n )\n self.assertEqual(len(dataset), 31)\n\n def test_training_iterable_dataset(self):\n config = RegressionModelConfig()\n model = RegressionPreTrainedModel(config)\n train_dataset = SampleIterableDataset()\n\n args = RegressionTrainingArguments(output_dir=\"./examples\", max_steps=4)\n trainer = Trainer(model=model, args=args, train_dataset=train_dataset)\n trainer.train()\n self.assertEqual(trainer.state.global_step, 4)\n\n loader = trainer.get_train_dataloader()\n self.assertIsInstance(loader, torch.utils.data.DataLoader)\n self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)\n\n def test_training_finite_iterable_dataset(self):\n config = RegressionModelConfig()\n model = RegressionPreTrainedModel(config)\n\n batch_size = 1\n num_samples = 10\n\n available_steps = num_samples // batch_size\n\n data = FiniteIterableDataset(length=num_samples)\n train_args = TrainingArguments(\n \".\",\n max_steps=available_steps + 1, # set a higher number than actually available\n per_device_train_batch_size=batch_size,\n )\n trainer = Trainer(model, train_dataset=data, args=train_args)\n with self.assertLogs(\"transformers.trainer\", level=\"WARNING\") as logs:\n trainer.train()\n self.assertIn(f\"stopping training at step {available_steps}!\", logs.output[0])\n\n def test_evaluation_iterable_dataset(self):\n config = RegressionModelConfig(a=1.5, b=2.5)\n model = RegressionPreTrainedModel(config)\n eval_dataset = SampleIterableDataset()\n\n args = RegressionTrainingArguments(output_dir=\"./examples\")\n trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())\n results = trainer.evaluate()\n\n x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0]\n pred = 1.5 * x + 2.5\n expected_loss = ((pred - y) ** 2).mean()\n self.assertAlmostEqual(results[\"eval_loss\"], expected_loss)\n expected_acc = AlmostAccuracy()((pred, y))[\"accuracy\"]\n self.assertAlmostEqual(results[\"eval_accuracy\"], expected_acc)\n\n # With a number of elements not a round multiple of the batch size\n eval_dataset = SampleIterableDataset(length=66)\n results = trainer.evaluate(eval_dataset)\n\n x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]\n pred = 1.5 * x + 2.5\n expected_loss = ((pred - y) ** 2).mean()\n self.assertAlmostEqual(results[\"eval_loss\"], expected_loss)\n expected_acc = AlmostAccuracy()((pred, y))[\"accuracy\"]\n self.assertAlmostEqual(results[\"eval_accuracy\"], expected_acc)\n\n def test_predict_iterable_dataset(self):\n config = RegressionModelConfig(a=1.5, b=2.5)\n model = RegressionPreTrainedModel(config)\n eval_dataset = SampleIterableDataset()\n\n args = RegressionTrainingArguments(output_dir=\"./examples\")\n trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, 
compute_metrics=AlmostAccuracy())\n\n preds = trainer.predict(trainer.eval_dataset).predictions\n x = eval_dataset.dataset.x\n self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))\n\n # With a number of elements not a round multiple of the batch size\n test_dataset = SampleIterableDataset(length=66)\n preds = trainer.predict(test_dataset).predictions\n x = test_dataset.dataset.x\n self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))\n\n def test_num_train_epochs_in_training(self):\n # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.\n # It should give 1 update step for each epoch.\n trainer = get_regression_trainer(\n max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5\n )\n train_output = trainer.train()\n self.assertEqual(train_output.global_step, 3)\n\n # Even ``max_steps`` is not specified, we still expect 1 update step for each epoch if\n # len(train_dl) < gradient_accumulation_steps.\n trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)\n train_output = trainer.train()\n self.assertEqual(train_output.global_step, int(self.n_epochs))\n\n def test_early_stopping_callback(self):\n # early stopping stops training before num_training_epochs\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=tmp_dir,\n num_train_epochs=20,\n gradient_accumulation_steps=1,\n per_device_train_batch_size=16,\n load_best_model_at_end=True,\n evaluation_strategy=IntervalStrategy.EPOCH,\n save_strategy=IntervalStrategy.EPOCH,\n compute_metrics=AlmostAccuracy(),\n metric_for_best_model=\"accuracy\",\n )\n trainer.add_callback(EarlyStoppingCallback(1, 0.0001))\n train_output = trainer.train()\n self.assertLess(train_output.global_step, 20 * 64 / 16)\n\n # Invalid inputs to trainer with early stopping callback result in assertion error\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=tmp_dir,\n num_train_epochs=20,\n gradient_accumulation_steps=1,\n per_device_train_batch_size=16,\n evaluation_strategy=IntervalStrategy.EPOCH,\n compute_metrics=AlmostAccuracy(),\n metric_for_best_model=\"accuracy\",\n )\n trainer.add_callback(EarlyStoppingCallback(1))\n self.assertEqual(trainer.state.global_step, 0)\n try:\n trainer.train()\n except AssertionError:\n self.assertEqual(trainer.state.global_step, 0)\n\n def test_flos_extraction(self):\n trainer = get_regression_trainer(learning_rate=0.1)\n\n def assert_flos_extraction(trainer, wrapped_model_to_check):\n self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check))\n self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, \"total_flos\", 0), 0)\n\n # with plain model\n assert_flos_extraction(trainer, trainer.model)\n\n # with enforced DataParallel\n assert_flos_extraction(trainer, nn.DataParallel(trainer.model))\n\n trainer.train()\n self.assertTrue(isinstance(trainer.state.total_flos, float))\n\n def check_checkpoint_deletion(self, trainer, output_dir, expected):\n # Make fake checkpoints\n for n in [5, 10, 15, 20, 25]:\n os.makedirs(os.path.join(output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{n}\"), exist_ok=True)\n trainer._rotate_checkpoints(output_dir=output_dir)\n glob_checkpoints = [str(x) for x in Path(output_dir).glob(f\"{PREFIX_CHECKPOINT_DIR}-*\")]\n values = [int(re.match(f\".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)\", d).groups()[0]) for d in glob_checkpoints]\n self.assertSetEqual(set(values), 
set(expected))\n\n def test_checkpoint_rotation(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Without best model at end\n trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2)\n self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25])\n\n # With best model at end\n trainer = get_regression_trainer(\n output_dir=tmp_dir, evaluation_strategy=\"steps\", load_best_model_at_end=True, save_total_limit=2\n )\n trainer.state.best_model_checkpoint = os.path.join(tmp_dir, \"checkpoint-5\")\n self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])\n\n # Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume\n # from checkpoint\n trainer = get_regression_trainer(\n output_dir=tmp_dir, evaluation_strategy=\"steps\", load_best_model_at_end=True, save_total_limit=1\n )\n trainer.state.best_model_checkpoint = os.path.join(tmp_dir, \"checkpoint-25\")\n self.check_checkpoint_deletion(trainer, tmp_dir, [25])\n\n trainer.state.best_model_checkpoint = os.path.join(tmp_dir, \"checkpoint-5\")\n self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])\n\n def check_mem_metrics(self, trainer, check_func):\n metrics = trainer.train().metrics\n check_func(\"init_mem_cpu_alloc_delta\", metrics)\n check_func(\"train_mem_cpu_alloc_delta\", metrics)\n if torch.cuda.device_count() > 0:\n check_func(\"init_mem_gpu_alloc_delta\", metrics)\n check_func(\"train_mem_gpu_alloc_delta\", metrics)\n\n metrics = trainer.evaluate()\n check_func(\"eval_mem_cpu_alloc_delta\", metrics)\n if torch.cuda.device_count() > 0:\n check_func(\"eval_mem_gpu_alloc_delta\", metrics)\n\n metrics = trainer.predict(RegressionDataset()).metrics\n check_func(\"test_mem_cpu_alloc_delta\", metrics)\n if torch.cuda.device_count() > 0:\n check_func(\"test_mem_gpu_alloc_delta\", metrics)\n\n def test_mem_metrics(self):\n\n # with mem metrics enabled\n trainer = get_regression_trainer(skip_memory_metrics=False)\n self.check_mem_metrics(trainer, self.assertIn)\n\n # with mem metrics disabled\n trainer = get_regression_trainer(skip_memory_metrics=True)\n self.check_mem_metrics(trainer, self.assertNotIn)\n\n @require_torch_gpu\n def test_fp16_full_eval(self):\n\n # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.\n # it's using pretty large safety margins, but small enough to detect broken functionality.\n debug = 0\n n_gpus = get_gpu_count()\n\n bs = 8\n eval_len = 16 * n_gpus\n # make the params somewhat big so that there will be enough RAM consumed to be able to\n # measure things. We should get about 64KB for a+b in fp32\n a = torch.ones(1000, bs) + 0.001\n b = torch.ones(1000, bs) - 0.001\n\n # 1. with mem metrics enabled\n trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)\n metrics = trainer.evaluate()\n del trainer\n gc.collect()\n\n fp32_init = metrics[\"init_mem_gpu_alloc_delta\"]\n fp32_eval = metrics[\"eval_mem_gpu_alloc_delta\"]\n\n if debug:\n print(f\"fp32_init {fp32_init}\")\n print(f\"fp32_eval {fp32_eval}\")\n\n # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.\n # perfect world: fp32_init == 64<<10\n self.assertGreater(fp32_init, 59_000)\n # after eval should be no extra memory allocated - with a small margin (other than the peak\n # memory consumption for the forward calculation that gets recovered)\n # perfect world: fp32_eval == close to zero\n self.assertLess(fp32_eval, 5_000)\n\n # 2. 
with mem metrics disabled\n trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False)\n metrics = trainer.evaluate()\n fp16_init = metrics[\"init_mem_gpu_alloc_delta\"]\n fp16_eval = metrics[\"eval_mem_gpu_alloc_delta\"]\n\n if debug:\n print(f\"fp16_init {fp16_init}\")\n print(f\"fp16_eval {fp16_eval}\")\n\n # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0\n # perfect world: fp16_init == close to zero\n self.assertLess(fp16_init, 5_000)\n # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back)\n # perfect world: fp32_init == 32<<10\n self.assertGreater(fp16_eval, 27_000)\n\n # 3. relative comparison fp32 vs full fp16\n # should be about half of fp16_init\n # perfect world: fp32_init/2 == fp16_eval\n self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)\n\n @require_torch_gpu\n @require_torch_bf16\n def test_bf16_full_eval(self):\n # note: most of the logic is the same as test_fp16_full_eval\n\n # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.\n # it's using pretty large safety margins, but small enough to detect broken functionality.\n debug = 0\n n_gpus = get_gpu_count()\n\n bs = 8\n eval_len = 16 * n_gpus\n # make the params somewhat big so that there will be enough RAM consumed to be able to\n # measure things. We should get about 64KB for a+b in fp32\n a = torch.ones(1000, bs) + 0.001\n b = torch.ones(1000, bs) - 0.001\n\n # 1. with mem metrics enabled\n trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)\n metrics = trainer.evaluate()\n del trainer\n gc.collect()\n\n fp32_init = metrics[\"init_mem_gpu_alloc_delta\"]\n fp32_eval = metrics[\"eval_mem_gpu_alloc_delta\"]\n\n if debug:\n print(f\"fp32_init {fp32_init}\")\n print(f\"fp32_eval {fp32_eval}\")\n\n # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.\n # perfect world: fp32_init == 64<<10\n self.assertGreater(fp32_init, 59_000)\n # after eval should be no extra memory allocated - with a small margin (other than the peak\n # memory consumption for the forward calculation that gets recovered)\n # perfect world: fp32_eval == close to zero\n self.assertLess(fp32_eval, 5_000)\n\n # 2. with mem metrics disabled\n trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False)\n metrics = trainer.evaluate()\n bf16_init = metrics[\"init_mem_gpu_alloc_delta\"]\n bf16_eval = metrics[\"eval_mem_gpu_alloc_delta\"]\n\n if debug:\n print(f\"bf16_init {bf16_init}\")\n print(f\"bf16_eval {bf16_eval}\")\n\n # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0\n # perfect world: bf16_init == close to zero\n self.assertLess(bf16_init, 5_000)\n # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back)\n # perfect world: fp32_init == 32<<10\n self.assertGreater(bf16_eval, 27_000)\n\n # 3. 
relative comparison fp32 vs full bf16\n # should be about half of bf16_init\n # perfect world: fp32_init/2 == bf16_eval\n self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000)\n\n def test_no_wd_param_group(self):\n model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))\n trainer = Trainer(model=model)\n trainer.create_optimizer_and_scheduler(10)\n # fmt: off\n wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight']\n # fmt: on\n wd_params = [p for n, p in model.named_parameters() if n in wd_names]\n no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names]\n self.assertListEqual(trainer.optimizer.param_groups[0][\"params\"], wd_params)\n self.assertListEqual(trainer.optimizer.param_groups[1][\"params\"], no_wd_params)\n\n\n@require_torch\n@is_staging_test\nclass TrainerIntegrationWithHubTester(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls._token = login(username=USER, password=PASS)\n\n @classmethod\n def tearDownClass(cls):\n for model in [\"test-trainer\", \"test-trainer-epoch\", \"test-trainer-step\"]:\n try:\n delete_repo(token=cls._token, name=model)\n except HTTPError:\n pass\n\n try:\n delete_repo(token=cls._token, name=\"test-trainer-org\", organization=\"valid_org\")\n except HTTPError:\n pass\n\n def test_push_to_hub(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=os.path.join(tmp_dir, \"test-trainer\"),\n push_to_hub=True,\n hub_token=self._token,\n )\n url = trainer.push_to_hub()\n\n # Extract repo_name from the url\n re_search = re.search(ENDPOINT_STAGING + r\"/([^/]+/[^/]+)/\", url)\n self.assertTrue(re_search is not None)\n repo_name = re_search.groups()[0]\n\n self.assertEqual(repo_name, f\"{USER}/test-trainer\")\n\n model = RegressionPreTrainedModel.from_pretrained(repo_name)\n self.assertEqual(model.a.item(), trainer.model.a.item())\n self.assertEqual(model.b.item(), trainer.model.b.item())\n\n def test_push_to_hub_in_organization(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(output_dir=tmp_dir)\n trainer.save_model()\n trainer = get_regression_trainer(\n output_dir=os.path.join(tmp_dir, \"test-trainer-org\"),\n push_to_hub=True,\n hub_model_id=\"valid_org/test-trainer-org\",\n hub_token=self._token,\n )\n url = trainer.push_to_hub()\n\n # Extract repo_name from the url\n re_search = re.search(ENDPOINT_STAGING + r\"/([^/]+/[^/]+)/\", url)\n self.assertTrue(re_search is not None)\n repo_name = re_search.groups()[0]\n self.assertEqual(repo_name, \"valid_org/test-trainer-org\")\n\n model = RegressionPreTrainedModel.from_pretrained(\"valid_org/test-trainer-org\")\n self.assertEqual(model.a.item(), trainer.model.a.item())\n self.assertEqual(model.b.item(), trainer.model.b.item())\n\n def get_commit_history(self, repo):\n commit_logs = subprocess.run(\n \"git log\".split(),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n check=True,\n encoding=\"utf-8\",\n cwd=repo,\n ).stdout\n commits = commit_logs.split(\"\\n\\n\")[1::2]\n return [commit.strip() for commit in commits]\n\n def test_push_to_hub_with_saves_each_epoch(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=os.path.join(tmp_dir, \"test-trainer-epoch\"),\n push_to_hub=True,\n hub_token=self._token,\n save_strategy=\"epoch\",\n )\n trainer.train()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n _ = 
Repository(tmp_dir, clone_from=f\"{USER}/test-trainer-epoch\", use_auth_token=self._token)\n commits = self.get_commit_history(tmp_dir)\n expected_commits = [f\"Training in progress, epoch {i}\" for i in range(3, 0, -1)]\n expected_commits.append(\"initial commit\")\n self.assertListEqual(commits, expected_commits)\n\n def test_push_to_hub_with_saves_each_n_steps(self):\n num_gpus = max(1, get_gpu_count())\n if num_gpus > 2:\n return\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=os.path.join(tmp_dir, \"test-trainer-step\"),\n push_to_hub=True,\n hub_token=self._token,\n save_strategy=\"steps\",\n save_steps=5,\n )\n trainer.train()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n _ = Repository(tmp_dir, clone_from=f\"{USER}/test-trainer-step\", use_auth_token=self._token)\n commits = self.get_commit_history(tmp_dir)\n total_steps = 20 // num_gpus\n expected_commits = [f\"Training in progress, step {i}\" for i in range(total_steps, 0, -5)]\n expected_commits.append(\"initial commit\")\n self.assertListEqual(commits, expected_commits)\n\n\n@require_torch\n@require_optuna\nclass TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):\n def setUp(self):\n args = TrainingArguments(\".\")\n self.n_epochs = args.num_train_epochs\n self.batch_size = args.train_batch_size\n\n def test_hyperparameter_search(self):\n class MyTrialShortNamer(TrialShortNamer):\n DEFAULTS = {\"a\": 0, \"b\": 0}\n\n def hp_space(trial):\n return {}\n\n def model_init(trial):\n if trial is not None:\n a = trial.suggest_int(\"a\", -4, 4)\n b = trial.suggest_int(\"b\", -4, 4)\n else:\n a = 0\n b = 0\n config = RegressionModelConfig(a=a, b=b, double_output=False)\n\n return RegressionPreTrainedModel(config)\n\n def hp_name(trial):\n return MyTrialShortNamer.shortname(trial.params)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=tmp_dir,\n learning_rate=0.1,\n logging_steps=1,\n evaluation_strategy=IntervalStrategy.EPOCH,\n save_strategy=IntervalStrategy.EPOCH,\n num_train_epochs=4,\n disable_tqdm=True,\n load_best_model_at_end=True,\n logging_dir=\"runs\",\n run_name=\"test\",\n model_init=model_init,\n )\n trainer.hyperparameter_search(direction=\"minimize\", hp_space=hp_space, hp_name=hp_name, n_trials=4)\n\n\n@require_torch\n@require_ray\nclass TrainerHyperParameterRayIntegrationTest(unittest.TestCase):\n def setUp(self):\n args = TrainingArguments(\".\")\n self.n_epochs = args.num_train_epochs\n self.batch_size = args.train_batch_size\n\n def ray_hyperparameter_search(self):\n class MyTrialShortNamer(TrialShortNamer):\n DEFAULTS = {\"a\": 0, \"b\": 0}\n\n def hp_space(trial):\n from ray import tune\n\n return {\n \"a\": tune.randint(-4, 4),\n \"b\": tune.randint(-4, 4),\n }\n\n def model_init(config):\n if config is None:\n a = 0\n b = 0\n else:\n a = config[\"a\"]\n b = config[\"b\"]\n model_config = RegressionModelConfig(a=a, b=b, double_output=False)\n\n return RegressionPreTrainedModel(model_config)\n\n def hp_name(params):\n return MyTrialShortNamer.shortname(params)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=tmp_dir,\n learning_rate=0.1,\n logging_steps=1,\n evaluation_strategy=IntervalStrategy.EPOCH,\n save_strategy=IntervalStrategy.EPOCH,\n num_train_epochs=4,\n disable_tqdm=True,\n load_best_model_at_end=True,\n logging_dir=\"runs\",\n run_name=\"test\",\n model_init=model_init,\n )\n trainer.hyperparameter_search(\n direction=\"minimize\", 
hp_space=hp_space, hp_name=hp_name, backend=\"ray\", n_trials=4\n )\n\n def test_hyperparameter_search(self):\n self.ray_hyperparameter_search()\n\n def test_hyperparameter_search_ray_client(self):\n import ray\n from ray.util.client.ray_client_helpers import ray_start_client_server\n\n with ray_start_client_server():\n assert ray.util.client.ray.is_connected()\n self.ray_hyperparameter_search()\n\n\n@require_torch\n@require_sigopt\nclass TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):\n def setUp(self):\n args = TrainingArguments(\".\")\n self.n_epochs = args.num_train_epochs\n self.batch_size = args.train_batch_size\n\n def test_hyperparameter_search(self):\n class MyTrialShortNamer(TrialShortNamer):\n DEFAULTS = {\"a\": 0, \"b\": 0}\n\n def hp_space(trial):\n return [\n {\"bounds\": {\"min\": -4, \"max\": 4}, \"name\": \"a\", \"type\": \"int\"},\n {\"bounds\": {\"min\": -4, \"max\": 4}, \"name\": \"b\", \"type\": \"int\"},\n ]\n\n def model_init(trial):\n if trial is not None:\n a = trial.assignments[\"a\"]\n b = trial.assignments[\"b\"]\n else:\n a = 0\n b = 0\n config = RegressionModelConfig(a=a, b=b, double_output=False)\n\n return RegressionPreTrainedModel(config)\n\n def hp_name(trial):\n return MyTrialShortNamer.shortname(trial.assignments)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n trainer = get_regression_trainer(\n output_dir=tmp_dir,\n learning_rate=0.1,\n logging_steps=1,\n evaluation_strategy=IntervalStrategy.EPOCH,\n save_strategy=IntervalStrategy.EPOCH,\n num_train_epochs=4,\n disable_tqdm=True,\n load_best_model_at_end=True,\n logging_dir=\"runs\",\n run_name=\"test\",\n model_init=model_init,\n )\n trainer.hyperparameter_search(\n direction=\"minimize\", hp_space=hp_space, hp_name=hp_name, backend=\"sigopt\", n_trials=4\n )\n\n\noptim_test_params = []\nif is_torch_available():\n default_adam_kwargs = {\n \"betas\": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),\n \"eps\": TrainingArguments.adam_epsilon,\n \"lr\": TrainingArguments.learning_rate,\n }\n\n optim_test_params = [\n (\n OptimizerNames.ADAMW_HF,\n transformers.optimization.AdamW,\n default_adam_kwargs,\n ),\n (\n OptimizerNames.ADAMW_HF.value,\n transformers.optimization.AdamW,\n default_adam_kwargs,\n ),\n (\n OptimizerNames.ADAMW_TORCH,\n torch.optim.AdamW,\n default_adam_kwargs,\n ),\n (\n OptimizerNames.ADAFACTOR,\n transformers.optimization.Adafactor,\n {\n \"scale_parameter\": False,\n \"relative_step\": False,\n \"lr\": TrainingArguments.learning_rate,\n },\n ),\n ]\n if is_apex_available():\n import apex\n\n optim_test_params.append(\n (\n OptimizerNames.ADAMW_APEX_FUSED,\n apex.optimizers.FusedAdam,\n default_adam_kwargs,\n )\n )\n\n\n@require_torch\nclass TrainerOptimizerChoiceTest(unittest.TestCase):\n def check_optim_and_kwargs(self, optim: OptimizerNames, mandatory_kwargs, expected_cls):\n args = TrainingArguments(optim=optim, output_dir=\"None\")\n actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)\n self.assertEqual(expected_cls, actual_cls)\n self.assertIsNotNone(optim_kwargs)\n\n for p, v in mandatory_kwargs.items():\n self.assertTrue(p in optim_kwargs)\n actual_v = optim_kwargs[p]\n self.assertTrue(actual_v == v, f\"Failed check for {p}. 
Expected {v}, but got {actual_v}.\")\n\n @parameterized.expand(optim_test_params, skip_on_empty=True)\n def test_optim_supported(self, name: str, expected_cls, mandatory_kwargs):\n # exercises all the valid --optim options\n self.check_optim_and_kwargs(name, mandatory_kwargs, expected_cls)\n\n trainer = get_regression_trainer(optim=name)\n trainer.train()\n\n def test_fused_adam(self):\n # Pretend that apex is installed and mock apex.optimizers.FusedAdam exists.\n # Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam, but only has to return a\n # class called, so mocking apex.optimizers.FusedAdam should be fine for testing and allow\n # the test to run without requiring an apex installation.\n mock = Mock()\n modules = {\n \"apex\": mock,\n \"apex.optimizers\": mock.optimizers,\n \"apex.optimizers.FusedAdam\": mock.optimizers.FusedAdam,\n }\n with patch.dict(\"sys.modules\", modules):\n self.check_optim_and_kwargs(\n OptimizerNames.ADAMW_APEX_FUSED,\n default_adam_kwargs,\n mock.optimizers.FusedAdam,\n )\n\n def test_fused_adam_no_apex(self):\n args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir=\"None\")\n\n # Pretend that apex does not exist, even if installed. By setting apex to None, importing\n # apex will fail even if apex is installed.\n with patch.dict(\"sys.modules\", {\"apex.optimizers\": None}):\n with self.assertRaises(ValueError):\n Trainer.get_optimizer_cls_and_kwargs(args)\n", "# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset\n\nfrom .deepspeed import is_deepspeed_zero3_enabled\nfrom .trainer import Trainer\nfrom .trainer_utils import PredictionOutput\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass Seq2SeqTrainer(Trainer):\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n max_length: Optional[int] = None,\n num_beams: Optional[int] = None,\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init `compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (`Dataset`, *optional*):\n Pass a dataset if you wish to override `self.eval_dataset`. If it is an `datasets.Dataset`, columns not\n accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`\n method.\n ignore_keys (`List[str]`, *optional*):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (`str`, *optional*, defaults to `\"eval\"`):\n An optional prefix to be used as the metrics key prefix. 
For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is `\"eval\"` (default)\n max_length (`int`, *optional*):\n The maximum target length to use when predicting with the generate method.\n num_beams (`int`, *optional*):\n Number of beams for beam search that will be used when predicting with the generate method. 1 means no\n beam search.\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n self._max_length = max_length if max_length is not None else self.args.generation_max_length\n self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams\n return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)\n\n def predict(\n self,\n test_dataset: Dataset,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"test\",\n max_length: Optional[int] = None,\n num_beams: Optional[int] = None,\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in `evaluate()`.\n\n Args:\n test_dataset (`Dataset`):\n Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the\n `model.forward()` method are automatically removed. Has to implement the method `__len__`\n ignore_keys (`List[str]`, *optional*):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (`str`, *optional*, defaults to `\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is `\"eval\"` (default)\n max_length (`int`, *optional*):\n The maximum target length to use when predicting with the generate method.\n num_beams (`int`, *optional*):\n Number of beams for beam search that will be used when predicting with the generate method. 1 means no\n beam search.\n\n <Tip>\n\n If your predictions or labels have different sequence lengths (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n </Tip>\n\n Returns: *NamedTuple* A namedtuple with the following keys:\n\n - predictions (`np.ndarray`): The predictions on `test_dataset`.\n - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).\n - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained\n labels).\n \"\"\"\n self._max_length = max_length if max_length is not None else self.args.generation_max_length\n self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams\n return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n # XXX: adapt synced_gpus for fairscale as well\n gen_kwargs = {\n \"max_length\": self._max_length if self._max_length is not None else self.model.config.max_length,\n \"num_beams\": self._num_beams if self._num_beams is not None else self.model.config.num_beams,\n \"synced_gpus\": True if is_deepspeed_zero3_enabled() else False,\n }\n\n # prepare generation inputs\n # some encoder-decoder models can have varying encder's and thus\n # varying model input names\n if hasattr(self.model, \"encoder\") and self.model.encoder.main_input_name != self.model.main_input_name:\n generation_inputs = inputs[self.model.encoder.main_input_name]\n else:\n generation_inputs = inputs[self.model.main_input_name]\n\n generated_tokens = self.model.generate(\n generation_inputs,\n attention_mask=inputs.get(\"attention_mask\", None),\n **gen_kwargs,\n )\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs[\"max_length\"])\n\n with torch.no_grad():\n with self.autocast_smart_context_manager():\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs, inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return (loss, None, None)\n\n if has_labels:\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n labels = self._pad_tensors_to_max_len(labels, 
gen_kwargs[\"max_length\"])\n else:\n labels = None\n\n return (loss, generated_tokens, labels)\n\n def _pad_tensors_to_max_len(self, tensor, max_length):\n if self.tokenizer is not None and hasattr(self.tokenizer, \"pad_token_id\"):\n # If PAD token is not defined at least EOS token has to be defined\n pad_token_id = (\n self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id\n )\n else:\n if self.model.config.pad_token_id is not None:\n pad_token_id = self.model.config.pad_token_id\n else:\n raise ValueError(\"Pad_token_id must be set in the configuration of the model, in order to pad tensors\")\n\n padded_tensor = pad_token_id * torch.ones(\n (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device\n )\n padded_tensor[:, : tensor.shape[-1]] = tensor\n return padded_tensor\n", "# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 CTRL model.\"\"\"\n\nimport warnings\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput\nfrom ...modeling_tf_utils import (\n TFCausalLanguageModelingLoss,\n TFPreTrainedModel,\n TFSequenceClassificationLoss,\n TFSharedEmbeddings,\n get_initializer,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_ctrl import CTRLConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"ctrl\"\n_CONFIG_FOR_DOC = \"CTRLConfig\"\n_TOKENIZER_FOR_DOC = \"CTRLTokenizer\"\n\nTF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"ctrl\"\n # See all CTRL models at https://huggingface.co/models?filter=ctrl\n]\n\n\ndef angle_defn(pos, i, d_model_size):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size)\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model_size):\n # create the sinusoidal pattern for the positional encoding\n angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)\n\n sines = np.sin(angle_rads[:, 0::2])\n cosines = np.cos(angle_rads[:, 1::2])\n pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1))\n\n return pos_encoding\n\n\ndef scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):\n # calculate attention\n matmul_qk = tf.matmul(q, k, transpose_b=True)\n\n dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n if mask is not None:\n scaled_attention_logits += tf.cast(mask * -1e4, dtype=scaled_attention_logits.dtype)\n\n if attention_mask is not None:\n # Apply the attention mask\n attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype)\n 
scaled_attention_logits = scaled_attention_logits + attention_mask\n\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_weights = attention_weights * head_mask\n\n output = tf.matmul(attention_weights, v)\n\n return output, attention_weights\n\n\nclass TFMultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):\n super().__init__(**kwargs)\n self.num_heads = num_heads\n self.d_model_size = d_model_size\n self.output_attentions = output_attentions\n\n self.depth = int(d_model_size / self.num_heads)\n\n self.Wq = tf.keras.layers.Dense(d_model_size, name=\"Wq\")\n self.Wk = tf.keras.layers.Dense(d_model_size, name=\"Wk\")\n self.Wv = tf.keras.layers.Dense(d_model_size, name=\"Wv\")\n\n self.dense = tf.keras.layers.Dense(d_model_size, name=\"dense\")\n\n def split_into_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):\n batch_size = shape_list(q)[0]\n\n q = self.Wq(q)\n k = self.Wk(k)\n v = self.Wv(v)\n\n q = self.split_into_heads(q, batch_size)\n k = self.split_into_heads(k, batch_size)\n v = self.split_into_heads(v, batch_size)\n\n if layer_past is not None:\n past_key, past_value = tf.unstack(layer_past, axis=0)\n k = tf.concat((past_key, k), axis=-2)\n v = tf.concat((past_value, v), axis=-2)\n\n if use_cache:\n present = tf.stack((k, v), axis=0)\n else:\n present = (None,)\n\n output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)\n scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])\n attn = output[1]\n original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))\n output = self.dense(original_size_attention)\n outputs = (output, present)\n\n if output_attentions:\n outputs = outputs + (attn,)\n\n return outputs\n\n\nclass TFPointWiseFeedForwardLayer(tf.keras.layers.Layer):\n def __init__(self, d_model_size, dff, **kwargs):\n super().__init__(**kwargs)\n\n self.dense_0 = tf.keras.layers.Dense(dff, activation=\"relu\", name=\"0\")\n self.dense_2 = tf.keras.layers.Dense(d_model_size, name=\"2\")\n\n def call(self, inputs, trainable=False):\n dense_0_output = self.dense_0(inputs)\n dense_2_output = self.dense_2(dense_0_output)\n\n return dense_2_output\n\n\nclass TFEncoderLayer(tf.keras.layers.Layer):\n def __init__(\n self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs\n ):\n super().__init__(**kwargs)\n\n self.output_attentions = output_attentions\n\n self.multi_head_attention = TFMultiHeadAttention(\n d_model_size, num_heads, output_attentions=self.output_attentions, name=\"multi_head_attention\"\n )\n self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name=\"ffn\")\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm1\")\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm2\")\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):\n normed = self.layernorm1(x)\n attn_outputs = self.multi_head_attention(\n normed,\n normed,\n normed,\n mask,\n layer_past,\n 
attention_mask,\n head_mask,\n use_cache,\n output_attentions,\n training=training,\n )\n attn_output = attn_outputs[0]\n attn_output = self.dropout1(attn_output, training=training)\n out1 = x + attn_output\n\n out2 = self.layernorm2(out1)\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = out1 + ffn_output\n\n outputs = (out2,) + attn_outputs[1:]\n return outputs\n\n\n@keras_serializable\nclass TFCTRLMainLayer(tf.keras.layers.Layer):\n config_class = CTRLConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n self.use_cache = config.use_cache\n self.return_dict = config.use_return_dict\n\n self.d_model_size = config.n_embd\n self.num_layers = config.n_layer\n\n self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)\n\n self.w = TFSharedEmbeddings(\n config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name=\"w\"\n )\n\n self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)\n self.h = [\n TFEncoderLayer(\n config.n_embd,\n config.n_head,\n config.dff,\n config.resid_pdrop,\n config.layer_norm_epsilon,\n self.output_attentions,\n name=f\"h_._{i}\",\n )\n for i in range(config.n_layer)\n ]\n self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"layernorm\")\n\n def get_input_embeddings(self):\n return self.w\n\n def set_input_embeddings(self, value):\n self.w.weight = value\n self.w.vocab_size = shape_list(value)[0]\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n raise NotImplementedError\n\n def call(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n # If using past key value states, only the last tokens\n # should be given as an input\n if inputs[\"past\"] is not None:\n if inputs[\"input_ids\"] is not None:\n inputs[\"input_ids\"] = inputs[\"input_ids\"][:, -1:]\n if inputs[\"inputs_embeds\"] is not None:\n inputs[\"inputs_embeds\"] = inputs[\"inputs_embeds\"][:, -1:]\n if inputs[\"token_type_ids\"] is not None:\n inputs[\"token_type_ids\"] = inputs[\"token_type_ids\"][:, -1:]\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n inputs[\"input_ids\"] = tf.reshape(inputs[\"input_ids\"], [-1, input_shape[-1]])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if 
inputs[\"past\"] is None:\n past_length = 0\n inputs[\"past\"] = [None] * len(self.h)\n else:\n past_length = shape_list(inputs[\"past\"][0][0])[-2]\n if inputs[\"position_ids\"] is None:\n inputs[\"position_ids\"] = tf.expand_dims(\n tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0\n )\n inputs[\"position_ids\"] = tf.tile(inputs[\"position_ids\"], [input_shape[0], 1])\n\n # Attention mask.\n if inputs[\"attention_mask\"] is not None:\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n inputs[\"attention_mask\"] = tf.reshape(inputs[\"attention_mask\"], (input_shape[0], 1, 1, input_shape[1]))\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n one_cst = tf.constant(1.0)\n ten_thousand_cst = tf.constant(-10000.0)\n inputs[\"attention_mask\"] = tf.cast(inputs[\"attention_mask\"], dtype=one_cst.dtype)\n inputs[\"attention_mask\"] = tf.multiply(tf.subtract(one_cst, inputs[\"attention_mask\"]), ten_thousand_cst)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n if inputs[\"head_mask\"] is not None:\n raise NotImplementedError\n else:\n inputs[\"head_mask\"] = [None] * self.num_layers\n\n if inputs[\"token_type_ids\"] is not None:\n inputs[\"token_type_ids\"] = tf.reshape(\n inputs[\"token_type_ids\"], [-1, shape_list(inputs[\"token_type_ids\"])[-1]]\n )\n token_type_embeds = self.w(inputs[\"token_type_ids\"], mode=\"embedding\")\n token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))\n else:\n token_type_embeds = tf.constant(0.0)\n inputs[\"position_ids\"] = tf.reshape(inputs[\"position_ids\"], [-1, shape_list(inputs[\"position_ids\"])[-1]])\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.w(inputs[\"input_ids\"], mode=\"embedding\")\n seq_len = input_shape[-1]\n mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\n\n inputs[\"inputs_embeds\"] *= tf.math.sqrt(tf.cast(self.d_model_size, inputs[\"inputs_embeds\"].dtype))\n\n pos_embeds = tf.gather(self.pos_encoding, inputs[\"position_ids\"])\n pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)\n hidden_states = inputs[\"inputs_embeds\"] + pos_embeds + token_type_embeds\n\n hidden_states = self.dropout(hidden_states, training=inputs[\"training\"])\n\n output_shape = input_shape + [shape_list(hidden_states)[-1]]\n presents = () if inputs[\"use_cache\"] else None\n all_hidden_states = () if inputs[\"output_hidden_states\"] else None\n all_attentions = () if inputs[\"output_attentions\"] else None\n for i, (h, layer_past) in enumerate(zip(self.h, inputs[\"past\"])):\n if inputs[\"output_hidden_states\"]:\n all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)\n outputs = h(\n hidden_states,\n mask,\n layer_past,\n inputs[\"attention_mask\"],\n inputs[\"head_mask\"][i],\n 
inputs[\"use_cache\"],\n inputs[\"output_attentions\"],\n training=inputs[\"training\"],\n )\n hidden_states, present = outputs[:2]\n\n if inputs[\"use_cache\"]:\n presents = presents + (present,)\n\n if inputs[\"output_attentions\"]:\n all_attentions = all_attentions + (outputs[2],)\n\n hidden_states = self.layernorm(hidden_states)\n hidden_states = tf.reshape(hidden_states, output_shape)\n if inputs[\"output_hidden_states\"]:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if inputs[\"output_attentions\"]:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]\n all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)\n\n if not inputs[\"return_dict\"]:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)\n\n return TFBaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nclass TFCTRLPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = CTRLConfig\n base_model_prefix = \"transformer\"\n\n\nCTRL_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the\n first positional argument :\n\n - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n </Tip>\n\n Parameters:\n config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nCTRL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of\n input past key value states).\n\n Indices of input sequence tokens in the vocabulary.\n\n If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.\n\n Indices can be obtained using [`CTRLTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past` output below). Can be used to speed up sequential decoding. The token ids which have their past\n given to this model should not be passed as input ids as they have already been computed.\n attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used\n in eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLModel(TFCTRLPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n outputs = self.transformer(\n input_ids=inputs[\"input_ids\"],\n past=inputs[\"past\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n return outputs\n\n def serving_output(self, output):\n pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFBaseModelOutputWithPast(\n last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns\n )\n\n\nclass TFCTRLLMHead(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n super().build(input_shape)\n\n def get_output_embeddings(self):\n return self.input_embeddings\n\n def set_output_embeddings(self, value):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = 
shape_list(value)[0]\n\n def get_bias(self):\n return {\"bias\": self.bias}\n\n def set_bias(self, value):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n self.lm_head = TFCTRLLMHead(config, self.transformer.w, name=\"lm_head\")\n\n def get_lm_head(self):\n return self.lm_head\n\n def get_prefix_bias_name(self):\n warnings.warn(\"The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.\", FutureWarning)\n return self.name + \"/\" + self.lm_head.name\n\n def prepare_inputs_for_generation(self, inputs, past, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past:\n inputs = tf.expand_dims(inputs[:, -1], -1)\n\n return {\"input_ids\": inputs, \"past\": past, \"use_cache\": kwargs[\"use_cache\"]}\n\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFCausalLMOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n **kwargs,\n ):\n r\"\"\"\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the cross entropy classification loss. 
Indices should be in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n transformer_outputs = self.transformer(\n input_ids=inputs[\"input_ids\"],\n past=inputs[\"past\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n hidden_states = transformer_outputs[0]\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if inputs[\"labels\"] is not None:\n # shift labels to the left and cut last logit token\n logits = logits[:, :-1]\n labels = inputs[\"labels\"][:, 1:]\n loss = self.hf_compute_loss(labels, logits)\n\n if not inputs[\"return_dict\"]:\n output = (logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFCausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def serving_output(self, output):\n pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFCausalLMOutputWithPast(logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n The CTRL Model transformer with a sequence classification head on top (linear layer).\n\n [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1, GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n self.classifier = tf.keras.layers.Dense(\n config.num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"classifier\",\n use_bias=False,\n )\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n def get_output_embeddings(self):\n return self.transformer.w\n\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n past=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n **kwargs,\n ):\n r\"\"\"\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n past=past,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n\n transformer_outputs = self.transformer(\n input_ids=inputs[\"input_ids\"],\n past=inputs[\"past\"],\n attention_mask=inputs[\"attention_mask\"],\n token_type_ids=inputs[\"token_type_ids\"],\n position_ids=inputs[\"position_ids\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n hidden_states = transformer_outputs[0]\n logits = self.classifier(hidden_states)\n in_logits = None\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if inputs[\"input_ids\"] is not None:\n sequence_lengths = (\n tf.reduce_sum(\n tf.cast(\n tf.math.not_equal(inputs[\"input_ids\"], self.config.pad_token_id),\n dtype=inputs[\"input_ids\"].dtype,\n ),\n -1,\n keepdims=False,\n )\n - 1\n )\n in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be \"\n f\"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n loss = None\n\n if inputs[\"labels\"] is not None:\n if input_ids is not None:\n batch_size, sequence_length = shape_list(inputs[\"input_ids\"])[:2]\n else:\n batch_size, sequence_length = shape_list(inputs[\"inputs_embeds\"])[:2]\n assert (\n self.config.pad_token_id is not None or batch_size == 1\n ), \"Cannot handle batch sizes > 1 if no padding token is defined.\"\n\n if not tf.is_tensor(sequence_lengths):\n in_logits = logits[0:batch_size, sequence_lengths]\n\n loss = self.hf_compute_loss(\n tf.reshape(inputs[\"labels\"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])\n )\n\n pooled_logits = in_logits if in_logits is not None else logits\n\n if not inputs[\"return_dict\"]:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=pooled_logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output\n def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n" ]
[ [ "torch.optim.lr_scheduler.LambdaLR", "torch.randint", "torch.zeros", "numpy.all", "torch.cuda.is_available", "torch.allclose", "numpy.random.randint", "torch.ones", "numpy.allclose", "torch.randn", "torch.tensor", "torch.nn.functional.mse_loss", "torch.nn.Linear", "numpy.random.rand", "torch.cuda.device_count", "numpy.abs", "numpy.random.seed", "numpy.array_equal", "torch.nn.LayerNorm", "numpy.random.normal", "torch.nn.DataParallel" ], [ "torch.no_grad", "torch.ones" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.stack", "tensorflow.cast", "numpy.concatenate", "tensorflow.math.not_equal", "numpy.arange", "numpy.sin", "tensorflow.subtract", "tensorflow.gather", "tensorflow.tile", "tensorflow.matmul", "tensorflow.is_tensor", "tensorflow.unstack", "numpy.power", "tensorflow.keras.layers.Dense", "tensorflow.nn.softmax", "tensorflow.math.sqrt", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "numpy.cos", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.keras.layers.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
6895mahfuzgit/Linear_Algebra_for_Machine_Learning
[ "3f266391491d9ab99e53a3547900c6b1bd657af1", "3f266391491d9ab99e53a3547900c6b1bd657af1", "3f266391491d9ab99e53a3547900c6b1bd657af1" ]
[ "eigonvector_using_numpy.py", "matrix_inverse_using_numpy.py", "trace_operator.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 18 02:37:53 2021\n\n@author: Mahfuz_Shazol\n\"\"\"\n\nimport numpy as np\n\nA=np.array([\n    [-1,4],\n    [2,-2]\n    ])\n\nlambdas,V=np.linalg.eig(A)\nprint('lambdas',lambdas)\n\n#each column of V is a separate eigenvector\nprint('V',V)\n\n#A @ v == Lamda * v\n\nv=V[:,0]\nprint(v)\nLamda=lambdas[0]\n\nr1=np.dot(A,v)\nprint(r1)\n\nr2=np.dot(Lamda,v)\nprint(r2)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 15 23:57:35 2021\n\n@author: Mahfuz_Shazol\n\"\"\"\nimport numpy as np\n\nX=np.array([[4,2],[-5,-3]])\nX_inv=np.linalg.inv(X)\nprint('Inverse of X ',X_inv)\nY=np.array([4,-7])\n\n#w=(X**-1)Y\nresult=np.dot(X_inv,Y)\nprint(result)\n\n\n#Y=Xw\nresult_y=np.dot(X,result)\nprint(result_y)", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 17 02:09:53 2021\n\n@author: Mahfuz_Shazol\n\"\"\"\n\nimport numpy as np\nimport torch as th\n\nA=np.array([[25,2],\n            [5,4]])\n\nA_trace=np.trace(A)\nprint(A_trace)\n\n# Tr(A)=Tr(A.T)\nresult1=np.trace(A)\nprint(result1)\nresult2=np.trace(A.T)\nprint(result2)\nprint('Tr(A)=Tr(A.T) Ans:',result1==result2)\n\n\n#Calculate Frobenius norm AF=(Tr(A A.T))**(1/2)\nA_p=th.tensor([\n    [-1,2],\n    [3,-2],\n    [5,7],\n    ])\n\n\ncalculated_frobenius_norm=(th.trace(th.matmul(th.as_tensor(A),th.as_tensor(A.T))))**(1/2)\nprint('calculated_frobenius_norm Ans:',calculated_frobenius_norm)\n\nnorm_result=np.linalg.norm(A)\nprint(norm_result)\n\n\n\n\n\n\n\n\n\n \n\n\n" ]
[ [ "numpy.linalg.eig", "numpy.dot", "numpy.array" ], [ "numpy.linalg.inv", "numpy.dot", "numpy.array" ], [ "numpy.linalg.norm", "torch.tensor", "numpy.array", "numpy.trace", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuanliangxie/YOLOv3_simple_baseline
[ "325e2963ae770e6f45912f3142941d3bddaf9d6e", "325e2963ae770e6f45912f3142941d3bddaf9d6e" ]
[ "models/model/poly_yolo.py", "models/loss/centernet_hourglass_loss_module.py" ]
[ "import torch.nn as nn\nimport models.head.poly_yolo_head as yolov3_head\nimport models.backbone.poly_darknet.poly_darknet as darknet53\nimport models.backbone.poly_darknet.poly_neck as neck\nimport models.loss.poly_yolo_loss_anchor_free_module as loss\nfrom utils.logger import print_logger\nfrom models.layer.layer_fundation import Conv2dBatchLeaky as Convolutional\nimport numpy as np\nimport torch\nfrom utils.utils_select_device import select_device\n\n\nclass yolov3(nn.Module):\n\tdef __init__(self, config, logger=None, init_weight=True):\n\t\tsuper().__init__()\n\t\tself.backbone = darknet53.darknet53()\n\t\tself.neck = neck.neck()\n\t\tself.head = yolov3_head.yolov3_head(nAnchors=1, nClass=config[\"yolo\"][\"classes\"])\n\t\tself.loss = loss.yolo_loss_module(config)\n\t\tif logger == None:\n\t\t\tself.logger = print_logger()\n\t\telse:\n\t\t\tself.logger = logger\n\t\tif init_weight:\n\t\t\tself.__init_weights()\n\n\tdef forward(self, input, target=None):\n\t\tfeatures = self.backbone(input)\n\t\tneck_features = self.neck(features)\n\t\tyolo_loss_input = self.head(neck_features)\n\t\tloss_or_output = self.loss(yolo_loss_input, target)\n\t\treturn loss_or_output #input=416,[13, 26, 52]\n\n\tdef __init_weights(self):\n\n\t\t\" Note :nn.Conv2d nn.BatchNorm2d'initing modes are uniform \"\n\t\tfor m in self.modules():#\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\ttorch.nn.init.normal_(m.weight.data, 0.0, 0.01)\n\t\t\t\t# torch.nn.init.constant_(m.weight.data,0.001)#在测试时为了看模型有没有弄错,进行的改动\n\t\t\t\tif m.bias is not None:\n\t\t\t\t\tm.bias.data.zero_()\n\t\t\t\tprint(\"initing {}\".format(m))\n\n\t\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\t\ttorch.nn.init.constant_(m.weight.data, 1.0)\n\t\t\t\ttorch.nn.init.constant_(m.bias.data, 0.0)\n\n\t\t\t\tprint(\"initing {}\".format(m))\n\n\tdef load_darknet_weights(self, weight_file, cutoff=52):#加载成功\n\t\t\"https://github.com/ultralytics/yolov3/blob/master/models.py\"\n\n\t\tprint(\"load darknet weights : \", weight_file)\n\n\t\twith open(weight_file, 'rb') as f:\n\t\t\t_ = np.fromfile(f, dtype=np.int32, count=5)\n\t\t\tweights = np.fromfile(f, dtype=np.float32)\n\t\t\tprint(\"weights.shape:{}\".format(weights.shape))\n\t\tcount = 0\n\t\tptr = 0\n\t\tfor m in self.backbone.modules():\n\t\t\tif isinstance(m, Convolutional):\n\t\t\t\t# only initing backbone conv's weights\n\t\t\t\tif count == cutoff:\n\t\t\t\t\tbreak\n\t\t\t\tcount += 1\n\t\t\t\t#conv_layer = m._Convolutional__conv\n\t\t\t\tfor sub_m in m.modules():\n\t\t\t\t\tif isinstance(sub_m, nn.Conv2d):\n\t\t\t\t\t\tconv_layer = sub_m\n\t\t\t\t\telif isinstance(sub_m, nn.BatchNorm2d):\n\t\t\t\t\t\tbn_layer = sub_m\n\n\t\t\t\t# Load BN bias, weights, running mean and running variance\n\t\t\t\tnum_b = bn_layer.bias.numel() # Number of biases\n\t\t\t\t# Bias\n\t\t\t\tbn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias.data)\n\t\t\t\tbn_layer.bias.data.copy_(bn_b)\n\t\t\t\tptr += num_b\n\t\t\t\t# Weight\n\t\t\t\tbn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight.data)\n\t\t\t\tbn_layer.weight.data.copy_(bn_w)\n\t\t\t\tptr += num_b\n\t\t\t\t# Running Mean\n\t\t\t\tbn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)\n\t\t\t\tbn_layer.running_mean.data.copy_(bn_rm)\n\t\t\t\tptr += num_b\n\t\t\t\t# Running Var\n\t\t\t\tbn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)\n\t\t\t\tbn_layer.running_var.data.copy_(bn_rv)\n\t\t\t\tptr += num_b\n\n\t\t\t\tprint(\"loading weight 
{}\".format(bn_layer))\n\t\t\t\t# else:\n\t\t\t\t# # Load conv. bias\n\t\t\t\t# num_b = conv_layer.bias.numel()\n\t\t\t\t# conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias.data)\n\t\t\t\t# conv_layer.bias.data.copy_(conv_b)\n\t\t\t\t# ptr += num_b\n\t\t\t\t# Load conv. weights\n\t\t\t\tnum_w = conv_layer.weight.numel()\n\t\t\t\tconv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight.data)\n\t\t\t\tconv_layer.weight.data.copy_(conv_w)\n\t\t\t\tptr += num_w\n\t\t\t\tprint(\"loading weight {}\".format(conv_layer))\n\t\tprint(\"ptr:{}\".format(ptr))\n\t\tif ptr == weights.shape[0]:\n\t\t\tprint(\"convert success!\")\n\n\t# def count_darknet_count(self):\n\t# count = 0\n\t# for m in self.backbone.modules():\n\t# if isinstance(m, Convolutional):\n\t# count += 1\n\t# print(\"count:\",count)\n\n\tdef load_darknet_pth_weights(self, pth_file, cutoff=52):\n\t\tprint(\"load darknet_coco_pth_weights : \", pth_file)\n\t\tcount = 0\n\t\tpretrain_coco_weight_darknet = torch.load(pth_file)\n\t\tlist_keys = list(pretrain_coco_weight_darknet.keys())\n\t\tkeys_count = 0\n\t\tfor m in self.backbone.modules():\n\t\t\tif isinstance(m, Convolutional):\n\t\t\t\t# only initing backbone conv's weights\n\t\t\t\tif count == cutoff:\n\t\t\t\t\tbreak\n\t\t\t\tcount += 1\n\t\t\t\t#conv_layer = m._Convolutional__conv\n\t\t\t\tfor sub_m in m.modules():\n\t\t\t\t\tif isinstance(sub_m, nn.Conv2d):\n\t\t\t\t\t\tconv_layer = sub_m\n\t\t\t\t\telif isinstance(sub_m, nn.BatchNorm2d):\n\t\t\t\t\t\tbn_layer = sub_m\n\t\t\t\tif 'conv' in list_keys[keys_count]:\n\t\t\t\t\tweight = pretrain_coco_weight_darknet[list_keys[keys_count]]\n\t\t\t\t\tconv_layer.weight.data.copy_(weight)\n\t\t\t\t\tkeys_count +=1\n\n\t\t\t\tif 'bn' in list_keys[keys_count]:\n\t\t\t\t\tif \"weight\" in list_keys[keys_count]:\n\t\t\t\t\t\tweight = pretrain_coco_weight_darknet[list_keys[keys_count]]\n\t\t\t\t\t\tbn_layer.weight.data.copy_(weight)\n\t\t\t\t\t\tkeys_count += 1\n\n\t\t\t\t\tif \"bias\" in list_keys[keys_count]:\n\t\t\t\t\t\tbias = pretrain_coco_weight_darknet[list_keys[keys_count]]\n\t\t\t\t\t\tbn_layer.bias.data.copy_(bias)\n\t\t\t\t\t\tkeys_count += 1\n\n\t\t\t\t\tif \"running_mean\" in list_keys[keys_count]:\n\t\t\t\t\t\trunning_mean = pretrain_coco_weight_darknet[list_keys[keys_count]]\n\t\t\t\t\t\tbn_layer.running_mean.data.copy_(running_mean)\n\t\t\t\t\t\tkeys_count += 1\n\n\t\t\t\t\tif \"running_var\" in list_keys[keys_count]:\n\t\t\t\t\t\trunning_var = pretrain_coco_weight_darknet[list_keys[keys_count]]\n\t\t\t\t\t\tbn_layer.running_var.data.copy_(running_var)\n\t\t\t\t\t\tkeys_count += 1\n\n\t\tprint(\"count:{},keys_count:{}\".format(count, keys_count))\n\t\tif keys_count == len(list_keys):\n\t\t\tprint(\"convert success!\")\n\nif __name__ == '__main__':\n\timport train.Voc_data_preprocess.params_init_voc as params_init\n\tconfig = params_init.TRAINING_PARAMS\n\tdevice = select_device(0)\n\ttorch.cuda.manual_seed_all(1)\n\ttorch.backends.cudnn.deterministic = True\n\ttorch.manual_seed(1)\n\tnp.random.seed(1)\n\tnet = yolov3(config)\n\tnet.to(device)\n\t# net.cpu()\n\t# net.count_darknet_count()\n\t# for idx, m in enumerate(net.backbone.layer[0].modules()):\n\t# print(idx, \"->\", m)\n\t# net.backbone.layer[0].parameters()\n\t#pretrain_coco_weight_darknet = torch.load(\"darknet53_weights_pytorch.pth\")\n\tnet.load_darknet_weights('../../weights/darknet53.conv.74')\n\t#net.load_darknet_pth_weights(pth_file = \"../../weights/darknet53_weights_pytorch.pth\")\n\tnet.eval()\n\timages = 
torch.ones((1,3,416,416)).to(device)\n\tyolo_loss_input = net(images)\n\tprint(yolo_loss_input[0].shape)\n\tprint(yolo_loss_input[0])\n\t\"\"\"\n\toutput:\n\ttensor([ 1.1618e-05, -2.5806e-04, -1.8426e-04, -1.0144e-06, -8.8483e-05,\n\t\t\t\t -2.9103e-05, -4.6486e-05, -5.9855e-05, -3.9318e-05, -4.0554e-05,\n\t\t\t\t -6.2083e-05, 2.8495e-05, -2.7813e-04], grad_fn=<SliceBackward>)\n\t\"\"\"\n\n", "from models.loss.centernet_loss import centernet_Loss\nimport torch.nn as nn\nimport torch\nclass centernet_loss_module(nn.Module):\n\tdef __init__(self, config, stride=4, nstack=2):\n\t\tsuper().__init__()\n\t\tself.nstack = nstack\n\t\tif nstack == 1:\n\t\t\tself.center_loss = centernet_Loss(config[\"model\"][\"classes\"], stride, config=config, device_id= config[\"device_id\"])\n\t\telif nstack == 2:\n\t\t\tself.center_loss1 = centernet_Loss(config[\"model\"][\"classes\"], stride, config=config, device_id= config[\"device_id\"])\n\t\t\tself.center_loss2 = centernet_Loss(config[\"model\"][\"classes\"], stride, config=config, device_id= config[\"device_id\"])\n\n\n\tdef forward(self, input, target=None):\n\t\tresult = []\n\t\tif self.nstack == 1:\n\t\t\tcls_pred, txty_pred, twth_pred = input[0]\n\t\t\tcenter_loss_input = torch.cat((txty_pred, twth_pred, cls_pred), dim=1)\n\t\t\tresult.append(self.center_loss(center_loss_input, target))\n\n\t\telif self.nstack == 2:\n\t\t\tif target == None:\n\t\t\t\tcls_pred, txty_pred, twth_pred = input[0]\n\t\t\t\tcenter_loss_input = torch.cat((txty_pred, twth_pred, cls_pred), dim=1)\n\t\t\t\tresult.append(self.center_loss2(center_loss_input, target))\n\t\t\telse:\n\t\t\t\t#input1\n\t\t\t\tcls_pred1, txty_pred1, twth_pred1 = input[0]\n\t\t\t\tcenter_loss_input1 = torch.cat((txty_pred1, twth_pred1, cls_pred1), dim=1)\n\t\t\t\tresult1 = self.center_loss1(center_loss_input1, target)\n\n\t\t\t\t#intput2\n\t\t\t\tcls_pred2, txty_pred2, twth_pred2 = input[1]\n\t\t\t\tcenter_loss_input2 = torch.cat((txty_pred2, twth_pred2, cls_pred2), dim=1)\n\t\t\t\tresult2 = self.center_loss2(center_loss_input2, target)\n\n\t\t\t\tresult3 = []\n\t\t\t\tfor sub_list in list(zip(result1, result2)):\n\t\t\t\t\tresult3.append(sub_list[0] + sub_list[1])\n\n\t\t\t\tresult.append(result3) #TODO:合并结果\n\t\treturn result\n" ]
[ [ "numpy.fromfile", "torch.ones", "numpy.random.seed", "torch.load", "torch.nn.init.constant_", "torch.manual_seed", "torch.from_numpy", "torch.nn.init.normal_", "torch.cuda.manual_seed_all" ], [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
udellgroup/gcimpute
[ "b29650e61785af904a3bff753ffc2995449883cf" ]
[ "gcimpute/marginal_imputation.py" ]
[ "import numpy as np\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom scipy.stats import norm, poisson\n\ndef weighted_quantile(values, quantiles, sample_weight=None, \n values_sorted=False, old_style=True):\n \"\"\" \n Very close to numpy.percentile, but supports weights. NOTE: quantiles should be in [0, 1]!\n :param values: numpy.array with data\n :param quantiles: array-like with many quantiles needed\n :param sample_weight: array-like of the same length as `array`\n :param values_sorted: bool, if True, then will avoid sorting of\n initial array\n :param old_style: if True, will correct output to be consistent\n with numpy.percentile.\n :return: numpy.array with computed quantiles.\n\n Acknowledgement: code from Alleo's answer in stackoverflow \n https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy\n \"\"\"\n values = np.array(values)\n quantiles = np.array(quantiles)\n if sample_weight is None:\n sample_weight = np.ones(len(values))\n sample_weight = np.array(sample_weight)\n assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \\\n 'quantiles should be in [0, 1]'\n\n if not values_sorted:\n sorter = np.argsort(values)\n values = values[sorter]\n sample_weight = sample_weight[sorter]\n\n weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight\n if old_style:\n # To be convenient with numpy.percentile\n weighted_quantiles -= weighted_quantiles[0]\n weighted_quantiles /= weighted_quantiles[-1]\n else:\n weighted_quantiles /= np.sum(sample_weight)\n return np.interp(quantiles, weighted_quantiles, values)\n\ndef inverse_ecdf(x, x_obs, DECIMAL_PRECISION = 3):\n \"\"\"\n computes the inverse ecdf (quantile) for x with ecdf given by data\n \"\"\"\n data = x_obs\n n = len(data)\n if n==0:\n print('No observation can be used for imputation')\n raise\n # round to avoid numerical errors in ceiling function\n quantile_indices = np.ceil(np.round_((n + 1) * x - 1, DECIMAL_PRECISION))\n quantile_indices = np.clip(quantile_indices, a_min=0,a_max=n-1).astype(int)\n sort = np.sort(data)\n return sort[quantile_indices]\n\n\ndef truncated_marginal_lower(x, x_obs):\n xmin = x_obs.min()\n loc = np.isclose(x_obs, xmin)\n q_lower = loc.mean()\n func = ECDF(x_obs[~loc])\n\n # from scores to lower & upper bounds\n lower = np.empty_like(x)\n upper = np.empty_like(x)\n loc_x = np.isclose(x, xmin)\n lower[loc_x] = -np.inf\n upper[loc_x] = norm.ppf(q_lower)\n # Put same values at non truncated entries for identification purpose from truncated entries\n # avoid 1 in scores, no need to avoid 0 since it is at least q_lower\n n = loc.sum()\n loc_x_nontrun = ~loc_x\n q_nontrun = q_lower + (1-q_lower) * func(x[loc_x_nontrun]) * n/(n+1)\n lower[loc_x_nontrun] = norm.ppf(q_nontrun)\n upper[loc_x_nontrun] = lower[loc_x_nontrun].copy()\n return lower, upper\n\ndef truncated_marginal_upper(x, x_obs):\n xmax = x_obs.max()\n loc = np.isclose(x_obs, xmax)\n q_upper = loc.mean()\n func = ECDF(x_obs[~loc])\n\n # from scores to lower & upper bounds\n lower = np.empty_like(x)\n upper = np.empty_like(x)\n loc_x = np.isclose(x, xmax)\n lower[loc_x] = norm.ppf(1-q_upper)\n upper[loc_x] = np.inf\n # Put same values at non truncated entries for identification purpose from truncated entries\n # avoid 0 in scores, no need to avoid 1 since it is at most 1-q_upper\n n = loc.sum()\n loc_x_nontrun = ~loc_x\n q_nontrun = (1-q_upper) * func(x[loc_x_nontrun])\n q_nontrun[q_nontrun == 0] = 1/(2*(n+1)) \n lower[loc_x_nontrun] = norm.ppf(q_nontrun)\n upper[loc_x_nontrun] = 
lower[loc_x_nontrun].copy()\n return lower, upper\n\ndef truncated_marginal_twoside(x, x_obs):\n xmin = x_obs.min()\n xmax = x_obs.max()\n loc_upper = np.isclose(x_obs, xmax)\n loc_lower = np.isclose(x_obs, xmin)\n q_upper = loc_upper.mean()\n q_lower = loc_lower.mean()\n loc_nontrun = ~(loc_upper | loc_lower)\n func = ECDF(x_obs[loc_nontrun])\n\n # from scores to lower & upper bounds\n lower = np.empty_like(x)\n upper = np.empty_like(x)\n loc_x_upper = np.isclose(x, xmax)\n loc_x_lower = np.isclose(x, xmin)\n lower[loc_x_lower] = -np.inf\n upper[loc_x_lower] = norm.ppf(q_lower)\n lower[loc_x_upper] = norm.ppf(1-q_upper)\n upper[loc_x_upper] = np.inf\n # Put same values at non truncated entries for identification purpose from truncated entries\n # no need to avoid 0 or 1 for scores at non truncated entries\n # the values range from q_lower to 1-q_upper\n loc_x_nontrun = ~(loc_x_upper | loc_x_lower)\n q_nontrun = q_lower + (1-q_lower-q_upper) * func(x[loc_x_nontrun])\n lower[loc_x_nontrun] = norm.ppf(q_nontrun)\n upper[loc_x_nontrun] = lower[loc_x_nontrun].copy()\n return lower, upper\n\ndef truncated_inverse_marginal_lower(q, x_obs, eps=1e-6):\n x = x_obs\n xmin = x.min()\n loc_lower = np.isclose(x, xmin)\n loc_nontrun = ~loc_lower\n \n q_lower = loc_lower.mean()\n x_nontrun = x[loc_nontrun]\n \n x_imp = np.empty_like(q)\n imp_lower = q<=q_lower+eps\n x_imp[imp_lower] = xmin\n imp_nontrun = ~imp_lower\n q_adjusted = (q[imp_nontrun]-q_lower)/(1-q_lower)\n x_imp[imp_nontrun] = np.quantile(x_nontrun, q_adjusted)\n \n return x_imp\n \ndef truncated_inverse_marginal_upper(q, x_obs, eps=1e-6):\n x = x_obs\n xmax = x.max()\n loc_upper = np.isclose(x, xmax)\n loc_nontrun = ~loc_upper\n \n q_upper = loc_upper.mean()\n x_nontrun = x[loc_nontrun]\n \n x_imp = np.empty_like(q)\n imp_upper = q>=1-q_upper-eps\n x_imp[imp_upper] = xmax\n imp_nontrun = ~imp_upper\n q_adjusted = q[imp_nontrun]/(1-q_upper)\n x_imp[imp_nontrun] = np.quantile(x_nontrun, q_adjusted)\n \n return x_imp\n\ndef truncated_inverse_marginal_twoside(q, x_obs, eps=1e-6):\n x = x_obs\n xmax = x.max()\n xmin = x.min()\n loc_upper = np.isclose(x, xmax)\n loc_lower = np.isclose(x, xmin)\n loc_nontrun = ~(loc_upper | loc_lower)\n \n q_upper = loc_upper.mean()\n q_lower = loc_lower.mean()\n x_nontrun = x[loc_nontrun]\n \n x_imp = np.empty_like(q)\n imp_upper = q>=1-q_upper-eps\n imp_lower = q<=q_lower+eps\n imp_nontrun = ~(imp_upper|imp_lower)\n x_imp[imp_upper] = xmax\n x_imp[imp_lower] = xmin\n q_adjusted = (q[imp_nontrun]-q_lower)/(1-q_upper-q_lower)\n x_imp[imp_nontrun] = np.quantile(x_nontrun, q_adjusted)\n \n return x_imp" ]
[ [ "scipy.stats.norm.ppf", "numpy.clip", "numpy.empty_like", "numpy.quantile", "numpy.cumsum", "numpy.sort", "numpy.all", "numpy.interp", "numpy.round_", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Datatouille/pythainlp
[ "286c67bf75be53e9a90ad1078c2250a5769be8a3" ]
[ "pythainlp/word_vector/__init__.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nthai2fit - Thai word vector\nCode by https://github.com/cstorm125/thai2fit\n\"\"\"\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom gensim.models import KeyedVectors\nfrom gensim.models.keyedvectors import Word2VecKeyedVectors\nfrom pythainlp.corpus import download as download_data\nfrom pythainlp.corpus import get_corpus, get_corpus_path\nfrom pythainlp.tokenize import Tokenizer\n\nWV_DIM = 300\n\n_THAI2FIT_WORDS = get_corpus(\"words_th_thai2fit_201810.txt\")\n_pythainlp_tokenizer = Tokenizer(custom_dict=_THAI2FIT_WORDS, engine=\"newmm\")\n\n\ndef _download() -> str:\n path = get_corpus_path(\"thai2fit_wv\")\n if not path:\n download_data(\"thai2fit_wv\")\n path = get_corpus_path(\"thai2fit_wv\")\n return path\n\n\ndef get_model() -> Word2VecKeyedVectors:\n \"\"\"\n Download model\n\n :return: `gensim` word2vec model\n :rtype: gensim.models.keyedvectors.Word2VecKeyedVectors\n \"\"\"\n return KeyedVectors.load_word2vec_format(_download(), binary=True)\n\n\n_MODEL = get_model()\n\n\ndef most_similar_cosmul(\n positive: List[str], negative: List[str]\n) -> List[Tuple[str, float]]:\n \"\"\"\n This function find the top-10 words that are most similar with respect\n to from two lists of words labeled as positive and negative.\n The top-10 most similar words are obtained using multiplication\n combination objective from Omer Levy and Yoav Goldberg\n [OmerLevy_YoavGoldberg_2014]_.\n\n We use the function :func:`gensim.most_similar_cosmul` directly from\n :mod:`gensim`.\n\n :param list positive: a list of words to add\n :param list negative: a list of words to substract\n\n :raises KeyError: if there is any word in `positive` or `negative`\n not in the vocabulary of the model.\n :return: list of top-10 most similar words and its similarity score\n :rtype: list[tuple[str,float]]\n\n :Note:\n * With a single word in the positive list, it will find the\n most similar words to the word given (similar\n to :func:`gensim.most_similar`)\n * If a word in `positive` or `negative` is not in the vocabulary,\n :class:`KeyError` will be raised.\n\n :Example:\n\n Find the **top-10** most similar words to the word: \"แม่น้ำ\".\n\n >>> from pythainlp.word_vector import most_similar_cosmul\n >>>\n >>> list_positive = ['แม่น้ำ']\n >>> list_negative = []\n >>> most_similar_cosmul(list_positive, list_negative)\n [('ลำน้ำ', 0.8206598162651062), ('ทะเลสาบ', 0.775945782661438),\n ('ลุ่มน้ำ', 0.7490593194961548), ('คลอง', 0.7471904754638672),\n ('ปากแม่น้ำ', 0.7354257106781006), ('ฝั่งแม่น้ำ', 0.7120099067687988),\n ('ทะเล', 0.7030453681945801), ('ริมแม่น้ำ', 0.7015200257301331),\n ('แหล่งน้ำ', 0.6997432112693787), ('ภูเขา', 0.6960948705673218)]\n\n Find the **top-10** most similar words to the words: \"นายก\", \"รัฐมนตรี\",\n and \"ประเทศ\".\n\n >>> from pythainlp.word_vector import most_similar_cosmul\n >>>\n >>> list_positive = ['นายก', 'รัฐมนตรี', 'ประเทศ']\n >>> list_negative = []\n most_similar_cosmul(list_positive, list_negative)\n [('รองนายกรัฐมนตรี', 0.2730445861816406),\n ('เอกอัครราชทูต', 0.26500266790390015),\n ('นายกรัฐมนตรี', 0.2649088203907013),\n ('ผู้ว่าราชการจังหวัด', 0.25119125843048096),\n ('ผู้ว่าการ', 0.2510434687137604), ('เลขาธิการ', 0.24824175238609314),\n ('ผู้ว่า', 0.2453523576259613), ('ประธานกรรมการ', 0.24147476255893707),\n ('รองประธาน', 0.24123257398605347), ('สมาชิกวุฒิสภา', 0.2405330240726471)]\n\n Find the **top-10** most similar words when having **only** positive list\n and **both** positive and negative lists.\n\n >>> from 
pythainlp.word_vector import most_similar_cosmul\n >>>\n >>> list_positive = ['ประเทศ', 'ไทย', 'จีน', 'ญี่ปุ่น']\n >>> list_negative = []\n >>> most_similar_cosmul(list_positive, list_negative)\n [('ประเทศจีน', 0.22022421658039093), ('เกาหลี', 0.2196873426437378),\n ('สหรัฐอเมริกา', 0.21660110354423523),\n ('ประเทศญี่ปุ่น', 0.21205860376358032),\n ('ประเทศไทย', 0.21159221231937408), ('เกาหลีใต้', 0.20321202278137207),\n ('อังกฤษ', 0.19610872864723206), ('ฮ่องกง', 0.1928885132074356),\n ('ฝรั่งเศส', 0.18383873999118805), ('พม่า', 0.18369348347187042)]\n >>>\n >>> list_positive = ['ประเทศ', 'ไทย', 'จีน', 'ญี่ปุ่น']\n >>> list_negative = ['อเมริกา']\n >>> most_similar_cosmul(list_positive, list_negative)\n [('ประเทศไทย', 0.3278159201145172), ('เกาหลี', 0.3201899230480194),\n ('ประเทศจีน', 0.31755179166793823), ('พม่า', 0.30845439434051514),\n ('ประเทศญี่ปุ่น', 0.306713730096817), ('เกาหลีใต้', 0.3003999888896942),\n ('ลาว', 0.2995176911354065), ('คนไทย', 0.2885020673274994),\n ('เวียดนาม', 0.2878379821777344), ('ชาวไทย', 0.28480708599090576)]\n\n The function return :class:`KeyError` when the term \"เมนูอาหารไทย\"\n is not in the vocabulary.\n\n >>> from pythainlp.word_vector import most_similar_cosmul\n >>>\n >>> list_positive = ['เมนูอาหารไทย']\n >>> list_negative = []\n >>> most_similar_cosmul(list_positive, list_negative)\n KeyError: \"word 'เมนูอาหารไทย' not in vocabulary\"\n \"\"\"\n\n return _MODEL.most_similar_cosmul(positive=positive, negative=negative)\n\n\ndef doesnt_match(words: List[str]) -> str:\n \"\"\"\n This function returns one word that is mostly unrelated to other words\n in the list. We use the function :func:`doesnt_match` from :mod:`gensim`.\n\n :param list words: a list of words\n\n :raises KeyError: if there is any word in `positive` or `negative` not in\n the vocabulary of the model.\n :return: the word that mostly unrelated\n :rtype: strt\n\n :Note:\n * If a word in `words` is not in the vocabulary, :class:`KeyError`\n will be raised.\n\n :Example:\n\n Pick the word \"พริกไทย\" (name of food) out of the list of meals\n (\"อาหารเช้า\", \"อาหารเที่ยง\", \"อาหารเย็น\").\n\n >>> from pythainlp.word_vector import doesnt_match\n >>>\n >>> words = ['อาหารเช้า','อาหารเที่ยง','อาหารเย็น','พริกไทย']\n >>> doesnt_match(words)\n พริกไทย\n\n Pick the word \"เรือ\" (name of vehicle) out of the list of words related\n to occupation (\"ดีไซน์เนอร์\", \"พนักงานเงินเดือน\", \"หมอ\").\n\n >>> from pythainlp.word_vector import doesnt_match\n >>>\n >>> words = ['ดีไซน์เนอร์', 'พนักงานเงินเดือน', 'หมอ', 'เรือ']\n >>> doesnt_match(words)\n เรือ\n\n \"\"\"\n return _MODEL.doesnt_match(words)\n\n\ndef similarity(word1: str, word2: str) -> float:\n \"\"\"\n This function computae cosine similarity between two words.\n\n :param string word1: first word to be compared\n :param string word2: second word to be compared\n\n :raises KeyError: if either `word1` or `word2` is not in the vocabulary\n of the model.\n :return: the cosine similarity between the two word vectors\n :rtype: float\n\n :Note:\n * If a word in `word1` or `word2` is not in the vocabulary,\n :class:`KeyError` will be raised.\n\n :Example:\n\n Compute consine similarity between two words: \"รถไฟ\" and \"รถไฟฟ้า\"\n (train and electric train).\n\n >>> from pythainlp.word_vector import similarity\n >>> similarity('รถไฟ','รถไฟฟ้า')\n 0.43387136\n\n\n Compute consine similarity between two words: \"เสือดาว\" and \"รถไฟฟ้า\"\n (leopard and electric train).\n\n >>> from pythainlp.word_vector import similarity\n >>> 
similarity('เสือดาว','รถไฟฟ้า')\n 0.04300258\n\n \"\"\"\n return _MODEL.similarity(word1, word2)\n\n\ndef sentence_vectorizer(text: str, use_mean: bool = True) -> np.ndarray:\n \"\"\"\n This function convert a Thai sentence into vector.\n Specifically, it first tokenize that text and map each tokenized words\n with the word vectors from the model.\n Then, word vectors are aggregatesd into one vector of 300 dimension\n by calulating either mean, or summation of all word vectors.\n\n :param string text: text input\n :param boolean use_mean: if `True` aggregate word vectors with mean of all\n word vectors. Otherwise, aggregate with summation\n of all word vectors\n\n :return: 300-dimension vector representing the given sentence in form of\n :mod:`numpy` array\n :rtype: :class:`numpy.ndarray((1,300))`\n\n\n :Example:\n\n Vectorize the sentence, \"อ้วนเสี้ยวเข้ายึดแคว้นกิจิ๋ว ในปี พ.ศ. 735\", into\n one sentence vector with two aggregation meanthods: mean and summation.\n\n >>> from pythainlp.word_vector import sentence_vectorizer\n >>>\n >>> sentence = 'อ้วนเสี้ยวเข้ายึดแคว้นกิจิ๋ว ในปี พ.ศ. 735'\n >>> sentence_vectorizer(sentence, use_mean=True)\n array([[-0.00421414, -0.08881307, 0.05081136, -0.05632929, -0.06607185,\n 0.03059357, -0.113882 , -0.00074836, 0.05035743, 0.02914307,\n ...\n 0.02893357, 0.11327957, 0.04562086, -0.05015393, 0.11641257,\n 0.32304936, -0.05054322, 0.03639471, -0.06531371, 0.05048079]])\n >>>\n >>> sentence_vectorizer(sentence, use_mean=False)\n array([[-0.05899798, -1.24338295, 0.711359 , -0.78861002, -0.92500597,\n 0.42831 , -1.59434797, -0.01047703, 0.705004 , 0.40800299,\n ...\n 0.40506999, 1.58591403, 0.63869202, -0.702155 , 1.62977601,\n 4.52269109, -0.70760502, 0.50952601, -0.914392 , 0.70673105]])\n \"\"\"\n words = _pythainlp_tokenizer.word_tokenize(text)\n\n vec = np.zeros((1, WV_DIM))\n\n for word in words:\n if word == \" \":\n word = \"xxspace\"\n elif word == \"\\n\":\n word = \"xxeol\"\n\n if word in _MODEL.index2word:\n vec += _MODEL.word_vec(word)\n\n if use_mean:\n vec /= len(words)\n\n return vec\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bo-miao/MAMP
[ "61c7ed01150f870084b93987bd528d855e2dbb50" ]
[ "functional/dataset/TestLoader.py" ]
[ "import os\nimport random\nimport json\nimport cv2\nimport numpy as np\nimport torch.utils.data as data\nimport torch\nimport torchvision.transforms as transforms\n\nfrom functional.utils.mask_io import read_mask\n\nvideo_names = None\n\n\ndef dataloader_davis(filepath):\n global video_names\n video_txt = filepath + '/ImageSets/2017/val.txt'\n video_names = sorted(open(video_txt).readlines())\n\n annotation_all = []\n jpeg_all = []\n video_all = []\n root_label_path = os.path.join(filepath, 'Annotations/480p/')\n root_img_path = os.path.join(filepath, 'JPEGImages/480p/')\n annotation_index_all = []\n for video in video_names:\n video_all.append(video.strip())\n annotation_index_all.append([0])\n\n anno_path = os.path.join(filepath, 'Annotations/480p/' + video.strip())\n cat_annos = sorted(os.listdir(anno_path))\n annotation_all.append(cat_annos)\n\n jpeg_path = os.path.join(filepath, 'JPEGImages/480p/' + video.strip())\n cat_jpegs = sorted(os.listdir(jpeg_path))\n jpeg_all.append(cat_jpegs)\n\n return root_label_path, root_img_path, annotation_all, jpeg_all, video_all, annotation_index_all\n\n\ndef dataloader_youtube(filepath):\n global video_names\n label_file = os.path.join(filepath, 'valid/meta.json')\n video_dict = json.load(open(label_file, 'r'))['videos']\n video_names = sorted(list(video_dict.keys()))\n print(\"TEST DIR: {}, VIDEO NUMBER: {}\".format(label_file, len(video_names)))\n\n annotation_all = []\n jpeg_all = []\n video_all = []\n obj_num_all = []\n annotation_index_all = []\n root_img_path = os.path.join(filepath, 'valid/JPEGImages')\n root_label_path = os.path.join(filepath, 'valid/Annotations')\n for idx, video in enumerate(video_names):\n video_all.append(video)\n data = video_dict[video]['objects']\n obj_names = list(data.keys())\n\n images = []\n labels = []\n max_obj_num = 0\n for obj_n in obj_names:\n max_obj_num = max(max_obj_num, int(obj_n))\n images += map(lambda x: x + '.jpg', list(data[obj_n][\"frames\"]))\n labels.append(data[obj_n][\"frames\"][0] + '.png')\n\n images = sorted(np.unique(images))\n labels = sorted(np.unique(labels))\n obj_num_all.append(max_obj_num)\n\n annotation_all.append(labels)\n jpeg_all.append(images)\n\n anno_idx = []\n for anno in labels:\n anno = anno.replace('png', 'jpg')\n anno_idx.append(images.index(anno))\n annotation_index_all.append(anno_idx)\n\n return root_label_path, root_img_path, annotation_all, jpeg_all, video_all, annotation_index_all, obj_num_all\n\n\ndef dataloader_custom(filepath):\n global video_names\n video_names = sorted(os.listdir(os.path.join(filepath, 'valid_demo/JPEGImages')))\n\n annotation_all = []\n jpeg_all = []\n video_all = []\n annotation_index_all = []\n root_img_path = os.path.join(filepath, 'valid_demo/JPEGImages')\n root_label_path = os.path.join(filepath, 'valid_demo/Annotations')\n for idx, video in enumerate(video_names):\n video_all.append(video)\n images = sorted(np.unique(os.listdir(os.path.join(root_img_path, video))))\n labels = sorted(np.unique(os.listdir(os.path.join(root_label_path, video))))\n first_frame_idx = images.index(labels[0].replace('png', 'jpg'))\n images = images[first_frame_idx:]\n\n annotation_all.append(labels)\n jpeg_all.append(images)\n\n anno_idx = []\n for anno in labels:\n anno = anno.replace('png', 'jpg')\n anno_idx.append(images.index(anno))\n annotation_index_all.append(anno_idx)\n\n return root_label_path, root_img_path, annotation_all, jpeg_all, video_all, annotation_index_all\n\n\ndef frame_read(path):\n image = cv2.imread(path)\n image = np.float32(image) / 
255.0\n image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n image = transforms.ToTensor()(image)\n image = transforms.Normalize([50,0,0], [50,127,127])(image)\n return image\n\n\ndef annotation_read(path):\n anno = read_mask(path)\n anno = np.expand_dims(anno, 0)\n return torch.Tensor(anno).contiguous().long()\n\n\nclass test_image_folder_davis(data.Dataset):\n def __init__(self, train_data, training=False):\n root_annos, root_imgs, annos, jpegs, videos, annos_index = train_data\n self.root_annos = root_annos\n self.root_imgs = root_imgs\n self.annos = annos\n self.jpegs = jpegs\n self.videos = videos\n self.annos_index = annos_index\n self.training = training\n\n def __getitem__(self, index):\n annos = self.annos[index]\n jpegs = self.jpegs[index]\n video_name = self.videos[index]\n annos_index = self.annos_index[index]\n\n annotations = [annotation_read(os.path.join(self.root_annos, video_name, anno)) for anno in annos]\n images_rgb = [frame_read(os.path.join(self.root_imgs, video_name, jpeg)) for jpeg in jpegs]\n\n _, height, width = annotations[0].shape\n meta = {\"video_name\": video_name, \"annotation_index\": annos_index, \"frame_names\": [x.replace('jpg', 'png') for x in jpegs],\n \"height\": height, \"width\": width, \"abs_frame_path\": [os.path.join(self.root_imgs, video_name, x) for x in jpegs]}\n return images_rgb, annotations, meta\n\n def __len__(self):\n return len(self.annos)\n\n\nclass test_image_folder_youtube(data.Dataset):\n def __init__(self, train_data, training=False):\n root_annos, root_imgs, annos, jpegs, videos, annos_index, obj_num = train_data\n self.root_annos = root_annos\n self.root_imgs = root_imgs\n self.annos = annos\n self.jpegs = jpegs\n self.videos = videos\n self.annos_index = annos_index\n self.obj_num = obj_num\n self.training = training\n\n def __getitem__(self, index):\n annos = self.annos[index]\n jpegs = self.jpegs[index]\n video_name = self.videos[index]\n annos_index = self.annos_index[index]\n object_number = self.obj_num[index]\n\n annotations = [annotation_read(os.path.join(self.root_annos, video_name, anno)) for anno in annos]\n images_rgb = [frame_read(os.path.join(self.root_imgs, video_name, jpeg)) for jpeg in jpegs]\n\n _, height, width = annotations[0].shape\n\n meta = {\"video_name\": video_name, \"annotation_index\": annos_index, \"frame_names\": [x.replace('jpg', 'png') for x in jpegs],\n \"video_object_number\": object_number, \"height\": height, \"width\": width,\n \"abs_frame_path\": [os.path.join(self.root_imgs, video_name, x) for x in jpegs]}\n return images_rgb, annotations, meta\n\n def __len__(self):\n return len(self.annos)\n\n\nclass test_image_folder_custom(data.Dataset):\n def __init__(self, train_data, training=False):\n root_annos, root_imgs, annos, jpegs, videos, annos_index = train_data\n self.root_annos = root_annos\n self.root_imgs = root_imgs\n self.annos = annos\n self.jpegs = jpegs\n self.videos = videos\n self.annos_index = annos_index\n self.training = training\n\n def __getitem__(self, index):\n annos = self.annos[index]\n jpegs = self.jpegs[index]\n video_name = self.videos[index]\n annos_index = self.annos_index[index]\n\n annotations = [annotation_read(os.path.join(self.root_annos, video_name, anno)) for anno in annos]\n images_rgb = [frame_read(os.path.join(self.root_imgs, video_name, jpeg)) for jpeg in jpegs]\n\n _, height, width = annotations[0].shape\n\n meta = {\"video_name\": video_name, \"annotation_index\": annos_index, \"frame_names\": [x.replace('jpg', 'png') for x in jpegs],\n \"height\": height, 
\"width\": width, \"abs_frame_path\": [os.path.join(self.root_imgs, video_name, x) for x in jpegs]}\n return images_rgb, annotations, meta\n\n def __len__(self):\n return len(self.annos)\n" ]
[ [ "numpy.expand_dims", "torch.Tensor", "numpy.unique", "torch.utils.data.keys", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
openAIRoom/IQAAD
[ "d6a74558ea5335367be139b3d021f217b292fcda" ]
[ "anomaly_loss/ms_ssim.py" ]
[ "# -*- coding: utf-8 -*-\r\n# @Time : \r\n# @Author : \r\n# @Email : \r\n# @File : \r\n# @Software: \r\n\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nfrom torch import Tensor\r\n\r\n\r\nclass SSIMLoss(nn.Module):\r\n def __init__(self, kernel_size: int = 11, sigma: float = 1.5) -> None:\r\n\r\n \"\"\"Computes the structural similarity (SSIM) index map between two images\r\n Args:\r\n kernel_size (int): Height and width of the gaussian kernel.\r\n sigma (float): Gaussian standard deviation in the x and y direction.\r\n \"\"\"\r\n\r\n super().__init__()\r\n self.kernel_size = kernel_size\r\n self.sigma = sigma\r\n self.gaussian_kernel = self._create_gaussian_kernel(self.kernel_size, self.sigma)\r\n\r\n def forward(self, x: Tensor, y: Tensor):\r\n\r\n if not self.gaussian_kernel.is_cuda:\r\n self.gaussian_kernel = self.gaussian_kernel.to(x.device)\r\n\r\n l_map, cs_map = self._ssim(x, y)\r\n return l_map, cs_map\r\n\r\n def _ssim(self, x: Tensor, y: Tensor):\r\n\r\n # Compute means\r\n ux = F.conv2d(x, self.gaussian_kernel, padding=self.kernel_size // 2, groups=3)\r\n uy = F.conv2d(y, self.gaussian_kernel, padding=self.kernel_size // 2, groups=3)\r\n\r\n # Compute variances\r\n uxx = F.conv2d(x * x, self.gaussian_kernel, padding=self.kernel_size // 2, groups=3)\r\n uyy = F.conv2d(y * y, self.gaussian_kernel, padding=self.kernel_size // 2, groups=3)\r\n uxy = F.conv2d(x * y, self.gaussian_kernel, padding=self.kernel_size // 2, groups=3)\r\n vx = uxx - ux * ux\r\n vy = uyy - uy * uy\r\n vxy = uxy - ux * uy\r\n\r\n c1 = 0.01 ** 2\r\n c2 = 0.03 ** 2\r\n\r\n cs_map = (2 * vxy + c2) / (vx + vy + c2) # set alpha=beta=gamma=1\r\n l_map = ((2 * ux * uy + c1) / (ux ** 2 + uy ** 2 + c1)) # 亮度项\r\n return l_map, cs_map\r\n\r\n def _create_gaussian_kernel(self, kernel_size: int, sigma: float) -> Tensor:\r\n\r\n start = (1 - kernel_size) / 2\r\n end = (1 + kernel_size) / 2\r\n kernel_1d = torch.arange(start, end, step=1, dtype=torch.float)\r\n kernel_1d = torch.exp(-torch.pow(kernel_1d / sigma, 2) / 2)\r\n kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(dim=0)\r\n\r\n kernel_2d = torch.matmul(kernel_1d.t(), kernel_1d)\r\n kernel_2d = kernel_2d.expand(3, 1, kernel_size, kernel_size).contiguous()\r\n return kernel_2d\r\n\r\n\r\nclass MSSSIM(torch.nn.Module):\r\n def __init__(self, num_scales=4):\r\n super(MSSSIM, self).__init__()\r\n self.num_scales = num_scales\r\n self.model = SSIMLoss()\r\n\r\n def forward(self, img_1, img_2, as_loss=False):\r\n b, c, h, w = img_1.shape\r\n\r\n mcs = []\r\n l = None\r\n for scale in range(self.num_scales):\r\n if scale > 0:\r\n img_1 = F.avg_pool2d(img_1, kernel_size=2, stride=2, padding=0)\r\n img_2 = F.avg_pool2d(img_2, kernel_size=2, stride=2, padding=0)\r\n\r\n l_map, cs_map = self.model(img_1, img_2)\r\n if l is None:\r\n l = l_map\r\n mcs.append(F.interpolate(cs_map, size=(h, w), mode=\"bilinear\", align_corners=False))\r\n\r\n msssim_map = torch.mean(torch.stack(mcs), dim=0) * l\r\n if as_loss:\r\n return torch.mean(1 - msssim_map)\r\n else:\r\n return torch.mean(1 - msssim_map, axis=1).unsqueeze(1)\r\n\r\n" ]
[ [ "torch.mean", "torch.nn.functional.conv2d", "torch.nn.functional.avg_pool2d", "torch.nn.functional.interpolate", "torch.arange", "torch.stack", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bateman/twitpersonality
[ "0d25ecf617f414f55f472144e8f57026b5cca1c3", "0d25ecf617f414f55f472144e8f57026b5cca1c3" ]
[ "training/plot_LASSO_OCEAN.py", "utilities/word_coverage.py" ]
[ "from sklearn.linear_model import Lasso\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datasetUtils as dsu\nimport embeddings\nimport sys\nimport os\n\n#configs\nmethod = \"conc\"\npost_threshold = 3\nx_scale = (0,5.5)\ny_scale = (0,5.5)\n\ndef savePlot(y_true, y_pred, title, path):\n plt.clf()\n plt.xlim(x_scale)\n plt.ylim(y_scale)\n plt.plot(y_true, y_pred, '.')\n plt.ylabel('predicted values')\n plt.xlabel('actual values')\n plt.title(title)\n plt.savefig(path)\n #plt.show()\n\ndataset = \"fasttext\"\n#dataset = \"\"\ndataset_path = \"../FastText/dataset.vec\"\n#dataset_path = \"D:\\Downloads\\datasetssss\\Set9.bin\"\nshuffle = \"yes\"\n\nposts = []\nyO = []\nyC = []\nyE = []\nyA = []\nyN = []\n\nprint(\"[LASSO] Loading myPersonality...\")\n[posts, yO, yC, yE, yA, yN] = dsu.readMyPersonality()\nprint(\"[LASSO] Loading embeddings dataset...\")\nif dataset == 'fasttext':\n transform = True\n wordDictionary = dsu.parseFastText(dataset_path)\n #res = dsu.parseFastText(dataset_path)\n #wordDictionary = res[0]\n #wordWrods = res[1]\nelse:\n transform = False\n wordDictionary = dsu.loadEmbeddingsDataset(dataset_path, True)\nprint(\"[LASSO] Data successfully loaded.\")\n\nif shuffle == 'True' or shuffle == 'yes' or shuffle == 'true':\n s = np.arange(posts.shape[0])\n np.random.shuffle(s)\n posts = posts[s]\n yO = yO[s]\n yC = yC[s]\n yE = yE[s]\n yA = yA[s]\n yN = yN[s]\n print(\"Data shuffled.\")\n\n#only for test\nsubsetSize = len(posts)\nsubsetSize = 7500\n#subsetSize = 1000\nposts = posts[0:subsetSize]\nyO = yO[0:subsetSize]\nyC = yC[0:subsetSize]\nyE = yE[0:subsetSize]\nyA = yA[0:subsetSize]\nyN = yN[0:subsetSize]\n\nold_yO = yO\nold_yC = yC\nold_yE = yE\nold_yA = yA\nold_yN = yN\n[conE, yO, yC, yE, yA, yN] = embeddings.transformTextForTraining(wordDictionary, post_threshold, posts, old_yO, old_yC, old_yE, old_yA, old_yN, method, transform)\nprint(\"Embeddings computed.\")\n\nsplit_index = round(len(conE)*0.85)\ndata_train = conE[:split_index]\ndata_test = conE[split_index:]\n\nl = 1\nfor labels in [yO, yC, yE, yA, yN]:\n\n if l==1:\n big5trait = \"O\"\n print(\"[LASSO] computing results for Openness...\")\n elif l==2:\n big5trait = \"C\"\n print(\"[LASSO] computing results for Conscientiousness...\")\n elif l==3:\n big5trait = \"E\"\n print(\"[LASSO] computing results for Extraversion...\")\n elif l==4:\n big5trait = \"A\"\n print(\"[LASSO] computing results for Agreeableness...\")\n elif l==5:\n big5trait = \"N\"\n print(\"[LASSO] computing results for Neuroticism...\")\n l += 1\n\n model = Lasso(alpha = 1e-4, normalize=True, max_iter = 1e5).fit(data_train, labels[:split_index])\n res = model.predict(data_test)\n mse = mean_squared_error(labels[split_index:], res)\n title = big5trait + \"_\" + method + \"_alpha0.0001_\\n\"+str(round(mse,3))[0:5]\n savePlot(labels[split_index:], res, title, \"Plots/LASSO/\"+method+\"/\"+title.split(\"\\n\")[0]+\".png\")", "from sklearn.feature_extraction.text import CountVectorizer\nfrom gensim.models.keyedvectors import KeyedVectors\nimport datasetUtils as dsu\nimport numpy as np\n\ndef countWords(embeddings_dataset):\n tot_words = 0\n found_words = 0\n for document in data:\n words = analyzer(document)\n if len(words) < 1:\n #move to the next document\n continue\n for word in words:\n tot_words += 1\n try:\n word_embedding = embeddings_dataset[word]\n found_words += 1\n except KeyError:\n continue\n return [tot_words, found_words]\n\nprint(\"Loading train data...\")\n[data, y_O, y_C, y_E, 
y_A, y_N] = dsu.readMyPersonality()\n\nvectorizer = CountVectorizer(stop_words=\"english\", analyzer=\"word\")\nanalyzer = vectorizer.build_analyzer()\n\nprint(\"Loading Datasets...\")\nwordDictionary = dsu.parseFastText(\"../FastText/dataset.vec\")\nprint(\"FastText loaded.\")\n\n[words, hits] = countWords(wordDictionary)\n\nprint(\"tot_words:\", words)\nprint(\"found_words:\", hits)\nprint(\"word coverage: %.2f%%\" %float((100*hits)/words))\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.savefig", "numpy.random.shuffle", "matplotlib.pyplot.plot", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "sklearn.linear_model.Lasso", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ], [ "sklearn.feature_extraction.text.CountVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Prakhar016/GRE-admission-Predictor
[ "69ffbeb5aad59e513e94215e2e699de5f73f3505" ]
[ "GRE_COLLEGE_PREDICT.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 23 18:35:16 2019\n\n@author: Prakhar\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LassoCV\ndf = pd.read_csv(\"Admission_Predict.csv\")\nx=df.iloc[:,1:-2].values\n\ny=df[df.columns[-1:]]\nls = LassoCV(cv=5, random_state=0)\nls.fit(x,y)\ngre1=x[:,0].mean()\ntof1=x[:,1].mean()\nun1=x[:,2].mean()\nsop1=x[:,3].mean()\nlor1=x[:,4].mean()\ncgpa1=x[:,5].mean()\ndef pred(GREScore,TOEFLScore,UniversityRating,SOP ,LOR ,CGPA):\n x=[]\n x.append(GREScore)\n x.append(TOEFLScore)\n x.append(UniversityRating)\n x.append(SOP)\n x.append(LOR)\n x.append(CGPA)\n arr=np.array(x)\n return ls.predict(arr.reshape(1,-1))\n\n\nprint(\"If No value then enter N\")\ngre=input(\"Enter the gre_score:-\")\nif gre=='N':\n gre=gre1\nelse:\n gre=float(gre)\ntoefl=input(\"Enter the toefl_score:-\")\nif toefl=='N':\n toefl=tof1\nelse:\n toefl=float(toefl)\nuniv=input(\"Enter the university_score:-\")\nif univ=='N':\n univ=un1\nelse:\n univ=float(univ)\nsop=input(\"Enter the sop_score:-\")\nif sop=='N':\n sop=sop1\nelse:\n sop=float(sop)\nlor=input(\"Enter the lor_score:-\")\nif lor=='N':\n lor=lor1\nelse:\n lor=float(lor)\ncgpa=input(\"Enter the cgpa_score:-\")\nif cgpa=='N':\n cgpa=cgpa1\nelse:\n cgpa=float(cgpa)\nPREDICT=pred(gre,toefl,univ,sop,lor,cgpa)\n\n\nif PREDICT>0.95:\n print(\"admission possble in Top Collages\")\nelif 0.80<PREDICT<0.95:\n print(\"maybe admission possible\")\nelif PREDICT<0.80:\n print(\"Better luck next time\")\n\n\n" ]
[ [ "numpy.array", "pandas.read_csv", "sklearn.linear_model.LassoCV" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
lev1khachatryan/ASDS_CV
[ "c9f0c0412002e929bcb7cc2fc6e5392977a9fa76", "c9f0c0412002e929bcb7cc2fc6e5392977a9fa76" ]
[ "Classes/20191114-Semantic_Segmentation/experiments/experiment_segmentation/run.py", "CaseStudies/Canny_Edge_Detector/Python/my_utils.py" ]
[ "import argparse\nfrom segmentation import SegModel\nimport tensorflow as tf\nimport os\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('--dataset_path', dest='dataset_path',\n default='dataset/',\n help='name of the dataset')\nparser.add_argument('--epoch', dest='epoch', type=int, default=2, help='# of epoch')\nparser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='# images in batch')\nparser.add_argument('--image_width', dest='image_width', type=int, default=288, help='scale images to this size')\nparser.add_argument('--image_height', dest='image_height', type=int, default=288, help='then crop to this size')\nparser.add_argument('--lr', dest='lr', type=float, default=1e-4, help='initial learning rate for adam')\nparser.add_argument('--summary_write_freq', dest='summary_write_freq', type=int, default=10,\n help='for how many iterations write a summary')\n\nparser.add_argument('--test_start', dest='test_start', type=int, default=0, help='epoch from which start to test')\nparser.add_argument('--test_end', dest='test_end', type=int, default=100, help='epoch on which to stop test')\n\nparser.add_argument('--num_classes', dest='num_classes', type=int, default=2, help='number of output classes')\nparser.add_argument('--mode', dest='mode', default='test_val', help='train, test, test_val, freeze')\nparser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default=os.path.dirname(__file__)+'/checkpoint',\n help='models are saved here')\nparser.add_argument('--summaries_dir', dest='summaries_dir', default=os.path.dirname(__file__)+'/summary',\n help='sample are saved here')\nparser.add_argument('--test_out_dir', dest='test_out_dir', default=os.path.dirname(__file__)+'/result',\n help='test sample are saved here')\nparser.add_argument('--test_dir', dest='test_dir', default=os.path.dirname(__file__)+'/test',\n help='read test from here')\nparser.add_argument('--freezed_pb_path', dest='freezed_pb_path',\n default=os.path.dirname(__file__)+'/test.pb',\n help='path to save .pb')\n\nargs = parser.parse_args()\n\n\ndef main(_):\n if not os.path.exists(args.summaries_dir):\n os.makedirs(args.summaries_dir)\n\n if not os.path.exists(args.test_out_dir):\n os.makedirs(args.test_out_dir)\n\n if not os.path.exists(args.checkpoint_dir):\n os.makedirs(args.checkpoint_dir)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n model = SegModel(args, sess)\n model.build_model()\n\n if args.mode == 'train':\n model.train()\n elif args.mode == 'test':\n model.test(args.test_out_dir, 1, ['softmax'], model.blending)\n elif args.mode == 'test_val':\n model.measure_metrics(args, model.compute_IOU)\n elif args.mode == 'freeze':\n model.freeze_save_graph(sess=sess, output_node='mask', path='./', name='test.pb')\n\nif __name__ == '__main__':\n tf.app.run()\n", "import numpy as np\nimport skimage\nimport matplotlib.pyplot as plt \nimport matplotlib.image as mpimg\nimport os\nimport scipy.misc as sm\n\ndef rgb2gray(rgb):\n\n r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\ndef load_data(dir_name = 'faces_imgs'): \n '''\n Load images from the \"faces_imgs\" directory\n Images are in JPG and we convert it to gray scale images\n '''\n imgs = []\n for filename in os.listdir(dir_name):\n if os.path.isfile(dir_name + '/' + filename):\n img = mpimg.imread(dir_name + '/' + filename)\n img = rgb2gray(img)\n imgs.append(img)\n return imgs\n\n\ndef 
visualize(imgs, format=None, gray=False):\n plt.figure(figsize=(20, 40))\n for i, img in enumerate(imgs):\n if img.shape[0] == 3:\n img = img.transpose(1,2,0)\n plt_idx = i+1\n plt.subplot(2, 2, plt_idx)\n plt.imshow(img, format)\n plt.show()\n\n " ]
[ [ "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.app.run" ], [ "matplotlib.pyplot.imshow", "matplotlib.image.imread", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarcelGM/transformers
[ "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467", "aad1d9b6d5c58fd974618ac0aead1c5bd1119467" ]
[ "kk_development.py", "examples/research_projects/bert-loses-patience/run_glue_with_pabee.py", "examples/flax/language-modeling/run_mlm_flax.py", "examples/research_projects/lxmert/processing_image.py", "examples/legacy/run_openai_gpt.py", "tests/test_modeling_deberta.py", "examples/research_projects/distillation/run_squad_w_distillation.py" ]
[ "import torch\nfrom transformers import AutoConfig, AutoModelForSeq2SeqLM, BartTokenizer, BartForConditionalGeneration, BartExtendedForConditionalGeneration, BartConfig, BartExtendedModel\n\n# Loading trained model\nPATH = \"/home/ec2-user/moymarce/transformers/checkpoints/5-source_oracle-double/\"\ntokenizer = BartTokenizer.from_pretrained(PATH)\nmodel = BartExtendedForConditionalGeneration.from_pretrained(PATH)\n\n# Generate example\nARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough. <knw> My friends are cool\"\nsummary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=20, early_stopping=True, use_cache=False)\nprint('Predicted text by model:', [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids], sep='\\n')\n\n# Add special token\ntokenizer.add_tokens(['<knw>'], special_tokens=True)\n# Initialize special tokens\nknw_token_id = tokenizer.convert_tokens_to_ids(['<knw>'])[0] #50265\npad_id = tokenizer.pad_token\n\n# Tokenize inputs into batch\nARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough. <knw> My friends are cool\"\nKNOWLEDGE = \"My friends are cool\"\ninputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')\nknowledge_inputs = tokenizer([KNOWLEDGE], max_length=1024, return_tensors='pt')\n\ntokenizer([ARTICLE_TO_SUMMARIZE, KNOWLEDGE], max_length=1024, return_tensors='pt')\n\n# Masking\nX = torch.Tensor([[1,2,3,4], [5,6,7,8]])\nindexes = ((X == 3) + (X == 6)).nonzero(as_tuple=True)\n\nknw_token_id = tokenizer.convert_tokens_to_ids(['<knw>'])[0] #50265\npad_id = tokenizer.pad_token\n\nfor row, ind in zip(X, indexes[1]):\n ind = (row == tokenizer.decode('<knw>')).nonzero()\n print('row', row, ind)\n print(row[ind:])\n row[ind:] = torch.zeros(row[ind:].size())", "# coding=utf-8\n# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE.\"\"\"\n\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nimport transformers\nfrom pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee\nfrom pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertTokenizer,\n BertConfig,\n BertTokenizer,\n get_linear_schedule_with_warmup,\n)\nfrom transformers import glue_compute_metrics as compute_metrics\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\nfrom transformers import glue_output_modes as output_modes\nfrom transformers import glue_processors as processors\nfrom transformers.trainer_utils import is_main_process\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassificationWithPabee, BertTokenizer),\n \"albert\": (AlbertConfig, AlbertForSequenceClassificationWithPabee, AlbertTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\"Train the model\"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # 
Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True,\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\n \" Will skip the first %d steps in the first epoch\",\n steps_trained_in_current_epoch,\n )\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained,\n int(args.num_train_epochs),\n desc=\"Epoch\",\n disable=args.local_rank not in [-1, 0],\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3],\n }\n inputs[\"token_type_ids\"] = batch[2]\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # 
mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\", patience=0):\n\n if args.model_type == \"albert\":\n model.albert.set_regression_threshold(args.regression_threshold)\n model.albert.set_patience(patience)\n model.albert.reset_stats()\n elif args.model_type == \"bert\":\n model.bert.set_regression_threshold(args.regression_threshold)\n model.bert.set_patience(patience)\n model.bert.reset_stats()\n else:\n raise NotImplementedError()\n\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, 
args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"labels\": batch[3],\n }\n inputs[\"token_type_ids\"] = batch[2]\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(eval_task, preds, out_label_ids)\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n print(\" %s = %s\" % (key, str(result[key])))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if args.eval_all_checkpoints and patience != 0:\n if args.model_type == \"albert\":\n model.albert.log_stats()\n elif args.model_type == \"bert\":\n model.bert.log_stats()\n else:\n raise NotImplementedError()\n\n return results\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = processors[task]()\n output_mode = output_modes[task]\n # Load data features from cache or dataset file\n cached_features_file = os.path.join(\n args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = (\n processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\n )\n features = 
convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name.\",\n )\n parser.add_argument(\n \"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(processors.keys()),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--patience\",\n default=\"0\",\n type=str,\n required=False,\n )\n parser.add_argument(\n \"--regression_threshold\",\n default=0,\n type=float,\n required=False,\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\",\n default=\"\",\n type=str,\n help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from huggingface.co\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\",\n action=\"store_true\",\n help=\"Run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\",\n action=\"store_true\",\n help=\"Set this flag if you are using an uncased model.\",\n )\n\n parser.add_argument(\n \"--per_gpu_train_batch_size\",\n default=8,\n type=int,\n help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\n \"--per_gpu_eval_batch_size\",\n default=1,\n type=int,\n help=\"Batch size per GPU/CPU for evaluation.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\",\n )\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\n \"--save_steps\",\n type=int,\n default=500,\n help=\"Save checkpoint every X updates steps.\",\n )\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\",\n action=\"store_true\",\n help=\"Overwrite the content of the output directory\",\n )\n parser.add_argument(\n \"--overwrite_cache\",\n action=\"store_true\",\n help=\"Overwrite the cached training and evaluation sets\",\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\n \"--local_rank\",\n type=int,\n default=-1,\n help=\"For distributed training: local_rank\",\n )\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if (\n 
os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = processors[args.task_name]()\n args.output_mode = output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n if args.patience != \"0\" and args.per_gpu_eval_batch_size != 1:\n raise ValueError(\"The eval batch size must be 1 with PABEE inference on.\")\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=args.task_name,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n print(\"Total Model Parameters:\", sum(param.numel() for 
param in model.parameters()))\n output_layers_param_num = sum(param.numel() for param in model.classifiers.parameters())\n print(\"Output Layers Parameters:\", output_layers_param_num)\n single_output_layer_param_num = sum(param.numel() for param in model.classifiers[0].parameters())\n print(\n \"Added Output Layers Parameters:\",\n output_layers_param_num - single_output_layer_param_num,\n )\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n patience_list = [int(x) for x in args.patience.split(\",\")]\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n for checkpoint in checkpoints:\n\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n\n print(f\"Evaluation for checkpoint {prefix}\")\n for patience in patience_list:\n result = evaluate(args, model, tokenizer, prefix=prefix, patience=patience)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n return results\n\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for masked 
language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a\ntext file or a dataset.\n\nHere is the full list of checkpoints on the hub that can be fine-tuned by this script:\nhttps://huggingface.co/models?filter=masked-lm\n\"\"\"\nimport logging\nimport os\nimport sys\nimport time\nfrom dataclasses import dataclass, field\n\n# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom datasets import load_dataset\nfrom tqdm import tqdm\n\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport optax\nfrom flax import jax_utils, traverse_util\nfrom flax.training import train_state\nfrom flax.training.common_utils import get_metrics, onehot, shard\nfrom transformers import (\n CONFIG_MAPPING,\n FLAX_MODEL_FOR_MASKED_LM_MAPPING,\n AutoConfig,\n AutoTokenizer,\n FlaxAutoModelForMaskedLM,\n HfArgumentParser,\n PreTrainedTokenizerBase,\n TensorType,\n TrainingArguments,\n is_tensorboard_available,\n set_seed,\n)\n\n\n# Cache the result\nhas_tensorboard = is_tensorboard_available()\nif has_tensorboard:\n try:\n from flax.metrics.tensorboard import SummaryWriter\n except ImportError as ie:\n has_tensorboard = False\n print(f\"Unable to display metrics through TensorBoard because some package are not installed: {ie}\")\n\nelse:\n print(\n \"Unable to display metrics through TensorBoard because the package is not installed: \"\n \"Please run pip install tensorboard to enable.\"\n )\n\n\nMODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The model checkpoint for weights initialization.\"\n \"Don't set if you want to train a model from scratch.\"\n },\n )\n model_type: Optional[str] = field(\n default=None,\n metadata={\"help\": \"If training from scratch, pass a model type from the list: \" + \", \".join(MODEL_TYPES)},\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n dtype: Optional[str] = field(\n default=\"float32\",\n metadata={\n \"help\": \"Floating-point format in which the model weights should be initialized and trained. 
Choose one of `[float32, float16, bfloat16]`.\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n train_ref_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input train ref data file for whole word masking in Chinese.\"},\n )\n validation_ref_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input validation ref data file for whole word masking in Chinese.\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n validation_split_percentage: Optional[int] = field(\n default=5,\n metadata={\n \"help\": \"The percentage of the train set used as validation set in case there's no validation split\"\n },\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated. Default to the max input length of the model.\"\n },\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n mlm_probability: float = field(\n default=0.15, metadata={\"help\": \"Ratio of tokens to mask for masked language modeling loss\"}\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to `max_seq_length`. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch.\"\n },\n )\n line_by_line: bool = field(\n default=False,\n metadata={\"help\": \"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\"},\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, a json or a txt file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, a json or a txt file.\"\n\n\[email protected]\nclass FlaxDataCollatorForLanguageModeling:\n \"\"\"\n Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they\n are not all of the same length.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n mlm_probability (:obj:`float`, `optional`, defaults to 0.15):\n The probability with which to (randomly) mask tokens in the input.\n\n .. 
note::\n\n For best performance, this data collator should be used with a dataset having items that are dictionaries or\n BatchEncoding, with the :obj:`\"special_tokens_mask\"` key, as returned by a\n :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the\n argument :obj:`return_special_tokens_mask=True`.\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n mlm_probability: float = 0.15\n\n def __post_init__(self):\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. \"\n \"You should pass `mlm=False` to train on causal language modeling instead.\"\n )\n\n def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]:\n # Handle dict or lists with proper padding and conversion to tensor.\n batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY)\n\n # If special token mask has been preprocessed, pop it from the dict.\n special_tokens_mask = batch.pop(\"special_tokens_mask\", None)\n\n batch[\"input_ids\"], batch[\"labels\"] = self.mask_tokens(\n batch[\"input_ids\"], special_tokens_mask=special_tokens_mask\n )\n return batch\n\n def mask_tokens(\n self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray]\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n \"\"\"\n labels = inputs.copy()\n # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)\n probability_matrix = np.full(labels.shape, self.mlm_probability)\n special_tokens_mask = special_tokens_mask.astype(\"bool\")\n\n probability_matrix[special_tokens_mask] = 0.0\n masked_indices = np.random.binomial(1, probability_matrix).astype(\"bool\")\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype(\"bool\") & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype(\"bool\")\n indices_random &= masked_indices & ~indices_replaced\n\n random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype=\"i4\")\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\ndef generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:\n num_samples = len(samples_idx)\n samples_to_remove = num_samples % batch_size\n\n if samples_to_remove != 0:\n samples_idx = samples_idx[:-samples_to_remove]\n sections_split = num_samples // batch_size\n batch_idx = np.split(samples_idx, sections_split)\n return batch_idx\n\n\ndef write_metric(train_metrics, eval_metrics, train_time, step):\n summary_writer.scalar(\"train_time\", train_time, step)\n\n train_metrics = get_metrics(train_metrics)\n for key, vals in train_metrics.items():\n tag = f\"train_{key}\"\n for i, val in enumerate(vals):\n summary_writer.scalar(tag, val, step - len(vals) + i + 1)\n\n for metric_name, value in eval_metrics.items():\n 
summary_writer.scalar(f\"eval_{metric_name}\", value, step)\n\n\nif __name__ == \"__main__\":\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n level=\"NOTSET\",\n datefmt=\"[%X]\",\n )\n\n # Log on each process the small summary:\n logger = logging.getLogger(__name__)\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n\n # Set the verbosity to info of the Transformers logger (on main process only):\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantees that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)\n\n if \"validation\" not in datasets.keys():\n datasets[\"validation\"] = load_dataset(\n data_args.dataset_name,\n data_args.dataset_config_name,\n split=f\"train[:{data_args.validation_split_percentage}%]\",\n cache_dir=model_args.cache_dir,\n )\n datasets[\"train\"] = load_dataset(\n data_args.dataset_name,\n data_args.dataset_config_name,\n split=f\"train[{data_args.validation_split_percentage}%:]\",\n cache_dir=model_args.cache_dir,\n )\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n if extension == \"txt\":\n extension = \"text\"\n datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n if model_args.config_name:\n config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)\n elif model_args.model_name_or_path:\n config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n else:\n config = CONFIG_MAPPING[model_args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if model_args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer\n )\n elif model_args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer\n )\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported by this script.\"\n \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n )\n\n # Preprocessing the datasets.\n # First we tokenize all the texts.\n if training_args.do_train:\n column_names = datasets[\"train\"].column_names\n else:\n column_names = datasets[\"validation\"].column_names\n text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n if data_args.line_by_line:\n # When using line_by_line, we just tokenize each nonempty line.\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n def tokenize_function(examples):\n # Remove empty lines\n examples = [line for line in examples if len(line) > 0 and not line.isspace()]\n return tokenizer(\n examples,\n return_special_tokens_mask=True,\n padding=padding,\n truncation=True,\n max_length=max_seq_length,\n )\n\n tokenized_datasets = datasets.map(\n tokenize_function,\n input_columns=[text_column_name],\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n else:\n # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.\n # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more\n # efficient when it receives the `special_tokens_mask`.\n def tokenize_function(examples):\n return tokenizer(examples[text_column_name], return_special_tokens_mask=True)\n\n tokenized_datasets = datasets.map(\n tokenize_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Main data processing function that will concatenate all texts from our dataset and generate chunks of\n # max_seq_length.\n def group_texts(examples):\n # Concatenate all texts.\n concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n total_length = len(concatenated_examples[list(examples.keys())[0]])\n # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n # customize this part to your needs.\n total_length = (total_length // max_seq_length) * max_seq_length\n # Split by chunks of max_len.\n result = {\n k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]\n for k, t in concatenated_examples.items()\n }\n return result\n\n # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a\n # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value\n # might be slower to preprocess.\n #\n # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map\n tokenized_datasets = tokenized_datasets.map(\n group_texts,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Enable tensorboard only on the master node\n if has_tensorboard and jax.process_index() == 0:\n summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath(\"logs\").as_posix())\n\n # Data collator\n # This one will take care of randomly masking the tokens.\n data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)\n\n # Initialize our training\n rng = jax.random.PRNGKey(training_args.seed)\n dropout_rngs = jax.random.split(rng, jax.local_device_count())\n\n model = FlaxAutoModelForMaskedLM.from_config(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype))\n\n # Store some constant\n num_epochs = int(training_args.num_train_epochs)\n train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()\n eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()\n\n num_train_steps = len(tokenized_datasets[\"train\"]) // train_batch_size * num_epochs\n\n # Create learning rate schedule\n warmup_fn = optax.linear_schedule(\n init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps\n )\n decay_fn = optax.linear_schedule(\n init_value=training_args.learning_rate,\n end_value=0,\n transition_steps=num_train_steps - training_args.warmup_steps,\n )\n linear_decay_lr_schedule_fn = optax.join_schedules(\n schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]\n )\n\n # We use Optax's \"masking\" functionality to not apply weight decay\n # to bias and LayerNorm scale parameters. 
decay_mask_fn returns a\n # mask boolean with the same structure as the parameters.\n # The mask is True for parameters that should be decayed.\n def decay_mask_fn(params):\n flat_params = traverse_util.flatten_dict(params)\n flat_mask = {path: (path[-1] != \"bias\" and path[-2:] != (\"LayerNorm\", \"scale\")) for path in flat_params}\n return traverse_util.unflatten_dict(flat_mask)\n\n # create adam optimizer\n adamw = optax.adamw(\n learning_rate=linear_decay_lr_schedule_fn,\n b1=training_args.adam_beta1,\n b2=training_args.adam_beta2,\n eps=1e-8,\n weight_decay=training_args.weight_decay,\n mask=decay_mask_fn,\n )\n\n # Setup train state\n state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw)\n\n # Define gradient update step fn\n def train_step(state, batch, dropout_rng):\n dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)\n\n def loss_fn(params):\n labels = batch.pop(\"labels\")\n\n logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]\n\n # compute loss, ignore padded input tokens\n label_mask = jnp.where(labels > 0, 1.0, 0.0)\n loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask\n\n # take average\n loss = loss.sum() / label_mask.sum()\n\n return loss\n\n grad_fn = jax.value_and_grad(loss_fn)\n loss, grad = grad_fn(state.params)\n grad = jax.lax.pmean(grad, \"batch\")\n new_state = state.apply_gradients(grads=grad)\n\n metrics = jax.lax.pmean(\n {\"loss\": loss, \"learning_rate\": linear_decay_lr_schedule_fn(state.step)}, axis_name=\"batch\"\n )\n\n return new_state, metrics, new_dropout_rng\n\n # Create parallel version of the train step\n p_train_step = jax.pmap(train_step, \"batch\", donate_argnums=(0,))\n\n # Define eval fn\n def eval_step(params, batch):\n labels = batch.pop(\"labels\")\n\n logits = model(**batch, params=params, train=False)[0]\n\n # compute loss, ignore padded input tokens\n label_mask = jnp.where(labels > 0, 1.0, 0.0)\n loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask\n\n # compute accuracy\n accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask\n\n # summarize metrics\n metrics = {\"loss\": loss.sum(), \"accuracy\": accuracy.sum(), \"normalizer\": label_mask.sum()}\n metrics = jax.lax.psum(metrics, axis_name=\"batch\")\n\n return metrics\n\n p_eval_step = jax.pmap(eval_step, \"batch\", donate_argnums=(0,))\n\n # Replicate the train state on each device\n state = jax_utils.replicate(state)\n\n train_metrics = []\n train_time = 0\n epochs = tqdm(range(num_epochs), desc=f\"Epoch ... 
(1/{num_epochs})\", position=0)\n for epoch in epochs:\n # ======================== Training ================================\n train_start = time.time()\n\n # Create sampling rng\n rng, input_rng = jax.random.split(rng)\n\n # Generate an epoch by shuffling sampling indices from the train dataset\n num_train_samples = len(tokenized_datasets[\"train\"])\n train_samples_idx = jax.random.permutation(input_rng, jnp.arange(num_train_samples))\n train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)\n\n # Gather the indexes for creating the batch and do a training step\n for i, batch_idx in enumerate(tqdm(train_batch_idx, desc=\"Training...\", position=1)):\n samples = [tokenized_datasets[\"train\"][int(idx)] for idx in batch_idx]\n model_inputs = data_collator(samples, pad_to_multiple_of=16)\n\n # Model forward\n model_inputs = shard(model_inputs.data)\n state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)\n train_metrics.append(train_metric)\n\n train_time += time.time() - train_start\n\n epochs.write(\n f\"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})\"\n )\n\n # ======================== Evaluating ==============================\n num_eval_samples = len(tokenized_datasets[\"validation\"])\n eval_samples_idx = jnp.arange(num_eval_samples)\n eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)\n\n eval_metrics = []\n for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc=\"Evaluating ...\", position=2)):\n samples = [tokenized_datasets[\"validation\"][int(idx)] for idx in batch_idx]\n model_inputs = data_collator(samples, pad_to_multiple_of=16)\n\n # Model forward\n model_inputs = shard(model_inputs.data)\n metrics = p_eval_step(state.params, model_inputs)\n eval_metrics.append(metrics)\n\n # normalize eval metrics\n eval_metrics = get_metrics(eval_metrics)\n eval_metrics = jax.tree_map(jnp.sum, eval_metrics)\n eval_normalizer = eval_metrics.pop(\"normalizer\")\n eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)\n\n # Update progress bar\n epochs.desc = (\n f\"Epoch... 
({epoch + 1}/{num_epochs} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})\"\n )\n\n # Save metrics\n if has_tensorboard and jax.process_index() == 0:\n cur_step = epoch * (len(tokenized_datasets[\"train\"]) // train_batch_size)\n write_metric(train_metrics, eval_metrics, train_time, cur_step)\n\n # save last checkpoint\n if jax.process_index() == 0:\n params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))\n model.save_pretrained(training_args.output_dir, params=params)\n", "\"\"\"\n coding=utf-8\n Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal\n Adapted From Facebook Inc, Detectron2\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.import copy\n \"\"\"\nimport sys\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\n\nfrom utils import img_tensorize\n\n\nclass ResizeShortestEdge:\n def __init__(self, short_edge_length, max_size=sys.maxsize):\n \"\"\"\n Args:\n short_edge_length (list[min, max])\n max_size (int): maximum allowed longest edge length.\n \"\"\"\n self.interp_method = \"bilinear\"\n self.max_size = max_size\n self.short_edge_length = short_edge_length\n\n def __call__(self, imgs):\n img_augs = []\n for img in imgs:\n h, w = img.shape[:2]\n # later: provide list and randomly choose index for resize\n size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)\n if size == 0:\n return img\n scale = size * 1.0 / min(h, w)\n if h < w:\n newh, neww = size, scale * w\n else:\n newh, neww = scale * h, size\n if max(newh, neww) > self.max_size:\n scale = self.max_size * 1.0 / max(newh, neww)\n newh = newh * scale\n neww = neww * scale\n neww = int(neww + 0.5)\n newh = int(newh + 0.5)\n\n if img.dtype == np.uint8:\n pil_image = Image.fromarray(img)\n pil_image = pil_image.resize((neww, newh), Image.BILINEAR)\n img = np.asarray(pil_image)\n else:\n img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw\n img = F.interpolate(img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)\n img_augs.append(img)\n\n return img_augs\n\n\nclass Preprocess:\n def __init__(self, cfg):\n self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)\n self.input_format = cfg.INPUT.FORMAT\n self.size_divisibility = cfg.SIZE_DIVISIBILITY\n self.pad_value = cfg.PAD_VALUE\n self.max_image_size = cfg.INPUT.MAX_SIZE_TEST\n self.device = cfg.MODEL.DEVICE\n self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)\n self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)\n self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std\n\n def pad(self, images):\n max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))\n image_sizes = [im.shape[-2:] for im in images]\n images = [\n F.pad(\n im,\n [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],\n value=self.pad_value,\n )\n for size, im in zip(image_sizes, 
images)\n ]\n\n return torch.stack(images), torch.tensor(image_sizes)\n\n def __call__(self, images, single_image=False):\n with torch.no_grad():\n if not isinstance(images, list):\n images = [images]\n if single_image:\n assert len(images) == 1\n for i in range(len(images)):\n if isinstance(images[i], torch.Tensor):\n images.insert(i, images.pop(i).to(self.device).float())\n elif not isinstance(images[i], torch.Tensor):\n images.insert(\n i,\n torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))\n .to(self.device)\n .float(),\n )\n # resize smallest edge\n raw_sizes = torch.tensor([im.shape[:2] for im in images])\n images = self.aug(images)\n # transpose images and convert to torch tensors\n # images = [torch.as_tensor(i.astype(\"float32\")).permute(2, 0, 1).to(self.device) for i in images]\n # now normalize before pad to avoid useless arithmetic\n images = [self.normalizer(x) for x in images]\n # now pad them to do the following operations\n images, sizes = self.pad(images)\n # Normalize\n\n if self.size_divisibility > 0:\n raise NotImplementedError()\n # pad\n scales_yx = torch.true_divide(raw_sizes, sizes)\n if single_image:\n return images[0], sizes[0], scales_yx[0]\n else:\n return images, sizes, scales_yx\n\n\ndef _scale_box(boxes, scale_yx):\n boxes[:, 0::2] *= scale_yx[:, 1]\n boxes[:, 1::2] *= scale_yx[:, 0]\n return boxes\n\n\ndef _clip_box(tensor, box_size: Tuple[int, int]):\n assert torch.isfinite(tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n tensor[:, 0].clamp_(min=0, max=w)\n tensor[:, 1].clamp_(min=0, max=h)\n tensor[:, 2].clamp_(min=0, max=w)\n tensor[:, 3].clamp_(min=0, max=h)\n", "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" OpenAI GPT model fine-tuning script.\n Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py\n It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py\n\n This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:\n python run_openai_gpt.py \\\n --model_name openai-gpt \\\n --do_train \\\n --do_eval \\\n --train_dataset \"$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv\" \\\n --eval_dataset \"$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv\" \\\n --output_dir ../log \\\n --train_batch_size 16 \\\n\"\"\"\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n CONFIG_NAME,\n WEIGHTS_NAME,\n AdamW,\n OpenAIGPTDoubleHeadsModel,\n OpenAIGPTTokenizer,\n get_linear_schedule_with_warmup,\n)\n\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\ndef load_rocstories_dataset(dataset_path):\n \"\"\"Output a list of tuples(story, 1st continuation, 2nd continuation, label)\"\"\"\n with open(dataset_path, encoding=\"utf_8\") as f:\n f = csv.reader(f)\n output = []\n next(f) # skip the first line\n for line in tqdm(f):\n output.append((\" \".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))\n return output\n\n\ndef pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):\n \"\"\"Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)\n\n To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:\n input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]\n \"\"\"\n tensor_datasets = []\n for dataset in encoded_datasets:\n n_batch = len(dataset)\n input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)\n mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)\n lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)\n mc_labels = np.zeros((n_batch,), dtype=np.int64)\n for (\n i,\n (story, cont1, cont2, mc_label),\n ) in enumerate(dataset):\n with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]\n with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]\n input_ids[i, 0, : len(with_cont1)] = with_cont1\n input_ids[i, 1, : len(with_cont2)] = with_cont2\n mc_token_ids[i, 0] = len(with_cont1) - 1\n mc_token_ids[i, 
1] = len(with_cont2) - 1\n lm_labels[i, 0, : len(with_cont1)] = with_cont1\n lm_labels[i, 1, : len(with_cont2)] = with_cont2\n mc_labels[i] = mc_label\n all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)\n tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))\n return tensor_datasets\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_name\", type=str, default=\"openai-gpt\", help=\"pretrained model name\")\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\"--train_dataset\", type=str, default=\"\")\n parser.add_argument(\"--eval_dataset\", type=str, default=\"\")\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\"--num_train_epochs\", type=int, default=3)\n parser.add_argument(\"--train_batch_size\", type=int, default=8)\n parser.add_argument(\"--eval_batch_size\", type=int, default=16)\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", type=int, default=1)\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training \\\n steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before\\\n performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", type=float, default=6.25e-5)\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--lr_schedule\", type=str, default=\"warmup_linear\")\n parser.add_argument(\"--weight_decay\", type=float, default=0.01)\n parser.add_argument(\"--lm_coef\", type=float, default=0.9)\n parser.add_argument(\"--n_valid\", type=int, default=374)\n\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n print(args)\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n logger.info(\"device: {}, n_gpu {}\".format(device, n_gpu))\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Load tokenizer and model\n # This loading functions also add new tokens and embeddings called `special tokens`\n # These new embeddings will be fine-tuned on the RocStories dataset\n special_tokens = [\"_start_\", \"_delimiter_\", 
\"_classify_\"]\n tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)\n tokenizer.add_tokens(special_tokens)\n special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)\n model.resize_token_embeddings(len(tokenizer))\n model.to(device)\n\n # Load and encode the datasets\n def tokenize_and_encode(obj):\n \"\"\"Tokenize and encode a nested object\"\"\"\n if isinstance(obj, str):\n return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))\n elif isinstance(obj, int):\n return obj\n return list(tokenize_and_encode(o) for o in obj)\n\n logger.info(\"Encoding dataset...\")\n train_dataset = load_rocstories_dataset(args.train_dataset)\n eval_dataset = load_rocstories_dataset(args.eval_dataset)\n datasets = (train_dataset, eval_dataset)\n encoded_datasets = tokenize_and_encode(datasets)\n\n # Compute the max input length for the Transformer\n max_length = model.config.n_positions // 2 - 2\n input_length = max(\n len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3\n for dataset in encoded_datasets\n for story, cont1, cont2, _ in dataset\n )\n input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model\n\n # Prepare inputs tensors and dataloaders\n tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)\n train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]\n\n train_data = TensorDataset(*train_tensor_dataset)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n eval_data = TensorDataset(*eval_tensor_dataset)\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # Prepare optimizer\n if args.do_train:\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n param_optimizer = list(model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n if args.do_train:\n nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_steps = 0\n tqdm_bar = tqdm(train_dataloader, desc=\"Training\")\n for step, batch in enumerate(tqdm_bar):\n batch = tuple(t.to(device) for t in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels = batch\n losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)\n loss = args.lm_coef * losses[0] + losses[1]\n loss.backward()\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n tr_loss += loss.item()\n exp_average_loss = (\n loss.item() if exp_average_loss is None else 0.7 * 
exp_average_loss + 0.3 * loss.item()\n )\n nb_tr_steps += 1\n tqdm_bar.desc = \"Training loss: {:.2e} lr: {:.2e}\".format(exp_average_loss, scheduler.get_lr()[0])\n\n # Save a trained model\n if args.do_train:\n # Save a trained model, configuration and tokenizer\n model_to_save = model.module if hasattr(model, \"module\") else model # Only save the model itself\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)\n tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)\n model.to(device)\n\n if args.do_eval:\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(device) for t in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels = batch\n with torch.no_grad():\n _, mc_loss, _, mc_logits = model(\n input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels\n )\n\n mc_logits = mc_logits.detach().cpu().numpy()\n mc_labels = mc_labels.to(\"cpu\").numpy()\n tmp_eval_accuracy = accuracy(mc_logits, mc_labels)\n\n eval_loss += mc_loss.mean().item()\n eval_accuracy += tmp_eval_accuracy\n\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n eval_accuracy = eval_accuracy / nb_eval_examples\n train_loss = tr_loss / nb_tr_steps if args.do_train else None\n result = {\"eval_loss\": eval_loss, \"eval_accuracy\": eval_accuracy, \"train_loss\": train_loss}\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n\nif __name__ == \"__main__\":\n main()\n", "# coding=utf-8\n# Copyright 2018 Microsoft Authors and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import is_torch_available\nfrom transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_modeling_common import ModelTesterMixin, ids_tensor\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n DebertaConfig,\n DebertaForMaskedLM,\n DebertaForQuestionAnswering,\n DebertaForSequenceClassification,\n DebertaForTokenClassification,\n DebertaModel,\n )\n from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\n@require_torch\nclass DebertaModelTest(ModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n DebertaModel,\n DebertaForMaskedLM,\n DebertaForSequenceClassification,\n DebertaForTokenClassification,\n DebertaForQuestionAnswering,\n )\n if is_torch_available()\n else ()\n )\n\n test_torchscript = False\n test_pruning = False\n test_head_masking = False\n is_encoder_decoder = False\n\n class DebertaModelTester(object):\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n relative_attention=False,\n position_biased_input=True,\n pos_att_type=\"None\",\n num_labels=3,\n num_choices=4,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.relative_attention = relative_attention\n self.position_biased_input = position_biased_input\n self.pos_att_type = pos_att_type\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids 
= ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = DebertaConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n initializer_range=self.initializer_range,\n relative_attention=self.relative_attention,\n position_biased_input=self.position_biased_input,\n pos_att_type=self.pos_att_type,\n )\n\n return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n\n def check_loss_output(self, result):\n self.parent.assertListEqual(list(result.loss.size()), [])\n\n def create_and_check_deberta_model(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = DebertaModel(config=config)\n model.to(torch_device)\n model.eval()\n sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]\n sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]\n sequence_output = model(input_ids)[0]\n\n self.parent.assertListEqual(\n list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]\n )\n\n def create_and_check_deberta_for_masked_lm(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = DebertaForMaskedLM(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_deberta_for_sequence_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = DebertaForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)\n self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])\n self.check_loss_output(result)\n\n def create_and_check_deberta_for_token_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = DebertaForTokenClassification(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))\n\n def create_and_check_deberta_for_question_answering(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = DebertaForQuestionAnswering(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n 
input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n start_positions=sequence_labels,\n end_positions=sequence_labels,\n )\n self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n def setUp(self):\n self.model_tester = DebertaModelTest.DebertaModelTester(self)\n self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_deberta_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_deberta_model(*config_and_inputs)\n\n def test_for_sequence_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)\n\n def test_for_masked_lm(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)\n\n def test_for_question_answering(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)\n\n def test_for_token_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = DebertaModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_torch\n@require_sentencepiece\n@require_tokenizers\nclass DebertaModelIntegrationTest(unittest.TestCase):\n @unittest.skip(reason=\"Model not available yet\")\n def test_inference_masked_lm(self):\n pass\n\n @slow\n def test_inference_no_head(self):\n model = DebertaModel.from_pretrained(\"microsoft/deberta-base\")\n\n input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])\n attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n output = model(input_ids, attention_mask=attention_mask)[0]\n # compare the actual values for a slice.\n expected_slice = torch.tensor(\n [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]\n )\n self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f\"{output[:, 1:4, 1:4]}\")\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This is the exact same script as `examples/question-answering/run_squad.py` (as of 2020, January 8th) with an additional and optional step of distillation.\"\"\"\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport timeit\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nimport transformers\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n squad_convert_examples_to_features,\n)\nfrom transformers.data.metrics.squad_metrics import (\n compute_predictions_log_probs,\n compute_predictions_logits,\n squad_evaluate,\n)\nfrom transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor\nfrom transformers.trainer_utils import is_main_process\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\n\ndef train(args, train_dataset, model, tokenizer, teacher=None):\n \"\"\"Train the model\"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p 
in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 1\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n # Added here for reproductibility\n set_seed(args)\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n if teacher is not None:\n teacher.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = None if args.model_type == \"xlm\" else batch[2]\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n if args.version_2_with_negative:\n inputs.update({\"is_impossible\": batch[7]})\n outputs = model(**inputs)\n loss, start_logits_stu, end_logits_stu = outputs\n\n # Distillation loss\n if teacher is not None:\n if \"token_type_ids\" not in inputs:\n inputs[\"token_type_ids\"] = None if args.teacher_type == \"xlm\" else batch[2]\n with torch.no_grad():\n start_logits_tea, end_logits_tea = teacher(\n input_ids=inputs[\"input_ids\"],\n token_type_ids=inputs[\"token_type_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n )\n assert start_logits_tea.size() == start_logits_stu.size()\n assert end_logits_tea.size() == end_logits_stu.size()\n\n loss_fct = nn.KLDivLoss(reduction=\"batchmean\")\n loss_start = (\n loss_fct(\n F.log_softmax(start_logits_stu / args.temperature, dim=-1),\n F.softmax(start_logits_tea / args.temperature, dim=-1),\n )\n * (args.temperature ** 2)\n )\n loss_end = (\n loss_fct(\n F.log_softmax(end_logits_stu / args.temperature, dim=-1),\n F.softmax(end_logits_tea / args.temperature, dim=-1),\n )\n * (args.temperature ** 2)\n )\n loss_ce = (loss_start + loss_end) / 2.0\n\n loss = args.alpha_ce * loss_ce + args.alpha_squad * loss\n\n if args.n_gpu > 1:\n 
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n # Log metrics\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Only evaluate when single GPU otherwise metrics may not average well\n if args.local_rank == -1 and args.evaluate_during_training:\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n all_results = []\n start_time = timeit.default_timer()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1]}\n if 
args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = None if args.model_type == \"xlm\" else batch[2] # XLM don't use segment_ids\n example_indices = batch[3]\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n\n output = [to_list(output[i]) for output in outputs]\n\n # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other \"simpler\"\n # models only use two.\n if len(output) >= 5:\n start_logits = output[0]\n start_top_index = output[1]\n end_logits = output[2]\n end_top_index = output[3]\n cls_logits = output[4]\n\n result = SquadResult(\n unique_id,\n start_logits,\n end_logits,\n start_top_index=start_top_index,\n end_top_index=end_top_index,\n cls_logits=cls_logits,\n )\n\n else:\n start_logits, end_logits = output\n result = SquadResult(unique_id, start_logits, end_logits)\n\n all_results.append(result)\n\n evalTime = timeit.default_timer() - start_time\n logger.info(\" Evaluation done in total %f secs (%f sec per example)\", evalTime, evalTime / len(dataset))\n\n # Compute predictions\n output_prediction_file = os.path.join(args.output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(args.output_dir, \"nbest_predictions_{}.json\".format(prefix))\n\n if args.version_2_with_negative:\n output_null_log_odds_file = os.path.join(args.output_dir, \"null_odds_{}.json\".format(prefix))\n else:\n output_null_log_odds_file = None\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n predictions = compute_predictions_log_probs(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n model.config.start_n_top,\n model.config.end_n_top,\n args.version_2_with_negative,\n tokenizer,\n args.verbose_logging,\n )\n else:\n predictions = compute_predictions_logits(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n args.do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n args.verbose_logging,\n args.version_2_with_negative,\n args.null_score_diff_threshold,\n tokenizer,\n )\n\n # Compute the F1 and exact scores.\n results = squad_evaluate(examples, predictions)\n return results\n\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n # Load data features from cache or dataset file\n input_file = args.predict_file if evaluate else args.train_file\n cached_features_file = os.path.join(\n os.path.dirname(input_file),\n \"cached_distillation_{}_{}_{}\".format(\n \"dev\" if evaluate else \"train\",\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features_and_dataset = torch.load(cached_features_file)\n\n try:\n features, dataset, examples = (\n features_and_dataset[\"features\"],\n features_and_dataset[\"dataset\"],\n 
features_and_dataset[\"examples\"],\n )\n except KeyError:\n raise DeprecationWarning(\n \"You seem to be loading features from an older version of this script please delete the \"\n \"file %s in order for it to be created again\" % cached_features_file\n )\n else:\n logger.info(\"Creating features from dataset file at %s\", input_file)\n processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n if evaluate:\n examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)\n else:\n examples = processor.get_train_examples(args.data_dir, filename=args.train_file)\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n return_dataset=\"pt\",\n threads=args.threads,\n )\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save({\"features\": features, \"dataset\": dataset, \"examples\": examples}, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n torch.distributed.barrier()\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pretrained model or model identifier from huggingface.co/models\",\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints and predictions will be written.\",\n )\n\n # Distillation parameters (optional)\n parser.add_argument(\n \"--teacher_type\",\n default=None,\n type=str,\n help=\"Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.\",\n )\n parser.add_argument(\n \"--teacher_name_or_path\",\n default=None,\n type=str,\n help=\"Path to the already SQuAD fine-tuned teacher model. Only for distillation.\",\n )\n parser.add_argument(\n \"--alpha_ce\", default=0.5, type=float, help=\"Distillation loss linear weight. Only for distillation.\"\n )\n parser.add_argument(\n \"--alpha_squad\", default=0.5, type=float, help=\"True SQuAD loss linear weight. Only for distillation.\"\n )\n parser.add_argument(\n \"--temperature\", default=2.0, type=float, help=\"Distillation temperature. Only for distillation.\"\n )\n\n # Other parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the .json files for the task.\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--train_file\",\n default=None,\n type=str,\n help=\"The input training file. If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--predict_file\",\n default=None,\n type=str,\n help=\"The input evaluation file. 
If a data dir is specified, will look for the file there\"\n + \"If no data dir or train/predict files are specified, will run with tensorflow_datasets.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from huggingface.co\",\n )\n\n parser.add_argument(\n \"--version_2_with_negative\",\n action=\"store_true\",\n help=\"If true, the SQuAD examples contain some that do not have an answer.\",\n )\n parser.add_argument(\n \"--null_score_diff_threshold\",\n type=float,\n default=0.0,\n help=\"If null_score - best_non_null is greater than the threshold predict null.\",\n )\n\n parser.add_argument(\n \"--max_seq_length\",\n default=384,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences \"\n \"longer than this will be truncated, and sequences shorter than this will be padded.\",\n )\n parser.add_argument(\n \"--doc_stride\",\n default=128,\n type=int,\n help=\"When splitting up a long document into chunks, how much stride to take between chunks.\",\n )\n parser.add_argument(\n \"--max_query_length\",\n default=64,\n type=int,\n help=\"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Rul evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--n_best_size\",\n default=20,\n type=int,\n help=\"The total number of n-best predictions to generate in the nbest_predictions.json output file.\",\n )\n parser.add_argument(\n \"--max_answer_length\",\n default=30,\n type=int,\n help=\"The maximum length of an answer that can be generated. This is needed because the start \"\n \"and end predictions are not conditioned on one another.\",\n )\n parser.add_argument(\n \"--verbose_logging\",\n action=\"store_true\",\n help=\"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\",\n )\n\n parser.add_argument(\"--logging_steps\", type=int, default=50, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=50, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Whether not to use CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--threads\", type=int, default=1, help=\"multiple threads for converting example to features\")\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.teacher_type is not None:\n assert args.teacher_name_or_path is not None\n assert args.alpha_ce > 0.0\n assert args.alpha_ce + args.alpha_squad > 0.0\n assert args.teacher_type != \"distilbert\", \"We constraint teachers not to be of type DistilBERT.\"\n teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type]\n teacher_config = teacher_config_class.from_pretrained(\n args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None\n )\n teacher = teacher_model_class.from_pretrained(\n args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None\n )\n teacher.to(args.device)\n else:\n teacher = None\n\n if args.local_rank == 0:\n # Make sure only the first process in distributed training will download model & vocab\n torch.distributed.barrier()\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Before we 
do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.\n # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level=\"O2\"` will\n # remove the need for this code, but it is still valid.\n if args.fp16:\n try:\n import apex\n\n apex.amp.register_half_function(torch, \"einsum\")\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Save the trained model and the tokenizer\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n if args.do_train:\n logger.info(\"Loading checkpoints saved during training for evaluation\")\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n for checkpoint in checkpoints:\n # Reload the model\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n\n # Evaluate\n result = evaluate(args, model, tokenizer, prefix=global_step)\n\n result = dict((k + (\"_{}\".format(global_step) if global_step else \"\"), v) for k, v in result.items())\n results.update(result)\n\n logger.info(\"Results: {}\".format(results))\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.Tensor" ], [ "torch.load", "numpy.squeeze", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.save", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.TensorDataset", "torch.distributed.barrier", "torch.tensor", "numpy.argmax", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ], [ "numpy.random.binomial", "numpy.split", "numpy.random.randint", "numpy.full" ], [ "torch.true_divide", "numpy.asarray", "torch.tensor", "torch.isfinite", "torch.no_grad", "torch.nn.functional.interpolate", "torch.stack", "torch.nn.functional.pad", "numpy.random.randint" ], [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.utils.data.DataLoader", "numpy.full", "torch.tensor", "numpy.argmax", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.cuda.device_count", "numpy.zeros", "numpy.sum" ], [ "torch.allclose", "torch.tensor" ], [ "torch.nn.functional.softmax", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.save", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "torch.nn.KLDivLoss", "numpy.random.seed", "torch.cuda.set_device", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UditSinghParihar/d2-net
[ "f0d63609730b06e064c037256e0e40bac5b5ca43" ]
[ "lib/lossSIFT.py" ]
[ "import matplotlib\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport cv2\nfrom sys import exit\n\nimport torch\nimport torch.nn.functional as F\n\nfrom lib.utils import (\n\tgrid_positions,\n\tupscale_positions,\n\tdownscale_positions,\n\tsavefig,\n\timshow_image\n)\nfrom lib.exceptions import NoGradientError, EmptyTensorError\nimport torchgeometry as tgm\n\n\nmatplotlib.use('Agg')\n\n\ndef loss_function(\n\t\tmodel, batch, device, margin=1, safe_radius=4, scaling_steps=3, plot=False\n):\n\toutput = model({\n\t\t'image1': batch['image1'].to(device),\n\t\t'image2': batch['image2'].to(device)\n\t})\n\n\tloss = torch.tensor(np.array([0], dtype=np.float32), device=device)\n\thas_grad = False\n\n\tn_valid_samples = 0\n\tfor idx_in_batch in range(batch['image1'].size(0)):\n\t\t# Annotations\n\t\tdepth1 = batch['depth1'][idx_in_batch].to(device) # [h1, w1]\n\t\tintrinsics1 = batch['intrinsics1'][idx_in_batch].to(device) # [3, 3]\n\t\tpose1 = batch['pose1'][idx_in_batch].view(4, 4).to(device) # [4, 4]\n\t\tbbox1 = batch['bbox1'][idx_in_batch].to(device) # [2]\n\n\t\tdepth2 = batch['depth2'][idx_in_batch].to(device)\n\t\tintrinsics2 = batch['intrinsics2'][idx_in_batch].to(device)\n\t\tpose2 = batch['pose2'][idx_in_batch].view(4, 4).to(device)\n\t\tbbox2 = batch['bbox2'][idx_in_batch].to(device)\n\n\t\t# Network output\n\t\tdense_features1 = output['dense_features1'][idx_in_batch]\n\t\tc, h1, w1 = dense_features1.size()\n\t\tscores1 = output['scores1'][idx_in_batch].view(-1)\n\n\t\tdense_features2 = output['dense_features2'][idx_in_batch]\n\t\t_, h2, w2 = dense_features2.size()\n\t\tscores2 = output['scores2'][idx_in_batch]\n\n\n\t\tall_descriptors1 = F.normalize(dense_features1.view(c, -1), dim=0)\n\t\tdescriptors1 = all_descriptors1\n\n\t\tall_descriptors2 = F.normalize(dense_features2.view(c, -1), dim=0)\n\n\t\t# Warp the positions from image 1 to image 2\n\t\tfmap_pos1 = grid_positions(h1, w1, device)\n\n\t\thOrig, wOrig = int(batch['image1'].shape[2]/8), int(batch['image1'].shape[3]/8)\n\t\tfmap_pos1Orig = grid_positions(hOrig, wOrig, device)\n\t\tpos1 = upscale_positions(fmap_pos1Orig, scaling_steps=scaling_steps)\n\n\t\t# SIFT Feature Detection\n\t\t\n\t\timgNp1 = imshow_image(\n\t\t\t\t\t\tbatch['image1'][idx_in_batch].cpu().numpy(),\n\t\t\t\t\t\tpreprocessing=batch['preprocessing']\n\t\t\t\t\t)\n\t\timgNp1 = cv2.cvtColor(imgNp1, cv2.COLOR_BGR2RGB)\n\t\t# surf = cv2.xfeatures2d.SIFT_create()\n\t\tsurf = cv2.xfeatures2d.SURF_create(100)\n\t\t# surf = cv2.ORB_create()\n\n\t\tkp = surf.detect(imgNp1, None)\n\t\tkeyP = [(kp[i].pt) for i in range(len(kp))]\n\t\tkeyP = np.asarray(keyP).T\n\t\tkeyP[[0, 1]] = keyP[[1, 0]]\n\t\tkeyP = np.floor(keyP) + 0.5\n\n\t\tpos1 = torch.from_numpy(keyP).to(pos1.device).float()\n\t\t\n\t\ttry:\n\t\t\tpos1, pos2, ids = warp(\n\t\t\t\tpos1,\n\t\t\t\tdepth1, intrinsics1, pose1, bbox1,\n\t\t\t\tdepth2, intrinsics2, pose2, bbox2\n\t\t\t)\n\t\texcept EmptyTensorError:\n\t\t\tcontinue\n\n\t\tids = idsAlign(pos1, device, h1, w1)\n\n\t\t# cv2.drawKeypoints(imgNp1, kp, imgNp1)\n\t\t# cv2.imshow('Keypoints', imgNp1)\n\t\t# cv2.waitKey(0)\n\n\t\t# drawTraining(batch['image1'], batch['image2'], pos1, pos2, batch, idx_in_batch, output, save=False)\n\t\t\n\t\t# exit(1)\n\t\n\n\t\t# Top view homography adjustment\n\n\t\tH1 = output['H1'][idx_in_batch] \n\t\tH2 = output['H2'][idx_in_batch]\n\n\t\ttry:\n\t\t\tpos1, pos2 = homoAlign(pos1, pos2, H1, H2, device)\n\t\texcept IndexError:\n\t\t\tcontinue\n\n\t\tids = idsAlign(pos1, device, h1, w1)\n\n\t\timg_warp1 = 
tgm.warp_perspective(batch['image1'].to(device), H1, dsize=(400, 400))\n\t\timg_warp2 = tgm.warp_perspective(batch['image2'].to(device), H2, dsize=(400, 400))\n\n\t\t# drawTraining(img_warp1, img_warp2, pos1, pos2, batch, idx_in_batch, output)\n\t\t# exit(1)\n\n\t\tfmap_pos1 = fmap_pos1[:, ids]\n\t\tdescriptors1 = descriptors1[:, ids]\n\t\tscores1 = scores1[ids]\n\n\t\t# Skip the pair if not enough GT correspondences are available\n\t\tif ids.size(0) < 128:\n\t\t\tprint(ids.size(0))\n\t\t\tcontinue\n\n\t\t# Descriptors at the corresponding positions\n\t\tfmap_pos2 = torch.round(\n\t\t\tdownscale_positions(pos2, scaling_steps=scaling_steps)\n\t\t).long()\n\t\n\t\tdescriptors2 = F.normalize(\n\t\t\tdense_features2[:, fmap_pos2[0, :], fmap_pos2[1, :]],\n\t\t\tdim=0\n\t\t)\n\t\n\t\tpositive_distance = 2 - 2 * (\n\t\t\tdescriptors1.t().unsqueeze(1) @ descriptors2.t().unsqueeze(2)\n\t\t).squeeze()\n\n\t\t# positive_distance = getPositiveDistance(descriptors1, descriptors2)\n\n\t\tall_fmap_pos2 = grid_positions(h2, w2, device)\n\t\tposition_distance = torch.max(\n\t\t\ttorch.abs(\n\t\t\t\tfmap_pos2.unsqueeze(2).float() -\n\t\t\t\tall_fmap_pos2.unsqueeze(1)\n\t\t\t),\n\t\t\tdim=0\n\t\t)[0]\n\t\tis_out_of_safe_radius = position_distance > safe_radius\n\t\t\n\t\tdistance_matrix = 2 - 2 * (descriptors1.t() @ all_descriptors2)\n\t\t# distance_matrix = getDistanceMatrix(descriptors1, all_descriptors2)\n\n\t\tnegative_distance2 = torch.min(\n\t\t\tdistance_matrix + (1 - is_out_of_safe_radius.float()) * 10.,\n\t\t\tdim=1\n\t\t)[0]\n\t\t\n\t\t# negative_distance2 = semiHardMine(distance_matrix, is_out_of_safe_radius, positive_distance, margin)\n\t\t\n\t\tall_fmap_pos1 = grid_positions(h1, w1, device)\n\t\tposition_distance = torch.max(\n\t\t\ttorch.abs(\n\t\t\t\tfmap_pos1.unsqueeze(2).float() -\n\t\t\t\tall_fmap_pos1.unsqueeze(1)\n\t\t\t),\n\t\t\tdim=0\n\t\t)[0]\n\t\tis_out_of_safe_radius = position_distance > safe_radius\n\t\t\n\t\tdistance_matrix = 2 - 2 * (descriptors2.t() @ all_descriptors1)\n\t\t# distance_matrix = getDistanceMatrix(descriptors2, all_descriptors1)\n\t\t\n\t\tnegative_distance1 = torch.min(\n\t\t\tdistance_matrix + (1 - is_out_of_safe_radius.float()) * 10.,\n\t\t\tdim=1\n\t\t)[0]\n\n\t\t# negative_distance1 = semiHardMine(distance_matrix, is_out_of_safe_radius, positive_distance, margin)\n\n\t\tdiff = positive_distance - torch.min(\n\t\t\tnegative_distance1, negative_distance2\n\t\t)\n\n\t\tscores2 = scores2[fmap_pos2[0, :], fmap_pos2[1, :]]\n\n\t\tloss = loss + (\n\t\t\ttorch.sum(scores1 * scores2 * F.relu(margin + diff)) /\n\t\t\t(torch.sum(scores1 * scores2) )\n\t\t)\n\n\t\thas_grad = True\n\t\tn_valid_samples += 1\n\n\t\tif plot and batch['batch_idx'] % batch['log_interval'] == 0:\n\t\t\t# drawTraining(batch['image1'], batch['image2'], pos1, pos2, batch, idx_in_batch, output, save=True)\n\t\t\tdrawTraining(img_warp1, img_warp2, pos1, pos2, batch, idx_in_batch, output, save=True)\n\n\tif not has_grad:\n\t\traise NoGradientError\n\n\tloss = loss / (n_valid_samples )\n\n\treturn loss\n\n\ndef interpolate_depth(pos, depth):\n\t# Depth filtering and interpolation of sparse depth\n\n\tdevice = pos.device\n\n\tids = torch.arange(0, pos.size(1), device=device)\n\n\th, w = depth.size()\n\n\ti = pos[0, :]\n\tj = pos[1, :]\n\n\t# Valid corners\n\ti_top_left = torch.floor(i).long()\n\tj_top_left = torch.floor(j).long()\n\tvalid_top_left = torch.min(i_top_left >= 0, j_top_left >= 0)\n\n\ti_top_right = torch.floor(i).long()\n\tj_top_right = torch.ceil(j).long()\n\tvalid_top_right = 
torch.min(i_top_right >= 0, j_top_right < w)\n\n\ti_bottom_left = torch.ceil(i).long()\n\tj_bottom_left = torch.floor(j).long()\n\tvalid_bottom_left = torch.min(i_bottom_left < h, j_bottom_left >= 0)\n\n\ti_bottom_right = torch.ceil(i).long()\n\tj_bottom_right = torch.ceil(j).long()\n\tvalid_bottom_right = torch.min(i_bottom_right < h, j_bottom_right < w)\n\n\tvalid_corners = torch.min(\n\t\ttorch.min(valid_top_left, valid_top_right),\n\t\ttorch.min(valid_bottom_left, valid_bottom_right)\n\t)\n\n\ti_top_left = i_top_left[valid_corners]\n\tj_top_left = j_top_left[valid_corners]\n\n\ti_top_right = i_top_right[valid_corners]\n\tj_top_right = j_top_right[valid_corners]\n\n\ti_bottom_left = i_bottom_left[valid_corners]\n\tj_bottom_left = j_bottom_left[valid_corners]\n\n\ti_bottom_right = i_bottom_right[valid_corners]\n\tj_bottom_right = j_bottom_right[valid_corners]\n\n\tids = ids[valid_corners]\n\n\tif ids.size(0) == 0:\n\t\traise EmptyTensorError\n\n\t# Valid depth\n\tvalid_depth = torch.min(\n\t\ttorch.min(\n\t\t\tdepth[i_top_left, j_top_left] > 0,\n\t\t\tdepth[i_top_right, j_top_right] > 0\n\t\t),\n\t\ttorch.min(\n\t\t\tdepth[i_bottom_left, j_bottom_left] > 0,\n\t\t\tdepth[i_bottom_right, j_bottom_right] > 0\n\t\t)\n\t)\n\n\ti_top_left = i_top_left[valid_depth]\n\tj_top_left = j_top_left[valid_depth]\n\n\ti_top_right = i_top_right[valid_depth]\n\tj_top_right = j_top_right[valid_depth]\n\n\ti_bottom_left = i_bottom_left[valid_depth]\n\tj_bottom_left = j_bottom_left[valid_depth]\n\n\ti_bottom_right = i_bottom_right[valid_depth]\n\tj_bottom_right = j_bottom_right[valid_depth]\n\n\tids = ids[valid_depth]\n\n\tif ids.size(0) == 0:\n\t\traise EmptyTensorError\n\n\t# Interpolation\n\ti = i[ids]\n\tj = j[ids]\n\tdist_i_top_left = i - i_top_left.float()\n\tdist_j_top_left = j - j_top_left.float()\n\tw_top_left = (1 - dist_i_top_left) * (1 - dist_j_top_left)\n\tw_top_right = (1 - dist_i_top_left) * dist_j_top_left\n\tw_bottom_left = dist_i_top_left * (1 - dist_j_top_left)\n\tw_bottom_right = dist_i_top_left * dist_j_top_left\n\n\tinterpolated_depth = (\n\t\tw_top_left * depth[i_top_left, j_top_left] +\n\t\tw_top_right * depth[i_top_right, j_top_right] +\n\t\tw_bottom_left * depth[i_bottom_left, j_bottom_left] +\n\t\tw_bottom_right * depth[i_bottom_right, j_bottom_right]\n\t)\n\n\tpos = torch.cat([i.view(1, -1), j.view(1, -1)], dim=0)\n\n\treturn [interpolated_depth, pos, ids]\n\n\ndef uv_to_pos(uv):\n\treturn torch.cat([uv[1, :].view(1, -1), uv[0, :].view(1, -1)], dim=0)\n\n\ndef warp(\n\t\tpos1,\n\t\tdepth1, intrinsics1, pose1, bbox1,\n\t\tdepth2, intrinsics2, pose2, bbox2\n):\n\tdevice = pos1.device\n\n\tZ1, pos1, ids = interpolate_depth(pos1, depth1)\n\t# COLMAP convention\n\tu1 = pos1[1, :] + bbox1[1] + .5\n\tv1 = pos1[0, :] + bbox1[0] + .5\n\n\tX1 = (u1 - intrinsics1[0, 2]) * (Z1 / intrinsics1[0, 0])\n\tY1 = (v1 - intrinsics1[1, 2]) * (Z1 / intrinsics1[1, 1])\n\n\tXYZ1_hom = torch.cat([\n\t\tX1.view(1, -1),\n\t\tY1.view(1, -1),\n\t\tZ1.view(1, -1),\n\t\ttorch.ones(1, Z1.size(0), device=device)\n\t], dim=0)\n\tXYZ2_hom = torch.chain_matmul(pose2, torch.inverse(pose1), XYZ1_hom)\n\tXYZ2 = XYZ2_hom[: -1, :] / XYZ2_hom[-1, :].view(1, -1)\n\n\tuv2_hom = torch.matmul(intrinsics2, XYZ2)\n\tuv2 = uv2_hom[: -1, :] / uv2_hom[-1, :].view(1, -1)\n\n\tu2 = uv2[0, :] - bbox2[1] - .5\n\tv2 = uv2[1, :] - bbox2[0] - .5\n\tuv2 = torch.cat([u2.view(1, -1), v2.view(1, -1)], dim=0)\n\n\tannotated_depth, pos2, new_ids = interpolate_depth(uv_to_pos(uv2), depth2)\n\n\tids = ids[new_ids]\n\tpos1 = pos1[:, 
new_ids]\n\testimated_depth = XYZ2[2, new_ids]\n\n\tdiffernce = torch.abs(estimated_depth - annotated_depth)\n\tinlier_mask = torch.abs(estimated_depth - annotated_depth) < 0.05\n\n\tids = ids[inlier_mask]\n\tif ids.size(0) == 0:\n\t\traise EmptyTensorError\n\n\tpos2 = pos2[:, inlier_mask]\n\tpos1 = pos1[:, inlier_mask]\n\n\treturn pos1, pos2, ids\n\n\ndef drawTraining(image1, image2, pos1, pos2, batch, idx_in_batch, output, save=False):\n\tpos1_aux = pos1.cpu().numpy()\n\tpos2_aux = pos2.cpu().numpy()\n\n\tk = pos1_aux.shape[1]\n\tcol = np.random.rand(k, 3)\n\tn_sp = 4\n\tplt.figure()\n\tplt.subplot(1, n_sp, 1)\n\tim1 = imshow_image(\n\t\timage1[0].cpu().numpy(),\n\t\tpreprocessing=batch['preprocessing']\n\t)\n\tplt.imshow(im1)\n\tplt.scatter(\n\t\tpos1_aux[1, :], pos1_aux[0, :],\n\t\ts=0.25**2, c=col, marker=',', alpha=0.5\n\t)\n\tplt.axis('off')\n\tplt.subplot(1, n_sp, 2)\n\tplt.imshow(\n\t\toutput['scores1'][idx_in_batch].data.cpu().numpy(),\n\t\tcmap='Reds'\n\t)\n\tplt.axis('off')\n\tplt.subplot(1, n_sp, 3)\n\tim2 = imshow_image(\n\t\timage2[0].cpu().numpy(),\n\t\tpreprocessing=batch['preprocessing']\n\t)\n\tplt.imshow(im2)\n\tplt.scatter(\n\t\tpos2_aux[1, :], pos2_aux[0, :],\n\t\ts=0.25**2, c=col, marker=',', alpha=0.5\n\t)\n\tplt.axis('off')\n\tplt.subplot(1, n_sp, 4)\n\tplt.imshow(\n\t\toutput['scores2'][idx_in_batch].data.cpu().numpy(),\n\t\tcmap='Reds'\n\t)\n\tplt.axis('off')\n\n\tif(save == True):\n\t\tsavefig('train_vis/%s.%02d.%02d.%d.png' % (\n\t\t\t'train' if batch['train'] else 'valid',\n\t\t\tbatch['epoch_idx'],\n\t\t\tbatch['batch_idx'] // batch['log_interval'],\n\t\t\tidx_in_batch\n\t\t), dpi=300)\n\telse:\n\t\tplt.show()\n\t\n\tplt.close()\n\n\tim1 = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)\n\tim2 = cv2.cvtColor(im2, cv2.COLOR_BGR2RGB)\n\n\tfor i in range(0, pos1_aux.shape[1], 1):\n\t\tim1 = cv2.circle(im1, (pos1_aux[1, i], pos1_aux[0, i]), 1, (0, 0, 255), 2)\n\tfor i in range(0, pos2_aux.shape[1], 1):\n\t\tim2 = cv2.circle(im2, (pos2_aux[1, i], pos2_aux[0, i]), 1, (0, 0, 255), 2)\n\n\tim3 = cv2.hconcat([im1, im2])\n\n\tfor i in range(0, pos1_aux.shape[1], 1):\n\t\tim3 = cv2.line(im3, (int(pos1_aux[1, i]), int(pos1_aux[0, i])), (int(pos2_aux[1, i]) + im1.shape[1], int(pos2_aux[0, i])), (0, 255, 0), 1)\n\n\tif(save == True):\n\t\tcv2.imwrite('train_vis/%s.%02d.%02d.%d.png' % (\n\t\t\t'train_corr' if batch['train'] else 'valid',\n\t\t\tbatch['epoch_idx'],\n\t\t\tbatch['batch_idx'] // batch['log_interval'],\n\t\t\tidx_in_batch\n\t\t), im3)\n\telse:\n\t\tcv2.imshow('Image', im3)\n\t\tcv2.waitKey(0)\n\n\ndef homoAlign(pos1, pos2, H1, H2, device):\n\tones = torch.ones(pos1.shape[1]).reshape(1, pos1.shape[1]).to(device)\n\n\tpos1[[0, 1]] = pos1[[1, 0]]\n\tpos2[[0, 1]] = pos2[[1, 0]]\n\n\tpos1Homo = torch.cat((pos1, ones), dim=0)\n\tpos2Homo = torch.cat((pos2, ones), dim=0)\n\n\tpos1Warp = H1 @ pos1Homo\n\tpos2Warp = H2 @ pos2Homo\n\n\tpos1Warp = pos1Warp/pos1Warp[2, :]\n\tpos1Warp = pos1Warp[0:2, :]\n\n\tpos2Warp = pos2Warp/pos2Warp[2, :]\n\tpos2Warp = pos2Warp[0:2, :]\n\n\tpos1Warp[[0, 1]] = pos1Warp[[1, 0]]\n\tpos2Warp[[0, 1]] = pos2Warp[[1, 0]]\n\n\tpos1Pov = []\n\tpos2Pov = []\n\n\tfor i in range(pos1.shape[1]):\n\t\tif(380 > pos1Warp[0, i] > 0 and 380 > pos1Warp[1, i] > 0 and 380 > pos2Warp[0, i] > 0 and 380 > pos2Warp[1, i] > 0):\n\t\t\tpos1Pov.append((pos1Warp[0, i], pos1Warp[1, i]))\n\t\t\tpos2Pov.append((pos2Warp[0, i], pos2Warp[1, i]))\n\n\tpos1Pov = torch.Tensor(pos1Pov).to(device)\n\tpos2Pov = torch.Tensor(pos2Pov).to(device)\n\n\tpos1Pov = torch.transpose(pos1Pov, 0, 
1)\n\tpos2Pov = torch.transpose(pos2Pov, 0, 1)\n\n\treturn pos1Pov, pos2Pov\n\n\ndef idsAlign(pos1, device, h1, w1):\n\t# row = pos1[0, :]/8\n\t# col = pos1[1, :]/8\n\tpos1D = downscale_positions(pos1, scaling_steps=3)\n\trow = pos1D[0, :]\n\tcol = pos1D[1, :]\n\n\tids = []\n\n\tfor i in range(row.shape[0]):\n\t\tindex = (h1 * row[i]) + col[i]\n\t\tids.append(index)\n\n\tids = torch.round(torch.Tensor(ids)).long().to(device)\n\n\treturn ids\n\n\ndef semiHardMine(distance_matrix, is_out_of_safe_radius, positive_distance, margin):\n\tnegative_distances = distance_matrix + (1 - is_out_of_safe_radius.float()) * 10.\n\t\n\tnegDist = []\n\n\tfor i, row in enumerate(negative_distances):\n\t\tposDist = positive_distance[i]\n\t\t\n\t\trow = row[(posDist + margin > row) & (row > posDist)]\n\t\t\n\t\tif(row.size(0) == 0):\n\t\t\tnegDist.append(negative_distances[i, 0])\n\t\telse:\n\t\t\tperm = torch.randperm(row.size(0))\n\t\t\tnegDist.append(row[perm[0]])\n\t\t\n\tnegDist = torch.Tensor(negDist).to(positive_distance.device)\n\n\treturn negDist\n" ]
[ [ "torch.abs", "matplotlib.pyplot.imshow", "torch.transpose", "torch.cat", "numpy.asarray", "torch.sum", "torch.ones", "torch.from_numpy", "torch.inverse", "matplotlib.pyplot.subplot", "torch.nn.functional.relu", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "torch.floor", "torch.min", "numpy.random.rand", "numpy.floor", "numpy.array", "matplotlib.pyplot.show", "torch.nn.functional.normalize", "torch.ceil", "matplotlib.pyplot.scatter", "torch.Tensor", "matplotlib.use", "torch.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xhuohai/nncase
[ "cf7921c273c7446090939c64f57ef783a62bf29c" ]
[ "tests/importer/tflite_/basic/test_unary.py" ]
[ "# Copyright 2019-2021 Canaan Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"System test: test unary\"\"\"\n# pylint: disable=invalid-name, unused-argument, import-outside-toplevel\n\nimport pytest\nimport tensorflow as tf\nimport numpy as np\nfrom tflite_test_runner import TfliteTestRunner\n\n\ndef _make_module(in_shape):\n class UnaryModule(tf.Module):\n def __init__(self):\n super(UnaryModule).__init__()\n\n @tf.function(input_signature=[tf.TensorSpec(in_shape, tf.float32)])\n def __call__(self, x):\n outs = []\n outs.append(tf.math.abs(-x))\n outs.append(tf.math.ceil(x))\n outs.append(tf.math.cos(x))\n outs.append(tf.math.exp(x))\n # outs.append(tf.math.floor(x)) # large errors in ptq\n outs.append(tf.math.log(x + 2))\n outs.append(tf.math.negative(x))\n # outs.append(tf.math.round(x))\n outs.append(tf.math.rsqrt(x + 2))\n outs.append(tf.math.sin(x))\n outs.append(tf.math.sqrt(x + 2))\n outs.append(tf.math.square(x))\n outs.append(tf.math.tanh(x))\n outs.append(tf.math.sigmoid(x))\n return outs\n return UnaryModule()\n\n\nin_shapes = [\n [3],\n [64, 3],\n [3, 64, 3],\n [8, 6, 16, 3]\n]\n\n\[email protected]('in_shape', in_shapes)\ndef test_unary(in_shape, request):\n module = _make_module(in_shape)\n\n runner = TfliteTestRunner(request.node.name)\n model_file = runner.from_tensorflow(module)\n runner.run(model_file)\n\n\nif __name__ == \"__main__\":\n pytest.main(\n ['-vv', 'test_unary.py'])\n" ]
[ [ "tensorflow.math.abs", "tensorflow.math.sqrt", "tensorflow.math.cos", "tensorflow.math.negative", "tensorflow.math.rsqrt", "tensorflow.math.log", "tensorflow.math.exp", "tensorflow.math.sigmoid", "tensorflow.math.sin", "tensorflow.math.ceil", "tensorflow.math.square", "tensorflow.math.tanh", "tensorflow.TensorSpec" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
RicardoSerr/ProDy
[ "bdd7bab89d32033d20cd26bb17789f94ee4f7c02", "bdd7bab89d32033d20cd26bb17789f94ee4f7c02", "bdd7bab89d32033d20cd26bb17789f94ee4f7c02" ]
[ "prody/dynamics/analysis.py", "prody/dynamics/functions.py", "prody/proteins/header.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"This module defines functions for calculating physical properties from normal\nmodes.\"\"\"\n\nimport time\n\nimport numpy as np\n\nfrom prody import LOGGER\nfrom prody.atomic import Atomic\nfrom prody.ensemble import Ensemble, Conformation\nfrom prody.trajectory import TrajBase\nfrom prody.utilities import importLA, checkCoords, div0\nfrom numpy import sqrt, arange, log, polyfit, array\n\nfrom .nma import NMA\nfrom .modeset import ModeSet\nfrom .mode import VectorBase, Mode, Vector\nfrom .gnm import GNMBase\n\n__all__ = ['calcCollectivity', 'calcCovariance', 'calcCrossCorr',\n 'calcFractVariance', 'calcSqFlucts', 'calcTempFactors',\n 'calcProjection', 'calcCrossProjection',\n 'calcSpecDimension', 'calcPairDeformationDist',\n 'calcDistFlucts', 'calcHinges', 'calcHitTime', 'calcHitTime',\n 'calcAnisousFromModel', 'calcScipionScore', 'calcHemnmaScore']\n #'calcEntropyTransfer', 'calcOverallNetEntropyTransfer']\n\ndef calcCollectivity(mode, masses=None, is3d=None):\n \"\"\"Returns collectivity of the mode. This function implements collectivity\n as defined in equation 5 of [BR95]_. If *masses* are provided, they will\n be incorporated in the calculation. Otherwise, atoms are assumed to have\n uniform masses.\n\n .. [BR95] Bruschweiler R. Collective protein dynamics and nuclear\n spin relaxation. *J Chem Phys* **1995** 102:3396-3403.\n\n :arg mode: mode(s) or vector(s)\n :type mode: :class:`.Mode`, :class:`.Vector`, :class:`.ModeSet`,\n :class:`.NMA`, :class:`~numpy.ndarray`\n\n :arg masses: atomic masses\n :type masses: :class:`numpy.ndarray`\n \n :arg is3d: whether mode is 3d. Default is **None** which means determine \n the value based on ``mode.is3d()``.\n :type is3d: bool\n \"\"\"\n\n if isinstance(mode, np.ndarray):\n V = mode\n ndim = V.ndim\n shape = V.shape\n\n if is3d is None:\n is3d = False\n\n if ndim == 0:\n raise ValueError('mode cannot be an empty array')\n elif ndim == 1:\n V = V[:, np.newaxis]\n\n n = shape[0]\n if is3d:\n n_atoms = n // 3\n else:\n n_atoms = n\n else:\n V, W, is3d_, n_atoms = _getModeProperties(mode)\n if is3d is None:\n is3d = is3d_\n \n colls = []\n\n def log0(a):\n return log(a + np.finfo(float).eps)\n\n for v in V.T:\n if is3d:\n u2in = (v ** 2)\n u2in_Nx3 = np.reshape(u2in, (n_atoms, 3))\n u2in = u2in_Nx3.sum(axis=1)\n else:\n u2in = (v ** 2)\n if masses is not None:\n if len(masses) != n_atoms:\n raise ValueError('length of masses must be equal to number of atoms')\n u2in = u2in / masses\n u2in = u2in * (1 / u2in.sum() ** 0.5)\n coll = np.exp(-(u2in * log0(u2in)).sum()) / n_atoms\n colls.append(coll)\n \n if len(colls) == 1:\n return coll\n else:\n return np.array(colls)\n\ndef calcSpecDimension(mode):\n\n \"\"\"\n :arg mode: mode or vector\n :type mode: :class:`.Mode` or :class:`.Vector`\n\n \"\"\"\n # if not isinstance(mode, Mode):\n # raise TypeError('mode must be a Mode instance')\n \n length = mode.shape[0]\n numbers = arange(2,length+1)\n ds,p=polyfit(log(sqrt(mode[0:int(length*0.25)])),log(numbers[0:int(length*0.25)]),1)\n \n return ds\n\ndef calcFracDimension(mode):\n \"\"\"\n :arg mode: mode or vector\n :type mode: mode or vector \"\"\"\n\n\n\n\ndef calcFractVariance(mode):\n \"\"\"Returns fraction of variance explained by the *mode*. 
Fraction of\n variance is the ratio of the variance along a mode to the trace of the\n covariance matrix of the model.\"\"\"\n\n if isinstance(mode, Mode):\n var = mode.getVariance()\n trace = mode.getModel()._getTrace()\n elif isinstance(mode, (ModeSet, NMA)):\n var = mode.getVariances()\n if isinstance(mode, ModeSet):\n trace = mode.getModel()._getTrace()\n else:\n trace = mode._getTrace()\n else:\n raise TypeError('mode must be a Mode instance')\n if trace is None:\n raise ValueError('modes are not calculated')\n\n return var / trace\n\n\ndef calcProjection(ensemble, modes, rmsd=True, norm=False):\n \"\"\"Returns projection of conformational deviations onto given modes.\n *ensemble* coordinates are used to calculate the deviations that are\n projected onto *modes*. For K conformations and M modes, a (K,M)\n matrix is returned.\n\n :arg ensemble: an ensemble, trajectory or a conformation for which\n deviation(s) will be projected, or a deformation vector\n :type ensemble: :class:`.Ensemble`, :class:`.Conformation`,\n :class:`.Vector`, :class:`.Trajectory`\n \n :arg modes: up to three normal modes\n :type modes: :class:`.Mode`, :class:`.ModeSet`, :class:`.NMA`\n\n By default, root-mean-square deviation (RMSD) along the normal mode is\n calculated. To calculate the raw projection pass ``rmsd=False``.\n\n By default, the projection is not normalized. If you would like it to be,\n pass ``norm=True``.\n\n :class:`.Vector` instances are accepted as *ensemble* argument to allow\n for projecting a deformation vector onto normal modes.\"\"\"\n\n if not isinstance(ensemble, (Ensemble, Conformation, Vector, TrajBase)):\n raise TypeError('ensemble must be Ensemble, Conformation, Vector, '\n 'or a TrajBase, not {0}'.format(type(ensemble)))\n if not isinstance(modes, (NMA, ModeSet, VectorBase)):\n raise TypeError('rows must be NMA, ModeSet, or Mode, not {0}'\n .format(type(modes)))\n if not modes.is3d():\n raise ValueError('modes must be 3-dimensional')\n if isinstance(ensemble, Vector):\n n_atoms = ensemble.numAtoms()\n else:\n n_atoms = ensemble.numSelected()\n if n_atoms != modes.numAtoms():\n raise ValueError('number of atoms are not the same')\n if isinstance(ensemble, Vector):\n if not ensemble.is3d():\n raise ValueError('ensemble must be a 3d vector instance')\n deviations = ensemble._getArray()\n elif isinstance(ensemble, (Ensemble, Conformation)):\n deviations = ensemble.getDeviations()\n else:\n nfi = ensemble.nextIndex()\n ensemble.goto(0)\n deviations = np.array([frame.getDeviations() for frame in ensemble])\n ensemble.goto(nfi)\n if deviations.ndim == 3:\n deviations = deviations.reshape((deviations.shape[0],\n deviations.shape[1] * 3))\n elif deviations.ndim == 2:\n deviations = deviations.reshape((1, deviations.shape[0] * 3))\n else:\n deviations = deviations.reshape((1, deviations.shape[0]))\n la = importLA()\n if norm:\n N = la.norm(deviations)\n if N != 0:\n deviations = deviations / N\n projection = np.dot(deviations, modes._getArray())\n if rmsd:\n projection = (1 / (n_atoms ** 0.5)) * projection\n return projection\n\n\ndef calcCrossProjection(ensemble, mode1, mode2, scale=None, **kwargs):\n \"\"\"Returns projection of conformational deviations onto modes from\n different models.\n\n :arg ensemble: ensemble for which deviations will be projected\n :type ensemble: :class:`.Ensemble`\n\n :arg mode1: normal mode to project conformations onto\n :type mode1: :class:`.Mode`, :class:`.Vector`\n\n :arg mode2: normal mode to project conformations onto\n :type mode2: :class:`.Mode`, 
:class:`.Vector`\n\n :arg scale: scale width of the projection onto mode1 (``x``) or mode2(``y``),\n an optimized scaling factor (scalar) will be calculated by default \n or a value of scalar can be passed.\n \n This function uses calcProjection and its arguments can be \n passed to it as keyword arguments.\n By default, this function applies RMSD scaling and normalisation. \n These can be turned off with ``rmsd=False`` and ``norm=False``.\"\"\"\n\n if not isinstance(ensemble, (Ensemble, Conformation, Vector, TrajBase)):\n raise TypeError('ensemble must be Ensemble, Conformation, Vector, '\n 'or a Trajectory, not {0}'.format(type(ensemble)))\n if not isinstance(mode1, VectorBase):\n raise TypeError('mode1 must be a Mode instance, not {0}'\n .format(type(mode1)))\n if not mode1.is3d():\n raise ValueError('mode1 must be 3-dimensional')\n if not isinstance(mode2, VectorBase):\n raise TypeError('mode2 must be a Mode instance, not {0}'\n .format(type(mode2)))\n if not mode2.is3d():\n raise ValueError('mode2 must be 3-dimensional')\n\n if scale is not None:\n assert isinstance(scale, str), 'scale must be a string'\n scale = scale.lower()\n assert scale in ('x', 'y'), 'scale must be x or y'\n\n xcoords = calcProjection(ensemble, mode1, kwargs.get('rmsd', True), kwargs.get('norm', True))\n ycoords = calcProjection(ensemble, mode2, kwargs.pop('rmsd', True), kwargs.pop('norm', True))\n if scale:\n scalar = kwargs.get('scalar', None)\n if scalar:\n assert isinstance(scalar, (float, int)), 'scalar must be a number'\n else:\n scalar = ((ycoords.max() - ycoords.min()) /\n (xcoords.max() - xcoords.min())\n ) * np.sign(np.dot(xcoords, ycoords))\n if scale == 'x':\n LOGGER.info('Projection onto {0} is scaled by {1:.2f}'\n .format(mode1, scalar))\n else:\n scalar = 1 / scalar\n LOGGER.info('Projection onto {0} is scaled by {1:.2f}'\n .format(mode2, scalar))\n\n if scale == 'x':\n xcoords = xcoords * scalar\n else:\n ycoords = ycoords * scalar\n\n return xcoords, ycoords\n\ndef _getModeProperties(modes):\n V = []; W = []; is3d = None; n_atoms = 0\n if isinstance(modes, VectorBase):\n V = modes._getArray()\n if isinstance(modes, Mode):\n W = modes.getVariance()\n else:\n W = 1.\n V = np.asarray([V]).T\n W = np.asarray([[W]])\n is3d = modes.is3d()\n n_atoms = modes.numAtoms()\n elif isinstance(modes, (NMA, ModeSet)):\n V = modes._getArray()\n W = np.diag(modes.getVariances())\n is3d = modes.is3d()\n n_atoms = modes.numAtoms()\n elif isinstance(modes, list):\n for mode in modes:\n if not isinstance(mode, VectorBase):\n raise TypeError('modes can be a list of VectorBase instances, '\n 'not {0}'.format(type(mode)))\n V.append(mode._getArray())\n if isinstance(mode, Mode):\n W.append(modes.getVariance())\n else:\n W.append(1.)\n if is3d is None:\n is3d = mode.is3d()\n n_atoms = mode.numAtoms()\n else:\n if is3d != mode.is3d():\n raise ValueError('modes must be either all from ANM or GNM')\n if n_atoms != mode.numAtoms():\n raise ValueError('each mode in the list must have the same number of atoms')\n V = np.array(V).T\n W = np.diag(W)\n else:\n raise TypeError('modes must be a Mode, NMA, ModeSet instance, '\n 'or a list of Mode instances, not {0}'.format(type(modes)))\n return V, W, is3d, n_atoms\n\ndef calcSqFlucts(modes):\n \"\"\"Returns sum of square-fluctuations for given set of normal *modes*.\n Square fluctuations for a single mode is obtained by multiplying the\n square of the mode array with the variance (:meth:`.Mode.getVariance`)\n along the mode. 
For :class:`.PCA` and :class:`.EDA` models built using\n coordinate data in Å, unit of square-fluctuations is |A2|, for\n :class:`.ANM` and :class:`.GNM`, on the other hand, it is arbitrary or\n relative units.\"\"\"\n\n V = []; W = []; is3d = None; n_atoms = 0\n V, W, is3d, n_atoms = _getModeProperties(modes)\n\n sq_flucts = np.dot(V * V, W).sum(axis=1)\n\n if is3d:\n sq_flucts_Nx3 = np.reshape(sq_flucts, (n_atoms, 3))\n sq_flucts = sq_flucts_Nx3.sum(axis=1)\n return sq_flucts\n\n\ndef calcCrossCorr(modes, n_cpu=1, norm=True):\n \"\"\"Returns cross-correlations matrix. For a 3-d model, cross-correlations\n matrix is an NxN matrix, where N is the number of atoms. Each element of\n this matrix is the trace of the submatrix corresponding to a pair of atoms.\n Cross-correlations matrix may be calculated using all modes or a subset of modes\n of an NMA instance. For large systems, calculation of cross-correlations\n matrix may be time consuming. Optionally, multiple processors may be\n employed to perform calculations by passing ``n_cpu=2`` or more.\"\"\"\n\n if not isinstance(n_cpu, int):\n raise TypeError('n_cpu must be an integer')\n elif n_cpu < 1:\n raise ValueError('n_cpu must be equal to or greater than 1')\n\n if not isinstance(modes, (Mode, Vector, NMA, ModeSet)):\n if isinstance(modes, list):\n try:\n is3d = modes[0].is3d()\n except:\n raise TypeError('modes must be a list of Mode or Vector instances, '\n 'not {0}'.format(type(modes)))\n else:\n raise TypeError('modes must be a Mode, Vector, NMA, or ModeSet instance, '\n 'not {0}'.format(type(modes)))\n else:\n is3d = modes.is3d()\n\n if is3d:\n model = modes\n if isinstance(modes, (Mode, ModeSet)):\n model = modes._model\n if isinstance(modes, (Mode)):\n indices = [modes.getIndex()]\n n_modes = 1\n else:\n indices = modes.getIndices()\n n_modes = len(modes)\n elif isinstance(modes, Vector):\n indices = [0]\n n_modes = 1\n else:\n n_modes = len(modes)\n indices = np.arange(n_modes)\n \n array = model._getArray()\n n_atoms = model._n_atoms\n\n if not isinstance(modes, Vector):\n variances = model._vars\n else:\n array = array.reshape(-1, 1)\n variances = np.ones(1)\n\n if n_cpu == 1:\n s = (n_modes, n_atoms, 3)\n arvar = (array[:, indices]*variances[indices]).T.reshape(s)\n array = array[:, indices].T.reshape(s)\n covariance = np.tensordot(array.transpose(2, 0, 1),\n arvar.transpose(0, 2, 1),\n axes=([0, 1], [1, 0]))\n else:\n import multiprocessing\n n_cpu = min(multiprocessing.cpu_count(), n_cpu)\n queue = multiprocessing.Queue()\n size = n_modes / n_cpu\n for i in range(n_cpu):\n if n_cpu - i == 1:\n indices = modes.indices[i*size:]\n else:\n indices = modes.indices[i*size:(i+1)*size]\n process = multiprocessing.Process(\n target=_crossCorrelations,\n args=(queue, n_atoms, array, variances, indices))\n process.start()\n while queue.qsize() < n_cpu:\n time.sleep(0.05)\n covariance = queue.get()\n while queue.qsize() > 0:\n covariance += queue.get()\n else:\n covariance = calcCovariance(modes)\n if norm:\n diag = np.power(covariance.diagonal(), 0.5)\n D = np.outer(diag, diag)\n covariance = div0(covariance, D)\n return covariance\n\n\ndef _crossCorrelations(queue, n_atoms, array, variances, indices):\n \"\"\"Calculate covariance-matrix for a subset of modes.\"\"\"\n\n n_modes = len(indices)\n arvar = (array[:, indices] * variances[indices]).T.reshape((n_modes,\n n_atoms, 3))\n array = array[:, indices].T.reshape((n_modes, n_atoms, 3))\n covariance = np.tensordot(array.transpose(2, 0, 1),\n arvar.transpose(0, 2, 1),\n axes=([0, 1], 
[1, 0]))\n queue.put(covariance)\n\ndef calcDistFlucts(modes, n_cpu=1, norm=True):\n \"\"\"Returns the matrix of distance fluctuations (i.e. an NxN matrix\n where N is the number of residues, of MSFs in the inter-residue distances)\n computed from the cross-correlation matrix (see Eq. 12.E.1 in [IB18]_). \n The arguments are the same as in :meth:`.calcCrossCorr`.\n\n .. [IB18] Dill K, Jernigan RL, Bahar I. Protein Actions: Principles and\n Modeling. *Garland Science* **2017**. \"\"\"\n\n cc = calcCrossCorr(modes, n_cpu=n_cpu, norm=norm)\n cc_diag = np.diag(cc).reshape(-1,1)\n distFluct = cc_diag.T + cc_diag -2.*cc\n return distFluct\n\ndef calcTempFactors(modes, atoms):\n \"\"\"Returns temperature (β) factors calculated using *modes* from a\n :class:`.ANM` or :class:`.GNM` instance scaled according to the \n experimental B-factors from *atoms*.\"\"\"\n\n model = modes.getModel()\n if not isinstance(model, GNMBase):\n raise TypeError('modes must come from GNM or ANM')\n if model.numAtoms() != atoms.numAtoms():\n raise ValueError('modes and atoms must have same number of nodes')\n sqf = calcSqFlucts(modes)\n expBetas = atoms.getBetas()\n # add warning message if experimental B-factors are zeros or meaningless (e.g., having same values)?\n if expBetas.max() < 0.5 or expBetas.std() < 0.5:\n LOGGER.warning('Experimental B-factors are quite small or meaningless. The calculated B-factors may be incorrect.')\n return sqf * (expBetas.sum() / sqf.sum())\n\n\ndef calcCovariance(modes):\n \"\"\"Returns covariance matrix calculated for given *modes*.\n This is 3Nx3N for 3-d models and NxN (equivalent to cross-correlations) \n for 1-d models such as GNM.\"\"\"\n\n if isinstance(modes, NMA):\n return modes.getCovariance()\n else:\n V, W, _, _ = _getModeProperties(modes)\n return np.dot(V, np.dot(W, V.T))\n\n\ndef calcPairDeformationDist(model, coords, ind1, ind2, kbt=1.): \n \"\"\"Returns distribution of the deformations in the distance contributed by each mode \n for selected pair of residues *ind1* *ind2* using *model* from a :class:`.ANM`.\n Method described in [EB08]_ equation (10) and figure (2). \n \n .. [EB08] Eyal E., Bahar I. Toward a Molecular Understanding of \n the Anisotropic Response of Proteins to External Forces:\n Insights from Elastic Network Models. *Biophys J* **2008** 94:3424-34355. 
\n \n :arg model: this is an 3-dimensional :class:`NMA` instance from a :class:`.ANM`\n calculations.\n :type model: :class:`.ANM` \n\n :arg coords: a coordinate set or an object with :meth:`getCoords` method.\n Recommended: ``coords = parsePDB('pdbfile').select('protein and name CA')``.\n :type coords: :class:`~numpy.ndarray`.\n\n :arg ind1: first residue number.\n :type ind1: int \n \n :arg ind2: second residue number.\n :type ind2: int \n \"\"\"\n\n try:\n resnum_list = coords.getResnums()\n resnam_list = coords.getResnames()\n coords = (coords._getCoords() if hasattr(coords, '_getCoords') else\n coords.getCoords())\n except AttributeError:\n try:\n checkCoords(coords)\n except TypeError:\n raise TypeError('coords must be a Numpy array or an object '\n 'with `getCoords` method')\n \n if not isinstance(model, NMA):\n raise TypeError('model must be a NMA instance')\n elif not model.is3d():\n raise TypeError('model must be a 3-dimensional NMA instance')\n elif len(model) == 0:\n raise ValueError('model must have normal modes calculated')\n \n linalg = importLA()\n n_atoms = model.numAtoms()\n n_modes = model.numModes()\n LOGGER.timeit('_pairdef')\n\n r_ij = np.zeros((n_atoms,n_atoms,3))\n r_ij_norm = np.zeros((n_atoms,n_atoms,3))\n\n for i in range(n_atoms):\n for j in range(i+1,n_atoms):\n r_ij[i][j] = coords[j,:] - coords[i,:]\n r_ij[j][i] = r_ij[i][j]\n r_ij_norm[i][j] = r_ij[i][j]/linalg.norm(r_ij[i][j])\n r_ij_norm[j][i] = r_ij_norm[i][j]\n\n eigvecs = model.getEigvecs()\n eigvals = model.getEigvals()\n \n D_pair_k = []\n mode_nr = []\n ind1 = ind1 - resnum_list[0]\n ind2 = ind2 - resnum_list[0]\n\n for m in range(6,n_modes):\n U_ij_k = [(eigvecs[ind1*3][m] - eigvecs[ind2*3][m]), (eigvecs[ind1*3+1][m] \\\n - eigvecs[ind2*3+1][m]), (eigvecs[ind1*3+2][m] - eigvecs[ind2*3+2][m])] \n D_ij_k = abs(sqrt(kbt/eigvals[m])*(np.vdot(r_ij_norm[ind1][ind2], U_ij_k))) \n D_pair_k.append(D_ij_k)\n mode_nr.append(m)\n\n LOGGER.report('Deformation was calculated in %.2lfs.', label='_pairdef')\n \n return mode_nr, D_pair_k\n\ndef calcHinges(modes, atoms=None, flag=False):\n \"\"\"Returns the hinge sites identified using normal modes. \n\n :arg modes: normal modes of which will be used to identify hinge sites\n :type modes: :class:`.GNM` \n \n :arg atoms: an Atomic object on which to map hinges. The output will then be a selection. \n :type atoms: :class:`.Atomic`\n\n :arg flag: whether return flag or index array. 
Default is **False**\n :type flag: bool\n\n \"\"\"\n\n def identify(v):\n # obtain the signs of eigenvector\n s = np.sign(v)\n # obtain the relative magnitude of eigenvector\n mag = np.sign(np.diff(np.abs(v)))\n # obtain the cross-overs\n torf = np.diff(s)!=0\n torf = np.append(torf, [False], axis=0)\n # find which side is more close to zero\n for j, m in enumerate(mag):\n if torf[j] and m < 0:\n torf[j+1] = True\n torf[j] = False\n\n return torf\n\n if modes.is3d():\n raise ValueError('3D models are not supported.')\n\n # obtain the eigenvectors\n V = modes.getArray()\n if V.ndim == 1:\n hinges = identify(V)\n elif V.ndim == 2:\n _, n = V.shape\n hinges = []\n for i in range(n):\n v = V[:, i]\n torf = identify(v)\n hinges.append(torf)\n\n hinges = np.stack(hinges).T\n else:\n raise TypeError('wrong dimension of the array: %d'%V.ndim)\n \n if not flag:\n hinge_list = np.where(hinges)[0]\n if atoms is not None:\n if isinstance(atoms, Atomic):\n return atoms[hinge_list]\n else:\n raise TypeError('atoms should be an Atomic object')\n return sorted(set(hinge_list))\n return hinges\n\ndef calcHitTime(model, method='standard'):\n \"\"\"Returns the hit and commute times between pairs of nodes calculated \n based on a :class:`.NMA` object. \n\n .. [CB95] Chennubhotla C., Bahar I. Signal Propagation in Proteins and Relation\n to Equilibrium Fluctuations. *PLoS Comput Biol* **2007** 3(9).\n\n :arg model: model to be used to calculate hit times\n :type model: :class:`.NMA` \n\n :arg method: method to be used to calculate hit times. Available options are \n ``\"standard\"`` or ``\"kirchhoff\"``. Default is ``\"standard\"``\n :type method: str\n\n :returns: (:class:`~numpy.ndarray`, :class:`~numpy.ndarray`)\n \"\"\"\n\n try:\n K = model.getKirchhoff()\n except AttributeError:\n raise TypeError('model must be an NMA instance')\n\n if K is None:\n raise ValueError('model not built')\n \n method = method.lower()\n\n D = np.diag(K)\n A = np.diag(D) - K\n\n start = time.time()\n linalg = importLA()\n if method == 'standard':\n st = D / sum(D)\n\n P = np.dot(np.diag(D**(-1)), A)\n W = np.ones((len(st), 1)) * st.T\n Z = linalg.pinv(np.eye(P.shape[0], P.shape[1]) - P + W)\n\n H = np.ones((len(st), 1)) * np.diag(Z).T - Z\n H = H / W\n H = H.T\n\n elif method == 'kirchhoff':\n K_inv = linalg.pinv(K)\n sum_D = sum(D)\n\n T1 = (sum_D * np.ones((len(D),1)) * np.diag(K_inv)).T\n\n T2 = sum_D * K_inv\n T3_i = np.dot((np.ones((len(D),1)) * D), K_inv)\n\n H = T1 - T2 + T3_i - T3_i.T\n\n C = H + H.T\n\n LOGGER.debug('Hit and commute times are calculated in {0:.2f}s.'\n .format(time.time()-start)) \n return H, C\n\n\ndef calcAnisousFromModel(model, ):\n \"\"\"Returns a Nx6 matrix containing anisotropic B factors (ANISOU lines)\n from a covariance matrix calculated from **model**.\n\n :arg model: 3D model from which to calculate covariance matrix\n :type model: :class:`.ANM`, :class:`.PCA`\n\n .. 
ipython:: python\n\n from prody import *\n protein = parsePDB('1ejg')\n anm, calphas = calcANM(protein)\n adp_matrix = calcAnisousFromModel(anm)\"\"\"\n\n if not isinstance(model, (NMA, Mode)) or not model.is3d():\n raise TypeError('model must be of type ANM, PCA or Mode, not {0}'\n .format(type(model)))\n\n cov = calcCovariance(model)\n n_atoms = model.numAtoms()\n \n submatrices = [cov[i*3:(i+1)*3, i*3:(i+1)*3] for i in range(n_atoms)]\n\n anisou = np.zeros((n_atoms, 6))\n for index, submatrix in enumerate(submatrices):\n anisou[index, 0] = submatrix[0, 0]\n anisou[index, 1] = submatrix[1, 1]\n anisou[index, 2] = submatrix[2, 2]\n anisou[index, 3] = submatrix[0, 1]\n anisou[index, 4] = submatrix[0, 2]\n anisou[index, 5] = submatrix[1, 2]\n return anisou\n\n\ndef calcScipionScore(modes):\n \"\"\"Calculate the score from hybrid electron microscopy normal mode analysis (HEMNMA) \n [CS14]_ as implemented in the Scipion continuousflex plugin [MH20]_. This score \n prioritises modes as a function of mode number and collectivity order.\n\n .. [CS14] Sorzano COS, de la Rosa-Trevín JM, Tama F, Jonić S.\n Hybrid Electron Microscopy Normal Mode Analysis graphical interface and protocol.\n *J Struct Biol* **2014** 188:134-41.\n\n .. [MH20] Harastani M, Sorzano COS, Jonić S. \n Hybrid Electron Microscopy Normal Mode Analysis with Scipion.\n *Protein Sci* **2020** 29:223-236.\n\n :arg modes: mode(s) or vector(s)\n :type modes: :class:`.Mode`, :class:`.Vector`, :class:`.ModeSet`, :class:`.NMA`\n \"\"\"\n n_modes = modes.numModes()\n \n if n_modes > 1:\n collectivityList = list(calcCollectivity(modes))\n else:\n collectivityList = [calcCollectivity(modes)]\n\n idxSorted = [i[0] for i in sorted(enumerate(collectivityList),\n key=lambda x: x[1],\n reverse=True)]\n\n score = np.zeros(n_modes)\n modeNum = list(range(n_modes))\n\n for i in range(n_modes):\n score[idxSorted[i]] = idxSorted[i] + modeNum[i] + 2 \n\n score = score / (2.0 * n_modes) \n\n return score\n\ncalcHemnmaScore = calcScipionScore\n", "# -*- coding: utf-8 -*-\n\"\"\"This module defines input and output functions.\"\"\"\n\nfrom collections import OrderedDict\nimport datetime\n\nimport os\nfrom os.path import abspath, join, isfile, isdir, split, splitext\n\nimport numpy as np\n\nfrom prody import LOGGER, SETTINGS, PY3K\nfrom prody.atomic import Atomic, AtomSubset\nfrom prody.utilities import openFile, openSQLite, isExecutable, which, PLATFORM, addext, wrapModes\nfrom prody.proteins import parseSTAR, writeSTAR, alignChains, parsePDB\nfrom prody.ensemble import PDBEnsemble\n\nfrom .nma import NMA, MaskedNMA\nfrom .anm import ANM, ANMBase, MaskedANM\nfrom .analysis import calcCollectivity, calcScipionScore\nfrom .analysis import calcProjection\nfrom .analysis import calcCollectivity\nfrom .gnm import GNM, GNMBase, ZERO, MaskedGNM\nfrom .exanm import exANM, MaskedExANM\nfrom .rtb import RTB\nfrom .pca import PCA, EDA\nfrom .imanm import imANM\nfrom .exanm import exANM\nfrom .mode import Vector, Mode, VectorBase\nfrom .modeset import ModeSet\nfrom .editing import sliceModel, reduceModel, trimModel\nfrom .editing import sliceModelByMask, reduceModelByMask, trimModelByMask\n\n__all__ = ['parseArray', 'parseModes', 'parseSparseMatrix',\n 'parseGromacsModes', 'parseScipionModes',\n 'writeArray', 'writeModes', 'writeScipionModes',\n 'saveModel', 'loadModel', 'saveVector', 'loadVector',\n 'calcENM', 'realignModes']\n\n\ndef saveModel(nma, filename=None, matrices=False, **kwargs):\n \"\"\"Save *nma* model data as :file:`filename.nma.npz`. 
By default,\n eigenvalues, eigenvectors, variances, trace of covariance matrix,\n and name of the model will be saved. If *matrices* is **True**,\n covariance, Hessian or Kirchhoff matrices are saved too, whichever\n are available. If *filename* is **None**, name of the NMA instance\n will be used as the filename, after ``\" \"`` (white spaces) in the name\n are replaced with ``\"_\"`` (underscores). Extension may differ based\n on the type of the NMA model. For ANM models, it is :file:`.anm.npz`.\n Upon successful completion of saving, filename is returned. This\n function makes use of :func:`~numpy.savez` function.\"\"\"\n\n if not isinstance(nma, NMA):\n raise TypeError('invalid type for nma, {0}'.format(type(nma)))\n #if len(nma) == 0:\n # raise ValueError('nma instance does not contain data')\n\n add_attr = kwargs.pop('attr', [])\n\n dict_ = nma.__dict__\n attr_list = ['_title', '_trace', '_array', '_eigvals', '_vars', '_n_atoms',\n '_dof', '_n_modes']\n\n if add_attr:\n for attr in add_attr:\n if attr not in attr_list:\n attr_list.append(attr)\n if filename is None:\n filename = nma.getTitle().replace(' ', '_')\n if isinstance(nma, GNMBase):\n attr_list.append('_cutoff')\n attr_list.append('_gamma')\n if matrices:\n attr_list.append('_kirchhoff')\n if isinstance(nma, ANMBase):\n attr_list.append('_hessian')\n if isinstance(nma, ANMBase):\n type_ = 'ANM'\n else:\n type_ = 'GNM'\n elif isinstance(nma, EDA):\n type_ = 'EDA'\n elif isinstance(nma, PCA):\n type_ = 'PCA'\n else:\n type_ = 'NMA'\n\n if matrices:\n attr_list.append('_cov')\n attr_dict = {'type': type_}\n for attr in attr_list:\n value = dict_[attr]\n if value is not None:\n attr_dict[attr] = value\n\n if isinstance(nma, MaskedNMA):\n if isinstance(nma, MaskedGNM):\n attr_dict['type'] = 'mGNM'\n elif isinstance(nma, MaskedANM):\n attr_dict['type'] = 'mANM'\n else:\n raise TypeError('invalid MaskedNMA type: %s'%(str(type(nma))))\n\n attr_dict['mask'] = nma.mask\n attr_dict['masked'] = nma.masked\n \n if isinstance(nma, RTB):\n attr_dict['type'] = 'RTB'\n if matrices:\n attr_dict['_project'] = nma._project\n\n if isinstance(nma, imANM):\n attr_dict['type'] = 'imANM'\n\n if isinstance(nma, exANM):\n attr_dict['type'] = 'exANM'\n\n suffix = '.' + attr_dict['type'].lower()\n if not filename.lower().endswith('.npz'):\n if not filename.lower().endswith(suffix):\n filename += suffix + '.npz'\n else:\n filename += '.npz'\n ostream = openFile(filename, 'wb', **kwargs)\n np.savez(ostream, **attr_dict)\n ostream.close()\n return filename\n\n\ndef loadModel(filename, **kwargs):\n \"\"\"Returns NMA instance after loading it from file (*filename*).\n This function makes use of :func:`~numpy.load` function. 
See\n also :func:`saveModel`.\"\"\"\n\n if not 'encoding' in kwargs:\n kwargs['encoding'] = 'latin1'\n\n if not 'allow_pickle' in kwargs:\n kwargs['allow_pickle'] = True \n\n with np.load(filename, **kwargs) as attr_dict:\n try:\n type_ = attr_dict['type']\n except KeyError:\n raise IOError('{0} is not a valid NMA model file'.format(filename))\n\n if isinstance(type_, np.ndarray):\n type_ = np.asarray(type_, dtype=str)\n\n type_ = str(type_)\n\n try:\n title = attr_dict['_title']\n except KeyError:\n title = attr_dict['_name']\n\n if isinstance(title, np.ndarray):\n title = np.asarray(title, dtype=str)\n title = str(title)\n if type_ == 'ANM':\n nma = ANM(title)\n elif type_ == 'PCA':\n nma = PCA(title)\n elif type_ == 'EDA':\n nma = EDA(title)\n elif type_ == 'GNM':\n nma = GNM(title)\n elif type_ == 'mGNM':\n nma = MaskedGNM(title)\n elif type_ == 'mANM':\n nma = MaskedANM(title)\n elif type_ == 'exANM':\n nma = exANM(title)\n elif type_ == 'imANM':\n nma = imANM(title)\n elif type_ == 'NMA':\n nma = NMA(title)\n elif type_ == 'RTB':\n nma = RTB(title)\n else:\n raise IOError('NMA model type is not recognized: {0}'.format(type_))\n\n dict_ = nma.__dict__\n for attr in attr_dict.files:\n if attr in ('type', '_name', '_title'):\n continue\n elif attr in ('_trace', '_cutoff', '_gamma'):\n dict_[attr] = attr_dict[attr][()]\n elif attr in ('_dof', '_n_atoms', '_n_modes'):\n dict_[attr] = int(attr_dict[attr])\n elif attr in ('masked', ):\n dict_[attr] = bool(attr_dict[attr])\n elif attr in ('mask', ):\n if not attr_dict[attr].shape:\n dict_[attr] = bool(attr_dict[attr])\n else:\n dict_[attr] = attr_dict[attr]\n else:\n dict_[attr] = attr_dict[attr]\n\n return nma\n\n\ndef saveVector(vector, filename, **kwargs):\n \"\"\"Save *vector* data as :file:`filename.vec.npz`. Upon successful\n completion of saving, filename is returned. This function makes use\n of :func:`numpy.savez` function.\"\"\"\n\n if not isinstance(vector, Vector):\n raise TypeError('invalid type for vector, {0}'.format(type(vector)))\n attr_dict = {}\n attr_dict['title'] = vector.getTitle()\n attr_dict['array'] = vector._getArray()\n attr_dict['is3d'] = vector.is3d()\n\n if not filename.lower().endswith('.npz'):\n if not filename.lower().endswith('.vec'):\n filename += '.vec.npz'\n else:\n filename += '.npz'\n\n ostream = openFile(filename, 'wb', **kwargs)\n np.savez(ostream, **attr_dict)\n ostream.close()\n return filename\n\n\ndef loadVector(filename):\n \"\"\"Returns :class:`.Vector` instance after loading it from *filename* using\n :func:`numpy.load`. See also :func:`saveVector`.\"\"\"\n\n attr_dict = np.load(filename)\n try:\n title = str(attr_dict['title'])\n except KeyError:\n title = str(attr_dict['name'])\n return Vector(attr_dict['array'], title, bool(attr_dict['is3d']))\n\n\ndef writeModes(filename, modes, format='%.18e', delimiter=' '):\n \"\"\"Write *modes* (eigenvectors) into a plain text file with name\n *filename*. 
See also :func:`writeArray`.\"\"\"\n\n if not isinstance(modes, (NMA, ModeSet, Mode)):\n raise TypeError('modes must be NMA, ModeSet, or Mode, not {0}'\n .format(type(modes)))\n return writeArray(filename, modes._getArray(), format=format,\n delimiter=delimiter)\n\n\ndef parseModes(normalmodes, eigenvalues=None, nm_delimiter=None,\n nm_skiprows=0, nm_usecols=None, ev_delimiter=None,\n ev_skiprows=0, ev_usecols=None, ev_usevalues=None):\n \"\"\"Returns :class:`.NMA` instance with normal modes parsed from\n *normalmodes*.\n\n In normal mode file *normalmodes*, columns must correspond to modes\n (eigenvectors). Optionally, *eigenvalues* can be parsed from a separate\n file. If eigenvalues are not provided, they will all be set to 1.\n\n :arg normalmodes: File or filename that contains normal modes.\n If the filename extension is :file:`.gz` or :file:`.bz2`, the file is\n first decompressed.\n :type normalmodes: str or file\n\n :arg eigenvalues: Optional, file or filename that contains eigenvalues.\n If the filename extension is :file:`.gz` or :file:`.bz2`,\n the file is first decompressed.\n :type eigenvalues: str or file\n\n :arg nm_delimiter: The string used to separate values in *normalmodes*.\n By default, this is any whitespace.\n :type nm_delimiter: str\n\n :arg nm_skiprows: Skip the first *skiprows* lines in *normalmodes*.\n Default is ``0``.\n :type nm_skiprows: 0\n\n :arg nm_usecols: Which columns to read from *normalmodes*, with 0 being the\n first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and\n 6th columns. The default, **None**, results in all columns being read.\n :type nm_usecols: list\n\n :arg ev_delimiter: The string used to separate values in *eigenvalues*.\n By default, this is any whitespace.\n :type ev_delimiter: str\n\n :arg ev_skiprows: Skip the first *skiprows* lines in *eigenvalues*.\n Default is ``0``.\n :type ev_skiprows: 0\n\n :arg ev_usecols: Which columns to read from *eigenvalues*, with 0 being the\n first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and\n 6th columns. 
The default, **None**, results in all columns being read.\n :type ev_usecols: list\n\n :arg ev_usevalues: Which columns to use after the eigenvalue column is\n parsed from *eigenvalues*, with 0 being the first.\n This can be used if *eigenvalues* contains more values than the\n number of modes in *normalmodes*.\n :type ev_usevalues: list\n\n See :func:`parseArray` for details of parsing arrays from files.\"\"\"\n\n modes = parseArray(normalmodes, delimiter=nm_delimiter,\n skiprows=nm_skiprows, usecols=nm_usecols)\n if eigenvalues is not None:\n values = parseArray(eigenvalues, delimiter=ev_delimiter,\n skiprows=ev_skiprows, usecols=ev_usecols)\n values = values.flatten()\n if ev_usevalues is not None:\n values = values[ev_usevalues]\n nma = NMA(splitext(split(normalmodes)[1])[0])\n nma.setEigens(modes, values)\n return nma\n\n\ndef parseScipionModes(run_path, title=None, pdb=None):\n \"\"\"Returns :class:`.NMA` containing eigenvectors and eigenvalues \n parsed from a ContinuousFlex FlexProtNMA Run directory.\n\n :arg run_path: path to the Run directory\n :type run_path: str\n \n :arg title: title for :class:`.NMA` object\n :type title: str\n \"\"\"\n if run_path.endswith(\"/\"):\n run_path = run_path[:-1]\n run_name = os.path.split(run_path)[-1]\n top_dirs = os.path.split(run_path)[0][:-4] # exclude \"Runs\"\n\n star_data = parseSTAR(run_path + '/modes.xmd')\n star_loop = star_data[0][0]\n \n n_modes = star_loop.numRows()\n \n row1 = star_loop[0]\n mode1 = parseArray(top_dirs + row1['_nmaModefile']).reshape(-1)\n dof = mode1.shape[0]\n\n if pdb is not None:\n atoms = parsePDB(pdb)\n n_atoms = atoms.numAtoms()\n else:\n # assume standard NMA\n n_atoms = dof//3\n\n vectors = np.zeros((dof, n_modes))\n vectors[:, 0] = mode1\n\n eigvals = np.zeros(n_modes)\n\n try:\n eigvals[0] = float(row1['_nmaEigenval'])\n found_eigvals = True\n except:\n found_eigvals = False\n\n for i, row in enumerate(star_loop[1:]):\n vectors[:, i+1] = parseArray(top_dirs + row['_nmaModefile']).reshape(-1)\n if found_eigvals:\n eigvals[i+1] = float(row['_nmaEigenval'])\n \n if not found_eigvals:\n log_fname = run_path + '/logs/run.stdout'\n fi = open(log_fname, 'r')\n lines = fi.readlines()\n fi.close()\n\n for line in lines:\n if line.find('Eigenvector number') != -1:\n j = int(line.strip().split()[-1]) - 1\n if line.find('Corresponding eigenvalue') != -1:\n eigvals[j] = float(line.strip().split()[-1])\n if not found_eigvals:\n found_eigvals = True\n \n if title is None:\n title = run_name\n\n if not found_eigvals:\n LOGGER.warn('No eigenvalues found')\n eigvals=None\n\n if dof == n_atoms * 3:\n nma = NMA(title)\n else:\n nma = GNM(title)\n\n nma.setEigens(vectors, eigvals)\n return nma\n\n\ndef writeScipionModes(output_path, modes, write_star=False, scores=None,\n only_sqlite=False, collectivityThreshold=0.):\n \"\"\"Writes *modes* to a set of files that can be recognised by Scipion.\n A directory called **\"modes\"** will be created if it doesn't already exist. 
\n Filenames inside will start with **\"vec\"** and have the mode number as the extension.\n \n :arg output_path: path to the directory where the modes directory will be\n :type output_path: str\n\n :arg modes: modes to be written to files\n :type modes: :class:`.Mode`, :class:`.ModeSet`, :class:`.NMA`\n\n :arg write_star: whether to write modes.xmd STAR file.\n Default is **False** as qualifyModesStep writes it with scores.\n :type write_star: bool\n\n :arg scores: scores from qualifyModesStep for re-writing sqlite\n Default is **None** and then it uses :func:`.calcScipionScore`\n :type scores: list\n\n :arg only_sqlite: whether to write only the sqlite file instead of everything.\n Default is **False** but it can be useful to set it to **True** for updating the sqlite file.\n :type only_sqlite: bool \n\n :arg collectivityThreshold: collectivity threshold below which modes are not enabled\n Default is 0.\n :type collectivityThreshold: float\n \"\"\"\n if not isinstance(output_path, str):\n raise TypeError('output_path should be a string, not {0}'\n .format(type(output_path)))\n\n if not isdir(output_path):\n raise ValueError('output_path should be a working path')\n\n if not isinstance(modes, (NMA, ModeSet, VectorBase)):\n raise TypeError('rows must be NMA, ModeSet, or Mode, not {0}'\n .format(type(modes)))\n\n if not isinstance(write_star, bool):\n raise TypeError('write_star should be boolean, not {0}'\n .format(type(write_star)))\n\n if scores is not None:\n if not isinstance(scores, list):\n raise TypeError('scores should be a list or None, not {0}'\n .format(type(scores)))\n\n if not isinstance(only_sqlite, bool):\n raise TypeError('only_sqlite should be boolean, not {0}'\n .format(type(only_sqlite)))\n\n if not isinstance(collectivityThreshold, float):\n raise TypeError('collectivityThreshold should be float, not {0}'\n .format(type(collectivityThreshold)))\n\n if modes.numModes() == 1 and not isinstance(modes, NMA):\n old_modes = modes\n modes = NMA(old_modes)\n modes.setEigens(old_modes.getArray().reshape(-1, 1))\n\n modes_dir = output_path + '/modes/'\n if not isdir(modes_dir):\n os.mkdir(modes_dir)\n\n modefiles = []\n for mode in modes:\n mode_num = mode.getIndex() + 1\n if mode.is3d():\n modefiles.append(writeArray(modes_dir + 'vec.{0}'.format(mode_num),\n mode.getArrayNx3(), '%12.4e', ''))\n else:\n modefiles.append(writeArray(modes_dir + 'vec.{0}'.format(mode_num),\n mode.getArray(), '%12.4e', ''))\n\n if modes.numModes() > 1:\n order = modes.getIndices()\n collectivities = list(calcCollectivity(modes))\n eigvals = modes.getEigvals()\n enabled = [1 if eigval > ZERO and collectivities[i] > collectivityThreshold else -1\n for i, eigval in enumerate(eigvals)]\n if scores is None:\n scores = list(calcScipionScore(modes))\n else:\n mode = modes[0]\n eigvals = np.array([mode.getEigval()])\n collectivities = [calcCollectivity(mode)]\n order = [mode.getIndex()]\n enabled = [1 if mode.getEigval() > ZERO and collectivities[0] > collectivityThreshold else -1]\n if scores is None:\n scores = [calcScipionScore(mode)[0]]\n\n modes_sqlite_fn = output_path + '/modes.sqlite'\n sql_con = openSQLite(modes_sqlite_fn, 'n')\n cursor = sql_con.cursor()\n \n cursor.execute('''CREATE TABLE Properties(key,value)''')\n properties = [('self', 'SetOfNormalModes'),\n ('_size', str(modes.numModes())),\n ('_streamState', '2'),\n ('_mapperPath', '{0}, '.format(modes_sqlite_fn))]\n cursor.executemany('''INSERT INTO Properties VALUES(?,?);''', properties);\n \n cursor.execute('''CREATE TABLE Classes(id 
primary key, label_property, column_name, class_name)''')\n classes = [(1, 'self', 'c00', 'NormalMode'),\n (2, '_modeFile', 'c01', 'String'),\n (3, '_collectivity', 'c02', 'Float'),\n (4, '_score', 'c03', 'Float')]\n cursor.executemany('''INSERT INTO Classes VALUES(?,?,?,?);''', classes);\n \n cursor.execute('''CREATE TABLE Objects(id primary key, enabled, label, comment, creation, c01, c02, c03)''')\n \n star_dict = OrderedDict()\n\n star_dict['noname'] = OrderedDict() # Data Block with title noname\n loop_dict = star_dict['noname'][0] = OrderedDict() # Loop 0\n\n loop_dict['fields'] = OrderedDict()\n fields = ['_enabled', '_nmaCollectivity', '_nmaModefile', '_nmaScore',\n '_nmaEigenval', '_order_']\n for j, field in enumerate(fields):\n loop_dict['fields'][j] = field\n\n loop_dict['data'] = OrderedDict()\n\n now = datetime.datetime.now() \n creation = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n for i, mode in enumerate(modes):\n loop_dict['data'][i] = OrderedDict()\n \n id = mode.getIndex() + 1\n loop_dict['data'][i]['_order_'] = str(id)\n \n enab = enabled[i]\n loop_dict['data'][i]['_enabled'] = '%2i' % enab\n if enab != 1:\n enab = 0\n \n label = ''\n comment = ''\n \n c01 = loop_dict['data'][i]['_nmaModefile'] = modefiles[i]\n \n collec = collectivities[i]\n loop_dict['data'][i]['_nmaCollectivity'] = '%8.6f' % collec\n c02 = float('%6.4f' % collec)\n \n c03 = scores[i]\n loop_dict['data'][i]['_nmaScore'] = '%8.6f' % c03\n\n eigval = eigvals[i]\n if float('%9.6f' % eigval) > 0:\n loop_dict['data'][i]['_nmaEigenval'] = '%9.6f' % eigval\n else:\n loop_dict['data'][i]['_nmaEigenval'] = '%9.6e' % eigval\n \n cursor.execute('''INSERT INTO Objects VALUES(?,?,?,?,?,?,?,?)''',\n (id, enab, label, comment, creation, c01, c02, c03))\n\n if write_star:\n writeSTAR(output_path + '/modes.xmd', star_dict)\n\n sql_con.commit()\n sql_con.close()\n\n return modes_dir\n\n\ndef writeArray(filename, array, format='%3.2f', delimiter=' '):\n \"\"\"Write 1-d or 2-d array data into a delimited text file.\n\n This function is using :func:`numpy.savetxt` to write the file, after\n making some type and value checks. Default *format* argument is ``\"%d\"``.\n Default *delimiter* argument is white space, ``\" \"``.\n\n *filename* will be returned upon successful writing.\"\"\"\n\n if not isinstance(array, np.ndarray):\n raise TypeError('array must be a Numpy ndarray, not {0}'\n .format(type(array)))\n elif not array.ndim in (1, 2):\n raise ValueError('array must be a 1 or 2-dimensional Numpy ndarray, '\n 'not {0}-d'.format(type(array.ndim)))\n np.savetxt(filename, array, format, delimiter)\n return filename\n\ndef parseArray(filename, delimiter=None, skiprows=0, usecols=None,\n dtype=float):\n \"\"\"Parse array data from a file.\n\n This function is using :func:`numpy.loadtxt` to parse the file. Each row\n in the text file must have the same number of values.\n\n :arg filename: File or filename to read. If the filename extension is\n :file:`.gz` or :file:`.bz2`, the file is first decompressed.\n :type filename: str or file\n\n :arg delimiter: The string used to separate values. By default,\n this is any whitespace.\n :type delimiter: str\n\n :arg skiprows: Skip the first *skiprows* lines, default is ``0``.\n :type skiprows: int\n\n :arg usecols: Which columns to read, with 0 being the first. 
For example,\n ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.\n The default, **None**, results in all columns being read.\n :type usecols: list\n\n :arg dtype: Data-type of the resulting array, default is :func:`float`.\n :type dtype: :class:`numpy.dtype`.\"\"\"\n\n array = np.loadtxt(filename, dtype=dtype, delimiter=delimiter,\n skiprows=skiprows, usecols=usecols)\n return array\n\ndef parseSparseMatrix(filename, symmetric=False, delimiter=None, skiprows=0,\n irow=0, icol=1, first=1):\n \"\"\"Parse sparse matrix data from a file.\n\n This function is using :func:`parseArray` to parse the file.\n Input must have the following format::\n\n 1 1 9.958948135375977e+00\n 1 2 -3.788214445114136e+00\n 1 3 6.236155629158020e-01\n 1 4 -7.820609807968140e-01\n\n Each row in the text file must have the same number of values.\n\n :arg filename: File or filename to read. If the filename extension is\n :file:`.gz` or :file:`.bz2`, the file is first decompressed.\n :type filename: str or file\n\n :arg symmetric: Set **True** if the file contains triangular part of a\n symmetric matrix, default is **True**.\n :type symmetric: bool\n\n :arg delimiter: The string used to separate values. By default,\n this is any whitespace.\n :type delimiter: str\n\n :arg skiprows: Skip the first *skiprows* lines, default is ``0``.\n :type skiprows: int\n\n :arg irow: Index of the column in data file corresponding to row indices,\n default is ``0``.\n :type irow: int\n\n :arg icol: Index of the column in data file corresponding to column indices,\n default is ``1``.\n :type icol: int\n\n :arg first: First index in the data file (0 or 1), default is ``1``.\n :type first: int\n\n Data-type of the resulting array, default is :func:`float`.\"\"\"\n\n irow = int(irow)\n icol = int(icol)\n first = int(first)\n assert 0 <= irow <= 2 and 0 <= icol <= 2, 'irow/icol may be 0, 1, or 2'\n assert icol != irow, 'irow and icol must not be equal'\n idata = [0, 1, 2]\n idata.pop(idata.index(irow))\n idata.pop(idata.index(icol))\n idata = idata[0]\n sparse = parseArray(filename, delimiter, skiprows)\n if symmetric:\n dim1 = dim2 = int(sparse[:, [irow, icol]].max())\n else:\n dim1, dim2 = sparse[:, [irow, icol]].max(0).astype(int)\n matrix = np.zeros((dim1, dim2))\n irow = (sparse[:, irow] - first).astype(int)\n icol = (sparse[:, icol] - first).astype(int)\n matrix[irow, icol] = sparse[:, idata]\n if symmetric:\n matrix[icol, irow] = sparse[:, idata]\n return matrix\n\ndef calcENM(atoms, select=None, model='anm', trim='trim', gamma=1.0, \n title=None, n_modes=None, **kwargs):\n \"\"\"Returns an :class:`.ANM` or :class:`.GNM` instance and *atoms* used for the \n calculations. The model can be trimmed, sliced, or reduced based on \n the selection.\n\n :arg atoms: atoms on which the ENM is performed. It can be any :class:`Atomic` \n class that supports selection or a :class:`~numpy.ndarray`.\n :type atoms: :class:`.Atomic`, :class:`.AtomGroup`, :class:`.Selection`, :class:`~numpy.ndarray`\n\n :arg select: part of the atoms that is considered as the system. \n If set to **None**, then all atoms will be considered as the system\n :type select: str, :class:`.Selection`, :class:`~numpy.ndarray`\n\n :arg model: type of ENM that will be performed. It can be either ``\"anm\"`` \n or ``\"gnm\"`` or ``\"exanm\"``\n :type model: str\n\n :arg trim: type of method that will be used to trim the model. It can \n be either ``\"trim\"`` , ``\"slice\"``, or ``\"reduce\"``. 
If set to ``\"trim\"``, \n the parts that is not in the selection will simply be removed\n :type trim: str\n \"\"\"\n \n if isinstance(select, (str, AtomSubset)):\n if not isinstance(atoms, Atomic):\n raise TypeError('atoms must be a Atomic instance in order to be selected')\n try:\n if title is None:\n title = atoms.getTitle()\n except AttributeError:\n title = 'Unknown'\n\n mask = kwargs.pop('mask', None)\n zeros = kwargs.pop('zeros', False)\n turbo = kwargs.pop('turbo', True)\n\n if model is GNM:\n model = 'gnm'\n elif model is ANM:\n model = 'anm'\n else:\n model = str(model).lower().strip() \n\n if trim is reduceModel:\n trim = 'reduce'\n elif trim is sliceModel:\n trim = 'slice'\n elif trim is None:\n trim = 'trim'\n else:\n trim = str(trim).lower().strip()\n \n enm = None\n MaskedModel = None\n if model == 'anm':\n anm = ANM(title)\n anm.buildHessian(atoms, gamma=gamma, **kwargs)\n enm = anm\n MaskedModel = MaskedANM\n elif model == 'gnm':\n gnm = GNM(title)\n gnm.buildKirchhoff(atoms, gamma=gamma, **kwargs)\n enm = gnm\n MaskedModel = MaskedGNM\n elif model == 'exanm':\n exanm = exANM(title)\n exanm.buildHessian(atoms, gamma=gamma, **kwargs)\n enm = exanm\n MaskedModel = MaskedExANM\n else:\n raise TypeError('model should be either ANM or GNM instead of {0}'.format(model))\n \n if select is None:\n enm.calcModes(n_modes=n_modes, zeros=zeros, turbo=turbo)\n else:\n if trim == 'slice':\n enm.calcModes(n_modes=n_modes, zeros=zeros, turbo=turbo)\n if isinstance(select, np.ndarray):\n enm = sliceModelByMask(enm, select)\n atoms = select\n else:\n enm, atoms = sliceModel(enm, atoms, select)\n elif trim == 'reduce':\n if isinstance(select, np.ndarray):\n enm = reduceModelByMask(enm, select)\n atoms = select\n else:\n enm, atoms = reduceModel(enm, atoms, select)\n enm.calcModes(n_modes=n_modes, zeros=zeros, turbo=turbo)\n elif trim == 'trim':\n if isinstance(select, np.ndarray):\n enm = trimModelByMask(enm, select)\n atoms = select\n else:\n enm, atoms = trimModel(enm, atoms, select)\n enm.calcModes(n_modes=n_modes, zeros=zeros, turbo=turbo)\n else:\n raise ValueError('trim can only be \"trim\", \"reduce\", or \"slice\"')\n \n if mask is not None:\n enm = MaskedModel(enm, mask)\n return enm, atoms\n\n\ndef parseGromacsModes(run_path, title=\"\", model='nma', **kwargs):\n \"\"\"Returns :class:`.NMA` containing eigenvectors and eigenvalues parsed from a run directory \n containing results from gmx covar or gmx nmeig followed by gmx anaeig \n including eigenvalues in an xvg file and eigenvectors in pdb files\n (see http://www.strodel.info/index_files/lecture/html/analysis-9.html).\n\n :arg run_path: path to the run directory\n :type run_path: str\n \n :arg title: title for resulting object\n Default is ``\"\"``\n :type title: str\n\n :arg model: type of calculated that was performed. It can be either ``\"nma\"`` \n or ``\"pca\"``. 
If it is not changed to ``\"pca\"`` then ``\"nma\"`` will be assumed.\n :type model: str\n\n :arg eigval_fname: filename or path for xvg file containing eigenvalues\n Default is ``\"eigenval.xvg\"`` as this is the default from Gromacs\n :type eigval_fname: str\n\n :arg eigvec_fname: filename or path for trr file containing eigenvectors\n Default is ``\"eigenvec.trr\"`` as this is the default from Gromacs\n :type eigvec_fname: str\n\n :arg pdb_fname: filename or path for pdb file containing the reference structure\n Default is ``\"average.pdb\"`` although this is probably suboptimal\n :type pdb_fname: str\n \"\"\" \n try:\n from mdtraj import load_trr\n except ImportError:\n raise ImportError('Please install mdtraj in order to use parseGromacsModes.')\n\n if not isinstance(run_path, str):\n raise TypeError('run_path should be a string')\n\n if not run_path.endswith('/'):\n run_path += '/'\n\n if not isinstance(title, str):\n raise TypeError('title should be a string')\n\n if model == 'pca':\n result = PCA(title)\n else:\n if model != 'nma':\n LOGGER.warn('model not recognised so using NMA')\n result = NMA(title)\n\n\n eigval_fname = kwargs.get('eigval_fname', 'eigenval.xvg')\n if not isinstance(eigval_fname, str):\n raise TypeError('eigval_fname should be a string')\n\n if isfile(eigval_fname):\n vals_fname = eigval_fname\n elif isfile(run_path + eigval_fname):\n vals_fname = run_path + eigval_fname\n else:\n raise ValueError('eigval_fname should point be a path to a file '\n 'either relative to run_path or an absolute one')\n\n\n eigvec_fname = kwargs.get('eigvec_fname', 'eigenvec.trr')\n if not isinstance(eigvec_fname, str):\n raise TypeError('eigvec_fname should be a string')\n\n if isfile(eigvec_fname):\n vecs_fname = eigval_fname\n elif isfile(run_path + eigvec_fname):\n vecs_fname = run_path + eigvec_fname\n else:\n raise ValueError('eigvec_fname should point be a path to a file '\n 'either relative to run_path or an absolute one')\n\n\n pdb_fname = kwargs.get('pdb_fname', 'average.pdb')\n if not isinstance(pdb_fname, str):\n raise TypeError('pdb_fname should be a string')\n\n if isfile(pdb_fname):\n pdb = eigval_fname\n elif isfile(run_path + pdb_fname):\n pdb = run_path + pdb_fname\n else:\n raise ValueError('pdb_fname should point be a path to a file '\n 'either relative to run_path or an absolute one')\n \n \n fi = open(vals_fname, 'r')\n lines = fi.readlines()\n fi.close()\n \n eigvals = []\n for line in lines:\n if not (line.startswith('@') or line.startswith('#')):\n eigvals.append(float(line.strip().split()[-1])*100) # convert to A**2 from nm**2\n\n eigvals = np.array(eigvals)\n\n # Parse eigenvectors trr with mdtraj, which uses nm so doesn't rescale\n vecs_traj = load_trr(vecs_fname, top=pdb)\n\n # format vectors appropriately, skipping initial and average structures\n vectors = np.array([frame.xyz.flatten() for frame in vecs_traj[2:]]).T\n\n result.setEigens(vectors, eigvals)\n return result\n\ndef realignModes(modes, atoms, ref):\n \"\"\"Align *modes* in the original frame based on *atoms*\n onto another frame based on *ref* using the transformation \n from alignment of *atoms* to *ref*\n \n :arg modes: multiple 3D modes\n :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.PCA`\n\n :arg atoms: central structure related to *modes* to map onto *ref*\n Inserting *atoms* into an ensemble and projecting onto *modes*\n should give all zeros\n :type atoms: :class:`.Atomic`\n \n :arg ref: reference structure for mapping\n :type ref: :class:`.Atomic`\n \"\"\"\n if not 
isinstance(modes, (ModeSet, NMA)):\n raise TypeError('modes should be a ModeSet of NMA instance')\n\n if not modes.is3d():\n raise ValueError('modes should be 3D for this function to work')\n\n if not isinstance(atoms, Atomic):\n raise TypeError('atoms should be an Atomic instance')\n\n if not isinstance(ref, Atomic):\n raise TypeError('ref should be an Atomic instance')\n\n n_atoms = modes.numAtoms()\n\n if atoms.numAtoms() != n_atoms:\n raise ValueError('atoms and modes should have the same number of atoms')\n\n def_coords = np.array([atoms.getCoords() + mode.getArrayNx3()\n for mode in modes])\n\n def_ens = PDBEnsemble('applied eigvecs')\n def_ens.setCoords(atoms)\n def_ens.setAtoms(atoms)\n def_ens.addCoordset(atoms)\n def_ens.addCoordset(def_coords)\n\n if not np.allclose(calcProjection(def_ens[0], modes),\n np.zeros(modes.numModes())):\n raise ValueError('projection of atoms onto modes (via an ensemble) '\n 'is not all zeros so atoms is not appropriate')\n\n if ref.numAtoms() != n_atoms:\n ref = alignChains(ref, atoms)[0]\n \n def_ens.setCoords(ref)\n def_ens.superpose()\n\n new_vectors = np.array([np.array(coords - def_ens.getCoordsets()[0]).flatten()\n for coords in def_ens.getCoordsets()[1:]]).T\n\n # initialise a new modes object with the same type\n result = type(modes)()\n\n result.setEigens(new_vectors, modes.getEigvals())\n return result\n", "# -*- coding: utf-8 -*-\n\"\"\"This module defines functions for parsing header data from PDB files.\"\"\"\n\nfrom collections import defaultdict\nimport os.path\n\nimport numpy as np\n\nfrom prody import LOGGER\nfrom prody.atomic import ATOMIC_FIELDS\nfrom prody.atomic import Atomic, AtomGroup\nfrom prody.atomic import getSequence\nfrom prody.measure import Transformation\nfrom prody.utilities import openFile\n\nfrom .localpdb import fetchPDB\n\n__all__ = ['Chemical', 'Polymer', 'DBRef', 'parsePDBHeader',\n 'assignSecstr', 'buildBiomolecules']\n\n\nclass Chemical(object):\n\n \"\"\"A data structure for storing information on chemical components\n (or heterogens) in PDB structures.\n\n A :class:`Chemical` instance has the following attributes:\n\n =========== ===== =======================================================\n Attribute Type Description (RECORD TYPE)\n =========== ===== =======================================================\n resname str residue name (or chemical component identifier) (HET)\n name str chemical name (HETNAM)\n chain str chain identifier (HET)\n resnum int residue (or sequence) number (HET)\n icode str insertion code (HET)\n natoms int number of atoms present in the structure (HET)\n description str description of the chemical component (HET)\n synonyms list synonyms (HETSYN)\n formula str chemical formula (FORMUL)\n pdbentry str PDB entry that chemical data is extracted from\n =========== ===== =======================================================\n\n Chemical class instances can be obtained as follows:\n\n .. 
ipython:: python\n\n from prody import *\n chemical = parsePDBHeader('1zz2', 'chemicals')[0]\n chemical\n chemical.name\n chemical.natoms\n len(chemical)\"\"\"\n\n __slots__ = ['resname', 'name', 'chain', 'resnum', 'icode',\n 'natoms', 'description', 'synonyms', 'formula', 'pdbentry']\n\n def __init__(self, resname):\n\n #: residue name (or chemical component identifier)\n self.resname = resname\n #: chemical name\n self.name = None\n #: chain identifier\n self.chain = None\n #: residue (or sequence) number\n self.resnum = None\n #: insertion code\n self.icode = None\n #: number of atoms present in the structure\n self.natoms = None\n #: description of the chemical component\n self.description = None\n #: list of synonyms\n self.synonyms = None\n #: chemical formula\n self.formula = None\n #: PDB entry that chemical data is extracted from\n self.pdbentry = None\n\n def __str__(self):\n return self.resname\n\n def __repr__(self):\n return '<Chemical: {0} ({1}_{2}_{3})>'.format(self.resname,\n self.pdbentry,\n self.chain, self.resnum)\n\n def __len__(self):\n return self.natoms\n\n\nclass Polymer(object):\n\n \"\"\"A data structure for storing information on polymer components\n (protein or nucleic) of PDB structures.\n\n A :class:`Polymer` instance has the following attributes:\n\n ========== ====== ======================================================\n Attribute Type Description (RECORD TYPE)\n ========== ====== ======================================================\n chid str chain identifier\n name str name of the polymer (macromolecule) (COMPND)\n fragment str specifies a domain or region of the molecule (COMPND)\n synonyms list synonyms for the polymer (COMPND)\n ec list associated Enzyme Commission numbers (COMPND)\n engineered bool indicates that the polymer was produced using\n recombinant technology or by purely chemical synthesis\n (COMPND)\n mutation bool indicates presence of a mutation (COMPND)\n comments str additional comments\n sequence str polymer chain sequence (SEQRES)\n dbrefs list sequence database records (DBREF[1|2] and SEQADV),\n see :class:`DBRef`\n modified list | modified residues (MODRES)\n | when modified residues are present, each will be\n represented as: ``(resname, chid, resnum, icode, stdname,\n comment)``\n pdbentry str PDB entry that polymer data is extracted from\n ========== ====== ======================================================\n\n Polymer class instances can be obtained as follows:\n\n .. 
ipython:: python\n\n polymer = parsePDBHeader('2k39', 'polymers')[0]\n polymer\n polymer.pdbentry\n polymer.chid\n polymer.name\n polymer.sequence\n len(polymer.sequence)\n len(polymer)\n dbref = polymer.dbrefs[0]\n dbref.database\n dbref.accession\n dbref.idcode\"\"\"\n\n __slots__ = ['chid', 'name', 'fragment', 'synonyms', 'ec',\n 'engineered', 'mutation', 'comments', 'sequence', 'pdbentry',\n 'dbrefs', 'modified']\n\n def __init__(self, chid):\n\n #: chain identifier\n self.chid = chid\n #: name of the polymer (macromolecule)\n self.name = ''\n #: specifies a domain or region of the molecule\n self.fragment = None\n #: list of synonyms for the molecule\n self.synonyms = None\n #: list of associated Enzyme Commission numbers\n self.ec = None\n self.engineered = None\n \"\"\"indicates that the molecule was produced using recombinant\n technology or by purely chemical synthesis\"\"\"\n #: sequence database reference records\n self.dbrefs = []\n #: indicates presence of a mutation\n self.mutation = None\n #: additional comments\n self.comments = None\n #: polymer chain sequence\n self.sequence = ''\n #: modified residues\n self.modified = None\n #: PDB entry that polymer data is extracted from\n self.pdbentry = None\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<Polymer: {0} ({1}_{2})>'.format(self.name,\n self.pdbentry, self.chid)\n\n def __len__(self):\n return len(self.sequence)\n\n_PDB_DBREF = {\n 'GB': 'GenBank',\n 'PDB': 'PDB',\n 'UNP': 'UniProt',\n 'NORINE': 'Norine',\n 'UNIMES': 'UNIMES',\n 'EMDB': 'EMDB',\n 'BMRB': 'BMRB'\n}\n\n\nclass DBRef(object):\n\n \"\"\"A data structure for storing reference to sequence databases for polymer\n components in PDB structures. Information if parsed from **DBREF[1|2]**\n and **SEQADV** records in PDB header.\"\"\"\n\n __slots__ = ['database', 'dbabbr', 'idcode', 'accession',\n 'first', 'last', 'diff']\n\n def __init__(self):\n\n #: sequence database, one of UniProt, GenBank, Norine, UNIMES, or PDB\n self.database = None\n #: database abbreviation, one of UNP, GB, NORINE, UNIMES, or PDB\n self.dbabbr = None\n #: database identification code, i.e. entry name in UniProt\n self.idcode = None\n #: database accession code\n self.accession = None\n #: initial residue numbers, ``(resnum, icode, dbnum)``\n self.first = None\n #: ending residue numbers, ``(resnum, icode, dbnum)``\n self.last = None\n self.diff = []\n \"\"\"list of differences between PDB and database sequences,\n ``(resname, resnum, icode, dbResname, dbResnum, comment)``\"\"\"\n\n def __str__(self):\n return self.accession\n\n def __repr__(self):\n return '<DBRef: {0} ({1})>'.format(self.accession, self.database)\n\n_START_COORDINATE_SECTION = set(['ATOM ', 'MODEL ', 'HETATM'])\n\n\ndef cleanString(string, nows=False):\n \"\"\"*nows* is no white space.\"\"\"\n\n if nows:\n return ''.join(string.strip().split())\n else:\n return ' '.join(string.strip().split())\n\n\ndef parsePDBHeader(pdb, *keys):\n \"\"\"Returns header data dictionary for *pdb*. 
This function is equivalent to\n ``parsePDB(pdb, header=True, model=0, meta=False)``, likewise *pdb* may be\n an identifier or a filename.\n\n List of header records that are parsed.\n\n ============ ================= ============================================\n Record type Dictionary key(s) Description\n ============ ================= ============================================\n HEADER | classification | molecule classification\n | deposition_date | deposition date\n | identifier | PDB identifier\n TITLE title title for the experiment or analysis\n SPLIT split list of PDB entries that make up the whole\n structure when combined with this one\n COMPND polymers see :class:`Polymer`\n EXPDTA experiment information about the experiment\n NUMMDL n_models number of models\n MDLTYP model_type additional structural annotation\n AUTHOR authors list of contributors\n JRNL reference reference information dictionary:\n * *authors*: list of authors\n * *title*: title of the article\n * *editors*: list of editors\n * *issn*:\n * *reference*: journal, vol, issue, etc.\n * *publisher*: publisher information\n * *pmid*: pubmed identifier\n * *doi*: digital object identifier\n DBREF[1|2] polymers see :class:`Polymer` and :class:`DBRef`\n SEQADV polymers see :class:`Polymer`\n SEQRES polymers see :class:`Polymer`\n MODRES polymers see :class:`Polymer`\n HELIX polymers see :class:`Polymer`\n SHEET polymers see :class:`Polymer`\n HET chemicals see :class:`Chemical`\n HETNAM chemicals see :class:`Chemical`\n HETSYN chemicals see :class:`Chemical`\n FORMUL chemicals see :class:`Chemical`\n REMARK 2 resolution resolution of structures, when applicable\n REMARK 4 version PDB file version\n REMARK 350 biomoltrans biomolecular transformation lines\n (unprocessed)\n REMARK 900\t related_entries related entries in the PDB or EMDB\n ============ ================= ============================================\n\n Header records that are not parsed are: OBSLTE, CAVEAT, SOURCE, KEYWDS,\n REVDAT, SPRSDE, SSBOND, LINK, CISPEP, CRYST1, ORIGX1, ORIGX2, ORIGX3,\n MTRIX1, MTRIX2, MTRIX3, and REMARK X not mentioned above.\"\"\"\n\n if not os.path.isfile(pdb):\n if len(pdb) == 4 and pdb.isalnum():\n filename = fetchPDB(pdb)\n if filename is None:\n raise IOError('PDB file for {0} could not be downloaded.'\n .format(pdb))\n pdb = filename\n else:\n raise IOError('{0} is not a valid filename or a valid PDB '\n 'identifier.'.format(pdb))\n pdb = openFile(pdb, 'rt')\n header, _ = getHeaderDict(pdb, *keys)\n pdb.close()\n return header\n\n\ndef getHeaderDict(stream, *keys):\n \"\"\"Returns header data in a dictionary. 
*stream* may be a list of PDB lines\n or a stream.\"\"\"\n\n lines = defaultdict(list)\n loc = 0\n for loc, line in enumerate(stream):\n startswith = line[0:6]\n if startswith in _START_COORDINATE_SECTION:\n break\n lines[startswith].append((loc, line))\n if not loc:\n #raise ValueError('empty PDB file or stream')\n return None, loc\n for i, line in lines['REMARK']:\n lines[line[:10]].append((i, line))\n\n pdbid = _PDB_HEADER_MAP['identifier'](lines)\n lines['pdbid'] = pdbid\n if keys:\n keys = list(keys)\n for k, key in enumerate(keys):\n if key in _PDB_HEADER_MAP:\n value = _PDB_HEADER_MAP[key](lines)\n keys[k] = value\n else:\n raise KeyError('{0} is not a valid header data identifier'\n .format(repr(key)))\n if key in ('chemicals', 'polymers'):\n for component in value:\n component.pdbentry = pdbid\n if len(keys) == 1:\n return keys[0], loc\n else:\n return tuple(keys), loc\n else:\n header = {}\n for key, func in _PDB_HEADER_MAP.items(): # PY3K: OK\n value = func(lines)\n if value is not None:\n header[key] = value\n for chem in header.get('chemicals', []):\n chem.pdbentry = pdbid\n header[chem.resname] = chem\n for poly in header.get('polymers', []):\n poly.pdbentry = pdbid\n header[poly.chid] = poly\n return header, loc\n\n\ndef _getBiomoltrans(lines):\n\n \n biomolecule = defaultdict(list)\n for i, line in lines['REMARK 350']:\n if line[11:23] == 'BIOMOLECULE:':\n currentBiomolecule = line.split()[-1]\n applyToChains = []\n elif line[11:41] == 'APPLY THE FOLLOWING TO CHAINS:' \\\n or line[30:41] == 'AND CHAINS:':\n applyToChains.extend(line[41:].replace(' ', '')\n .strip().strip(',').split(','))\n elif line[13:18] == 'BIOMT':\n biomt = biomolecule[currentBiomolecule]\n if line[13:19] == 'BIOMT1':\n if applyToChains == []:\n applyToChains = biomt[0]\n biomt.append(applyToChains)\n elif line[13:19]:\n applyToChains = []\n biomt.append(line[23:])\n return dict(biomolecule)\n\n\ndef _getResolution(lines):\n\n for i, line in lines['REMARK 2']:\n if 'RESOLUTION' in line:\n try:\n return float(line[23:30])\n except:\n return None\n\n\ndef _getRelatedEntries(lines):\n\n dbrefs = []\n for i, line in lines['REMARK 900']:\n if 'RELATED ID' in line:\n dbref = DBRef()\n end_of_id = line.find('RELATED DB')\n dbref.accession = line[23:end_of_id].strip()\n dbref.dbabbr = line[end_of_id+12:end_of_id+16].strip()\n dbref.database = _PDB_DBREF.get(dbref.dbabbr, 'Unknown')\n \n dbrefs.append(dbref)\n\n return dbrefs\n\n\ndef _getSpaceGroup(lines):\n\n for i, line in lines['REMARK 290']:\n if 'SYMMETRY OPERATORS FOR SPACE GROUP:' in line:\n try:\n return line.split('GROUP:')[1].strip()\n except:\n return None\n\n\ndef _getHelix(lines):\n\n alphas = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n helix = {}\n for i, line in lines['HELIX ']:\n try:\n chid = line[19]\n # helix class, serial number, identifier\n value = (int(line[38:40]), int(line[7:10]), line[11:14].strip())\n except:\n continue\n\n initICode = line[25]\n initResnum = int(line[21:25])\n if initICode != ' ':\n for icode in alphas[alphas.index(initICode):]:\n helix[(chid, initResnum, icode)] = value\n initResnum += 1\n endICode = line[37]\n endResnum = int(line[33:37])\n if endICode != ' ':\n for icode in alphas[:alphas.index(endICode)+1]:\n helix[(chid, endResnum, icode)] = value\n endResnum -= 1\n for resnum in range(initResnum, endResnum+1):\n helix[(chid, resnum, '')] = value\n return helix\n\n\ndef _getHelixRange(lines):\n\n alphas = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n helix = []\n for i, line in lines['HELIX ']:\n try:\n chid = line[19]\n 
Hclass=int(line[38:40])\n Hnr=int(line[7:10])\n except:\n continue\n\n initResnum = int(line[21:25])\n endICode = line[37]\n endResnum = int(line[33:37])\n if endICode != ' ':\n endResnum -= 1\n helix.append(['H', chid, Hclass, Hnr, initResnum, endResnum]) \n \n return helix\n\n\ndef _getSheet(lines):\n\n alphas = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n sheet = {}\n for i, line in lines['SHEET ']:\n try:\n chid = line[21]\n # sense # strand num # sheet id\n value = (int(line[38:40]), int(line[7:10]), line[11:14].strip())\n except:\n continue\n\n initICode = line[26]\n initResnum = int(line[22:26])\n if initICode != ' ':\n for icode in alphas[alphas.index(initICode):]:\n sheet[(chid, initResnum, icode)] = value\n initResnum += 1\n endICode = line[37]\n endResnum = int(line[33:37])\n if endICode != ' ':\n for icode in alphas[:alphas.index(endICode)+1]:\n sheet[(chid, endResnum, icode)] = value\n endResnum -= 1\n for resnum in range(initResnum, endResnum+1):\n sheet[(chid, resnum, '')] = value\n return sheet\n\n\ndef _getSheetRange(lines):\n\n alphas = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n sheet = []\n for i, line in lines['SHEET ']:\n try:\n chid = line[21]\n dir = int(line[38:40])\n Snr = int(line[7:10])\n except:\n continue\n\n initICode = line[26]\n initResnum = int(line[22:26])\n if initICode != ' ':\n initResnum += 1\n endICode = line[37]\n endResnum = int(line[33:37])\n if endICode != ' ':\n endResnum -= 1\n sheet.append(['E', chid, dir, Snr, initResnum, endResnum])\n return sheet\n\n\ndef _getReference(lines):\n \"\"\"Returns a reference of the PDB entry.\"\"\"\n\n ref = {}\n title = ''\n authors = []\n editors = []\n reference = ''\n publisher = ''\n for i, line in lines['JRNL ']:\n try:\n what = line.split(None, 2)[1]\n except:\n continue\n if what == 'AUTH':\n authors.extend(line[19:].strip().split(','))\n elif what == 'TITL':\n title += line[19:]\n elif what == 'EDIT':\n editors.extend(line[19:].strip().split(','))\n elif what == 'REF':\n reference += line[19:]\n elif what == 'PUBL':\n publisher += line[19:]\n elif what == 'REFN':\n ref['issn'] = line[19:].strip()\n elif what == 'PMID':\n ref['pmid'] = line[19:].strip()\n elif what == 'DOI':\n ref['doi'] = line[19:].strip()\n ref['authors'] = authors\n ref['title'] = cleanString(title)\n ref['editors'] = editors\n ref['reference'] = cleanString(reference)\n ref['publisher'] = cleanString(publisher)\n\n return ref\n\n\ndef _getPolymers(lines):\n \"\"\"Returns list of polymers (macromolecules).\"\"\"\n\n pdbid = lines['pdbid']\n polymers = dict()\n for i, line in lines['SEQRES']:\n ch = line[11]\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n poly.sequence += ''.join(getSequence(line[19:].split()))\n\n for i, line in lines['DBREF ']:\n i += 1\n\n ch = line[12]\n if ch == ' ':\n if not len(polymers) == 1:\n LOGGER.warn('DBREF chain identifier is not specified '\n '({0}:{1})'.format(pdbid, i))\n continue\n else:\n ch = list(polymers)[0]\n dbabbr = line[26:32].strip()\n dbref = DBRef()\n dbref.dbabbr = dbabbr\n dbref.database = _PDB_DBREF.get(dbabbr, 'Unknown')\n dbref.accession = line[33:41].strip()\n dbref.idcode = line[42:54].strip()\n\n try:\n first = int(line[14:18])\n except:\n LOGGER.warn('DBREF for chain {2}: failed to parse '\n 'initial sequence number of the PDB sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n try:\n last = int(line[20:24])\n except:\n LOGGER.warn('DBREF for chain {2}: failed to parse '\n 'ending sequence number of the PDB sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n try:\n dbref.first = (first, line[18], 
int(line[56:60]))\n except:\n LOGGER.warn('DBREF for chain {2}: failed to parse '\n 'initial sequence number of the database sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n try:\n dbref.last = (last, line[24].strip(), int(line[62:67]))\n except:\n LOGGER.warn('DBREF for chain {2}: failed to parse '\n 'ending sequence number of the database sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n poly.dbrefs.append(dbref)\n\n dbref1 = lines['DBREF1']\n dbref2 = lines['DBREF2']\n if len(dbref1) != len(dbref2):\n LOGGER.warn('DBREF1 and DBREF1 records are not complete')\n dbref12 = []\n else:\n dbref12 = zip(dbref1, dbref2) # PY3K: OK\n\n for dbref1, dbref2 in dbref12:\n i, line = dbref1\n i += 1\n ch = line[12]\n\n dbabbr = line[26:32].strip()\n dbref = DBRef()\n dbref.dbabbr = dbabbr\n dbref.database = _PDB_DBREF.get(dbabbr, 'Unknown')\n dbref.idcode = line[47:67].strip()\n\n try:\n first = int(line[14:18])\n except:\n LOGGER.warn('DBREF1 for chain {2}: failed to parse '\n 'initial sequence number of the PDB sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n try:\n last = int(line[20:24])\n except:\n LOGGER.warn('DBREF1 for chain {2}: failed to parse '\n 'ending sequence number of the PDB sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n i, line = dbref2\n i += 1\n if line[12] == ' ':\n LOGGER.warn('DBREF2 chain identifier is not specified '\n '({0}:{1})'.format(pdbid, ch))\n elif line[12] != ch:\n LOGGER.warn('DBREF1 and DBREF2 chain id mismatch'\n '({0}:{1})'.format(pdbid, ch))\n\n dbref.accession = line[18:40].strip()\n try:\n dbref.first = (first, line[18].strip(), int(line[45:55]))\n except:\n LOGGER.warn('DBREF2 for chain {2}: failed to parse '\n 'initial sequence number of the database sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n try:\n dbref.last = (last, line[24].strip(), int(line[57:67]))\n except:\n LOGGER.warn('DBREF2 for chain {2}: failed to parse '\n 'ending sequence number of the database sequence '\n '({0}:{1})'.format(pdbid, i, ch))\n\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n poly.dbrefs.append(dbref)\n\n for poly in polymers.values(): # PY3K: OK\n resnum = []\n for dbref in poly.dbrefs:\n dbabbr = dbref.dbabbr\n if dbabbr == 'PDB':\n if not (pdbid == dbref.accession == dbref.idcode):\n LOGGER.warn('DBREF for chain {2} refers to PDB '\n 'entry {3} ({0}:{1})'\n .format(pdbid, i, ch, dbref.accession))\n else:\n if pdbid == dbref.accession or pdbid == dbref.idcode:\n LOGGER.warn('DBREF for chain {2} is {3}, '\n 'expected PDB ({0}:{1})'\n .format(pdbid, i, ch, dbabbr))\n dbref.database = 'PDB'\n resnum.append((dbref.first[0], dbref.last[0]))\n resnum.sort()\n last = -10000\n for first, temp in resnum:\n if first <= last:\n LOGGER.warn('DBREF records overlap for chain {0} ({1})'\n .format(poly.chid, pdbid))\n last = temp\n\n for i, line in lines['MODRES']:\n ch = line[16]\n if ch == ' ':\n if not len(polymers) == 1:\n LOGGER.warn('MODRES chain identifier is not specified '\n '({0}:{1})'.format(pdbid, i))\n continue\n else:\n ch = list(polymers)[0]\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n if poly.modified is None:\n poly.modified = []\n poly.modified.append((line[12:15].strip(), line[16],\n line[18:22].strip() + line[22].strip(), \n line[24:27].strip(),\n line[29:70].strip()))\n\n for i, line in lines['SEQADV']:\n i += 1\n ch = line[16]\n if ch == ' ':\n if not len(polymers) == 1:\n LOGGER.warn('SEQADV chain identifier is not specified '\n '({0}:{1})'.format(pdbid, i))\n continue\n 
else:\n ch = list(polymers)[0]\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n dbabbr = line[24:28].strip()\n resname = line[12:15].strip()\n try:\n resnum = int(line[18:22].strip())\n except:\n #LOGGER.warn('SEQADV for chain {2}: failed to parse PDB sequence '\n # 'number ({0}:{1})'.format(pdbid, i, ch))\n continue\n icode = line[22].strip()\n try:\n dbnum = int(line[43:48].strip())\n except:\n #LOGGER.warn('SEQADV for chain {2}: failed to parse database '\n # 'sequence number ({0}:{1})'.format(pdbid, i, ch))\n continue \n\n comment = line[49:70].strip()\n match = False\n for dbref in poly.dbrefs:\n if not dbref.first[0] <= resnum <= dbref.last[0]:\n continue\n match = True\n if dbref.dbabbr != dbabbr:\n LOGGER.warn('SEQADV for chain {2}: reference database '\n 'mismatch, expected {3} parsed {4} '\n '({0}:{1})'.format(pdbid, i, ch,\n repr(dbref.dbabbr), repr(dbabbr)))\n continue\n dbacc = line[29:38].strip()\n if dbref.accession[:9] != dbacc[:9]:\n LOGGER.warn('SEQADV for chain {2}: accession code '\n 'mismatch, expected {3} parsed {4} '\n '({0}:{1})'.format(pdbid, i, ch,\n repr(dbref.accession), repr(dbacc)))\n continue\n dbref.diff.append((resname, resnum, icode, dbnum, dbnum, comment))\n if not match:\n LOGGER.warn('SEQADV for chain {2}: database sequence reference '\n 'not found ({0}:{1})'.format(pdbid, i, ch))\n continue\n\n string = ' '.join([line[10:].strip() for i, line in lines['COMPND']])\n if string.startswith('MOL_ID'):\n dict_ = {}\n for molecule in string[6:].split('MOL_ID'):\n dict_.clear()\n for token in molecule.split(';'):\n token = token.strip()\n if not token:\n continue\n items = token.split(':', 1)\n if len(items) == 2:\n key, value = items\n dict_[key.strip()] = value.strip()\n\n chains = dict_.pop('CHAIN', '').strip()\n\n if not chains:\n continue\n for ch in chains.split(','):\n ch = ch.strip()\n poly = polymers.get(ch, Polymer(ch))\n polymers[ch] = poly\n poly.name = dict_.get('MOLECULE', '')\n\n poly.fragment = dict_.get('FRAGMENT', '')\n\n poly.comments = dict_.get('OTHER_DETAILS', '')\n\n val = dict_.get('SYNONYM', '')\n poly.synonyms = [s.strip() for s in val.split(',')\n ] if val else []\n\n val = dict_.get('EC', '')\n poly.ec = [s.strip() for s in val.split(',')] if val else []\n\n poly.engineered = dict_.get('ENGINEERED', '') == 'YES'\n poly.mutation = dict_.get('MUTATION', '') == 'YES'\n\n return list(polymers.values())\n\n\ndef _getChemicals(lines):\n \"\"\"Returns list of chemical components (heterogens).\"\"\"\n\n chemicals = defaultdict(list)\n chem_names = defaultdict(str)\n chem_synonyms = defaultdict(str)\n chem_formulas = defaultdict(str)\n for i, line in lines['HET ']:\n chem = Chemical(line[7:10].strip())\n chem.chain = line[12].strip()\n chem.resnum = int(line[13:17])\n chem.icode = line[17].strip()\n chem.natoms = int(line[20:25].strip() or '0')\n chem.description = line[30:70].strip()\n chemicals[chem.resname].append(chem)\n for i, line in lines['HETNAM']:\n chem = line[11:14].strip()\n chem_names[chem] += line[15:70].rstrip()\n for i, line in lines['HETSYN']:\n chem = line[11:14].strip()\n chem_synonyms[chem] += line[15:70].strip()\n for i, line in lines['FORMUL']:\n chem = line[12:15].strip()\n chem_formulas[chem] += line[18:70].rstrip()\n\n for chem, name in chem_names.items(): # PY3K: OK\n name = cleanString(name)\n for chem in chemicals[chem]:\n chem.name = cleanString(name, nows=True)\n for chem, formula in chem_formulas.items(): # PY3K: OK\n formula = cleanString(formula)\n for chem in chemicals[chem]:\n chem.formula = 
formula\n for chem, synonyms in chem_synonyms.items(): # PY3K: OK\n synonyms = cleanString(synonyms)\n synonyms = synonyms.split(';')\n for chem in chemicals[chem]:\n chem.synonyms = [syn.strip() for syn in synonyms]\n\n alist = []\n for chem in chemicals.values(): # PY3K: OK\n for chem in chem:\n alist.append(chem)\n return alist\n\n\ndef _getVersion(lines):\n\n for i, line in lines['REMARK 4']:\n if 'COMPLIES' in line:\n try:\n # Return a string, because floating makes 3.20, 3.2 or\n # may arise problems if wwPDB uses a version number like 3.30.1\n return line.split('V.')[1].split(',')[0].strip()\n except:\n return None\n\n\ndef _getNumModels(lines):\n\n # \"NUMMDL\", Integer, 11 - 14: Number of models.\n line = lines['NUMMDL']\n if line:\n i, line = line[0]\n try:\n return int(line[10:14])\n except:\n pass\n\n# Make sure that lambda functions defined below won't raise exceptions\n_PDB_HEADER_MAP = {\n 'helix': _getHelix,\n 'helix_range': _getHelixRange,\n 'sheet': _getSheet,\n 'sheet_range': _getSheetRange,\n 'chemicals': _getChemicals,\n 'polymers': _getPolymers,\n 'reference': _getReference,\n 'resolution': _getResolution,\n 'biomoltrans': _getBiomoltrans,\n 'version': _getVersion,\n 'deposition_date': lambda lines: lines['HEADER'][0][1][50:59].strip()\n if lines['HEADER'] else None,\n 'classification': lambda lines: lines['HEADER'][0][1][10:50].strip()\n if lines['HEADER'] else None,\n 'identifier': lambda lines: lines['HEADER'][0][1][62:66].strip()\n if lines['HEADER'] else None,\n 'title': lambda lines: cleanString(\n ''.join([line[1][10:].rstrip() for line in lines['TITLE ']])\n ) if lines['TITLE '] else None,\n 'experiment': lambda lines: cleanString(\n ''.join([line[1][10:].rstrip() for line in lines['EXPDTA']])\n ) if lines['EXPDTA'] else None,\n 'authors': lambda lines: cleanString(\n ''.join([line[1][10:].rstrip() for line in lines['AUTHOR']]),\n True).split(',') if lines['AUTHOR'] else None,\n 'split': lambda lines: (' '.join([line[1][11:].rstrip()\n for line in lines['SPLIT ']])).split()\n if lines['SPLIT '] else None,\n 'model_type': lambda lines: cleanString(\n ''.join([line[1][10:].rstrip() for line in lines['MDLTYP']])\n ) if lines['MDLTYP'] else None,\n 'n_models': _getNumModels,\n 'space_group': _getSpaceGroup,\n 'related_entries': _getRelatedEntries,\n}\n\nmapHelix = {\n 1: 'H', # 4-turn helix (alpha helix)\n 2: '', # other helix, Right-handed omega\n 3: 'I', # 5-turn helix (pi helix)\n 4: '', # other helix, Right-handed gamma\n 5: 'G', # 3-turn helix (3-10 helix)\n 6: '', # Left-handed alpha\n 7: '', # Left-handed omega\n 8: '', # Left-handed gamma\n 9: '', # 2 - 7 ribbon/helix\n 10: '', # Polyproline\n}\n\ndef isHelix(secstrs):\n torf = np.logical_and(secstrs=='', False)\n for h in mapHelix.values():\n if h != '':\n torf = np.logical_or(torf, secstrs==h)\n return torf\n\ndef isSheet(secstrs):\n torf = secstrs == 'E'\n return torf\n\ndef assignSecstr(header, atoms, coil=True):\n \"\"\"Assign secondary structure from *header* dictionary to *atoms*.\n *header* must be a dictionary parsed using the :func:`.parsePDB`.\n *atoms* may be an instance of :class:`.AtomGroup`, :class:`.Selection`,\n :class:`.Chain` or :class:`.Residue`. ProDy can be configured to\n automatically parse and assign secondary structure information using\n ``confProDy(auto_secondary=True)`` command. See also :func:`.confProDy`\n function.\n\n The Dictionary of Protein Secondary Structure, in short DSSP, type\n single letter code assignments are used:\n\n * **G** = 3-turn helix (310 helix). 
Min length 3 residues.\n * **H** = 4-turn helix (alpha helix). Min length 4 residues.\n * **I** = 5-turn helix (pi helix). Min length 5 residues.\n * **T** = hydrogen bonded turn (3, 4 or 5 turn)\n * **E** = extended strand in parallel and/or anti-parallel\n beta-sheet conformation. Min length 2 residues.\n * **B** = residue in isolated beta-bridge (single pair beta-sheet\n hydrogen bond formation)\n * **S** = bend (the only non-hydrogen-bond based assignment).\n * **C** = residues not in one of above conformations.\n\n\n See http://en.wikipedia.org/wiki/Protein_secondary_structure#The_DSSP_code\n for more details.\n\n Following PDB helix classes are omitted:\n\n * Right-handed omega (2, class number)\n * Right-handed gamma (4)\n * Left-handed alpha (6)\n * Left-handed omega (7)\n * Left-handed gamma (8)\n * 2 - 7 ribbon/helix (9)\n * Polyproline (10)\n\n Secondary structures are assigned to all atoms in a residue. Amino acid\n residues without any secondary structure assignments in the header\n section will be assigned coil (C) conformation. This can be prevented\n by passing ``coil=False`` argument.\"\"\"\n\n if not isinstance(header, dict):\n raise TypeError('header must be a dictionary')\n helix = header.get('helix', {})\n sheet = header.get('sheet', {})\n if len(helix) == 0 and len(sheet) == 0:\n #LOGGER.warn('header does not contain secondary structure data')\n return atoms\n\n ssa = atoms.getSecstrs()\n if ssa is None:\n if isinstance(atoms, AtomGroup):\n ag = atoms\n else:\n ag = atoms.getAtomGroup()\n ag.setSecstrs(np.zeros(ag.numAtoms(),\n ATOMIC_FIELDS['secondary'].dtype))\n ag.setSecids(np.zeros(ag.numAtoms(),\n ATOMIC_FIELDS['secid'].dtype))\n ag.setSecclasses(np.zeros(ag.numAtoms(),\n ATOMIC_FIELDS['secclass'].dtype)) \n ag.setSecindices(np.zeros(ag.numAtoms(),\n ATOMIC_FIELDS['secindex'].dtype)) \n\n prot = atoms.select('protein')\n if prot is not None and coil:\n prot.setSecstrs('C')\n hierview = atoms.getHierView()\n count = 0\n getResidue = hierview.getResidue\n for key, value in helix.items(): # PY3K: OK\n res = getResidue(*key)\n if res is None:\n continue\n res.setSecids(value[2])\n res.setSecclasses(value[0])\n res.setSecindices(value[1])\n res.setSecstrs(mapHelix[value[0]])\n \n count += 1\n for key, value in sheet.items(): # PY3K: OK\n res = getResidue(*key)\n if res is None:\n continue\n res.setSecids(value[2])\n res.setSecclasses(value[0])\n res.setSecindices(value[1])\n res.setSecstrs('E')\n count += 1\n\n LOGGER.info('Secondary structures were assigned to {0} residues.'\n .format(count))\n\n return atoms\n\n\ndef buildBiomolecules(header, atoms, biomol=None):\n \"\"\"Returns *atoms* after applying biomolecular transformations from *header*\n dictionary. Biomolecular transformations are applied to all coordinate\n sets in the molecule.\n\n Some PDB files contain transformations for more than 1 biomolecules. A\n specific set of transformations can be choosen using *biomol* argument.\n Transformation sets are identified by numbers, e.g. ``\"1\"``, ``\"2\"``, ...\n\n If multiple biomolecular transformations are provided in the *header*\n dictionary, biomolecules will be returned as\n :class:`.AtomGroup` instances in a :func:`list`.\n\n If the resulting biomolecule has more than 26 chains, the molecular\n assembly will be split into multiple :class:`.AtomGroup`\n instances each containing at most 26 chains. 
These\n :class:`.AtomGroup` instances will be returned in a tuple.\n\n Note that atoms in biomolecules are ordered according to chain identifiers.\n When multiple chains in a biomolecule have the same chain identifier, they \n are given different segment names to distinguish them.\n \"\"\"\n\n if not isinstance(header, dict):\n raise TypeError('header must be a dictionary')\n\n if not isinstance(atoms, Atomic):\n raise TypeError('atoms must be an Atomic instance')\n\n biomt = header.get('biomoltrans')\n if not isinstance(biomt, dict) or len(biomt) == 0:\n LOGGER.warn(\"no biomolecular transformations found so original structure was used\")\n return atoms\n\n if not isinstance(atoms, AtomGroup):\n atoms = atoms.copy()\n\n biomols = []\n if biomol is None:\n keys = list(biomt)\n else:\n biomol = str(biomol)\n if biomol in biomt:\n keys = [biomol]\n else:\n LOGGER.warn('Transformations for biomolecule {0} was not '\n 'found in the header dictionary.'.format(biomol))\n return None\n\n keys.sort()\n for i in keys:\n segnm = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'*20)\n ags = []\n mt = biomt[i]\n # mt is a list, first item is list of chain identifiers\n # following items are lines corresponding to transformation\n # mt must have 3n + 1 lines\n if (len(mt)) % 4 != 0:\n LOGGER.warn('Biomolecular transformations {0} were not '\n 'applied'.format(i))\n continue\n\n for times in range(int((len(mt)) / 4)):\n rotation = np.zeros((3, 3))\n translation = np.zeros(3)\n line0 = np.fromstring(mt[times*4+1], sep=' ')\n rotation[0, :] = line0[:3]\n translation[0] = line0[3]\n line1 = np.fromstring(mt[times*4+2], sep=' ')\n rotation[1, :] = line1[:3]\n translation[1] = line1[3]\n line2 = np.fromstring(mt[times*4+3], sep=' ')\n rotation[2, :] = line2[:3]\n translation[2] = line2[3]\n t = Transformation(rotation, translation)\n\n newag = atoms.select('chain ' + ' '.join(mt[times*4+0])).copy()\n if newag is None:\n continue\n newag.all.setSegnames(segnm.pop(0))\n for acsi in range(newag.numCoordsets()):\n newag.setACSIndex(acsi)\n newag = t.apply(newag)\n newag.setACSIndex(0)\n ags.append(newag)\n\n if ags:\n newag = ags.pop(0)\n while ags:\n newag += ags.pop(0)\n newag.setTitle('{0} biomolecule {1}'\n .format(atoms.getTitle(), i))\n biomols.append(newag)\n \n if biomols:\n if len(biomols) == 1:\n return biomols[0]\n else:\n return biomols\n else:\n return None\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.where", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.stack", "numpy.finfo", "numpy.diff", "numpy.outer", "numpy.zeros", "numpy.append", "numpy.array", "numpy.vdot", "numpy.abs", "numpy.ones", "numpy.sign" ], [ "numpy.savez", "numpy.asarray", "numpy.savetxt", "numpy.load", "numpy.array", "numpy.zeros", "numpy.loadtxt" ], [ "numpy.logical_or", "numpy.logical_and", "numpy.zeros", "numpy.fromstring" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
haowu74/advanced-lane-lines
[ "5daf8572e132ee42a3309ff6f73680301cc8953c" ]
[ "src/color_gradient_threshold.py" ]
[ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the derivative in x or y given orient = 'x' or 'y'\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the derivative or gradient\n abs_sobel = np.absolute(sobel)\n # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n # 5) Create a mask of 1's where the scaled gradient magnitude\n # is > thresh_min and < thresh_max\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n\n # binary_output = np.copy(img) # Remove this line\n return binary_output\n\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Calculate the magnitude\n abs_sobel = np.sqrt(np.multiply(sobelx, sobelx) + np.multiply(sobely, sobely))\n # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n # 5) Create a binary mask where mag thresholds are met\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n # binary_output = np.copy(img) # Remove this line\n return binary_output\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the x and y gradients\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n dir = np.arctan2(abs_sobely, abs_sobelx)\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(dir)\n binary_output[(dir >= thresh[0]) & (dir <= thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n # binary_output = np.copy(img) # Remove this line\n return binary_output\n\ndef hls_select(img, thresh=(0, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n return binary_output\n\n" ]
[ [ "numpy.absolute", "numpy.multiply", "numpy.arctan2", "numpy.max", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arbenton/garage
[ "5c398255fbfae375370483f18216996d82590a88", "5c398255fbfae375370483f18216996d82590a88", "96cb8887fcae90531a645d540653010e7fe10fcc", "5c398255fbfae375370483f18216996d82590a88" ]
[ "tests/garage/tf/core/test_mlp.py", "src/garage/experiment/experiment.py", "src/garage/tf/distributions/categorical.py", "tests/garage/tf/models/test_mlp_model.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nfrom garage.tf.core.mlp import mlp\nfrom tests.fixtures import TfGraphTestCase\n\n\nclass TestMLP(TfGraphTestCase):\n def setup_method(self):\n super(TestMLP, self).setup_method()\n self.obs_input = np.array([[1, 2, 3, 4]])\n input_shape = self.obs_input.shape[1:] # 4\n self.hidden_nonlinearity = tf.nn.relu\n\n self._input = tf.placeholder(\n tf.float32, shape=(None, ) + input_shape, name='input')\n\n self._output_shape = 2\n\n # We build a default mlp\n with tf.variable_scope('MLP'):\n self.mlp_f = mlp(\n input_var=self._input,\n output_dim=self._output_shape,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=self.hidden_nonlinearity,\n name='mlp1')\n\n self.sess.run(tf.global_variables_initializer())\n\n def test_multiple_same_mlp(self):\n # We create another mlp with the same name, trying to reuse it\n with tf.variable_scope('MLP', reuse=True):\n self.mlp_same_copy = mlp(\n input_var=self._input,\n output_dim=self._output_shape,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=self.hidden_nonlinearity,\n name='mlp1')\n\n # We modify the weight of the default mlp and feed\n # The another mlp created should output the same result\n with tf.variable_scope('MLP', reuse=True):\n w = tf.get_variable('mlp1/hidden_0/kernel')\n self.sess.run(w.assign(w + 1))\n mlp_output = self.sess.run(\n self.mlp_f, feed_dict={self._input: self.obs_input})\n mlp_output2 = self.sess.run(\n self.mlp_same_copy, feed_dict={self._input: self.obs_input})\n\n np.testing.assert_array_almost_equal(mlp_output, mlp_output2)\n\n def test_different_mlp(self):\n # We create another mlp with different name\n with tf.variable_scope('MLP'):\n self.mlp_different_copy = mlp(\n input_var=self._input,\n output_dim=self._output_shape,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=self.hidden_nonlinearity,\n name='mlp2')\n\n # Initialize the new mlp variables\n self.sess.run(tf.global_variables_initializer())\n\n # We modify the weight of the default mlp and feed\n # The another mlp created should output different result\n with tf.variable_scope('MLP', reuse=True):\n w = tf.get_variable('mlp1/hidden_0/kernel')\n self.sess.run(w.assign(w + 1))\n mlp_output = self.sess.run(\n self.mlp_f, feed_dict={self._input: self.obs_input})\n mlp_output2 = self.sess.run(\n self.mlp_different_copy,\n feed_dict={self._input: self.obs_input})\n\n np.not_equal(mlp_output, mlp_output2)\n\n def test_output_shape(self):\n mlp_output = self.sess.run(\n self.mlp_f, feed_dict={self._input: self.obs_input})\n\n assert mlp_output.shape[1] == self._output_shape\n\n def test_output_value(self):\n with tf.variable_scope('MLP', reuse=True):\n h1_w = tf.get_variable('mlp1/hidden_0/kernel')\n h1_b = tf.get_variable('mlp1/hidden_0/bias')\n h2_w = tf.get_variable('mlp1/hidden_1/kernel')\n h2_b = tf.get_variable('mlp1/hidden_1/bias')\n out_w = tf.get_variable('mlp1/output/kernel')\n out_b = tf.get_variable('mlp1/output/bias')\n\n mlp_output = self.sess.run(\n self.mlp_f, feed_dict={self._input: self.obs_input})\n\n # First layer\n h2_in = tf.matmul(self._input, h1_w) + h1_b\n h2_in = self.hidden_nonlinearity(h2_in)\n\n # Second layer\n h3_in = tf.matmul(h2_in, h2_w) + h2_b\n h3_in = self.hidden_nonlinearity(h3_in)\n\n # Output layer\n h3_out = tf.matmul(h3_in, out_w) + out_b\n out = self.sess.run(h3_out, feed_dict={self._input: self.obs_input})\n\n np.testing.assert_array_equal(out, mlp_output)\n\n def test_layer_normalization(self):\n # Create a mlp with layer normalization\n with tf.variable_scope('MLP'):\n 
self.mlp_f_w_n = mlp(\n input_var=self._input,\n output_dim=self._output_shape,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=self.hidden_nonlinearity,\n name='mlp2',\n layer_normalization=True)\n\n # Initialize the new mlp variables\n self.sess.run(tf.global_variables_initializer())\n\n with tf.variable_scope('MLP', reuse=True):\n h1_w = tf.get_variable('mlp2/hidden_0/kernel')\n h1_b = tf.get_variable('mlp2/hidden_0/bias')\n h2_w = tf.get_variable('mlp2/hidden_1/kernel')\n h2_b = tf.get_variable('mlp2/hidden_1/bias')\n out_w = tf.get_variable('mlp2/output/kernel')\n out_b = tf.get_variable('mlp2/output/bias')\n beta_1 = tf.get_variable('mlp2/LayerNorm/beta')\n gamma_1 = tf.get_variable('mlp2/LayerNorm/gamma')\n beta_2 = tf.get_variable('mlp2/LayerNorm_1/beta')\n gamma_2 = tf.get_variable('mlp2/LayerNorm_1/gamma')\n\n # First layer\n y = tf.matmul(self._input, h1_w) + h1_b\n y = self.hidden_nonlinearity(y)\n mean, variance = tf.nn.moments(y, [1], keep_dims=True)\n normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)\n y_out = normalized_y * gamma_1 + beta_1\n\n # Second layer\n y = tf.matmul(y_out, h2_w) + h2_b\n y = self.hidden_nonlinearity(y)\n mean, variance = tf.nn.moments(y, [1], keep_dims=True)\n normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)\n y_out = normalized_y * gamma_2 + beta_2\n\n # Output layer\n y = tf.matmul(y_out, out_w) + out_b\n\n out = self.sess.run(y, feed_dict={self._input: self.obs_input})\n mlp_output = self.sess.run(\n self.mlp_f_w_n, feed_dict={self._input: self.obs_input})\n\n np.testing.assert_array_almost_equal(out, mlp_output)\n", "# flake8: noqa\nimport base64\nimport collections\nimport datetime\nimport inspect\nimport os\nimport os.path as osp\nimport pickle\nimport re\nimport subprocess\nimport sys\n\nimport dateutil.tz\nimport numpy as np\n\nfrom garage.core import Serializable\n\n\nclass AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\n\nclass BinaryOp(Serializable):\n def __init__(self):\n Serializable.quick_init(self, locals())\n\n def rdiv(self, a, b):\n return b / a\n # def __init__(self, opname, a, b):\n # self.opname = opname\n # self.a = a\n # self.b = b\n\n\nclass VariantDict(AttrDict):\n def __init__(self, d, hidden_keys):\n super(VariantDict, self).__init__(d)\n self._hidden_keys = hidden_keys\n\n def dump(self):\n return {k: v for k, v in self.items() if k not in self._hidden_keys}\n\n\nclass VariantGenerator:\n \"\"\"\n Usage:\n\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", ['x', 'y'])\n vg.variants() => # all combinations of [1,2,3] x ['x','y']\n\n Supports noncyclic dependency among parameters:\n vg = VariantGenerator()\n vg.add(\"param1\", [1, 2, 3])\n vg.add(\"param2\", lambda param1: [param1+1, param1+2])\n vg.variants() => # ..\n \"\"\"\n\n def __init__(self):\n self._variants = []\n self._populate_variants()\n self._hidden_keys = []\n for k, vs, cfg in self._variants:\n if cfg.get('hide', False):\n self._hidden_keys.append(k)\n\n def add(self, key, vals, **kwargs):\n self._variants.append((key, vals, kwargs))\n\n def _populate_variants(self):\n methods = inspect.getmembers(\n self.__class__,\n predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))\n methods = [\n x[1].__get__(self, self.__class__) for x in methods\n if getattr(x[1], '__is_variant', False)\n ]\n for m in methods:\n self.add(m.__name__, m, **getattr(m, 
'__variant_config', dict()))\n\n def variants(self, randomized=False):\n ret = list(self.ivariants())\n if randomized:\n np.random.shuffle(ret)\n return list(map(self.variant_dict, ret))\n\n def variant_dict(self, variant):\n return VariantDict(variant, self._hidden_keys)\n\n def to_name_suffix(self, variant):\n suffix = []\n for k, vs, cfg in self._variants:\n if not cfg.get('hide', False):\n suffix.append(k + '_' + str(variant[k]))\n return '_'.join(suffix)\n\n def ivariants(self):\n dependencies = list()\n for key, vals, _ in self._variants:\n if hasattr(vals, '__call__'):\n args = inspect.getfullargspec(vals).args\n if hasattr(vals, 'im_self') or hasattr(vals, '__self__'):\n # remove the first 'self' parameter\n args = args[1:]\n dependencies.append((key, set(args)))\n else:\n dependencies.append((key, set()))\n sorted_keys = []\n # topo sort all nodes\n while len(sorted_keys) < len(self._variants):\n # get all nodes with zero in-degree\n free_nodes = [k for k, v in dependencies if not v]\n if not free_nodes:\n error_msg = 'Invalid parameter dependency: \\n'\n for k, v in dependencies:\n if v:\n error_msg += k + ' depends on ' + ' & '.join(v) + '\\n'\n raise ValueError(error_msg)\n dependencies = [(k, v) for k, v in dependencies\n if k not in free_nodes]\n # remove the free nodes from the remaining dependencies\n for _, v in dependencies:\n v.difference_update(free_nodes)\n sorted_keys += free_nodes\n return self._ivariants_sorted(sorted_keys)\n\n def _ivariants_sorted(self, sorted_keys):\n if not sorted_keys:\n yield dict()\n else:\n first_keys = sorted_keys[:-1]\n first_variants = self._ivariants_sorted(first_keys)\n last_key = sorted_keys[-1]\n last_vals = [v for k, v, _ in self._variants if k == last_key][0]\n if hasattr(last_vals, '__call__'):\n last_val_keys = inspect.getfullargspec(last_vals).args\n if hasattr(last_vals, 'im_self') or hasattr(\n last_vals, '__self__'):\n last_val_keys = last_val_keys[1:]\n else:\n last_val_keys = None\n for variant in first_variants:\n if hasattr(last_vals, '__call__'):\n last_variants = last_vals(\n **{k: variant[k]\n for k in last_val_keys})\n for last_choice in last_variants:\n yield AttrDict(variant, **{last_key: last_choice})\n else:\n for last_choice in last_vals:\n yield AttrDict(variant, **{last_key: last_choice})\n\n\ndef variant(*args, **kwargs):\n def _variant(fn):\n fn.__is_variant = True\n fn.__variant_config = kwargs\n return fn\n\n if len(args) == 1 and isinstance(args[0], collections.Callable):\n return _variant(args[0])\n return _variant\n\n\ndef query_yes_no(question, default='yes'):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\nexp_count = 
0\nnow = datetime.datetime.now(dateutil.tz.tzlocal())\ntimestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n\n\ndef run_experiment(method_call=None,\n batch_tasks=None,\n exp_prefix='experiment',\n exp_name=None,\n log_dir=None,\n script='garage.experiment.experiment_wrapper',\n python_command='python',\n dry=False,\n env=None,\n variant=None,\n use_tf=False,\n use_gpu=False,\n use_cloudpickle=None,\n pre_commands=None,\n **kwargs):\n \"\"\"Serialize the method call and run the experiment using the\n specified mode.\n\n Args:\n method_call (callable): A method call.\n batch_tasks (list[dict]): A batch of method calls.\n exp_prefix (str): Name prefix for the experiment.\n exp_name (str): Name of the experiment.\n log_dir (str): Log directory for the experiment.\n script (str): The name of the entrance point python script.\n python_command (str): Python command to run the experiment.\n dry (bool): Whether to do a dry-run, which only prints the\n commands without executing them.\n env (dict): Extra environment variables.\n variant (dict): If provided, should be a dictionary of parameters.\n use_tf (bool): Used along with the Theano and GPU configuration\n when using TensorFlow\n use_gpu (bool): Whether the launched task is running on GPU.\n This triggers a few configuration changes including certain\n environment flags.\n use_cloudpickle (bool): Whether to use cloudpickle or not.\n pre_commands (str): Pre commands to run the experiment.\n\n \"\"\"\n assert method_call is not None or batch_tasks is not None, (\n 'Must provide at least either method_call or batch_tasks')\n\n if use_cloudpickle is None:\n for task in (batch_tasks or [method_call]):\n assert hasattr(task, '__call__')\n use_cloudpickle = True\n # ensure variant exists\n if variant is None:\n variant = dict()\n\n if batch_tasks is None:\n batch_tasks = [\n dict(\n kwargs,\n pre_commands=pre_commands,\n method_call=method_call,\n exp_name=exp_name,\n log_dir=log_dir,\n env=env,\n variant=variant,\n use_cloudpickle=use_cloudpickle)\n ]\n\n global exp_count\n\n if use_tf:\n if not use_gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n else:\n os.unsetenv('CUDA_VISIBLE_DEVICES')\n\n for task in batch_tasks:\n call = task.pop('method_call')\n if use_cloudpickle:\n import cloudpickle\n data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')\n else:\n data = base64.b64encode(pickle.dumps(call)).decode('utf-8')\n task['args_data'] = data\n exp_count += 1\n\n if task.get('exp_name', None) is None:\n task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,\n exp_count)\n\n if task.get('log_dir', None) is None:\n task['log_dir'] = (\n '{log_dir}/local/{exp_prefix}/{exp_name}'.format(\n log_dir=osp.join(os.getcwd(), 'data'),\n exp_prefix=exp_prefix.replace('_', '-'),\n exp_name=task['exp_name']))\n\n if task.get('variant', None) is not None:\n variant = task.pop('variant')\n if 'exp_name' not in variant:\n variant['exp_name'] = task['exp_name']\n task['variant_data'] = base64.b64encode(\n pickle.dumps(variant)).decode('utf-8')\n elif 'variant' in task:\n del task['variant']\n task['env'] = task.get('env', dict()) or dict()\n task['env']['GARAGE_USE_GPU'] = str(use_gpu)\n task['env']['GARAGE_USE_TF'] = str(use_tf)\n\n for task in batch_tasks:\n env = task.pop('env', None)\n command = to_local_command(\n task, python_command=python_command, script=script)\n print(command)\n if dry:\n return\n try:\n if env is None:\n env = dict()\n subprocess.call(command, shell=True, env=dict(os.environ, **env))\n except Exception as e:\n print(e)\n if 
isinstance(e, KeyboardInterrupt):\n raise\n\n\n_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search\n\n\ndef _shellquote(s):\n \"\"\"Return a shell-escaped version of the string *s*.\"\"\"\n if not s:\n return \"''\"\n\n if _find_unsafe(s) is None:\n return s\n\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n\n\ndef _to_param_val(v):\n if v is None:\n return ''\n elif isinstance(v, list):\n return ' '.join(map(_shellquote, list(map(str, v))))\n else:\n return _shellquote(str(v))\n\n\ndef to_local_command(params,\n python_command='python',\n script='garage.experiment.experiment_wrapper'):\n command = python_command + ' -m ' + script\n\n garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))\n for k, v in garage_env.items():\n command = '{}={} '.format(k, v) + command\n pre_commands = params.pop('pre_commands', None)\n post_commands = params.pop('post_commands', None)\n if pre_commands is not None or post_commands is not None:\n print('Not executing the pre_commands: ', pre_commands,\n ', nor post_commands: ', post_commands)\n\n for k, v in params.items():\n if isinstance(v, dict):\n for nk, nv in v.items():\n if str(nk) == '_name':\n command += ' --{} {}'.format(k, _to_param_val(nv))\n else:\n command += \\\n ' --{}_{} {}'.format(k, nk, _to_param_val(nv))\n else:\n command += ' --{} {}'.format(k, _to_param_val(v))\n return command\n\n\ndef concretize(obj):\n if isinstance(obj, dict):\n # make sure that there's no hidden caveat\n ret = dict()\n for k, v in obj.items():\n ret[concretize(k)] = concretize(v)\n return ret\n elif isinstance(obj, (list, tuple)):\n return obj.__class__(list(map(concretize, obj)))\n else:\n return obj\n", "import numpy as np\nimport tensorflow as tf\n\nfrom garage.tf.distributions.base import Distribution\nfrom garage.tf.misc.tensor_utils import compile_function\n\nTINY = 1e-8\n\n\ndef from_onehot(x_var):\n ret = np.zeros((len(x_var), ), 'int32')\n nonzero_n, nonzero_a = np.nonzero(x_var)\n ret[nonzero_n] = nonzero_a\n return ret\n\n\nclass Categorical(Distribution):\n def __init__(self, dim, name=None):\n with tf.variable_scope(name, 'Categorical'):\n self._dim = dim\n self._name = name\n weights_var = tf.placeholder(\n dtype=tf.float32, shape=(None, dim), name='weights')\n self._f_sample = compile_function(\n inputs=[weights_var],\n outputs=tf.multinomial(\n tf.log(weights_var + 1e-8), num_samples=1)[:, 0],\n )\n\n @property\n def dim(self):\n return self._dim\n\n def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name=None):\n \"\"\"\n Compute the symbolic KL divergence of two categorical distributions\n \"\"\"\n with tf.name_scope(name, 'kl_sym',\n [old_dist_info_vars, new_dist_info_vars]):\n old_prob_var = old_dist_info_vars['prob']\n new_prob_var = new_dist_info_vars['prob']\n ndims = old_prob_var.get_shape().ndims\n # Assume layout is N * A\n return tf.reduce_sum(\n old_prob_var *\n (tf.log(old_prob_var + TINY) - tf.log(new_prob_var + TINY)),\n axis=ndims - 1)\n\n def kl(self, old_dist_info, new_dist_info):\n \"\"\"\n Compute the KL divergence of two categorical distributions\n \"\"\"\n old_prob = old_dist_info['prob']\n new_prob = new_dist_info['prob']\n return np.sum(\n old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),\n axis=-1)\n\n def likelihood_ratio_sym(self,\n x_var,\n old_dist_info_vars,\n new_dist_info_vars,\n name=None):\n with tf.name_scope(name, 'likelihood_ratio_sym',\n [x_var, 
old_dist_info_vars, new_dist_info_vars]):\n old_prob_var = old_dist_info_vars['prob']\n new_prob_var = new_dist_info_vars['prob']\n ndims = old_prob_var.get_shape().ndims\n x_var = tf.cast(x_var, tf.float32)\n # Assume layout is N * A\n return (tf.reduce_sum(new_prob_var * x_var, ndims - 1) + TINY) / \\\n (tf.reduce_sum(old_prob_var * x_var, ndims - 1) + TINY)\n\n def entropy_sym(self, dist_info_vars, name=None):\n with tf.name_scope(name, 'entropy_sym', [dist_info_vars]):\n probs = dist_info_vars['prob']\n return -tf.reduce_sum(probs * tf.log(probs + TINY), axis=1)\n\n def cross_entropy_sym(self,\n old_dist_info_vars,\n new_dist_info_vars,\n name=None):\n with tf.name_scope(name, 'cross_entropy_sym',\n [old_dist_info_vars, new_dist_info_vars]):\n old_prob_var = old_dist_info_vars['prob']\n new_prob_var = new_dist_info_vars['prob']\n ndims = old_prob_var.get_shape().ndims\n # Assume layout is N * A\n return tf.reduce_sum(\n old_prob_var * (-tf.log(new_prob_var + TINY)), axis=ndims - 1)\n\n def entropy(self, info):\n probs = info['prob']\n return -np.sum(probs * np.log(probs + TINY), axis=-1)\n\n def log_likelihood_sym(self, x_var, dist_info_vars, name=None):\n with tf.name_scope(name, 'log_likelihood_sym',\n [x_var, dist_info_vars]):\n probs = dist_info_vars['prob']\n ndims = probs.get_shape().ndims\n return tf.log(\n tf.reduce_sum(probs * tf.cast(x_var, tf.float32), ndims - 1) +\n TINY)\n\n def log_likelihood(self, xs, dist_info):\n probs = dist_info['prob']\n # Assume layout is N * A\n return np.log(np.sum(probs * xs, axis=-1) + TINY)\n\n @property\n def dist_info_specs(self):\n return [('prob', (self.dim, ))]\n\n def sample(self, dist_info):\n return self._f_sample(dist_info['prob'])\n\n def sample_sym(self, dist_info, name=None):\n with tf.name_scope(name, 'sample_sym', [dist_info]):\n probs = dist_info['prob']\n samples = tf.multinomial(tf.log(probs + 1e-8), num_samples=1)[:, 0]\n\n return tf.nn.embedding_lookup(\n np.eye(self.dim, dtype=np.float32), samples)\n", "import pickle\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom garage.tf.models import MLPDuelingModel\nfrom garage.tf.models import MLPMergeModel\nfrom garage.tf.models import MLPModel\nfrom tests.fixtures import TfGraphTestCase\n\n\nclass TestMLPModel(TfGraphTestCase):\n def setup_method(self):\n super().setup_method()\n self.input_var = tf.placeholder(tf.float32, shape=(None, 5))\n self.obs = np.ones((1, 5))\n\n # yapf: disable\n @pytest.mark.parametrize('output_dim, hidden_sizes', [\n (1, (0, )),\n (1, (1, )),\n (1, (2, )),\n (2, (3, )),\n (2, (1, 1)),\n (3, (2, 2)),\n ])\n # yapf: enable\n def test_output_values(self, output_dim, hidden_sizes):\n model = MLPModel(\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=None,\n hidden_w_init=tf.ones_initializer(),\n output_w_init=tf.ones_initializer())\n outputs = model.build(self.input_var)\n output = self.sess.run(outputs, feed_dict={self.input_var: self.obs})\n\n expected_output = np.full([1, output_dim], 5 * np.prod(hidden_sizes))\n\n assert np.array_equal(output, expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('output_dim, hidden_sizes', [\n (1, (0, )),\n (1, (1, )),\n (1, (2, )),\n (2, (3, )),\n (2, (1, 1)),\n (3, (2, 2)),\n ])\n # yapf: enable\n def test_output_values_dueling(self, output_dim, hidden_sizes):\n model = MLPDuelingModel(\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=None,\n hidden_w_init=tf.ones_initializer(),\n output_w_init=tf.ones_initializer())\n outputs = 
model.build(self.input_var)\n output = self.sess.run(outputs, feed_dict={self.input_var: self.obs})\n\n expected_output = np.full([1, output_dim], 5 * np.prod(hidden_sizes))\n\n assert np.array_equal(output, expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('output_dim, hidden_sizes', [\n (1, (0, )),\n (1, (1, )),\n (1, (2, )),\n (2, (3, )),\n (2, (1, 1)),\n (3, (2, 2)),\n ])\n # yapf: enable\n def test_output_values_merging(self, output_dim, hidden_sizes):\n model = MLPMergeModel(\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n concat_layer=0,\n hidden_nonlinearity=None,\n hidden_w_init=tf.ones_initializer(),\n output_w_init=tf.ones_initializer())\n\n input_var2 = tf.placeholder(tf.float32, shape=(None, 5))\n obs2 = np.ones((1, 5))\n\n outputs = model.build(self.input_var, input_var2)\n output = self.sess.run(\n outputs, feed_dict={\n self.input_var: self.obs,\n input_var2: obs2\n })\n\n expected_output = np.full([1, output_dim], 10 * np.prod(hidden_sizes))\n assert np.array_equal(output, expected_output)\n\n # yapf: disable\n @pytest.mark.parametrize('output_dim, hidden_sizes', [\n (1, (0, )),\n (1, (1, )),\n (1, (2, )),\n (2, (3, )),\n (2, (1, 1)),\n (3, (2, 2)),\n ])\n # yapf: enable\n def test_is_pickleable(self, output_dim, hidden_sizes):\n model = MLPModel(\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=None,\n hidden_w_init=tf.ones_initializer(),\n output_w_init=tf.ones_initializer())\n outputs = model.build(self.input_var)\n\n # assign bias to all one\n with tf.variable_scope('MLPModel/mlp', reuse=True):\n bias = tf.get_variable('hidden_0/bias')\n\n bias.load(tf.ones_like(bias).eval())\n\n output1 = self.sess.run(outputs, feed_dict={self.input_var: self.obs})\n\n h = pickle.dumps(model)\n with tf.Session(graph=tf.Graph()) as sess:\n input_var = tf.placeholder(tf.float32, shape=(None, 5))\n model_pickled = pickle.loads(h)\n outputs = model_pickled.build(input_var)\n output2 = sess.run(outputs, feed_dict={input_var: self.obs})\n\n assert np.array_equal(output1, output2)\n" ]
[ [ "tensorflow.get_variable", "tensorflow.matmul", "tensorflow.nn.moments", "tensorflow.placeholder", "numpy.testing.assert_array_equal", "tensorflow.global_variables_initializer", "tensorflow.variable_scope", "numpy.not_equal", "tensorflow.sqrt", "numpy.array", "numpy.testing.assert_array_almost_equal" ], [ "numpy.random.shuffle" ], [ "numpy.log", "numpy.nonzero", "tensorflow.reduce_sum", "numpy.eye", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.log", "tensorflow.variable_scope", "numpy.sum" ], [ "tensorflow.get_variable", "tensorflow.Graph", "numpy.array_equal", "tensorflow.ones_like", "tensorflow.placeholder", "numpy.ones", "tensorflow.ones_initializer", "numpy.prod", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]