repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
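Each row pairs a repository snapshot with the Python files in it that call the tracked library APIs, plus the candidate library versions for each file. A minimal sketch of how one row could be modeled in Python follows; the `DatasetRow` name and the dataclass layout are illustrative assumptions, not part of the dataset itself.

```python
from dataclasses import dataclass
from typing import Dict, List

@dataclass
class DatasetRow:
    """Illustrative container for one table row (the class name is an assumption)."""
    repo_name: str                                  # e.g. "billschereriii/SmartSim"
    hexsha: List[str]                               # commit SHA of each listed file
    file_path: List[str]                            # repository-relative path of each file
    code: List[str]                                 # full source text of each file
    apis: List[List[str]]                           # tracked API calls found in each file
    possible_versions: List[Dict[str, List[str]]]   # candidate library versions per file
```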
billschereriii/SmartSim
|
[
"7ef4cffeba23fe19b931bdae819f4de99bb112a3"
] |
[
"smartsim/ml/torch/data.py"
] |
[
"import numpy as np\nimport torch\n\nfrom smartsim.ml.data import DynamicDataDownloader, StaticDataDownloader\n\n\nclass StaticDataGenerator(StaticDataDownloader, torch.utils.data.IterableDataset):\n \"\"\"A class to download a dataset from the DB.\n\n Details about parameters and features of this class can be found\n in the documentation of ``StaticDataDownloader``, of which it is just\n a PyTorch-specialized sub-class.\n\n Note that if the ``StaticDataGenerator`` has to be used through a ``DataLoader``,\n `init_samples` must be set to `False`, as sources and samples will be initialized\n by the ``DataLoader`` workers.\n \"\"\"\n\n def __init__(self, **kwargs):\n StaticDataDownloader.__init__(self, **kwargs)\n\n def _add_samples(self, batch_name, target_name):\n if self.samples is None:\n self.samples = torch.tensor(self.client.get_tensor(batch_name))\n if self.need_targets:\n self.targets = torch.tensor(self.client.get_tensor(target_name))\n else:\n self.samples = torch.cat(\n (self.samples, torch.tensor(self.client.get_tensor(batch_name)))\n )\n if self.need_targets:\n self.targets = torch.cat(\n (self.targets, torch.tensor(self.client.get_tensor(target_name)))\n )\n\n self.num_samples = self.samples.shape[0]\n self.indices = np.arange(self.num_samples)\n self.log(\"Success!\")\n self.log(f\"New dataset size: {self.num_samples}\")\n\n def update_data(self):\n self._update_samples_and_targets()\n if self.shuffle:\n np.random.shuffle(self.indices)\n\n\nclass DynamicDataGenerator(DynamicDataDownloader, StaticDataGenerator):\n \"\"\"A class to download batches from the DB.\n\n Details about parameters and features of this class can be found\n in the documentation of ``DynamicDataDownloader``, of which it is just\n a PyTorch-specialized sub-class.\n\n Note that if the ``DynamicDataGenerator`` has to be used through a ``DataLoader``,\n `init_samples` must be set to `False`, as sources and samples will be initialized\n by the ``DataLoader`` workers.\n \"\"\"\n\n def __init__(self, **kwargs):\n StaticDataGenerator.__init__(self, **kwargs)\n\n def __iter__(self):\n if self.sources:\n self.update_data()\n return super().__iter__()\n\n def _add_samples(self, batch_name, target_name):\n StaticDataGenerator._add_samples(self, batch_name, target_name)\n\n def __iter__(self):\n if self.sources:\n self.update_data()\n return super().__iter__()\n\n\nclass DataLoader(torch.utils.data.DataLoader): # pragma: no cover\n \"\"\"DataLoader to be used as a wrapper of StaticDataGenerator or DynamicDataGenerator\n\n This is just a sub-class of ``torch.utils.data.DataLoader`` which\n sets up sources of a data generator correctly. DataLoader parameters\n such as `num_workers` can be passed at initialization. 
`batch_size` should always\n be set to None.\n \"\"\"\n\n def __init__(self, dataset: StaticDataGenerator, **kwargs):\n super().__init__(\n dataset,\n worker_init_fn=self.worker_init_fn,\n persistent_workers=True,\n **kwargs,\n )\n\n @staticmethod\n def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info()\n dataset = worker_info.dataset # the dataset copy in this worker process\n dataset.init_sources()\n overall_sources = dataset.sources\n\n worker_id = worker_info.id\n\n # configure the dataset to only process the split workload\n per_worker = int((len(overall_sources)) // worker_info.num_workers)\n\n if per_worker > 0:\n if worker_id < worker_info.num_workers - 1:\n sources = overall_sources[\n worker_id * per_worker : (worker_id + 1) * per_worker\n ]\n else:\n sources = overall_sources[worker_id * per_worker :]\n else:\n if worker_id < len(overall_sources):\n sources = overall_sources[worker_id]\n else:\n sources = []\n\n dataset.init_samples(sources)\n"
] |
[
[
"numpy.arange",
"torch.utils.data.get_worker_info",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
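The SmartSim snippet in the row above subclasses `torch.utils.data.DataLoader` so that each worker process initializes its own slice of the data sources, and its docstring requires `batch_size` to be `None`. A minimal usage sketch under those assumptions; any downloader keyword arguments beyond `init_samples` are unknown here and omitted.

```python
# Minimal usage sketch for the SmartSim PyTorch wrappers in the row above.
# The import path follows the row's file_path; arguments other than
# init_samples are hypothetical placeholders.
from smartsim.ml.torch.data import DataLoader, DynamicDataGenerator

generator = DynamicDataGenerator(init_samples=False)  # workers initialize sources/samples
loader = DataLoader(generator, batch_size=None, num_workers=2)  # batch_size must stay None

for batch in loader:  # what each batch contains depends on the downloader configuration
    pass
```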
dwhite54/insightface
|
[
"ea172e4921c3960c0684404afff6d0d862447eae",
"ea172e4921c3960c0684404afff6d0d862447eae",
"ea172e4921c3960c0684404afff6d0d862447eae",
"ea172e4921c3960c0684404afff6d0d862447eae"
] |
[
"detection/RetinaFaceAntiCov/test.py",
"recognition/partial_fc/mxnet/evaluation/ijb.py",
"gender-age/mtcnn_detector.py",
"gender-age/face_model.py"
] |
[
"import cv2\nimport sys\nimport numpy as np\nimport datetime\nimport os\nimport glob\nfrom retinaface_cov import RetinaFaceCoV\n\nthresh = 0.8\nmask_thresh = 0.2\nscales = [640, 1080]\n\ncount = 1\n\ngpuid = 0\n#detector = RetinaFaceCoV('./model/mnet_cov1', 0, gpuid, 'net3')\ndetector = RetinaFaceCoV('./model/mnet_cov2', 0, gpuid, 'net3l')\n\nimg = cv2.imread('n1.jpg')\nprint(img.shape)\nim_shape = img.shape\ntarget_size = scales[0]\nmax_size = scales[1]\nim_size_min = np.min(im_shape[0:2])\nim_size_max = np.max(im_shape[0:2])\n#im_scale = 1.0\n#if im_size_min>target_size or im_size_max>max_size:\nim_scale = float(target_size) / float(im_size_min)\n# prevent bigger axis from being more than max_size:\nif np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n\nprint('im_scale', im_scale)\n\nscales = [im_scale]\nflip = False\n\nfor c in range(count):\n faces, landmarks = detector.detect(img,\n thresh,\n scales=scales,\n do_flip=flip)\n\nif faces is not None:\n print('find', faces.shape[0], 'faces')\n for i in range(faces.shape[0]):\n #print('score', faces[i][4])\n face = faces[i]\n box = face[0:4].astype(np.int)\n mask = face[5]\n print(i, box, mask)\n #color = (255,0,0)\n if mask >= mask_thresh:\n color = (0, 0, 255)\n else:\n color = (0, 255, 0)\n cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)\n landmark5 = landmarks[i].astype(np.int)\n #print(landmark.shape)\n for l in range(landmark5.shape[0]):\n color = (255, 0, 0)\n cv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1, color, 2)\n\n filename = './cov_test.jpg'\n print('writing', filename)\n cv2.imwrite(filename, img)\n",
"import argparse\nimport os\nimport pickle\nimport timeit\nimport warnings\nfrom pathlib import Path\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport mxnet as mx\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom menpo.visualize.viewmatplotlib import sample_colours_from_colourmap\nfrom mxnet.gluon.data import Dataset, DataLoader\nfrom prettytable import PrettyTable\nfrom skimage import transform as trans\nfrom sklearn import preprocessing\nfrom sklearn.metrics import roc_curve, auc\nfrom tqdm import tqdm\n\nmatplotlib.use('Agg')\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser(description='do ijb test')\n# general\nparser.add_argument('--model-prefix', default='', help='path to load model.')\nparser.add_argument('--model-epoch', default=1, type=int, help='')\nparser.add_argument('--image-path', default='', type=str, help='')\nparser.add_argument('--result-dir', default='.', type=str, help='')\nparser.add_argument('--gpu', default='0', type=str, help='gpu id')\nparser.add_argument('--batch-size', default=128, type=int, help='')\nparser.add_argument('--job', default='insightface', type=str, help='job name')\nparser.add_argument('-es', '--emb-size', type=int, help='embedding size')\nparser.add_argument('--target',\n default='IJBC',\n type=str,\n help='target, set to IJBC or IJBB')\nargs = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\ntarget = args.target\nmodel_path = args.model_prefix\nimage_path = args.image_path\nresult_dir = args.result_dir\nepoch = args.model_epoch\nuse_norm_score = True # if Ture, TestMode(N1)\nuse_detector_score = True # if Ture, TestMode(D1)\nuse_flip_test = True # if Ture, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\n\n\nclass DatasetIJB(Dataset):\n def __init__(self, root, lines, align=True):\n self.src = np.array(\n [[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],\n [33.5493, 92.3655], [62.7299, 92.2041]],\n dtype=np.float32)\n self.src[:, 0] += 8.0\n self.lines = lines\n self.img_root = root\n self.align = align\n\n def __len__(self):\n return len(self.lines)\n\n def __getitem__(self, idx):\n each_line = self.lines[idx]\n name_lmk_score = each_line.strip().split(' ') # \"name lmk score\"\n img_name = os.path.join(self.img_root, name_lmk_score[0])\n img = cv2.imread(img_name)\n\n if self.align:\n landmark = np.array([float(x) for x in name_lmk_score[1:-1]],\n dtype=np.float32)\n landmark = landmark.reshape((5, 2))\n #\n assert landmark.shape[0] == 68 or landmark.shape[0] == 5\n assert landmark.shape[1] == 2\n if landmark.shape[0] == 68:\n landmark5 = np.zeros((5, 2), dtype=np.float32)\n landmark5[0] = (landmark[36] + landmark[39]) / 2\n landmark5[1] = (landmark[42] + landmark[45]) / 2\n landmark5[2] = landmark[30]\n landmark5[3] = landmark[48]\n landmark5[4] = landmark[54]\n else:\n landmark5 = landmark\n #\n tform = trans.SimilarityTransform()\n tform.estimate(landmark5, self.src)\n #\n M = tform.params[0:2, :]\n img = cv2.warpAffine(img, M, (112, 112), borderValue=0.0)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_flip = np.fliplr(img)\n img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB\n img_flip = np.transpose(img_flip, (2, 0, 1))\n input_blob = np.zeros((2, 3, 112, 112), dtype=np.uint8)\n input_blob[0] = img\n input_blob[1] = img_flip\n return mx.nd.array(input_blob)\n\n\ndef extract_parallel(prefix, epoch, dataset, batch_size, size):\n # init\n model_list = list()\n num_ctx = len(os.environ['CUDA_VISIBLE_DEVICES'].split(\",\"))\n num_iter = 
0\n feat_mat = mx.nd.zeros(shape=(len(dataset), 2 * size))\n\n def batchify_fn(data):\n return mx.nd.concat(*data, dim=0)\n\n data_loader = DataLoader(dataset,\n batch_size,\n last_batch='keep',\n num_workers=8,\n thread_pool=True,\n prefetch=16,\n batchify_fn=batchify_fn)\n symbol, arg_params, aux_params = mx.module.module.load_checkpoint(\n prefix, epoch)\n all_layers = symbol.get_internals()\n symbol = all_layers['fc1_output']\n\n # init model list\n for i in range(num_ctx):\n model = mx.mod.Module(symbol, context=mx.gpu(i), label_names=None)\n model.bind(for_training=False,\n data_shapes=[('data', (2 * batch_size, 3, 112, 112))])\n model.set_params(arg_params, aux_params)\n model_list.append(model)\n\n # extract parallel and async\n num_model = len(model_list)\n for image in tqdm(data_loader):\n data_batch = mx.io.DataBatch(data=(image, ))\n model_list[num_iter % num_model].forward(data_batch, is_train=False)\n feat = model_list[num_iter %\n num_model].get_outputs(merge_multi_context=True)[0]\n feat = mx.nd.L2Normalization(feat)\n feat = mx.nd.reshape(feat, (-1, size * 2))\n feat_mat[batch_size * num_iter:batch_size * num_iter +\n feat.shape[0], :] = feat.as_in_context(mx.cpu())\n num_iter += 1\n #if num_iter % 20 == 0:\n # mx.nd.waitall()\n return feat_mat.asnumpy()\n\n\n# 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[]\ndef divideIntoNstrand(listTemp, n):\n twoList = [[] for i in range(n)]\n for i, e in enumerate(listTemp):\n twoList[i % n].append(e)\n return twoList\n\n\ndef read_template_media_list(path):\n ijb_meta = pd.read_csv(path, sep=' ', header=None).values\n templates = ijb_meta[:, 1].astype(np.int)\n medias = ijb_meta[:, 2].astype(np.int)\n return templates, medias\n\n\ndef read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(np.int)\n t2 = pairs[:, 1].astype(np.int)\n label = pairs[:, 2].astype(np.int)\n return t1, t2, label\n\n\ndef read_image_feature(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats\n\n\ndef image2template_feature(img_feats=None, templates=None, medias=None):\n # ==========================================================\n # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]\n # 2. compute media feature.\n # 3. 
compute template feature.\n # ==========================================================\n unique_templates = np.unique(templates)\n template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))\n\n for count_template, uqt in enumerate(unique_templates):\n\n (ind_t, ) = np.where(templates == uqt)\n face_norm_feats = img_feats[ind_t]\n face_medias = medias[ind_t]\n unique_medias, unique_media_counts = np.unique(face_medias,\n return_counts=True)\n media_norm_feats = []\n for u, ct in zip(unique_medias, unique_media_counts):\n (ind_m, ) = np.where(face_medias == u)\n if ct == 1:\n media_norm_feats += [face_norm_feats[ind_m]]\n else: # image features from the same video will be aggregated into one feature\n media_norm_feats += [\n np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)\n ]\n media_norm_feats = np.array(media_norm_feats)\n # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))\n template_feats[count_template] = np.sum(media_norm_feats, axis=0)\n if count_template % 2000 == 0:\n print('Finish Calculating {} template features.'.format(\n count_template))\n # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True))\n template_norm_feats = sklearn.preprocessing.normalize(template_feats)\n # print(template_norm_feats.shape)\n return template_norm_feats, unique_templates\n\n\n# In[ ]:\n\n\ndef verification(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n # ==========================================================\n # Compute set-to-set Similarity Score.\n # ==========================================================\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template\n\n score = np.zeros((len(p1), )) # save cosine distance between pairs\n\n total_pairs = np.array(range(len(p1)))\n batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation\n sublists = [\n total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)\n ]\n total_sublists = len(sublists)\n for c, s in enumerate(sublists):\n feat1 = template_norm_feats[template2id[p1[s]]]\n feat2 = template_norm_feats[template2id[p2[s]]]\n similarity_score = np.sum(feat1 * feat2, -1)\n score[s] = similarity_score.flatten()\n if c % 10 == 0:\n print('Finish {}/{} pairs.'.format(c, total_sublists))\n return score\n\n\n# In[ ]:\ndef verification2(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template\n score = np.zeros((len(p1), )) # save cosine distance between pairs\n total_pairs = np.array(range(len(p1)))\n batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation\n sublists = [\n total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)\n ]\n total_sublists = len(sublists)\n for c, s in enumerate(sublists):\n feat1 = template_norm_feats[template2id[p1[s]]]\n feat2 = template_norm_feats[template2id[p2[s]]]\n similarity_score = np.sum(feat1 * feat2, -1)\n score[s] = similarity_score.flatten()\n if c % 10 == 0:\n print('Finish {}/{} pairs.'.format(c, total_sublists))\n return score\n\n\ndef read_score(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats\n\n\n# # Step1: Load Meta Data\n\nassert target == 'IJBC' 
or target == 'IJBB'\n\n# =============================================================\n# load image and template relationships for template feature embedding\n# tid --> template id, mid --> media id\n# format:\n# image_name tid mid\n# =============================================================\nstart = timeit.default_timer()\ntemplates, medias = read_template_media_list(\n os.path.join('%s/meta' % image_path,\n '%s_face_tid_mid.txt' % target.lower()))\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n\n# =============================================================\n# load template pairs for template-to-template verification\n# tid : template id, label : 1/0\n# format:\n# tid_1 tid_2 label\n# =============================================================\nstart = timeit.default_timer()\np1, p2, label = read_template_pair_list(\n os.path.join('%s/meta' % image_path,\n '%s_template_pair_label.txt' % target.lower()))\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n\n# # Step 2: Get Image Features\n\n# =============================================================\n# load image features\n# format:\n# img_feats: [image_num x feats_dim] (227630, 512)\n# =============================================================\nstart = timeit.default_timer()\nimg_path = '%s/loose_crop' % image_path\nimg_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())\nimg_list = open(img_list_path)\nfiles = img_list.readlines()\ndataset = DatasetIJB(root=img_path, lines=files, align=True)\nimg_feats = extract_parallel(args.model_prefix,\n args.model_epoch,\n dataset,\n args.batch_size,\n size=args.emb_size)\n\nfaceness_scores = []\nfor each_line in files:\n name_lmk_score = each_line.split()\n faceness_scores.append(name_lmk_score[-1])\n\nfaceness_scores = np.array(faceness_scores).astype(np.float32)\n\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\nprint('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],\n img_feats.shape[1]))\n\n# # Step3: Get Template Features\n\n# In[ ]:\n\n# =============================================================\n# compute template features from image features.\n# =============================================================\nstart = timeit.default_timer()\n# ==========================================================\n# Norm feature before aggregation into template feature?\n# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face).\n# ==========================================================\n# 1. FaceScore (Feature Norm)\n# 2. 
FaceScore (Detector)\n\nif use_flip_test:\n # concat --- F1\n # img_input_feats = img_feats\n # add --- F2\n img_input_feats = img_feats[:, 0:img_feats.shape[1] //\n 2] + img_feats[:, img_feats.shape[1] // 2:]\nelse:\n img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2]\n\nif use_norm_score:\n img_input_feats = img_input_feats\nelse:\n # normalise features to remove norm information\n img_input_feats = img_input_feats / np.sqrt(\n np.sum(img_input_feats**2, -1, keepdims=True))\n\nif use_detector_score:\n print(img_input_feats.shape, faceness_scores.shape)\n # img_input_feats = img_input_feats * np.matlib.repmat(faceness_scores[:,np.newaxis], 1, img_input_feats.shape[1])\n img_input_feats = img_input_feats * faceness_scores[:, np.newaxis]\nelse:\n img_input_feats = img_input_feats\n\ntemplate_norm_feats, unique_templates = image2template_feature(\n img_input_feats, templates, medias)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n\n# # Step 4: Get Template Similarity Scores\n\n# In[ ]:\n\n# =============================================================\n# compute verification scores between template pairs.\n# =============================================================\nstart = timeit.default_timer()\nscore = verification(template_norm_feats, unique_templates, p1, p2)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n\n# In[ ]:\n\nsave_path = result_dir + '/%s_result' % target\n\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n\nscore_save_file = os.path.join(save_path, \"%s.npy\" % job)\nnp.save(score_save_file, score)\n\n# # Step 5: Get ROC Curves and TPR@FPR Table\n\n# In[ ]:\n\nfiles = [score_save_file]\nmethods = []\nscores = []\nfor file in files:\n methods.append(Path(file).stem)\n scores.append(np.load(file))\n\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\n# x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)]\nx_labels = [10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,\n color=colours[method],\n lw=1,\n label=('[%s (AUC = %0.4f %%)]' %\n (method.split('-')[-1], roc_auc * 100)))\n tpr_fpr_row = []\n tpr_fpr_row.append(\"%s-%s\" % (method, target))\n for fpr_iter in np.arange(len(x_labels)):\n _, min_index = min(\n list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))\n # tpr_fpr_row.append('%.4f' % tpr[min_index])\n tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))\n tpr_fpr_table.add_row(tpr_fpr_row)\nplt.xlim([10**-6, 0.1])\nplt.ylim([0.3, 1.0])\nplt.grid(linestyle='--', linewidth=1)\nplt.xticks(x_labels)\nplt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))\nplt.xscale('log')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC on IJB')\nplt.legend(loc=\"lower right\")\n# plt.show()\nfig.savefig(os.path.join(save_path, '%s.pdf' % job))\nprint(tpr_fpr_table)\n",
"# coding: utf-8\nimport os\nimport mxnet as mx\nimport numpy as np\nimport math\nimport cv2\nfrom multiprocessing import Pool\nfrom itertools import repeat\nfrom itertools import izip\nfrom helper import nms, adjust_input, generate_bbox, detect_first_stage_warpper\n\n\nclass MtcnnDetector(object):\n \"\"\"\n Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Neural Networks\n see https://github.com/kpzhang93/MTCNN_face_detection_alignment\n this is a mxnet version\n \"\"\"\n def __init__(self,\n model_folder='.',\n minsize=20,\n threshold=[0.6, 0.7, 0.8],\n factor=0.709,\n num_worker=1,\n accurate_landmark=False,\n ctx=mx.cpu()):\n \"\"\"\n Initialize the detector\n\n Parameters:\n ----------\n model_folder : string\n path for the models\n minsize : float number\n minimal face to detect\n threshold : float number\n detect threshold for 3 stages\n factor: float number\n scale factor for image pyramid\n num_worker: int number\n number of processes we use for first stage\n accurate_landmark: bool\n use accurate landmark localization or not\n\n \"\"\"\n self.num_worker = num_worker\n self.accurate_landmark = accurate_landmark\n\n # load 4 models from folder\n models = ['det1', 'det2', 'det3', 'det4']\n models = [os.path.join(model_folder, f) for f in models]\n\n self.PNets = []\n for i in range(num_worker):\n workner_net = mx.model.FeedForward.load(models[0], 1, ctx=ctx)\n self.PNets.append(workner_net)\n\n #self.Pool = Pool(num_worker)\n\n self.RNet = mx.model.FeedForward.load(models[1], 1, ctx=ctx)\n self.ONet = mx.model.FeedForward.load(models[2], 1, ctx=ctx)\n self.LNet = mx.model.FeedForward.load(models[3], 1, ctx=ctx)\n\n self.minsize = float(minsize)\n self.factor = float(factor)\n self.threshold = threshold\n\n def convert_to_square(self, bbox):\n \"\"\"\n convert bbox to square\n\n Parameters:\n ----------\n bbox: numpy array , shape n x 5\n input bbox\n\n Returns:\n -------\n square bbox\n \"\"\"\n square_bbox = bbox.copy()\n\n h = bbox[:, 3] - bbox[:, 1] + 1\n w = bbox[:, 2] - bbox[:, 0] + 1\n max_side = np.maximum(h, w)\n square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5\n square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5\n square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1\n square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1\n return square_bbox\n\n def calibrate_box(self, bbox, reg):\n \"\"\"\n calibrate bboxes\n\n Parameters:\n ----------\n bbox: numpy array, shape n x 5\n input bboxes\n reg: numpy array, shape n x 4\n bboxex adjustment\n\n Returns:\n -------\n bboxes after refinement\n\n \"\"\"\n w = bbox[:, 2] - bbox[:, 0] + 1\n w = np.expand_dims(w, 1)\n h = bbox[:, 3] - bbox[:, 1] + 1\n h = np.expand_dims(h, 1)\n reg_m = np.hstack([w, h, w, h])\n aug = reg_m * reg\n bbox[:, 0:4] = bbox[:, 0:4] + aug\n return bbox\n\n def pad(self, bboxes, w, h):\n \"\"\"\n pad the the bboxes, alse restrict the size of it\n\n Parameters:\n ----------\n bboxes: numpy array, n x 5\n input bboxes\n w: float number\n width of the input image\n h: float number\n height of the input image\n Returns :\n ------s\n dy, dx : numpy array, n x 1\n start point of the bbox in target image\n edy, edx : numpy array, n x 1\n end point of the bbox in target image\n y, x : numpy array, n x 1\n start point of the bbox in original image\n ex, ex : numpy array, n x 1\n end point of the bbox in original image\n tmph, tmpw: numpy array, n x 1\n height and width of the bbox\n\n \"\"\"\n tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:,\n 3] - bboxes[:,\n 1] + 1\n 
num_box = bboxes.shape[0]\n\n dx, dy = np.zeros((num_box, )), np.zeros((num_box, ))\n edx, edy = tmpw.copy() - 1, tmph.copy() - 1\n\n x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]\n\n tmp_index = np.where(ex > w - 1)\n edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]\n ex[tmp_index] = w - 1\n\n tmp_index = np.where(ey > h - 1)\n edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]\n ey[tmp_index] = h - 1\n\n tmp_index = np.where(x < 0)\n dx[tmp_index] = 0 - x[tmp_index]\n x[tmp_index] = 0\n\n tmp_index = np.where(y < 0)\n dy[tmp_index] = 0 - y[tmp_index]\n y[tmp_index] = 0\n\n return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]\n return_list = [item.astype(np.int32) for item in return_list]\n\n return return_list\n\n def slice_index(self, number):\n \"\"\"\n slice the index into (n,n,m), m < n\n Parameters:\n ----------\n number: int number\n number\n \"\"\"\n def chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n num_list = range(number)\n return list(chunks(num_list, self.num_worker))\n\n def detect_face_limited(self, img, det_type=2):\n height, width, _ = img.shape\n if det_type >= 2:\n total_boxes = np.array(\n [[0.0, 0.0, img.shape[1], img.shape[0], 0.9]],\n dtype=np.float32)\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, tmpw,\n tmph] = self.pad(total_boxes, width, height)\n # (3, 24, 24) is the input shape for RNet\n input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n tmp[dy[i]:edy[i] + 1,\n dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,\n x[i]:ex[i] + 1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))\n\n output = self.RNet.predict(input_buf)\n\n # filter the total_boxes with threshold\n passed = np.where(output[1][:, 1] > self.threshold[1])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[1][passed, 1].reshape((-1, ))\n reg = output[0][passed]\n\n # nms\n pick = nms(total_boxes, 0.7, 'Union')\n total_boxes = total_boxes[pick]\n total_boxes = self.calibrate_box(total_boxes, reg[pick])\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n else:\n total_boxes = np.array(\n [[0.0, 0.0, img.shape[1], img.shape[0], 0.9]],\n dtype=np.float32)\n num_box = total_boxes.shape[0]\n [dy, edy, dx, edx, y, ey, x, ex, tmpw,\n tmph] = self.pad(total_boxes, width, height)\n # (3, 48, 48) is the input shape for ONet\n input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)\n tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,\n x[i]:ex[i] + 1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))\n\n output = self.ONet.predict(input_buf)\n #print(output[2])\n\n # filter the total_boxes with threshold\n passed = np.where(output[2][:, 1] > self.threshold[2])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[2][passed, 1].reshape((-1, ))\n reg = output[1][passed]\n points = output[0][passed]\n\n # compute landmark points\n bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n points[:, 0:5] = np.expand_dims(\n total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]\n points[:, 5:10] = 
np.expand_dims(\n total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]\n\n # nms\n total_boxes = self.calibrate_box(total_boxes, reg)\n pick = nms(total_boxes, 0.7, 'Min')\n total_boxes = total_boxes[pick]\n points = points[pick]\n\n if not self.accurate_landmark:\n return total_boxes, points\n\n #############################################\n # extended stage\n #############################################\n num_box = total_boxes.shape[0]\n patchw = np.maximum(total_boxes[:, 2] - total_boxes[:, 0] + 1,\n total_boxes[:, 3] - total_boxes[:, 1] + 1)\n patchw = np.round(patchw * 0.25)\n\n # make it even\n patchw[np.where(np.mod(patchw, 2) == 1)] += 1\n\n input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)\n for i in range(5):\n x, y = points[:, i], points[:, i + 5]\n x, y = np.round(x - 0.5 * patchw), np.round(y - 0.5 * patchw)\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(\n np.vstack([x, y, x + patchw - 1, y + patchw - 1]).T, width,\n height)\n for j in range(num_box):\n tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)\n tmpim[dy[j]:edy[j] + 1,\n dx[j]:edx[j] + 1, :] = img[y[j]:ey[j] + 1,\n x[j]:ex[j] + 1, :]\n input_buf[j, i * 3:i * 3 + 3, :, :] = adjust_input(\n cv2.resize(tmpim, (24, 24)))\n\n output = self.LNet.predict(input_buf)\n\n pointx = np.zeros((num_box, 5))\n pointy = np.zeros((num_box, 5))\n\n for k in range(5):\n # do not make a large movement\n tmp_index = np.where(np.abs(output[k] - 0.5) > 0.35)\n output[k][tmp_index[0]] = 0.5\n\n pointx[:, k] = np.round(points[:, k] -\n 0.5 * patchw) + output[k][:, 0] * patchw\n pointy[:, k] = np.round(points[:, k + 5] -\n 0.5 * patchw) + output[k][:, 1] * patchw\n\n points = np.hstack([pointx, pointy])\n points = points.astype(np.int32)\n\n return total_boxes, points\n\n def detect_face(self, img, det_type=0):\n \"\"\"\n detect face over img\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n Retures:\n -------\n bboxes: numpy array, n x 5 (x1,y2,x2,y2,score)\n bboxes\n points: numpy array, n x 10 (x1, x2 ... 
x5, y1, y2 ..y5)\n landmarks\n \"\"\"\n\n # check input\n height, width, _ = img.shape\n if det_type == 0:\n MIN_DET_SIZE = 12\n\n if img is None:\n return None\n\n # only works for color image\n if len(img.shape) != 3:\n return None\n\n # detected boxes\n total_boxes = []\n\n minl = min(height, width)\n\n # get all the valid scales\n scales = []\n m = MIN_DET_SIZE / self.minsize\n minl *= m\n factor_count = 0\n while minl > MIN_DET_SIZE:\n scales.append(m * self.factor**factor_count)\n minl *= self.factor\n factor_count += 1\n\n #############################################\n # first stage\n #############################################\n #for scale in scales:\n # return_boxes = self.detect_first_stage(img, scale, 0)\n # if return_boxes is not None:\n # total_boxes.append(return_boxes)\n\n sliced_index = self.slice_index(len(scales))\n total_boxes = []\n for batch in sliced_index:\n #local_boxes = self.Pool.map( detect_first_stage_warpper, \\\n # izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )\n local_boxes = map( detect_first_stage_warpper, \\\n izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )\n total_boxes.extend(local_boxes)\n\n # remove the Nones\n total_boxes = [i for i in total_boxes if i is not None]\n\n if len(total_boxes) == 0:\n return None\n\n total_boxes = np.vstack(total_boxes)\n\n if total_boxes.size == 0:\n return None\n\n # merge the detection from first stage\n pick = nms(total_boxes[:, 0:5], 0.7, 'Union')\n total_boxes = total_boxes[pick]\n\n bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n\n # refine the bboxes\n total_boxes = np.vstack([\n total_boxes[:, 0] + total_boxes[:, 5] * bbw,\n total_boxes[:, 1] + total_boxes[:, 6] * bbh,\n total_boxes[:, 2] + total_boxes[:, 7] * bbw,\n total_boxes[:, 3] + total_boxes[:, 8] * bbh, total_boxes[:, 4]\n ])\n\n total_boxes = total_boxes.T\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n else:\n total_boxes = np.array(\n [[0.0, 0.0, img.shape[1], img.shape[0], 0.9]],\n dtype=np.float32)\n\n #############################################\n # second stage\n #############################################\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, tmpw,\n tmph] = self.pad(total_boxes, width, height)\n # (3, 24, 24) is the input shape for RNet\n input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,\n x[i]:ex[i] + 1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))\n\n output = self.RNet.predict(input_buf)\n\n # filter the total_boxes with threshold\n passed = np.where(output[1][:, 1] > self.threshold[1])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[1][passed, 1].reshape((-1, ))\n reg = output[0][passed]\n\n # nms\n pick = nms(total_boxes, 0.7, 'Union')\n total_boxes = total_boxes[pick]\n total_boxes = self.calibrate_box(total_boxes, reg[pick])\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n\n #############################################\n # third stage\n #############################################\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, 
tmpw,\n tmph] = self.pad(total_boxes, width, height)\n # (3, 48, 48) is the input shape for ONet\n input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)\n tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = img[y[i]:ey[i] + 1,\n x[i]:ex[i] + 1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))\n\n output = self.ONet.predict(input_buf)\n\n # filter the total_boxes with threshold\n passed = np.where(output[2][:, 1] > self.threshold[2])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[2][passed, 1].reshape((-1, ))\n reg = output[1][passed]\n points = output[0][passed]\n\n # compute landmark points\n bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n points[:, 0:5] = np.expand_dims(\n total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]\n points[:, 5:10] = np.expand_dims(\n total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]\n\n # nms\n total_boxes = self.calibrate_box(total_boxes, reg)\n pick = nms(total_boxes, 0.7, 'Min')\n total_boxes = total_boxes[pick]\n points = points[pick]\n\n if not self.accurate_landmark:\n return total_boxes, points\n\n #############################################\n # extended stage\n #############################################\n num_box = total_boxes.shape[0]\n patchw = np.maximum(total_boxes[:, 2] - total_boxes[:, 0] + 1,\n total_boxes[:, 3] - total_boxes[:, 1] + 1)\n patchw = np.round(patchw * 0.25)\n\n # make it even\n patchw[np.where(np.mod(patchw, 2) == 1)] += 1\n\n input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)\n for i in range(5):\n x, y = points[:, i], points[:, i + 5]\n x, y = np.round(x - 0.5 * patchw), np.round(y - 0.5 * patchw)\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(\n np.vstack([x, y, x + patchw - 1, y + patchw - 1]).T, width,\n height)\n for j in range(num_box):\n tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)\n tmpim[dy[j]:edy[j] + 1,\n dx[j]:edx[j] + 1, :] = img[y[j]:ey[j] + 1,\n x[j]:ex[j] + 1, :]\n input_buf[j, i * 3:i * 3 + 3, :, :] = adjust_input(\n cv2.resize(tmpim, (24, 24)))\n\n output = self.LNet.predict(input_buf)\n\n pointx = np.zeros((num_box, 5))\n pointy = np.zeros((num_box, 5))\n\n for k in range(5):\n # do not make a large movement\n tmp_index = np.where(np.abs(output[k] - 0.5) > 0.35)\n output[k][tmp_index[0]] = 0.5\n\n pointx[:, k] = np.round(points[:, k] -\n 0.5 * patchw) + output[k][:, 0] * patchw\n pointy[:, k] = np.round(points[:, k + 5] -\n 0.5 * patchw) + output[k][:, 1] * patchw\n\n points = np.hstack([pointx, pointy])\n points = points.astype(np.int32)\n\n return total_boxes, points\n\n def list2colmatrix(self, pts_list):\n \"\"\"\n convert list to column matrix\n Parameters:\n ----------\n pts_list:\n input list\n Retures:\n -------\n colMat: \n\n \"\"\"\n assert len(pts_list) > 0\n colMat = []\n for i in range(len(pts_list)):\n colMat.append(pts_list[i][0])\n colMat.append(pts_list[i][1])\n colMat = np.matrix(colMat).transpose()\n return colMat\n\n def find_tfrom_between_shapes(self, from_shape, to_shape):\n \"\"\"\n find transform between shapes\n Parameters:\n ----------\n from_shape: \n to_shape: \n Retures:\n -------\n tran_m:\n tran_b:\n \"\"\"\n assert from_shape.shape[0] == to_shape.shape[\n 0] and from_shape.shape[0] % 2 == 0\n\n sigma_from = 0.0\n sigma_to = 0.0\n cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])\n\n # compute the mean 
and cov\n from_shape_points = from_shape.reshape(from_shape.shape[0] / 2, 2)\n to_shape_points = to_shape.reshape(to_shape.shape[0] / 2, 2)\n mean_from = from_shape_points.mean(axis=0)\n mean_to = to_shape_points.mean(axis=0)\n\n for i in range(from_shape_points.shape[0]):\n temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)\n sigma_from += temp_dis * temp_dis\n temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)\n sigma_to += temp_dis * temp_dis\n cov += (to_shape_points[i].transpose() -\n mean_to.transpose()) * (from_shape_points[i] - mean_from)\n\n sigma_from = sigma_from / to_shape_points.shape[0]\n sigma_to = sigma_to / to_shape_points.shape[0]\n cov = cov / to_shape_points.shape[0]\n\n # compute the affine matrix\n s = np.matrix([[1.0, 0.0], [0.0, 1.0]])\n u, d, vt = np.linalg.svd(cov)\n\n if np.linalg.det(cov) < 0:\n if d[1] < d[0]:\n s[1, 1] = -1\n else:\n s[0, 0] = -1\n r = u * s * vt\n c = 1.0\n if sigma_from != 0:\n c = 1.0 / sigma_from * np.trace(np.diag(d) * s)\n\n tran_b = mean_to.transpose() - c * r * mean_from.transpose()\n tran_m = c * r\n\n return tran_m, tran_b\n\n def extract_image_chips(self, img, points, desired_size=256, padding=0):\n \"\"\"\n crop and align face\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ..y5)\n desired_size: default 256\n padding: default 0\n Retures:\n -------\n crop_imgs: list, n\n cropped and aligned faces \n \"\"\"\n crop_imgs = []\n for p in points:\n shape = []\n for k in range(len(p) / 2):\n shape.append(p[k])\n shape.append(p[k + 5])\n\n if padding > 0:\n padding = padding\n else:\n padding = 0\n # average positions of face points\n mean_face_shape_x = [\n 0.224152, 0.75610125, 0.490127, 0.254149, 0.726104\n ]\n mean_face_shape_y = [\n 0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233\n ]\n\n from_points = []\n to_points = []\n\n for i in range(len(shape) / 2):\n x = (padding + mean_face_shape_x[i]) / (2 * padding +\n 1) * desired_size\n y = (padding + mean_face_shape_y[i]) / (2 * padding +\n 1) * desired_size\n to_points.append([x, y])\n from_points.append([shape[2 * i], shape[2 * i + 1]])\n\n # convert the points to Mat\n from_mat = self.list2colmatrix(from_points)\n to_mat = self.list2colmatrix(to_points)\n\n # compute the similar transfrom\n tran_m, tran_b = self.find_tfrom_between_shapes(from_mat, to_mat)\n\n probe_vec = np.matrix([1.0, 0.0]).transpose()\n probe_vec = tran_m * probe_vec\n\n scale = np.linalg.norm(probe_vec)\n angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0,\n 0])\n\n from_center = [(shape[0] + shape[2]) / 2.0,\n (shape[1] + shape[3]) / 2.0]\n to_center = [0, 0]\n to_center[1] = desired_size * 0.4\n to_center[0] = desired_size * 0.5\n\n ex = to_center[0] - from_center[0]\n ey = to_center[1] - from_center[1]\n\n rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]),\n -1 * angle, scale)\n rot_mat[0][2] += ex\n rot_mat[1][2] += ey\n\n chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))\n crop_imgs.append(chips)\n\n return crop_imgs\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\n#import tensorflow as tf\nimport numpy as np\nimport mxnet as mx\nimport random\nimport cv2\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\nfrom mtcnn_detector import MtcnnDetector\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src', 'common'))\nimport face_image\nimport face_preprocess\n\n\ndef do_flip(data):\n for idx in range(data.shape[0]):\n data[idx, :, :] = np.fliplr(data[idx, :, :])\n\n\ndef get_model(ctx, image_size, model_str, layer):\n _vec = model_str.split(',')\n assert len(_vec) == 2\n prefix = _vec[0]\n epoch = int(_vec[1])\n print('loading', prefix, epoch)\n sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n all_layers = sym.get_internals()\n sym = all_layers[layer + '_output']\n model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)\n #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])\n model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])\n model.set_params(arg_params, aux_params)\n return model\n\n\nclass FaceModel:\n def __init__(self, args):\n self.args = args\n if args.gpu >= 0:\n ctx = mx.gpu(args.gpu)\n else:\n ctx = mx.cpu()\n _vec = args.image_size.split(',')\n assert len(_vec) == 2\n image_size = (int(_vec[0]), int(_vec[1]))\n self.model = None\n if len(args.model) > 0:\n self.model = get_model(ctx, image_size, args.model, 'fc1')\n\n self.det_minsize = 50\n self.det_threshold = [0.6, 0.7, 0.8]\n #self.det_factor = 0.9\n self.image_size = image_size\n mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')\n if args.det == 0:\n detector = MtcnnDetector(model_folder=mtcnn_path,\n ctx=ctx,\n num_worker=1,\n accurate_landmark=True,\n threshold=self.det_threshold)\n else:\n detector = MtcnnDetector(model_folder=mtcnn_path,\n ctx=ctx,\n num_worker=1,\n accurate_landmark=True,\n threshold=[0.0, 0.0, 0.2])\n self.detector = detector\n\n def get_input(self, face_img):\n ret = self.detector.detect_face(face_img, det_type=self.args.det)\n if ret is None:\n return None\n bbox, points = ret\n if bbox.shape[0] == 0:\n return None\n bbox = bbox[0, 0:4]\n points = points[0, :].reshape((2, 5)).T\n #print(bbox)\n #print(points)\n nimg = face_preprocess.preprocess(face_img,\n bbox,\n points,\n image_size='112,112')\n nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)\n aligned = np.transpose(nimg, (2, 0, 1))\n input_blob = np.expand_dims(aligned, axis=0)\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data, ))\n return db\n\n def get_ga(self, data):\n self.model.forward(data, is_train=False)\n ret = self.model.get_outputs()[0].asnumpy()\n g = ret[:, 0:2].flatten()\n gender = np.argmax(g)\n a = ret[:, 2:202].reshape((100, 2))\n a = np.argmax(a, axis=1)\n age = int(sum(a))\n\n return gender, age\n"
] |
[
[
"numpy.round",
"numpy.max",
"numpy.min"
],
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.flipud",
"numpy.mean",
"numpy.where",
"pandas.read_csv",
"numpy.unique",
"numpy.fliplr",
"numpy.save",
"numpy.load",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xscale",
"sklearn.metrics.roc_curve",
"numpy.transpose",
"sklearn.metrics.auc",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlim",
"sklearn.preprocessing.normalize",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks"
],
[
"numpy.matrix",
"numpy.hstack",
"numpy.linalg.svd",
"numpy.expand_dims",
"numpy.maximum",
"numpy.abs",
"numpy.diag",
"numpy.vstack",
"numpy.linalg.norm",
"numpy.round",
"numpy.linalg.det",
"numpy.mod",
"numpy.array",
"numpy.where",
"numpy.zeros"
],
[
"numpy.fliplr",
"numpy.expand_dims",
"numpy.argmax",
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
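In the IJB evaluation script above, `DatasetIJB.__getitem__` aligns each face by estimating a similarity transform from the five detected landmarks to a fixed template and warping the crop to 112x112. A standalone sketch of that alignment step, using the same template constants as the snippet (the landmark array passed in would come from a face detector and is not shown here):

```python
# Standalone sketch of the 5-point alignment used in DatasetIJB.__getitem__ above.
import cv2
import numpy as np
from skimage import transform as trans

# Canonical 5-point landmark template for 112x112 crops, shifted by +8 in x as in the snippet.
SRC = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                [33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32)
SRC[:, 0] += 8.0

def align_face(img, landmark5):
    """Warp img so that the five detected landmarks (5x2 array) match the template."""
    tform = trans.SimilarityTransform()
    tform.estimate(landmark5, SRC)
    M = tform.params[0:2, :]                      # 2x3 affine part of the similarity transform
    return cv2.warpAffine(img, M, (112, 112), borderValue=0.0)
```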
cbs228/nbtschema
|
[
"1d040229ee608bc5b22d3ec2781ce1a1e173b4a4"
] |
[
"nbtschematic/schematic.py"
] |
[
"# coding=UTF-8\n\"\"\" Defines an nbtlib schema for schematic files \"\"\"\nfrom typing import Tuple\nimport enum\nimport numpy as np\nimport nbtlib as nbt\n\n\nclass Entity(nbt.CompoundSchema):\n \"\"\"\n Entities describe objects which are not anchored to blocks, like mobs.\n \"\"\"\n schema = {\n 'id': nbt.String,\n 'Pos': nbt.List[nbt.Double],\n 'Motion': nbt.List[nbt.Double],\n 'Rotation': nbt.List[nbt.Float],\n 'FallDistance': nbt.Float,\n 'Fire': nbt.Short,\n 'Air': nbt.Short,\n 'OnGround': nbt.Byte,\n 'NoGravity': nbt.Byte,\n 'Invulnerable': nbt.Byte,\n 'PortalCooldown': nbt.Int,\n 'UUIDMost': nbt.Long,\n 'UUIDLeast': nbt.Long\n }\n\n\nclass BlockEntity(nbt.CompoundSchema):\n \"\"\"\n Block entities contain additional metadata for placed blocks\n \"\"\"\n schema = {\n 'id': nbt.String,\n 'x': nbt.Int,\n 'y': nbt.Int,\n 'z': nbt.Int\n }\n\n\nclass Schematic(nbt.CompoundSchema):\n \"\"\"\n Schematic files represent a small section of a level\n\n Key fields include:\n\n - `Blocks`: A dense array of block IDs at each coordinate. Sorted by block\n height (bottop to top), then length (``Z``), then width (``X``). The\n index of the block at\n ``blocks[X, Y, Z]`` is ``Y * length * width + Z * width + X.``\n - `Data`: A dense array of data values for each block. This field shares\n sizes and indexing with `Blocks`.\n - `Entities`: A list of Compound tags which are entities.\n - `TileEntities`: A list of Compound tags which are block entities, which\n were previously known as tile entities.\n \"\"\"\n schema = {\n 'Height': nbt.Short,\n 'Length': nbt.Short,\n 'Width': nbt.Short,\n 'Materials': nbt.String,\n 'Blocks': nbt.ByteArray,\n 'Data': nbt.ByteArray,\n 'Entities': nbt.List[Entity],\n 'TileEntities': nbt.List[BlockEntity]\n }\n\n\nclass SchematicFileRoot(nbt.CompoundSchema):\n \"\"\"\n Describes the root element of a schematic file\n \"\"\"\n schema = {\n 'Schematic': Schematic\n }\n\n\nclass Material(enum.Enum):\n \"\"\"\n Block Materials\n\n This enumeration indicates whether the block IDs in this schematic\n are to be taken from `Classic`, `Pocket`, or `Alpha` versions.\n Versions beyond `Alpha`—including `Beta` and stable builds—share a\n compatible set of block IDs. `Alpha` is the default for all\n newly-created schematics.\n \"\"\"\n Classic = \"Classic\"\n Pocket = \"Pocket\"\n Alpha = \"Alpha\"\n\n\nclass SchematicFile(nbt.File, SchematicFileRoot):\n \"\"\"\n Schematic File\n\n Schematic files are commonly used by world editors such as MCEdit,\n Schematica, and WorldEdit. They are intended to represent a small\n section of a level for the purposes of interchange or permanent\n storage.\n\n The origin of the schematic is always ``X = 0``, ``Y = 0``, ``Z = 0``.\n All positions for blocks, entities, and block entities are transformed\n into the schematic's coordinate system.\n\n Schematic coordinates map directly to data indices. Blocks and block\n data are stored in contiguous numpy byte arrays. The first dimension\n in these arrays is height (``Y``). 
The second and third dimensions\n are ``Z`` and ``X``, respectively.\n \"\"\"\n\n def __init__(self, shape: Tuple[int, int, int] = (1, 1, 1),\n blocks=None, data=None):\n super().__init__({'Schematic': {}})\n self.gzipped = True\n self.byteorder = 'big'\n self.root_name = 'Schematic'\n self.material = Material.Alpha\n self.resize(shape)\n if blocks is not None:\n self.blocks = blocks\n if data is not None:\n self.data = data\n self.entities = nbt.List()\n self.blockentities = nbt.List()\n\n def resize(self, shape: Tuple[int, int, int]) -> None:\n \"\"\"\n Resize the schematic file\n\n Resizing the schematic clears the blocks and data\n\n :param shape: New dimensions for the schematic, as a tuple of\n ``(n_y, n_z, n_x)``.\n \"\"\"\n\n self.root['Height'] = nbt.Short(shape[0])\n self.root['Length'] = nbt.Short(shape[1])\n self.root['Width'] = nbt.Short(shape[2])\n self.blocks = np.zeros(shape, dtype=np.uint8, order='C')\n self.data = np.zeros(shape, dtype=np.uint8, order='C')\n\n @classmethod\n def load(cls, filename, gzipped=True, byteorder='big') -> 'SchematicFile':\n \"\"\"\n Load a schematic file from disk\n\n If the schematic file is already loaded into memory, use the\n :meth:`~from_fileobj()` method instead.\n\n :param filename: Path to a schematic file on disk.\n :param gzipped: Schematic files are always stored gzipped. This option\n defaults to True\n :param byteorder: Schematic files are always stored in big endian\n number format.\n :return: Loaded schematic\n \"\"\"\n return super().load(filename=filename,\n gzipped=gzipped, byteorder=byteorder)\n\n @property\n def material(self) -> Material:\n \"\"\"\n Block materials used by this schematic\n\n This enumeration indicates whether the block IDs in this schematic\n are to be taken from `Classic`, `Pocket`, or `Alpha` versions.\n Versions beyond `Alpha`—including `Beta` and stable builds—share a\n compatible set of block IDs. `Alpha` is the default for all\n newly-created schematics.\n\n :return: Enumerated Material type\n \"\"\"\n return Material[self.root['Materials']]\n\n @material.setter\n def material(self, value: Material = Material.Alpha):\n self.root['Materials'] = value.value\n\n @property\n def shape(self) -> Tuple[nbt.Short, nbt.Short, nbt.Short]:\n \"\"\" Schematic shape\n\n :return: Shape of the schematic, as a tuple of ``Y``, ``Z``, and ``X``\n size.\n \"\"\"\n return self.root['Height'], self.root['Length'], self.root['Width']\n\n @property\n def blocks(self) -> np.array:\n \"\"\" Block IDs\n\n Entries in this array are the block ID at each coordinate of\n the schematic. This method returns an nbtlib type, but you may\n coerce it to a pure numpy array with ``numpy.asarray()``\n\n :return: 3D array which contains a view into the block IDs.\n Array indices are in ``Y``, ``Z``, ``X`` order.\n \"\"\"\n return self.root['Blocks'].reshape(self.shape, order='C').view()\n\n @blocks.setter\n def blocks(self, value):\n if not np.all(value.shape == self.shape):\n raise ValueError(f\"Input shape {value.shape} does not match \"\n f\"schematic shape {self.shape}\")\n\n self.root['Blocks'] = nbt.ByteArray(value.reshape(-1))\n\n @property\n def data(self) -> nbt.ByteArray:\n \"\"\" Block data\n\n Entries in this array are the block data values at each\n coordinate of the schematic. Only the lower four bits\n are used. 
This method returns an nbtlib type, but you may\n coerce it to a pure numpy array with ``numpy.asarray()``\n\n :return: 3D array which contains a view into the block data.\n Array indices are in ``Y``, ``Z``, ``X`` order.\n \"\"\"\n return self.root['Data'].reshape(self.shape, order='C').view()\n\n @data.setter\n def data(self, value):\n if not np.all(value.shape == self.shape):\n raise ValueError(f\"Input shape {value.shape} does not match \"\n f\"schematic shape {self.shape}\")\n\n self.root['Data'] = nbt.ByteArray(value.reshape(-1))\n\n @property\n def entities(self) -> nbt.List[nbt.Compound]:\n \"\"\" Entities\n\n Each Entity in the schematic is a Compound tag. The schema only\n represents keys which are common to all Entities.\n\n :return: List of entities\n \"\"\"\n return self.root['Entities']\n\n @entities.setter\n def entities(self, value: nbt.List[nbt.Compound]):\n self.root['Entities'] = value\n\n @property\n def blockentities(self) -> nbt.List[nbt.Compound]:\n \"\"\" Block Entities\n\n Block entities were previously known as \"tile entities\" and\n contain extended attributes for placed blocks. The schematic\n only enforces keys which are common to all entities, including\n a position and an ID.\n\n :return: List of block entities\n \"\"\"\n return self.root['TileEntities']\n\n @blockentities.setter\n def blockentities(self, value: nbt.List[nbt.Compound]):\n self.root['TileEntities'] = value\n\n def __enter__(self):\n return self.root\n"
] |
[
[
"numpy.all",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
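The `SchematicFile` class above exposes block IDs and block data as numpy-backed views indexed in `(Y, Z, X)` order. A minimal usage sketch, assuming the class is importable from the module path listed in the row and that persistence goes through nbtlib's `File.save`; the output filename is illustrative.

```python
# Hedged usage sketch for the SchematicFile class in the row above.
from nbtschematic.schematic import SchematicFile  # import path assumed from the row's file_path

sf = SchematicFile(shape=(1, 2, 3))   # (Y, Z, X): height=1, length=2, width=3
sf.blocks[0, 1, 2] = 4                # block ID at Y=0, Z=1, X=2 (writes through the view)
sf.data[0, 1, 2] = 2                  # matching 4-bit data value
sf.save('example.schematic')          # nbt.File.save writes the gzipped, big-endian payload
```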
lime-j/YTMT-Strategy-1
|
[
"aacc38c4e61b91e187cac81aa95500e0422d4d0f"
] |
[
"models/opt_ytmt_model_sirs.py"
] |
[
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport os\nimport numpy as np\nfrom collections import OrderedDict\n\nimport util.util as util\nimport util.index as index\nimport models.networks as networks\nimport models.losses_opt as losses\nfrom models import arch\n\nfrom .base_model import BaseModel\nfrom PIL import Image\nfrom os.path import join\n\n\ndef tensor2im(image_tensor, imtype=np.uint8):\n image_tensor = image_tensor.detach()\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = np.clip(image_numpy, 0, 1)\n if image_numpy.shape[0] == 1:\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0\n # image_numpy = image_numpy.astype(imtype)\n return image_numpy\n\n\nclass EdgeMap(nn.Module):\n def __init__(self, scale=1):\n super(EdgeMap, self).__init__()\n self.scale = scale\n self.requires_grad = False\n\n def forward(self, img):\n img = img / self.scale\n\n N, C, H, W = img.shape\n gradX = torch.zeros(N, 1, H, W, dtype=img.dtype, device=img.device)\n gradY = torch.zeros(N, 1, H, W, dtype=img.dtype, device=img.device)\n\n gradx = (img[..., 1:, :] - img[..., :-1, :]).abs().sum(dim=1, keepdim=True)\n grady = (img[..., 1:] - img[..., :-1]).abs().sum(dim=1, keepdim=True)\n\n gradX[..., :-1, :] += gradx\n gradX[..., 1:, :] += gradx\n gradX[..., 1:-1, :] /= 2\n\n gradY[..., :-1] += grady\n gradY[..., 1:] += grady\n gradY[..., 1:-1] /= 2\n\n # edge = (gradX + gradY) / 2\n edge = (gradX + gradY)\n\n return edge\n\n\nclass YTMTNetBase(BaseModel):\n def _init_optimizer(self, optimizers):\n self.optimizers = optimizers\n for optimizer in self.optimizers:\n util.set_opt_param(optimizer, 'initial_lr', self.opt.lr)\n util.set_opt_param(optimizer, 'weight_decay', self.opt.wd)\n\n def set_input(self, data, mode='train'):\n target_t = None\n target_r = None\n data_name = None\n identity = False\n mode = mode.lower()\n if mode == 'train':\n input, target_t, target_r = data['input'], data['target_t'], data['target_r']\n elif mode == 'eval':\n input, target_t, target_r, data_name = data['input'], data['target_t'], data['target_r'], data['fn']\n elif mode == 'test':\n input, data_name = data['input'], data['fn']\n else:\n raise NotImplementedError('Mode [%s] is not implemented' % mode)\n\n if len(self.gpu_ids) > 0: # transfer data into gpu\n input = input.to(device=self.gpu_ids[0])\n if target_t is not None:\n target_t = target_t.to(device=self.gpu_ids[0])\n if target_r is not None:\n target_r = target_r.to(device=self.gpu_ids[0])\n\n self.input = input\n self.identity = identity\n self.input_edge = self.edge_map(self.input)\n self.target_t = target_t\n self.target_r = target_r\n self.data_name = data_name\n\n self.issyn = False if 'real' in data else True\n self.aligned = False if 'unaligned' in data else True\n\n if target_t is not None:\n self.target_edge = self.edge_map(self.target_t)\n\n def eval(self, data, savedir=None, suffix=None, pieapp=None):\n # only the 1st input of the whole minibatch would be evaluated\n self._eval()\n self.set_input(data, 'eval')\n\n with torch.no_grad():\n self.forward()\n\n output_i = tensor2im(self.output_i)\n output_j = tensor2im(self.output_j)\n target = tensor2im(self.target_t)\n target_r = tensor2im(self.target_r)\n\n if self.aligned:\n res = index.quality_assess(output_i, target)\n # res = index.quality_assess(output_j, target_r)\n else:\n res = {}\n\n if savedir is not None:\n if self.data_name is not None:\n name = 
os.path.splitext(os.path.basename(self.data_name[0]))[0]\n savedir = join(savedir, suffix, name)\n os.makedirs(savedir, exist_ok=True)\n Image.fromarray(output_i.astype(np.uint8)).save(\n join(savedir, '{}_t.png'.format(self.opt.name)))\n Image.fromarray(output_j.astype(np.uint8)).save(\n join(savedir, '{}_r.png'.format(self.opt.name)))\n Image.fromarray(target.astype(np.uint8)).save(join(savedir, 't_label.png'))\n Image.fromarray(target_r.astype(np.uint8)).save(join(savedir, 'r_label.png'))\n Image.fromarray(tensor2im(self.input).astype(np.uint8)).save(join(savedir, 'm_input.png'))\n else:\n if not os.path.exists(join(savedir, 'transmission_layer')):\n os.makedirs(join(savedir, 'transmission_layer'))\n os.makedirs(join(savedir, 'blended'))\n Image.fromarray(target.astype(np.uint8)).save(\n join(savedir, 'transmission_layer', str(self._count) + '.png'))\n Image.fromarray(tensor2im(self.input).astype(np.uint8)).save(\n join(savedir, 'blended', str(self._count) + '.png'))\n self._count += 1\n\n return res\n\n def test(self, data, savedir=None):\n # only the 1st input of the whole minibatch would be evaluated\n self._eval()\n self.set_input(data, 'test')\n\n if self.data_name is not None and savedir is not None:\n name = os.path.splitext(os.path.basename(self.data_name[0]))[0]\n if not os.path.exists(join(savedir, name)):\n os.makedirs(join(savedir, name))\n\n if os.path.exists(join(savedir, name, '{}.png'.format(self.opt.name))):\n return\n\n with torch.no_grad():\n output_i, output_j = self.forward()\n output_i = tensor2im(output_i)\n output_j = tensor2im(output_j)\n if self.data_name is not None and savedir is not None:\n Image.fromarray(output_i.astype(np.uint8)).save(join(savedir, name, '{}_l.png'.format(self.opt.name)))\n Image.fromarray(output_j.astype(np.uint8)).save(join(savedir, name, '{}_r.png'.format(self.opt.name)))\n Image.fromarray(tensor2im(self.input).astype(np.uint8)).save(join(savedir, name, 'm_input.png'))\n\n\nclass YTMTNetModel(YTMTNetBase):\n def name(self):\n return 'ytmtnet'\n\n def __init__(self):\n self.epoch = 0\n self.iterations = 0\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n def print_network(self):\n print('--------------------- Model ---------------------')\n print('##################### NetG #####################')\n networks.print_network(self.net_i)\n if self.isTrain and self.opt.lambda_gan > 0:\n print('##################### NetD #####################')\n networks.print_network(self.netD)\n\n def _eval(self):\n self.net_i.eval()\n\n def _train(self):\n self.net_i.train()\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n\n in_channels = 3\n self.vgg = None\n\n if opt.hyper:\n self.vgg = losses.Vgg19(requires_grad=False).to(self.device)\n in_channels += 1472\n\n self.net_i = arch.__dict__[self.opt.inet](in_channels, 3).to(self.device)\n networks.init_weights(self.net_i, init_type=opt.init_type) # using default initialization as EDSR\n self.edge_map = EdgeMap(scale=1).to(self.device)\n\n if self.isTrain:\n # define loss functions\n self.loss_dic = losses.init_loss(opt, self.Tensor)\n t_vggloss = losses.ContentLoss()\n t_vggloss.initialize(losses.VGGLoss(self.vgg))\n self.loss_dic['t_vgg'] = t_vggloss\n\n r_vggloss = losses.ContentLoss()\n r_vggloss.initialize(losses.VGGLoss(self.vgg))\n self.loss_dic['r_vgg'] = r_vggloss\n\n # Define discriminator\n # if self.opt.lambda_gan > 0:\n self.netD = networks.define_D(opt, 3)\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(),\n lr=opt.lr, 
betas=(0.9, 0.999))\n self._init_optimizer([self.optimizer_D])\n\n # initialize optimizers\n self.optimizer_G = torch.optim.Adam(self.net_i.parameters(),\n lr=opt.lr, betas=(0.9, 0.999), weight_decay=opt.wd)\n\n self._init_optimizer([self.optimizer_G])\n\n if opt.resume:\n self.load(self, opt.resume_epoch)\n\n if opt.no_verbose is False:\n self.print_network()\n\n def backward_D(self):\n for p in self.netD.parameters():\n p.requires_grad = True\n\n loss_D_1, pred_fake_1, pred_real_1 = self.loss_dic['gan'].get_loss(\n self.netD, self.input, self.output_i, self.target_t)\n\n self.loss_D, self.pred_fake, self.pred_real = (loss_D_1, pred_fake_1, pred_real_1)\n\n (self.loss_D * self.opt.lambda_gan).backward(retain_graph=True)\n\n def get_loss(self, out_l, out_r):\n loss_G_GAN = self.loss_dic['gan'].get_g_loss(self.netD, self.input, out_l, self.target_t) * self.opt.lambda_gan\n loss_icnn_vgg = self.loss_dic['t_vgg'].get_loss(out_l, self.target_t) * self.opt.lambda_vgg\n loss_rcnn_vgg = self.loss_dic['r_vgg'].get_loss(out_r, self.target_r) * 0.2 * self.opt.lambda_vgg\n\n loss_icnn_pixel = self.loss_dic['t_pixel'].get_loss(out_l, self.target_t)\n loss_rcnn_pixel = self.loss_dic['r_pixel'].get_loss(out_r, self.target_r) * 1.5\n return loss_G_GAN, loss_icnn_pixel, loss_rcnn_pixel, loss_icnn_vgg + loss_rcnn_vgg\n\n def backward_G(self):\n # Make it a tiny bit faster\n for p in self.netD.parameters():\n p.requires_grad = False\n self.loss_exclu = None\n\n self.loss_G_GAN, self.loss_icnn_pixel, self.loss_rcnn_pixel, \\\n self.loss_icnn_vgg = self.get_loss(self.output_i, self.output_j)\n\n # self.loss_exclu = self.exclusion_loss(self.output_i, self.output_j, 3)\n\n self.loss_recons = self.loss_dic['recons'](self.output_i, -self.output_j, self.input) * 0.2\n\n self.loss_G = self.loss_G_GAN + self.loss_icnn_pixel + self.loss_rcnn_pixel + \\\n self.loss_icnn_vgg + self.loss_recons\n\n self.loss_G.backward()\n\n def hyper_column(self, input_img):\n hypercolumn = self.vgg(input_img)\n _, C, H, W = input_img.shape\n hypercolumn = [F.interpolate(feature.detach(), size=(H, W), mode='bilinear', align_corners=False) for\n feature in hypercolumn]\n input_i = [input_img]\n input_i.extend(hypercolumn)\n input_i = torch.cat(input_i, dim=1)\n return input_i\n\n def forward(self):\n # without edge\n input_i = self.input\n if self.vgg is not None:\n input_i = self.hyper_column(input_i)\n output_i, output_j = self.net_i(input_i, fn=self.data_name[0] if self.data_name else None)\n\n self.output_i = output_i\n self.output_j = output_j\n\n return output_i, output_j\n\n def optimize_parameters(self):\n self._train()\n self.forward()\n\n if self.opt.lambda_gan > 0:\n self.optimizer_D.zero_grad()\n self.backward_D()\n self.optimizer_D.step()\n\n self.optimizer_G.zero_grad()\n self.backward_G()\n\n self.optimizer_G.step()\n\n def get_current_errors(self):\n ret_errors = OrderedDict()\n if self.loss_rcnn_pixel is not None:\n ret_errors['RPixel'] = self.loss_rcnn_pixel.item()\n if self.loss_icnn_pixel is not None:\n ret_errors['IPixel'] = self.loss_icnn_pixel.item()\n if self.loss_icnn_vgg is not None:\n ret_errors['VGG'] = self.loss_icnn_vgg.item()\n if self.loss_G_GAN is not None:\n ret_errors['GAN'] = self.loss_G_GAN.item()\n if self.loss_exclu is not None:\n ret_errors['Exclu'] = self.loss_exclu.item()\n if self.loss_recons is not None:\n ret_errors['Recons'] = self.loss_recons.item()\n\n ret_errors['lr'] = self.optimizer_G.param_groups[0]['lr']\n\n if self.opt.lambda_gan > 0 and self.loss_G_GAN is not None:\n ret_errors['G'] 
= self.loss_G_GAN.item()\n ret_errors['D'] = self.loss_D.item()\n\n return ret_errors\n\n def get_current_visuals(self):\n ret_visuals = OrderedDict()\n ret_visuals['input'] = tensor2im(self.input).astype(np.uint8)\n ret_visuals['output_i'] = tensor2im(self.output_i).astype(np.uint8)\n ret_visuals['output_j'] = tensor2im(self.output_j).astype(np.uint8)\n ret_visuals['target'] = tensor2im(self.target_t).astype(np.uint8)\n ret_visuals['reflection'] = tensor2im(self.target_r).astype(np.uint8)\n # ret_visuals['residual'] = tensor2im((self.input - self.output_i)).astype(np.uint8)\n\n return ret_visuals\n\n def exclusion_loss(self, img_T, img_R, level=3, eps=1e-6):\n grad_x_loss = []\n grad_y_loss = []\n\n for l in range(level):\n grad_x_T, grad_y_T = self.compute_grad(img_T)\n grad_x_R, grad_y_R = self.compute_grad(img_R)\n\n alphax = (2.0 * torch.mean(torch.abs(grad_x_T))) / (torch.mean(torch.abs(grad_x_R)) + eps)\n alphay = (2.0 * torch.mean(torch.abs(grad_y_T))) / (torch.mean(torch.abs(grad_y_R)) + eps)\n\n gradx1_s = (torch.sigmoid(grad_x_T) * 2) - 1 # mul 2 minus 1 is to change sigmoid into tanh\n grady1_s = (torch.sigmoid(grad_y_T) * 2) - 1\n gradx2_s = (torch.sigmoid(grad_x_R * alphax) * 2) - 1\n grady2_s = (torch.sigmoid(grad_y_R * alphay) * 2) - 1\n\n grad_x_loss.append(((torch.mean(torch.mul(gradx1_s.pow(2), gradx2_s.pow(2)))) + eps) ** 0.25)\n grad_y_loss.append(((torch.mean(torch.mul(grady1_s.pow(2), grady2_s.pow(2)))) + eps) ** 0.25)\n\n img_T = F.interpolate(img_T, scale_factor=0.5, mode='bilinear')\n img_R = F.interpolate(img_R, scale_factor=0.5, mode='bilinear')\n loss_gradxy = torch.sum(sum(grad_x_loss) / 3) + torch.sum(sum(grad_y_loss) / 3)\n\n return loss_gradxy / 2\n\n def contain_loss(self, img_T, img_R, img_I, eps=1e-6):\n pix_num = np.prod(img_I.shape)\n predict_tx, predict_ty = self.compute_grad(img_T)\n predict_tx, predict_ty = self.compute_grad(img_T)\n predict_rx, predict_ry = self.compute_grad(img_R)\n input_x, input_y = self.compute_grad(img_I)\n\n out = torch.norm(predict_tx / (input_x + eps), 2) ** 2 + \\\n torch.norm(predict_ty / (input_y + eps), 2) ** 2 + \\\n torch.norm(predict_rx / (input_x + eps), 2) ** 2 + \\\n torch.norm(predict_ry / (input_y + eps), 2) ** 2\n\n return out / pix_num\n\n def compute_grad(self, img):\n gradx = img[:, :, 1:, :] - img[:, :, :-1, :]\n grady = img[:, :, :, 1:] - img[:, :, :, :-1]\n return gradx, grady\n\n def load(self, model, resume_epoch=None):\n icnn_path = model.opt.icnn_path\n state_dict = None\n\n if icnn_path is None:\n model_path = util.get_model_list(model.save_dir, self.opt.name, epoch=resume_epoch)\n state_dict = torch.load(model_path)\n model.epoch = state_dict['epoch']\n model.iterations = state_dict['iterations']\n model.net_i.load_state_dict(state_dict['icnn'])\n if model.isTrain:\n model.optimizer_G.load_state_dict(state_dict['opt_g'])\n else:\n state_dict = torch.load(icnn_path)\n model.net_i.load_state_dict(state_dict['icnn'])\n model.epoch = state_dict['epoch']\n model.iterations = state_dict['iterations']\n # if model.isTrain:\n # model.optimizer_G.load_state_dict(state_dict['opt_g'])\n\n if model.isTrain:\n if 'netD' in state_dict:\n print('Resume netD ...')\n model.netD.load_state_dict(state_dict['netD'])\n model.optimizer_D.load_state_dict(state_dict['opt_d'])\n\n print('Resume from epoch %d, iteration %d' % (model.epoch, model.iterations))\n return state_dict\n\n def state_dict(self):\n state_dict = {\n 'icnn': self.net_i.state_dict(),\n 'opt_g': self.optimizer_G.state_dict(),\n 'epoch': self.epoch, 
'iterations': self.iterations\n }\n\n if self.opt.lambda_gan > 0:\n state_dict.update({\n 'opt_d': self.optimizer_D.state_dict(),\n 'netD': self.netD.state_dict(),\n })\n\n return state_dict\n"
] |
[
[
"torch.abs",
"torch.sigmoid",
"torch.norm",
"torch.cat",
"torch.zeros",
"numpy.clip",
"torch.load",
"numpy.tile",
"torch.no_grad",
"numpy.prod",
"numpy.transpose",
"torch.nn.functional.interpolate",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
drmaxchen/pyradio
|
[
"f2e46856425cfb233d29d391199bfb9b85824b06"
] |
[
"radiomics/shape.py"
] |
[
"import numpy\nimport SimpleITK as sitk\n\nfrom radiomics import base, cShape, deprecated\n\n\nclass RadiomicsShape(base.RadiomicsFeaturesBase):\n r\"\"\"\n In this group of features we included descriptors of the three-dimensional size and shape of the ROI. These features\n are independent from the gray level intensity distribution in the ROI and are therefore only calculated on the\n non-derived image and mask.\n\n Let:\n\n - :math:`V` the volume of the ROI in mm\\ :sup:`3`\n - :math:`A` the surface area of the ROI in mm\\ :sup:`2`\n \"\"\"\n\n def __init__(self, inputImage, inputMask, **kwargs):\n super(RadiomicsShape, self).__init__(inputImage, inputMask, **kwargs)\n\n def _initVoxelBasedCalculation(self):\n raise NotImplementedError('Shape features are not available in voxel-based mode')\n\n def _initSegmentBasedCalculation(self):\n\n self.pixelSpacing = numpy.array(self.inputImage.GetSpacing()[::-1])\n\n # Pad inputMask to prevent index-out-of-range errors\n self.logger.debug('Padding the mask with 0s')\n\n cpif = sitk.ConstantPadImageFilter()\n\n padding = numpy.tile(1, 3)\n try:\n cpif.SetPadLowerBound(padding)\n cpif.SetPadUpperBound(padding)\n except TypeError:\n # newer versions of SITK/python want a tuple or list\n cpif.SetPadLowerBound(padding.tolist())\n cpif.SetPadUpperBound(padding.tolist())\n\n self.inputMask = cpif.Execute(self.inputMask)\n\n # Reassign self.maskArray using the now-padded self.inputMask and force and interger datatype\n self.maskArray = (sitk.GetArrayFromImage(self.inputMask) == self.label).astype('int')\n self.labelledVoxelCoordinates = numpy.where(self.maskArray != 0)\n\n self.logger.debug('Pre-calculate Volume, Surface Area and Eigenvalues')\n\n # Volume, Surface Area and eigenvalues are pre-calculated\n # Compute volume\n z, y, x = self.pixelSpacing\n Np = len(self.labelledVoxelCoordinates[0])\n self.Volume = Np * (z * x * y)\n\n # Compute Surface Area\n self.SurfaceArea = self._calculateSurfaceArea()\n\n # Compute eigenvalues and -vectors\n coordinates = numpy.array(self.labelledVoxelCoordinates, dtype='int').transpose((1, 0)) # Transpose equals zip(*a)\n physicalCoordinates = coordinates * self.pixelSpacing[None, :]\n physicalCoordinates -= numpy.mean(physicalCoordinates, axis=0) # Centered at 0\n physicalCoordinates /= numpy.sqrt(Np)\n covariance = numpy.dot(physicalCoordinates.T.copy(), physicalCoordinates)\n self.eigenValues, eigenVectors = numpy.linalg.eig(covariance) # eigenVectors are not used\n\n # Correct machine precision errors causing very small negative eigen values in case of some 2D segmentations\n machine_errors = numpy.bitwise_and(self.eigenValues < 0, self.eigenValues > -1e-10)\n if numpy.sum(machine_errors) > 0:\n self.logger.warning('Encountered %d eigenvalues < 0 and > -1e-10, rounding to 0', numpy.sum(machine_errors))\n self.eigenValues[machine_errors] = 0\n\n self.eigenValues.sort() # Sort the eigenValues from small to large\n\n self.diameters = None # Do not precompute diameters, but instantiate the variable for lazy assignment\n\n self.logger.debug('Shape feature class initialized')\n\n def _calculateSurfaceArea(self):\n self.logger.debug('Calculating Surface Area in C')\n\n return cShape.calculate_surfacearea(self.maskArray, self.pixelSpacing)\n\n def calculateDiameters(self):\n \"\"\"\n Calculate maximum diameters in 2D and 3D using C extension. Function returns a tuple with 4 elements:\n\n 0. Maximum 2D diameter Slice (XY Plane, Axial)\n 1. Maximum 2D diameter Column (ZX Plane, Coronal)\n 2. 
Maximum 2D diameter Row (ZY Plane, Sagittal)\n 3. Maximum 3D diameter\n \"\"\"\n self.logger.debug('Calculating Maximum diameters in C')\n Ns = len(self.labelledVoxelCoordinates[0])\n return cShape.calculate_diameter(self.maskArray, self.pixelSpacing, Ns)\n\n def getVolumeFeatureValue(self):\n r\"\"\"\n **1. Volume**\n\n .. math::\n V = \\displaystyle\\sum^{N}_{i=1}{V_i}\n\n The volume of the ROI :math:`V` is approximated by multiplying the number of voxels in the ROI by the volume of a\n single voxel :math:`V_i`.\n\n .. note::\n In the IBSI feature definitions, a more precise approximation of the volume is used. That method uses tetrahedrons\n consisting of the origin and faces in the ROI. Although the method implemented here overestimates the volume,\n especially in small volumes, the difference will be negligible in large ROIs.\n \"\"\"\n return self.Volume\n\n def getSurfaceAreaFeatureValue(self):\n r\"\"\"\n **2. Surface Area**\n\n .. math::\n A = \\displaystyle\\sum^{N}_{i=1}{\\frac{1}{2}|\\text{a}_i\\text{b}_i \\times \\text{a}_i\\text{c}_i|}\n\n Where:\n\n :math:`N` is the number of triangles forming the surface mesh of the volume (ROI)\n\n :math:`\\text{a}_i\\text{b}_i` and :math:`\\text{a}_i\\text{c}_i` are the edges of the :math:`i^{\\text{th}}` triangle\n formed by points :math:`\\text{a}_i`, :math:`\\text{b}_i` and :math:`\\text{c}_i`\n\n Surface Area is an approximation of the surface of the ROI in mm2, calculated using a marching cubes algorithm.\n\n References:\n\n - Lorensen WE, Cline HE. Marching cubes: A high resolution 3D surface construction algorithm. ACM SIGGRAPH Comput\n Graph `Internet <http://portal.acm.org/citation.cfm?doid=37402.37422>`_. 1987;21:163-9.\n \"\"\"\n return self.SurfaceArea\n\n def getSurfaceVolumeRatioFeatureValue(self):\n r\"\"\"\n **3. Surface Area to Volume ratio**\n\n .. math::\n \\textit{surface to volume ratio} = \\frac{A}{V}\n\n Here, a lower value indicates a more compact (sphere-like) shape. This feature is not dimensionless, and is\n therefore (partly) dependent on the volume of the ROI.\n \"\"\"\n return self.SurfaceArea / self.Volume\n\n def getSphericityFeatureValue(self):\n r\"\"\"\n **4. Sphericity**\n\n .. math::\n \\textit{sphericity} = \\frac{\\sqrt[3]{36 \\pi V^2}}{A}\n\n Sphericity is a measure of the roundness of the shape of the tumor region relative to a sphere. It is a\n dimensionless measure, independent of scale and orientation. The value range is :math:`0 < sphericity \\leq 1`, where\n a value of 1 indicates a perfect sphere (a sphere has the smallest possible surface area for a given volume,\n compared to other solids).\n\n .. note::\n This feature is correlated to Compactness 1, Compactness 2 and Spherical Disproportion. In the default\n parameter file provided in the ``pyradiomics/examples/exampleSettings`` folder, Compactness 1 and Compactness 2\n are therefore disabled.\n \"\"\"\n return (36 * numpy.pi * self.Volume ** 2) ** (1.0 / 3.0) / self.SurfaceArea\n\n @deprecated\n def getCompactness1FeatureValue(self):\n r\"\"\"\n **5. Compactness 1**\n\n .. math::\n \\textit{compactness 1} = \\frac{V}{\\sqrt{\\pi A^3}}\n\n Similar to Sphericity, Compactness 1 is a measure of how compact the shape of the tumor is relative to a sphere\n (most compact). It is therefore correlated to Sphericity and redundant. 
It is provided here for completeness.\n The value range is :math:`0 < compactness\\ 1 \\leq \\frac{1}{6 \\pi}`, where a value of :math:`\\frac{1}{6 \\pi}`\n indicates a perfect sphere.\n\n By definition, :math:`compactness\\ 1 = \\frac{1}{6 \\pi}\\sqrt{compactness\\ 2} =\n \\frac{1}{6 \\pi}\\sqrt{sphericity^3}`.\n\n .. note::\n This feature is correlated to Compactness 2, Sphericity and Spherical Disproportion.\n Therefore, this feature is marked, so it is not enabled by default (i.e. this feature will not be enabled if no\n individual features are specified (enabling 'all' features), but will be enabled when individual features are\n specified, including this feature). To include this feature in the extraction, specify it by name in the enabled features.\n \"\"\"\n return self.Volume / (self.SurfaceArea ** (3.0 / 2.0) * numpy.sqrt(numpy.pi))\n\n @deprecated\n def getCompactness2FeatureValue(self):\n r\"\"\"\n **6. Compactness 2**\n\n .. math::\n \\textit{compactness 2} = 36 \\pi \\frac{V^2}{A^3}\n\n Similar to Sphericity and Compactness 1, Compactness 2 is a measure of how compact the shape of the tumor is\n relative to a sphere (most compact). It is a dimensionless measure, independent of scale and orientation. The value\n range is :math:`0 < compactness\\ 2 \\leq 1`, where a value of 1 indicates a perfect sphere.\n\n By definition, :math:`compactness\\ 2 = (sphericity)^3`\n\n .. note::\n This feature is correlated to Compactness 1, Sphericity and Spherical Disproportion.\n Therefore, this feature is marked, so it is not enabled by default (i.e. this feature will not be enabled if no\n individual features are specified (enabling 'all' features), but will be enabled when individual features are\n specified, including this feature). To include this feature in the extraction, specify it by name in the enabled features.\n \"\"\"\n return (36.0 * numpy.pi) * (self.Volume ** 2.0) / (self.SurfaceArea ** 3.0)\n\n @deprecated\n def getSphericalDisproportionFeatureValue(self):\n r\"\"\"\n **7. Spherical Disproportion**\n\n .. math::\n \\textit{spherical disproportion} = \\frac{A}{4\\pi R^2} = \\frac{A}{\\sqrt[3]{36 \\pi V^2}}\n\n Where :math:`R` is the radius of a sphere with the same volume as the tumor, and equal to\n :math:`\\sqrt[3]{\\frac{3V}{4\\pi}}`.\n\n Spherical Disproportion is the ratio of the surface area of the tumor region to the surface area of a sphere with\n the same volume as the tumor region, and by definition, the inverse of Sphericity. Therefore, the value range is\n :math:`spherical\\ disproportion \\geq 1`, with a value of 1 indicating a perfect sphere.\n\n .. note::\n This feature is correlated to Compactness 2, Compactness2 and Sphericity.\n Therefore, this feature is marked, so it is not enabled by default (i.e. this feature will not be enabled if no\n individual features are specified (enabling 'all' features), but will be enabled when individual features are\n specified, including this feature). To include this feature in the extraction, specify it by name in the enabled features.\n \"\"\"\n return self.SurfaceArea / (36 * numpy.pi * self.Volume ** 2) ** (1.0 / 3.0)\n\n def getMaximum3DDiameterFeatureValue(self):\n r\"\"\"\n **8. Maximum 3D diameter**\n\n Maximum 3D diameter is defined as the largest pairwise Euclidean distance between surface voxels in the ROI.\n\n Also known as Feret Diameter.\n\n .. 
warning::\n This feature is only available when C Extensions are enabled\n \"\"\"\n if self.diameters is None:\n self.diameters = self.calculateDiameters()\n return self.diameters[3]\n\n def getMaximum2DDiameterSliceFeatureValue(self):\n r\"\"\"\n **9. Maximum 2D diameter (Slice)**\n\n Maximum 2D diameter (Slice) is defined as the largest pairwise Euclidean distance between tumor surface voxels in\n the row-column (generally the axial) plane.\n\n .. warning::\n This feature is only available when C Extensions are enabled\n \"\"\"\n if self.diameters is None:\n self.diameters = self.calculateDiameters()\n return self.diameters[0]\n\n def getMaximum2DDiameterColumnFeatureValue(self):\n r\"\"\"\n **10. Maximum 2D diameter (Column)**\n\n Maximum 2D diameter (Column) is defined as the largest pairwise Euclidean distance between tumor surface voxels in\n the row-slice (usually the coronal) plane.\n\n .. warning::\n This feature is only available when C Extensions are enabled\n \"\"\"\n if self.diameters is None:\n self.diameters = self.calculateDiameters()\n return self.diameters[1]\n\n def getMaximum2DDiameterRowFeatureValue(self):\n r\"\"\"\n **11. Maximum 2D diameter (Row)**\n\n Maximum 2D diameter (Row) is defined as the largest pairwise Euclidean distance between tumor surface voxels in the\n column-slice (usually the sagittal) plane.\n\n .. warning::\n This feature is only available when C Extensions are enabled\n \"\"\"\n if self.diameters is None:\n self.diameters = self.calculateDiameters()\n return self.diameters[2]\n\n def getMajorAxisFeatureValue(self):\n r\"\"\"\n **12. Major Axis**\n\n .. math::\n \\textit{major axis} = 4 \\sqrt{\\lambda_{\\text{major}}}\n\n \"\"\"\n if self.eigenValues[2] < 0:\n self.logger.warning('Major axis eigenvalue negative! (%g)', self.eigenValues[2])\n return numpy.nan\n return numpy.sqrt(self.eigenValues[2]) * 4\n\n def getMinorAxisFeatureValue(self):\n r\"\"\"\n **13. Minor Axis**\n\n .. math::\n \\textit{minor axis} = 4 \\sqrt{\\lambda_{\\text{minor}}}\n\n \"\"\"\n if self.eigenValues[1] < 0:\n self.logger.warning('Minor axis eigenvalue negative! (%g)', self.eigenValues[1])\n return numpy.nan\n return numpy.sqrt(self.eigenValues[1]) * 4\n\n def getLeastAxisFeatureValue(self):\n r\"\"\"\n **14. Least Axis**\n\n .. math::\n \\textit{least axis} = 4 \\sqrt{\\lambda_{\\text{least}}}\n\n \"\"\"\n if self.eigenValues[0] < 0:\n self.logger.warning('Least axis eigenvalue negative! (%g)', self.eigenValues[0])\n return numpy.nan\n return numpy.sqrt(self.eigenValues[0]) * 4\n\n def getElongationFeatureValue(self):\n r\"\"\"\n **15. Elongation**\n\n Elongation is calculated using its implementation in SimpleITK, and is defined as:\n\n .. math::\n \\textit{elongation} = \\sqrt{\\frac{\\lambda_{\\text{minor}}}{\\lambda_{\\text{major}}}}\n\n Here, :math:`\\lambda_{\\text{major}}` and :math:`\\lambda_{\\text{minor}}` are the lengths of the largest and second\n largest principal component axes. The values range between 1 (where the cross section through the first and second\n largest principal moments is circle-like (non-elongated)) and 0 (where the object is a single point or 1 dimensional\n line).\n \"\"\"\n if self.eigenValues[1] < 0 or self.eigenValues[2] < 0:\n self.logger.warning('Elongation eigenvalue negative! (%g, %g)', self.eigenValues[1], self.eigenValues[2])\n return numpy.nan\n return numpy.sqrt(self.eigenValues[1] / self.eigenValues[2])\n\n def getFlatnessFeatureValue(self):\n r\"\"\"\n **16. 
Flatness**\n\n Flatness is calculated using its implementation in SimpleITK, and is defined as:\n\n .. math::\n \\textit{flatness} = \\sqrt{\\frac{\\lambda_{\\text{least}}}{\\lambda_{\\text{major}}}}\n\n Here, :math:`\\lambda_{\\text{major}}` and :math:`\\lambda_{\\text{least}}` are the lengths of the largest and smallest\n principal component axes. The values range between 1 (non-flat, sphere-like) and 0 (a flat object).\n \"\"\"\n if self.eigenValues[0] < 0 or self.eigenValues[2] < 0:\n self.logger.warning('Elongation eigenvalue negative! (%g, %g)', self.eigenValues[0], self.eigenValues[2])\n return numpy.nan\n return numpy.sqrt(self.eigenValues[0] / self.eigenValues[2])\n\n def _interpolate(self, grid, p1, p2):\n diff = (.5 - self.maskArray[tuple(grid[p1])]) / (self.maskArray[tuple(grid[p2])] - self.maskArray[tuple(grid[p1])])\n return (grid[p1] + ((grid[p2] - grid[p1]) * diff)) * self.pixelSpacing\n\n def _getMarchingTables(self):\n vertList = numpy.array(((0, 0, 0.5), (0, 0.5, 1), (0, 1, 0.5), (0, 0.5, 0),\n (1, 0, 0.5), (1, 0.5, 1), (1, 1, 0.5), (1, 0.5, 0),\n (0.5, 0, 0), (0.5, 0, 1), (0.5, 1, 1), (0.5, 1, 0)), dtype='float64')\n vertList *= self.pixelSpacing[None, :]\n\n triTable = [[],\n [[0, 8, 3]],\n [[0, 1, 9]],\n [[1, 8, 3], [9, 8, 1]],\n [[1, 2, 10]],\n [[0, 8, 3], [1, 2, 10]],\n [[9, 2, 10], [0, 2, 9]],\n [[2, 8, 3], [2, 10, 8], [10, 9, 8]],\n [[3, 11, 2]],\n [[0, 11, 2], [8, 11, 0]],\n [[1, 9, 0], [2, 3, 11]],\n [[1, 11, 2], [1, 9, 11], [9, 8, 11]],\n [[3, 10, 1], [11, 10, 3]],\n [[0, 10, 1], [0, 8, 10], [8, 11, 10]],\n [[3, 9, 0], [3, 11, 9], [11, 10, 9]],\n [[9, 8, 10], [10, 8, 11]],\n [[4, 7, 8]],\n [[4, 3, 0], [7, 3, 4]],\n [[0, 1, 9], [8, 4, 7]],\n [[4, 1, 9], [4, 7, 1], [7, 3, 1]],\n [[1, 2, 10], [8, 4, 7]],\n [[3, 4, 7], [3, 0, 4], [1, 2, 10]],\n [[9, 2, 10], [9, 0, 2], [8, 4, 7]],\n [[2, 10, 9], [2, 9, 7], [2, 7, 3], [7, 9, 4]],\n [[8, 4, 7], [3, 11, 2]],\n [[11, 4, 7], [11, 2, 4], [2, 0, 4]],\n [[9, 0, 1], [8, 4, 7], [2, 3, 11]],\n [[4, 7, 11], [9, 4, 11], [9, 11, 2], [9, 2, 1]],\n [[3, 10, 1], [3, 11, 10], [7, 8, 4]],\n [[1, 11, 10], [1, 4, 11], [1, 0, 4], [7, 11, 4]],\n [[4, 7, 8], [9, 0, 11], [9, 11, 10], [11, 0, 3]],\n [[4, 7, 11], [4, 11, 9], [9, 11, 10]],\n [[9, 5, 4]],\n [[9, 5, 4], [0, 8, 3]],\n [[0, 5, 4], [1, 5, 0]],\n [[8, 5, 4], [8, 3, 5], [3, 1, 5]],\n [[1, 2, 10], [9, 5, 4]],\n [[3, 0, 8], [1, 2, 10], [4, 9, 5]],\n [[5, 2, 10], [5, 4, 2], [4, 0, 2]],\n [[2, 10, 5], [3, 2, 5], [3, 5, 4], [3, 4, 8]],\n [[9, 5, 4], [2, 3, 11]],\n [[0, 11, 2], [0, 8, 11], [4, 9, 5]],\n [[0, 5, 4], [0, 1, 5], [2, 3, 11]],\n [[2, 1, 5], [2, 5, 8], [2, 8, 11], [4, 8, 5]],\n [[10, 3, 11], [10, 1, 3], [9, 5, 4]],\n [[4, 9, 5], [0, 8, 1], [8, 10, 1], [8, 11, 10]],\n [[5, 4, 0], [5, 0, 11], [5, 11, 10], [11, 0, 3]],\n [[5, 4, 8], [5, 8, 10], [10, 8, 11]],\n [[9, 7, 8], [5, 7, 9]],\n [[9, 3, 0], [9, 5, 3], [5, 7, 3]],\n [[0, 7, 8], [0, 1, 7], [1, 5, 7]],\n [[1, 5, 3], [3, 5, 7]],\n [[9, 7, 8], [9, 5, 7], [10, 1, 2]],\n [[10, 1, 2], [9, 5, 0], [5, 3, 0], [5, 7, 3]],\n [[8, 0, 2], [8, 2, 5], [8, 5, 7], [10, 5, 2]],\n [[2, 10, 5], [2, 5, 3], [3, 5, 7]],\n [[7, 9, 5], [7, 8, 9], [3, 11, 2]],\n [[9, 5, 7], [9, 7, 2], [9, 2, 0], [2, 7, 11]],\n [[2, 3, 11], [0, 1, 8], [1, 7, 8], [1, 5, 7]],\n [[11, 2, 1], [11, 1, 7], [7, 1, 5]],\n [[9, 5, 8], [8, 5, 7], [10, 1, 3], [10, 3, 11]],\n [[5, 7, 0], [5, 0, 9], [7, 11, 0], [1, 0, 10], [11, 10, 0]],\n [[11, 10, 0], [11, 0, 3], [10, 5, 0], [8, 0, 7], [5, 7, 0]],\n [[11, 10, 5], [7, 11, 5]],\n [[10, 6, 5]],\n [[0, 8, 3], [5, 10, 6]],\n [[9, 0, 1], 
[5, 10, 6]],\n [[1, 8, 3], [1, 9, 8], [5, 10, 6]],\n [[1, 6, 5], [2, 6, 1]],\n [[1, 6, 5], [1, 2, 6], [3, 0, 8]],\n [[9, 6, 5], [9, 0, 6], [0, 2, 6]],\n [[5, 9, 8], [5, 8, 2], [5, 2, 6], [3, 2, 8]],\n [[2, 3, 11], [10, 6, 5]],\n [[11, 0, 8], [11, 2, 0], [10, 6, 5]],\n [[0, 1, 9], [2, 3, 11], [5, 10, 6]],\n [[5, 10, 6], [1, 9, 2], [9, 11, 2], [9, 8, 11]],\n [[6, 3, 11], [6, 5, 3], [5, 1, 3]],\n [[0, 8, 11], [0, 11, 5], [0, 5, 1], [5, 11, 6]],\n [[3, 11, 6], [0, 3, 6], [0, 6, 5], [0, 5, 9]],\n [[6, 5, 9], [6, 9, 11], [11, 9, 8]],\n [[5, 10, 6], [4, 7, 8]],\n [[4, 3, 0], [4, 7, 3], [6, 5, 10]],\n [[1, 9, 0], [5, 10, 6], [8, 4, 7]],\n [[10, 6, 5], [1, 9, 7], [1, 7, 3], [7, 9, 4]],\n [[6, 1, 2], [6, 5, 1], [4, 7, 8]],\n [[1, 2, 5], [5, 2, 6], [3, 0, 4], [3, 4, 7]],\n [[8, 4, 7], [9, 0, 5], [0, 6, 5], [0, 2, 6]],\n [[7, 3, 9], [7, 9, 4], [3, 2, 9], [5, 9, 6], [2, 6, 9]],\n [[3, 11, 2], [7, 8, 4], [10, 6, 5]],\n [[5, 10, 6], [4, 7, 2], [4, 2, 0], [2, 7, 11]],\n [[0, 1, 9], [4, 7, 8], [2, 3, 11], [5, 10, 6]],\n [[9, 2, 1], [9, 11, 2], [9, 4, 11], [7, 11, 4], [5, 10, 6]],\n [[8, 4, 7], [3, 11, 5], [3, 5, 1], [5, 11, 6]],\n [[5, 1, 11], [5, 11, 6], [1, 0, 11], [7, 11, 4], [0, 4, 11]],\n [[0, 5, 9], [0, 6, 5], [0, 3, 6], [11, 6, 3], [8, 4, 7]],\n [[6, 5, 9], [6, 9, 11], [4, 7, 9], [7, 11, 9]],\n [[10, 4, 9], [6, 4, 10]],\n [[4, 10, 6], [4, 9, 10], [0, 8, 3]],\n [[10, 0, 1], [10, 6, 0], [6, 4, 0]],\n [[8, 3, 1], [8, 1, 6], [8, 6, 4], [6, 1, 10]],\n [[1, 4, 9], [1, 2, 4], [2, 6, 4]],\n [[3, 0, 8], [1, 2, 9], [2, 4, 9], [2, 6, 4]],\n [[0, 2, 4], [4, 2, 6]],\n [[8, 3, 2], [8, 2, 4], [4, 2, 6]],\n [[10, 4, 9], [10, 6, 4], [11, 2, 3]],\n [[0, 8, 2], [2, 8, 11], [4, 9, 10], [4, 10, 6]],\n [[3, 11, 2], [0, 1, 6], [0, 6, 4], [6, 1, 10]],\n [[6, 4, 1], [6, 1, 10], [4, 8, 1], [2, 1, 11], [8, 11, 1]],\n [[9, 6, 4], [9, 3, 6], [9, 1, 3], [11, 6, 3]],\n [[8, 11, 1], [8, 1, 0], [11, 6, 1], [9, 1, 4], [6, 4, 1]],\n [[3, 11, 6], [3, 6, 0], [0, 6, 4]],\n [[6, 4, 8], [11, 6, 8]],\n [[7, 10, 6], [7, 8, 10], [8, 9, 10]],\n [[0, 7, 3], [0, 10, 7], [0, 9, 10], [6, 7, 10]],\n [[10, 6, 7], [1, 10, 7], [1, 7, 8], [1, 8, 0]],\n [[10, 6, 7], [10, 7, 1], [1, 7, 3]],\n [[1, 2, 6], [1, 6, 8], [1, 8, 9], [8, 6, 7]],\n [[2, 6, 9], [2, 9, 1], [6, 7, 9], [0, 9, 3], [7, 3, 9]],\n [[7, 8, 0], [7, 0, 6], [6, 0, 2]],\n [[7, 3, 2], [6, 7, 2]],\n [[2, 3, 11], [10, 6, 8], [10, 8, 9], [8, 6, 7]],\n [[2, 0, 7], [2, 7, 11], [0, 9, 7], [6, 7, 10], [9, 10, 7]],\n [[1, 8, 0], [1, 7, 8], [1, 10, 7], [6, 7, 10], [2, 3, 11]],\n [[11, 2, 1], [11, 1, 7], [10, 6, 1], [6, 7, 1]],\n [[8, 9, 6], [8, 6, 7], [9, 1, 6], [11, 6, 3], [1, 3, 6]],\n [[0, 9, 1], [11, 6, 7]],\n [[7, 8, 0], [7, 0, 6], [3, 11, 0], [11, 6, 0]],\n [[7, 11, 6]]]\n return vertList, triTable\n"
] |
[
[
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.tile",
"numpy.bitwise_and",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nehiridil/MLDays_nlp
|
[
"20d29d01836c82361cb1b656f2e98d7435a93622",
"20d29d01836c82361cb1b656f2e98d7435a93622",
"20d29d01836c82361cb1b656f2e98d7435a93622"
] |
[
"bobo/Lib/site-packages/gensim/models/wrappers/ldavowpalwabbit.py",
"bobo/Lib/site-packages/gensim/topic_coherence/aggregation.py",
"bobo/Lib/site-packages/gensim/topic_coherence/text_analysis.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 Dave Challis <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"Python wrapper for `Vowpal Wabbit's Latent Dirichlet Allocation <https://github.com/JohnLangford/vowpal_wabbit/>`_.\n\nThis uses `Matt Hoffman's online algorithm\n<http://papers.nips.cc/paper/3902-online-learning-for-latent-dirichlet-allocation.pdf>`_, i.e. the same algorithm\nthat Gensim's :class:`~gensim.models.ldamodel.LdaModel` is based on.\n\nInstallation\n------------\nUse `official guide <https://github.com/JohnLangford/vowpal_wabbit>`_ or this one ::\n\n git clone https://github.com/JohnLangford/vowpal_wabbit.git\n cd vowpal_wabbit\n make\n make test\n sudo make install\n\nWarnings\n--------\nCurrently working and tested with Vowpal Wabbit versions 7.10 to 8.1.1. Vowpal Wabbit's API isn't currently stable,\nso this may or may not work with older/newer versions. The aim will be to ensure this wrapper always works with\nthe latest release of Vowpal Wabbit.\n\n\nExamples\n--------\n\nTrain model\n\n.. sourcecode:: pycon\n\n >>> from gensim.test.utils import common_corpus, common_dictionary\n >>> from gensim.models.wrappers import LdaVowpalWabbit\n >>>\n >>> path_to_wv_binary = \"/path/to/vw/binary\"\n >>> model = LdaVowpalWabbit(path_to_wv_binary, corpus=common_corpus, num_topics=20, id2word=common_dictionary)\n\nUpdate existing model\n\n.. sourcecode:: pycon\n\n >>> another_corpus = [[(1, 1), (2, 1)], [(3, 5)]]\n >>> model.update(another_corpus)\n\nGet topic probability distributions for a document\n\n.. sourcecode:: pycon\n\n >>> document_bow = [(1, 1)]\n >>> print(model[document_bow])\n\nPrint topics\n\n.. sourcecode:: pycon\n\n >>> print(model.print_topics())\n\nSave/load the trained model\n\n.. sourcecode:: pycon\n\n >>> from gensim.test.utils import get_tmpfile\n >>>\n >>> temp_path = get_tmpfile(\"vw_lda.model\")\n >>> model.save(temp_path)\n >>>\n >>> loaded_lda = LdaVowpalWabbit.load(temp_path)\n\nCalculate log-perplexoty on given corpus\n\n.. 
sourcecode:: pycon\n\n >>> another_corpus = [[(1, 1), (2, 1)], [(3, 5)]]\n >>> print(model.log_perpexity(another_corpus))\n\nVowpal Wabbit works on files, so this wrapper maintains a temporary directory while it's around,\nreading/writing there as necessary.\n\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport tempfile\n\nimport numpy\n\nfrom gensim import utils, matutils\nfrom gensim.models.ldamodel import LdaModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass LdaVowpalWabbit(utils.SaveLoad):\n \"\"\"Python wrapper using `Vowpal Wabbit's online LDA <https://github.com/JohnLangford/vowpal_wabbit/>`_.\n\n Communication between Vowpal Wabbit and Python takes place by passing around data files\n on disk and calling the 'vw' binary with the subprocess module.\n\n Warnings\n --------\n This is **only** python wrapper for `Vowpal Wabbit's online LDA <https://github.com/JohnLangford/vowpal_wabbit/>`_,\n you need to install original implementation first and pass the path to binary to ``vw_path``.\n\n \"\"\"\n def __init__(self, vw_path, corpus=None, num_topics=100, id2word=None,\n chunksize=256, passes=1, alpha=0.1, eta=0.1, decay=0.5,\n offset=1, gamma_threshold=0.001, random_seed=None,\n cleanup_files=True, tmp_prefix='tmp'):\n \"\"\"\n\n Parameters\n ----------\n vw_path : str\n Path to Vowpal Wabbit's binary.\n corpus : iterable of list of (int, int), optional\n Collection of texts in BoW format. If given, training will start immediately,\n otherwise, you should call :meth:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit.train` or\n :meth:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit.update` manually for training.\n num_topics : int, optional\n Number of requested latent topics to be extracted from the training corpus.\n Corresponds to VW's ``--lda <num_topics>`` argument.\n id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional\n Mapping from word ids (integers) to words (strings).\n chunksize : int, optional\n Number of documents examined in each batch.\n Corresponds to VW's ``--minibatch <batch_size>`` argument.\n passes : int, optional\n Number of passes over the dataset to use.\n Corresponds to VW's ``--passes <passes>`` argument.\n alpha : float, optional\n Float effecting sparsity of per-document topic weights.\n This is applied symmetrically, and should be set higher to when documents are thought to look more similar.\n Corresponds to VW's ``--lda_alpha <alpha>`` argument.\n eta : float, optional\n Affects the sparsity of topic distributions.\n This is applied symmetrically, and should be set higher when topics\n are thought to look more similar.\n Corresponds to VW's ``--lda_rho <rho>`` argument.\n decay : float, optional\n Learning rate decay, affects how quickly learnt values are forgotten.\n Should be set to a value between 0.5 and 1.0 to guarantee convergence.\n Corresponds to VW's ``--power_t <tau>`` argument.\n offset: int, optional\n Learning offset, set to higher values to slow down learning on early iterations of the algorithm.\n Corresponds to VW's ``--initial_t <tau>`` argument.\n gamma_threshold : float, optional\n Affects when learning loop will be broken out of, higher values will result in earlier loop completion.\n Corresponds to VW's ``--epsilon <eps>`` argument.\n random_seed : int, optional\n Sets random seed when learning.\n Corresponds to VW's ``--random_seed <seed>`` argument.\n 
cleanup_files : bool, optional\n Whether or not to delete temporary directory and files used by this wrapper.\n Setting to False can be useful for debugging, or for re-using Vowpal Wabbit files elsewhere.\n tmp_prefix : str, optional\n To prefix temporary working directory name.\n\n \"\"\"\n # default parameters are taken from Vowpal Wabbit's defaults, and\n # parameter names changed to match Gensim's LdaModel where possible\n self.vw_path = vw_path\n self.id2word = id2word\n\n if self.id2word is None:\n if corpus is None:\n raise ValueError(\n \"at least one of corpus/id2word must be specified, to establish input space dimensionality\"\n )\n logger.warning(\"no word id mapping provided; initializing from corpus, assuming identity\")\n self.id2word = utils.dict_from_corpus(corpus)\n self.num_terms = len(self.id2word)\n elif len(self.id2word) > 0:\n self.num_terms = 1 + max(self.id2word.keys())\n else:\n self.num_terms = 0\n\n if self.num_terms == 0:\n raise ValueError(\"cannot compute LDA over an empty collection (no terms)\")\n\n # LDA parameters\n self.num_topics = num_topics\n self.chunksize = chunksize\n self.passes = passes\n self.alpha = alpha\n self.eta = eta\n self.gamma_threshold = gamma_threshold\n self.offset = offset\n self.decay = decay\n self.random_seed = random_seed\n self._initial_offset = offset\n\n # temporary files used for Vowpal Wabbit input/output\n self.tmp_dir = None\n self.tmp_prefix = tmp_prefix\n self.cleanup_files = cleanup_files\n self._init_temp_dir(tmp_prefix)\n\n # used for saving/loading this model's state\n self._model_data = None\n self._topics_data = None\n\n # cache loaded topics as numpy array\n self._topics = None\n\n if corpus is not None:\n self.train(corpus)\n\n def train(self, corpus):\n \"\"\"Clear any existing model state, and train on given `corpus`.\n\n Parameters\n ----------\n corpus : iterable of list of (int, int)\n Collection of texts in BoW format.\n\n \"\"\"\n logger.debug('Training new model from corpus')\n\n # reset any existing offset, model, or topics generated\n self.offset = self._initial_offset\n self._topics = None\n\n corpus_size = write_corpus_as_vw(corpus, self._corpus_filename)\n\n cmd = self._get_vw_train_command(corpus_size)\n\n _run_vw_command(cmd)\n\n # ensure that future updates of this model use correct offset\n self.offset += corpus_size\n\n def update(self, corpus):\n \"\"\"Update existing model with `corpus`.\n\n Parameters\n ----------\n corpus : iterable of list of (int, int)\n Collection of texts in BoW format.\n\n \"\"\"\n if not os.path.exists(self._model_filename):\n return self.train(corpus)\n\n logger.debug('Updating exiting model from corpus')\n\n # reset any existing topics generated\n self._topics = None\n\n corpus_size = write_corpus_as_vw(corpus, self._corpus_filename)\n\n cmd = self._get_vw_update_command(corpus_size)\n\n _run_vw_command(cmd)\n\n # ensure that future updates of this model use correct offset\n self.offset += corpus_size\n\n def log_perplexity(self, chunk):\n \"\"\"Get per-word lower bound on log perplexity.\n\n Parameters\n ----------\n chunk : iterable of list of (int, int)\n Collection of texts in BoW format.\n\n Returns\n -------\n bound : float\n Per-word lower bound on log perplexity.\n\n \"\"\"\n vw_data = self._predict(chunk)[1]\n corpus_words = sum(cnt for document in chunk for _, cnt in document)\n bound = -vw_data['average_loss']\n logger.info(\n \"%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words\",\n bound, 
numpy.exp2(-bound), vw_data['corpus_size'], corpus_words\n )\n return bound\n\n def get_topics(self):\n \"\"\"Get topics X words matrix.\n\n Returns\n -------\n numpy.ndarray\n `num_topics` x `vocabulary_size` array of floats which represents the learned term topic matrix.\n\n \"\"\"\n topics = self._get_topics()\n return topics / topics.sum(axis=1)[:, None]\n\n def print_topics(self, num_topics=10, num_words=10):\n \"\"\"Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel.show_topics`.\n\n Parameters\n ----------\n num_topics : int, optional\n Number of topics to return, set `-1` to get all topics.\n num_words : int, optional\n Number of words.\n\n Returns\n -------\n list of str\n Topics as a list of strings\n\n \"\"\"\n return self.show_topics(num_topics, num_words, log=True)\n\n def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):\n \"\"\"Get the `num_words` most probable words for `num_topics` number of topics.\n\n Parameters\n ----------\n num_topics : int, optional\n Number of topics to return, set `-1` to get all topics.\n num_words : int, optional\n Number of words.\n log : bool, optional\n If True - will write topics with logger.\n formatted : bool, optional\n If `True` - return the topics as a list of strings, otherwise as lists of (weight, word) pairs.\n\n Returns\n -------\n list of str\n Topics as a list of strings (if formatted=True) **OR**\n list of (float, str)\n Topics as list of (weight, word) pairs (if formatted=False)\n\n \"\"\"\n if num_topics < 0 or num_topics >= self.num_topics:\n num_topics = self.num_topics\n else:\n num_topics = min(num_topics, self.num_topics)\n\n chosen_topics = range(num_topics)\n shown = []\n\n for i in chosen_topics:\n if formatted:\n topic = self.print_topic(i, topn=num_words)\n else:\n topic = self.show_topic(i, topn=num_words)\n\n shown.append(topic)\n\n if log:\n logger.info(\"topic #%i (%.3f): %s\", i, self.alpha, topic)\n\n return shown\n\n def print_topic(self, topicid, topn=10):\n \"\"\"Get text representation of topic.\n\n Parameters\n ----------\n topicid : int\n Id of topic.\n topn : int, optional\n Top number of words in topic.\n\n Returns\n -------\n str\n Topic `topicid` in text representation.\n\n \"\"\"\n return ' + '.join('{0:.3f}*{1}'.format(v[0], v[1]) for v in self.show_topic(topicid, topn))\n\n def show_topic(self, topicid, topn=10):\n \"\"\"Get `num_words` most probable words for the given `topicid`.\n\n Parameters\n ----------\n topicid : int\n Id of topic.\n topn : int, optional\n Top number of topics that you'll receive.\n\n Returns\n -------\n list of (str, float)\n Sequence of probable words, as a list of `(word, word_probability)` for `topicid` topic.\n\n \"\"\"\n topics = self._get_topics()\n topic = topics[topicid]\n bestn = matutils.argsort(topic, topn, reverse=True)\n return [(topic[t_id], self.id2word[t_id]) for t_id in bestn]\n\n def save(self, fname, *args, **kwargs):\n \"\"\"Save model to file.\n\n Parameters\n ----------\n fname : str\n Path to output file.\n\n \"\"\"\n if os.path.exists(self._model_filename):\n # Vowpal Wabbit uses its own binary model file, read this into\n # variable before serialising this object - keeps all data\n # self contained within a single serialised file\n logger.debug(\"Reading model bytes from '%s'\", self._model_filename)\n with utils.open(self._model_filename, 'rb') as fhandle:\n self._model_data = fhandle.read()\n\n if os.path.exists(self._topics_filename):\n logger.debug(\"Reading topic bytes from '%s'\", self._topics_filename)\n with 
utils.open(self._topics_filename, 'rb') as fhandle:\n self._topics_data = fhandle.read()\n\n if 'ignore' not in kwargs:\n kwargs['ignore'] = frozenset(['_topics', 'tmp_dir'])\n\n super(LdaVowpalWabbit, self).save(fname, *args, **kwargs)\n\n @classmethod\n def load(cls, fname, *args, **kwargs):\n \"\"\"Load model from `fname`.\n\n Parameters\n ----------\n fname : str\n Path to file with :class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit`.\n\n \"\"\"\n lda_vw = super(LdaVowpalWabbit, cls).load(fname, *args, **kwargs)\n lda_vw._init_temp_dir(prefix=lda_vw.tmp_prefix)\n\n if lda_vw._model_data:\n # Vowpal Wabbit operates on its own binary model file - deserialise\n # to file at load time, making it immediately ready for use\n logger.debug(\"Writing model bytes to '%s'\", lda_vw._model_filename)\n with utils.open(lda_vw._model_filename, 'wb') as fhandle:\n fhandle.write(lda_vw._model_data)\n lda_vw._model_data = None # no need to keep in memory after this\n\n if lda_vw._topics_data:\n logger.debug(\"Writing topic bytes to '%s'\", lda_vw._topics_filename)\n with utils.open(lda_vw._topics_filename, 'wb') as fhandle:\n fhandle.write(lda_vw._topics_data)\n lda_vw._topics_data = None\n\n return lda_vw\n\n def __del__(self):\n \"\"\"Cleanup the temporary directory used by this wrapper.\"\"\"\n if self.cleanup_files and self.tmp_dir:\n logger.debug(\"Recursively deleting: %s\", self.tmp_dir)\n shutil.rmtree(self.tmp_dir)\n\n def _init_temp_dir(self, prefix='tmp'):\n \"\"\"Create a working temporary directory with given prefix.\n\n Parameters\n ----------\n prefix : str\n Prefix of the temporary directory.\n\n \"\"\"\n self.tmp_dir = tempfile.mkdtemp(prefix=prefix)\n logger.info('using %s as temp dir', self.tmp_dir)\n\n def _get_vw_predict_command(self, corpus_size):\n \"\"\"Get list of command line arguments for running prediction.\n\n Parameters\n ----------\n corpus_size : int\n Size of the corpus.\n\n \"\"\"\n cmd = [\n self.vw_path,\n '--testonly', # don't update model with this data\n '--lda_D', str(corpus_size),\n '-i', self._model_filename, # load existing binary model\n '-d', self._corpus_filename,\n '--learning_rate', '0', # possibly not needed, but harmless\n '-p', self._predict_filename\n ]\n\n if self.random_seed is not None:\n cmd.extend(['--random_seed', str(self.random_seed)])\n\n return cmd\n\n def _get_vw_train_command(self, corpus_size, update=False):\n \"\"\"Get list of command line arguments for running model training.\n\n Parameters\n ----------\n corpus_size : int\n Size of corpus.\n update : bool\n Set `True` to further train an existing model.\n\n Returns\n -------\n list of str\n Sequence of all training parameters.\n\n \"\"\"\n cmd = [\n self.vw_path,\n '-d', self._corpus_filename,\n '--power_t', str(self.decay),\n '--initial_t', str(self.offset),\n '--minibatch', str(self.chunksize),\n '--lda_D', str(corpus_size),\n '--passes', str(self.passes),\n '--cache_file', self._cache_filename,\n '--lda_epsilon', str(self.gamma_threshold),\n '--readable_model', self._topics_filename,\n '-k', # clear cache\n '-f', self._model_filename\n ]\n\n if update:\n cmd.extend(['-i', self._model_filename])\n else:\n # these params are read from model file if updating\n cmd.extend([\n '--lda', str(self.num_topics),\n '-b', str(self.num_terms.bit_length()),\n '--lda_alpha', str(self.alpha),\n '--lda_rho', str(self.eta)\n ])\n\n if self.random_seed is not None:\n cmd.extend(['--random_seed', str(self.random_seed)])\n\n return cmd\n\n def _get_vw_update_command(self, corpus_size):\n 
\"\"\"Get list of command line arguments to update a model.\n Alias for :meth:`~gensim.models.wrappers.dtmmodel.DtmModel._get_vw_train_command`\n\n Parameters\n ----------\n corpus_size : int\n Size of the corpus.\n\n Returns\n -------\n list of str\n Sequence of all training parameters.\n\n \"\"\"\n return self._get_vw_train_command(corpus_size, update=True)\n\n def _load_vw_topics(self):\n \"\"\"Read topics file generated by Vowpal Wabbit, convert to numpy array.\"\"\"\n topics = numpy.zeros((self.num_topics, self.num_terms), dtype=numpy.float32)\n\n with utils.open(self._topics_filename, 'rb') as topics_file:\n found_data = False\n\n for line in topics_file:\n # look for start of data\n if not found_data:\n if line.startswith(b'0 ') and b':' not in line:\n found_data = True\n else:\n continue\n\n fields = line.split()\n word_id = int(fields[0])\n\n # output contains entries for 2**b terms, where b was set\n # by the '-b' option, ignore anything past num_terms\n if word_id >= self.num_terms:\n break\n\n topics[:, word_id] = fields[1:]\n\n # normalise to probability distribution\n self._topics = topics / topics.sum(axis=1, keepdims=True)\n\n def _get_topics(self):\n \"\"\"Get topics matrix, load from file if necessary.\"\"\"\n if self._topics is None:\n self._load_vw_topics()\n return self._topics\n\n def _predict(self, chunk):\n \"\"\"Run given chunk of documents against currently trained model.\n\n Parameters\n ----------\n chunk : iterable of list of (int, int)\n Sequence of documents in BoW format.\n\n Returns\n -------\n predictions : ndarray\n Tuple of prediction matrix.\n vw_data : dict\n Vowpal Wabbit data.\n\n \"\"\"\n corpus_size = write_corpus_as_vw(chunk, self._corpus_filename)\n\n cmd = self._get_vw_predict_command(corpus_size)\n vw_data = _parse_vw_output(_run_vw_command(cmd))\n vw_data['corpus_size'] = corpus_size\n\n predictions = numpy.zeros((corpus_size, self.num_topics), dtype=numpy.float32)\n\n with utils.open(self._predict_filename, 'rb') as fhandle:\n for i, line in enumerate(fhandle):\n predictions[i, :] = line.split()\n\n predictions = predictions / predictions.sum(axis=1, keepdims=True)\n\n return predictions, vw_data\n\n def __getitem__(self, bow, eps=0.01):\n \"\"\"Convert document or corpus in BoW format to LDA vectors in BoW format\n\n Parameters\n ----------\n bow : {list of (int, int), iterable of list of (int, int)}\n Document or corpus in BoW format.\n eps : float\n Threshold value (all topics with probability < `eps` will be ignored.\n\n Returns\n -------\n list of (int, float)\n LDA vector for document **OR**\n list of list of (int, float)\n LDA vectors for corpus.\n\n \"\"\"\n is_corpus, dummy_corpus = utils.is_corpus(bow)\n if not is_corpus:\n bow = [bow]\n\n predictions = self._predict(bow)[0]\n\n topics = []\n for row in predictions:\n row_topics = []\n for topic_id, val in enumerate(row):\n if val > eps:\n row_topics.append((topic_id, val))\n topics.append(row_topics)\n\n return topics if is_corpus else topics[0]\n\n def _get_filename(self, name):\n \"\"\"Get path to given filename in temp directory.\n\n Parameters\n ----------\n name : str\n Name of the file.\n\n Returns\n -------\n str\n Path to a file.\n\n \"\"\"\n return os.path.join(self.tmp_dir, name)\n\n @property\n def _model_filename(self):\n \"\"\"Get path to file to write Vowpal Wabbit model to.\n\n Returns\n -------\n str\n Path to file to write Vowpal Wabbit model to.\n\n \"\"\"\n return self._get_filename('model.vw')\n\n @property\n def _cache_filename(self):\n \"\"\"Get path to file 
to write Vowpal Wabbit cache to.\n\n Returns\n -------\n str\n Path to file to write Vowpal Wabbit cache to.\n\n \"\"\"\n return self._get_filename('cache.vw')\n\n @property\n def _corpus_filename(self):\n \"\"\"Get path to file to write Vowpal Wabbit corpus to.\n\n Returns\n -------\n str\n Path to file to write Vowpal Wabbit corpus to.\n\n \"\"\"\n return self._get_filename('corpus.vw')\n\n @property\n def _topics_filename(self):\n \"\"\"Get path to file to write Vowpal Wabbit topics to.\n\n Returns\n -------\n str\n Path to file to write Vowpal Wabbit topics to.\n\n \"\"\"\n return self._get_filename('topics.vw')\n\n @property\n def _predict_filename(self):\n \"\"\"Get path to file to write Vowpal Wabbit predictions to.\n\n Returns\n -------\n str\n Path to file to write Vowpal Wabbit predictions to.\n\n \"\"\"\n return self._get_filename('predict.vw')\n\n def __str__(self):\n \"\"\"Get text representation of model.\"\"\"\n fields = ['num_terms', 'num_topics', 'chunksize', 'alpha', 'eta']\n kv = [\"{0}={1}\".format(field, getattr(self, field)) for field in fields]\n return \"{0}({1})\".format(self.__class__.__name__, ', '.join(kv))\n\n\ndef corpus_to_vw(corpus):\n \"\"\"Convert corpus to Vowpal Wabbit format.\n\n Parameters\n ----------\n corpus : iterable of list of (int, int)\n Collection of texts in BoW format.\n\n\n Notes\n -----\n\n Vowpal Wabbit format ::\n\n | 4:7 14:1 22:8 6:3\n | 14:22 22:4 0:1 1:3\n | 7:2 8:2\n\n\n Yields\n ------\n str\n Corpus in Vowpal Wabbit, line by line.\n\n \"\"\"\n for entries in corpus:\n line = ['|']\n for word_id, count in entries:\n line.append(\"{0}:{1}\".format(word_id, count))\n yield ' '.join(line)\n\n\ndef write_corpus_as_vw(corpus, filename):\n \"\"\"Covert `corpus` to Vowpal Wabbit format and save it to `filename`.\n\n Parameters\n ----------\n corpus : iterable of list of (int, int)\n Collection of texts in BoW format.\n filename : str\n Path to output file.\n\n Returns\n -------\n int\n Number of lines in `filename`.\n\n \"\"\"\n logger.debug(\"Writing corpus to: %s\", filename)\n\n corpus_size = 0\n with utils.open(filename, 'wb') as corpus_file:\n for line in corpus_to_vw(corpus):\n corpus_file.write(line.encode('utf-8') + b'\\n')\n corpus_size += 1\n\n return corpus_size\n\n\ndef _parse_vw_output(text):\n \"\"\"Get dict of useful fields from Vowpal Wabbit's output.\n\n Parameters\n ----------\n text : str\n Text from vw file.\n\n Returns\n -------\n dict of (str, float)\n Dictionary with field \"average_loss\", lower bound on mean per-word log-perplexity.\n\n \"\"\"\n data = {}\n for line in text.splitlines():\n if line.startswith('average loss'):\n data['average_loss'] = float(line.split('=')[1])\n break\n\n return data\n\n\ndef _run_vw_command(cmd):\n \"\"\"Execute given Vowpal Wabbit command, log stdout and stderr.\n\n Parameters\n ----------\n cmd : str\n Given Vowpal Wabbit command to execute.\n\n Returns\n -------\n str\n Stdout and stderr.\n\n Raises\n ------\n subprocess.CalledProcessError\n If something goes wrong.\n\n \"\"\"\n logger.info(\"Running Vowpal Wabbit command: %s\", ' '.join(cmd))\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = proc.communicate()[0].decode('utf-8')\n logger.debug(\"Vowpal Wabbit output: %s\", output)\n\n if proc.returncode != 0:\n raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd), output=output)\n\n return output\n\n\ndef vwmodel2ldamodel(vw_model, iterations=50):\n \"\"\"Convert 
:class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit` to\n :class:`~gensim.models.ldamodel.LdaModel`.\n\n This works by simply copying the training model weights (alpha, beta...) from a trained vwmodel\n into the gensim model.\n\n Parameters\n ----------\n vw_model : :class:`~gensim.models.wrappers.ldavowpalwabbit.LdaVowpalWabbit`\n Trained Vowpal Wabbit model.\n iterations : int\n Number of iterations to be used for inference of the new :class:`~gensim.models.ldamodel.LdaModel`.\n\n Returns\n -------\n :class:`~gensim.models.ldamodel.LdaModel`.\n Gensim native LDA.\n\n \"\"\"\n model_gensim = LdaModel(\n num_topics=vw_model.num_topics, id2word=vw_model.id2word, chunksize=vw_model.chunksize,\n passes=vw_model.passes, alpha=vw_model.alpha, eta=vw_model.eta, decay=vw_model.decay,\n offset=vw_model.offset, iterations=iterations, gamma_threshold=vw_model.gamma_threshold,\n dtype=numpy.float32\n )\n model_gensim.expElogbeta[:] = vw_model._get_topics()\n return model_gensim\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"This module contains functions to perform aggregation on a list of values obtained from the confirmation measure.\"\"\"\n\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef arithmetic_mean(confirmed_measures):\n \"\"\"\n Perform the arithmetic mean aggregation on the output obtained from\n the confirmation measure module.\n\n Parameters\n ----------\n confirmed_measures : list of float\n List of calculated confirmation measure on each set in the segmented topics.\n\n Returns\n -------\n `numpy.float`\n Arithmetic mean of all the values contained in confirmation measures.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.topic_coherence.aggregation import arithmetic_mean\n >>> arithmetic_mean([1.1, 2.2, 3.3, 4.4])\n 2.75\n\n \"\"\"\n return np.mean(confirmed_measures)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"This module contains classes for analyzing the texts of a corpus to accumulate\nstatistical information about word occurrences.\"\"\"\n\nimport itertools\nimport logging\nimport multiprocessing as mp\nimport sys\nfrom collections import Counter\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom six import iteritems, string_types\n\nfrom gensim import utils\nfrom gensim.models.word2vec import Word2Vec\n\nlogger = logging.getLogger(__name__)\n\n\ndef _ids_to_words(ids, dictionary):\n \"\"\"Convert an iterable of ids to their corresponding words using a dictionary.\n Abstract away the differences between the HashDictionary and the standard one.\n\n Parameters\n ----------\n ids: dict\n Dictionary of ids and their words.\n dictionary: :class:`~gensim.corpora.dictionary.Dictionary`\n Input gensim dictionary\n\n Returns\n -------\n set\n Corresponding words.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.corpora.dictionary import Dictionary\n >>> from gensim.topic_coherence import text_analysis\n >>>\n >>> dictionary = Dictionary()\n >>> ids = {1: 'fake', 4: 'cats'}\n >>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}\n >>>\n >>> text_analysis._ids_to_words(ids, dictionary)\n set(['cats', 'fake'])\n\n \"\"\"\n if not dictionary.id2token: # may not be initialized in the standard gensim.corpora.Dictionary\n setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})\n\n top_words = set()\n for word_id in ids:\n word = dictionary.id2token[word_id]\n if isinstance(word, set):\n top_words = top_words.union(word)\n else:\n top_words.add(word)\n\n return top_words\n\n\nclass BaseAnalyzer(object):\n \"\"\"Base class for corpus and text analyzers.\n\n Attributes\n ----------\n relevant_ids : dict\n Mapping\n _vocab_size : int\n Size of vocabulary.\n id2contiguous : dict\n Mapping word_id -> number.\n log_every : int\n Interval for logging.\n _num_docs : int\n Number of documents.\n\n \"\"\"\n def __init__(self, relevant_ids):\n \"\"\"\n\n Parameters\n ----------\n relevant_ids : dict\n Mapping\n\n Examples\n --------\n .. 
sourcecode:: pycon\n\n >>> from gensim.topic_coherence import text_analysis\n >>> ids = {1: 'fake', 4: 'cats'}\n >>> base = text_analysis.BaseAnalyzer(ids)\n >>> # should return {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0\n >>> print(base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs)\n {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0\n\n \"\"\"\n self.relevant_ids = relevant_ids\n self._vocab_size = len(self.relevant_ids)\n self.id2contiguous = {word_id: n for n, word_id in enumerate(self.relevant_ids)}\n self.log_every = 1000\n self._num_docs = 0\n\n @property\n def num_docs(self):\n return self._num_docs\n\n @num_docs.setter\n def num_docs(self, num):\n self._num_docs = num\n if self._num_docs % self.log_every == 0:\n logger.info(\n \"%s accumulated stats from %d documents\",\n self.__class__.__name__, self._num_docs)\n\n def analyze_text(self, text, doc_num=None):\n raise NotImplementedError(\"Base classes should implement analyze_text.\")\n\n def __getitem__(self, word_or_words):\n if isinstance(word_or_words, string_types) or not hasattr(word_or_words, '__iter__'):\n return self.get_occurrences(word_or_words)\n else:\n return self.get_co_occurrences(*word_or_words)\n\n def get_occurrences(self, word_id):\n \"\"\"Return number of docs the word occurs in, once `accumulate` has been called.\"\"\"\n return self._get_occurrences(self.id2contiguous[word_id])\n\n def _get_occurrences(self, word_id):\n raise NotImplementedError(\"Base classes should implement occurrences\")\n\n def get_co_occurrences(self, word_id1, word_id2):\n \"\"\"Return number of docs the words co-occur in, once `accumulate` has been called.\"\"\"\n return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2])\n\n def _get_co_occurrences(self, word_id1, word_id2):\n raise NotImplementedError(\"Base classes should implement co_occurrences\")\n\n\nclass UsesDictionary(BaseAnalyzer):\n \"\"\"A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts.\n The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id\n mapping.\n\n Attributes\n ----------\n relevant_words : set\n Set of words that occurrences should be accumulated for.\n dictionary : :class:`~gensim.corpora.dictionary.Dictionary`\n Dictionary based on text\n token2id : dict\n Mapping from :class:`~gensim.corpora.dictionary.Dictionary`\n\n \"\"\"\n def __init__(self, relevant_ids, dictionary):\n \"\"\"\n\n Parameters\n ----------\n relevant_ids : dict\n Mapping\n dictionary : :class:`~gensim.corpora.dictionary.Dictionary`\n Dictionary based on text\n\n Examples\n --------\n .. 
sourcecode:: pycon\n\n >>> from gensim.topic_coherence import text_analysis\n >>> from gensim.corpora.dictionary import Dictionary\n >>>\n >>> ids = {1: 'foo', 2: 'bar'}\n >>> dictionary = Dictionary([['foo', 'bar', 'baz'], ['foo', 'bar', 'bar', 'baz']])\n >>> udict = text_analysis.UsesDictionary(ids, dictionary)\n >>>\n >>> print(udict.relevant_words)\n set([u'foo', u'baz'])\n\n \"\"\"\n super(UsesDictionary, self).__init__(relevant_ids)\n self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)\n self.dictionary = dictionary\n self.token2id = dictionary.token2id\n\n def get_occurrences(self, word):\n \"\"\"Return number of docs the word occurs in, once `accumulate` has been called.\"\"\"\n try:\n word_id = self.token2id[word]\n except KeyError:\n word_id = word\n return self._get_occurrences(self.id2contiguous[word_id])\n\n def _word2_contiguous_id(self, word):\n try:\n word_id = self.token2id[word]\n except KeyError:\n word_id = word\n return self.id2contiguous[word_id]\n\n def get_co_occurrences(self, word1, word2):\n \"\"\"Return number of docs the words co-occur in, once `accumulate` has been called.\"\"\"\n word_id1 = self._word2_contiguous_id(word1)\n word_id2 = self._word2_contiguous_id(word2)\n return self._get_co_occurrences(word_id1, word_id2)\n\n\nclass InvertedIndexBased(BaseAnalyzer):\n \"\"\"Analyzer that builds up an inverted index to accumulate stats.\"\"\"\n\n def __init__(self, *args):\n \"\"\"\n\n Parameters\n ----------\n args : dict\n Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.topic_coherence import text_analysis\n >>>\n >>> ids = {1: 'fake', 4: 'cats'}\n >>> ininb = text_analysis.InvertedIndexBased(ids)\n >>>\n >>> print(ininb._inverted_index)\n [set([]) set([])]\n\n \"\"\"\n super(InvertedIndexBased, self).__init__(*args)\n self._inverted_index = np.array([set() for _ in range(self._vocab_size)])\n\n def _get_occurrences(self, word_id):\n return len(self._inverted_index[word_id])\n\n def _get_co_occurrences(self, word_id1, word_id2):\n s1 = self._inverted_index[word_id1]\n s2 = self._inverted_index[word_id2]\n return len(s1.intersection(s2))\n\n def index_to_dict(self):\n contiguous2id = {n: word_id for word_id, n in iteritems(self.id2contiguous)}\n return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)}\n\n\nclass CorpusAccumulator(InvertedIndexBased):\n \"\"\"Gather word occurrence stats from a corpus by iterating over its BoW representation.\"\"\"\n\n def analyze_text(self, text, doc_num=None):\n \"\"\"Build an inverted index from a sequence of corpus texts.\"\"\"\n doc_words = frozenset(x[0] for x in text)\n top_ids_in_doc = self.relevant_ids.intersection(doc_words)\n for word_id in top_ids_in_doc:\n self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)\n\n def accumulate(self, corpus):\n for document in corpus:\n self.analyze_text(document)\n self.num_docs += 1\n return self\n\n\nclass WindowedTextsAnalyzer(UsesDictionary):\n \"\"\"Gather some stats about relevant terms of a corpus by iterating over windows of texts.\"\"\"\n\n def __init__(self, relevant_ids, dictionary):\n \"\"\"\n\n Parameters\n ----------\n relevant_ids : set of int\n Relevant id\n dictionary : :class:`~gensim.corpora.dictionary.Dictionary`\n Dictionary instance with mappings for the relevant_ids.\n\n \"\"\"\n super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)\n self._none_token = self._vocab_size # see _iter_texts for 
use of none token\n\n def accumulate(self, texts, window_size):\n relevant_texts = self._iter_texts(texts)\n windows = utils.iter_windows(\n relevant_texts, window_size, ignore_below_size=False, include_doc_num=True)\n\n for doc_num, virtual_document in windows:\n self.analyze_text(virtual_document, doc_num)\n self.num_docs += 1\n return self\n\n def _iter_texts(self, texts):\n dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32\n for text in texts:\n if self.text_is_relevant(text):\n yield np.fromiter((\n self.id2contiguous[self.token2id[w]] if w in self.relevant_words\n else self._none_token\n for w in text), dtype=dtype, count=len(text))\n\n def text_is_relevant(self, text):\n \"\"\"Check if the text has any relevant words.\"\"\"\n for word in text:\n if word in self.relevant_words:\n return True\n return False\n\n\nclass InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased):\n \"\"\"Build an inverted index from a sequence of corpus texts.\"\"\"\n\n def analyze_text(self, window, doc_num=None):\n for word_id in window:\n if word_id is not self._none_token:\n self._inverted_index[word_id].add(self._num_docs)\n\n\nclass WordOccurrenceAccumulator(WindowedTextsAnalyzer):\n \"\"\"Accumulate word occurrences and co-occurrences from a sequence of corpus texts.\"\"\"\n\n def __init__(self, *args):\n super(WordOccurrenceAccumulator, self).__init__(*args)\n self._occurrences = np.zeros(self._vocab_size, dtype='uint32')\n self._co_occurrences = sps.lil_matrix((self._vocab_size, self._vocab_size), dtype='uint32')\n\n self._uniq_words = np.zeros((self._vocab_size + 1,), dtype=bool) # add 1 for none token\n self._counter = Counter()\n\n def __str__(self):\n return self.__class__.__name__\n\n def accumulate(self, texts, window_size):\n self._co_occurrences = self._co_occurrences.tolil()\n self.partial_accumulate(texts, window_size)\n self._symmetrize()\n return self\n\n def partial_accumulate(self, texts, window_size):\n \"\"\"Meant to be called several times to accumulate partial results.\n\n Notes\n -----\n The final accumulation should be performed with the `accumulate` method as opposed to this one.\n This method does not ensure the co-occurrence matrix is in lil format and does not\n symmetrize it after accumulation.\n\n \"\"\"\n self._current_doc_num = -1\n self._token_at_edge = None\n self._counter.clear()\n\n super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)\n for combo, count in iteritems(self._counter):\n self._co_occurrences[combo] += count\n\n return self\n\n def analyze_text(self, window, doc_num=None):\n self._slide_window(window, doc_num)\n mask = self._uniq_words[:-1] # to exclude none token\n if mask.any():\n self._occurrences[mask] += 1\n self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))\n\n def _slide_window(self, window, doc_num):\n if doc_num != self._current_doc_num:\n self._uniq_words[:] = False\n self._uniq_words[np.unique(window)] = True\n self._current_doc_num = doc_num\n else:\n self._uniq_words[self._token_at_edge] = False\n self._uniq_words[window[-1]] = True\n\n self._token_at_edge = window[0]\n\n def _symmetrize(self):\n \"\"\"Word pairs may have been encountered in (i, j) and (j, i) order.\n\n Notes\n -----\n Rather than enforcing a particular ordering during the update process,\n we choose to symmetrize the co-occurrence matrix after accumulation has completed.\n\n \"\"\"\n co_occ = self._co_occurrences\n co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts\n 
self._co_occurrences = \\\n co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')\n\n def _get_occurrences(self, word_id):\n return self._occurrences[word_id]\n\n def _get_co_occurrences(self, word_id1, word_id2):\n return self._co_occurrences[word_id1, word_id2]\n\n def merge(self, other):\n self._occurrences += other._occurrences\n self._co_occurrences += other._co_occurrences\n self._num_docs += other._num_docs\n\n\nclass PatchedWordOccurrenceAccumulator(WordOccurrenceAccumulator):\n \"\"\"Monkey patched for multiprocessing worker usage, to move some of the logic to the master process.\"\"\"\n def _iter_texts(self, texts):\n return texts # master process will handle this\n\n\nclass ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer):\n \"\"\"Accumulate word occurrences in parallel.\n\n Attributes\n ----------\n processes : int\n Number of processes to use; must be at least two.\n args :\n Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).\n kwargs :\n Can include `batch_size`, which is the number of docs to send to a worker at a time.\n If not included, it defaults to 64.\n \"\"\"\n\n def __init__(self, processes, *args, **kwargs):\n super(ParallelWordOccurrenceAccumulator, self).__init__(*args)\n if processes < 2:\n raise ValueError(\n \"Must have at least 2 processes to run in parallel; got %d\" % processes)\n self.processes = processes\n self.batch_size = kwargs.get('batch_size', 64)\n\n def __str__(self):\n return \"%s(processes=%s, batch_size=%s)\" % (\n self.__class__.__name__, self.processes, self.batch_size)\n\n def accumulate(self, texts, window_size):\n workers, input_q, output_q = self.start_workers(window_size)\n try:\n self.queue_all_texts(input_q, texts, window_size)\n interrupted = False\n except KeyboardInterrupt:\n logger.warn(\"stats accumulation interrupted; <= %d documents processed\", self._num_docs)\n interrupted = True\n\n accumulators = self.terminate_workers(input_q, output_q, workers, interrupted)\n return self.merge_accumulators(accumulators)\n\n def start_workers(self, window_size):\n \"\"\"Set up an input and output queue and start processes for each worker.\n\n Notes\n -----\n The input queue is used to transmit batches of documents to the workers.\n The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.\n\n Parameters\n ----------\n window_size : int\n\n Returns\n -------\n (list of lists)\n Tuple of (list of workers, input queue, output queue).\n \"\"\"\n input_q = mp.Queue(maxsize=self.processes)\n output_q = mp.Queue()\n workers = []\n for _ in range(self.processes):\n accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)\n worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)\n worker.start()\n workers.append(worker)\n\n return workers, input_q, output_q\n\n def yield_batches(self, texts):\n \"\"\"Return a generator over the given texts that yields batches of `batch_size` texts at a time.\"\"\"\n batch = []\n for text in self._iter_texts(texts):\n batch.append(text)\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch\n\n def queue_all_texts(self, q, texts, window_size):\n \"\"\"Sequentially place batches of texts on the given queue until `texts` is consumed.\n The texts are filtered so that only those with at least one relevant token are queued.\n \"\"\"\n for batch_num, batch in enumerate(self.yield_batches(texts)):\n q.put(batch, block=True)\n before = self._num_docs / 
self.log_every\n self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)\n if before < (self._num_docs / self.log_every):\n logger.info(\n \"%d batches submitted to accumulate stats from %d documents (%d virtual)\",\n (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)\n\n def terminate_workers(self, input_q, output_q, workers, interrupted=False):\n \"\"\"Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each.\n\n Warnings\n --------\n We do not use join here because it has been shown to have some issues\n in Python 2.7 (and even in later versions). This method also closes both the input and output queue.\n If `interrupted` is False (normal execution), a None value is placed on the input queue for\n each worker. The workers are looking for this sentinel value and interpret it as a signal to\n terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are\n programmed to recover from this and continue on to transmit their results before terminating.\n So in this instance, the sentinel values are not queued, but the rest of the execution\n continues as usual.\n\n \"\"\"\n if not interrupted:\n for _ in workers:\n input_q.put(None, block=True)\n\n accumulators = []\n while len(accumulators) != len(workers):\n accumulators.append(output_q.get())\n logger.info(\"%d accumulators retrieved from output queue\", len(accumulators))\n\n for worker in workers:\n if worker.is_alive():\n worker.terminate()\n\n input_q.close()\n output_q.close()\n return accumulators\n\n def merge_accumulators(self, accumulators):\n \"\"\"Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all\n occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed\n by all the individual accumulators.\n\n \"\"\"\n accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)\n for other_accumulator in accumulators:\n accumulator.merge(other_accumulator)\n # Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized.\n # This is by design, to avoid unnecessary matrix additions/conversions during accumulation.\n accumulator._symmetrize()\n logger.info(\"accumulated word occurrence stats for %d virtual documents\", accumulator.num_docs)\n return accumulator\n\n\nclass AccumulatingWorker(mp.Process):\n \"\"\"Accumulate stats from texts fed in from queue.\"\"\"\n\n def __init__(self, input_q, output_q, accumulator, window_size):\n super(AccumulatingWorker, self).__init__()\n self.input_q = input_q\n self.output_q = output_q\n self.accumulator = accumulator\n self.accumulator.log_every = sys.maxsize # avoid logging in workers\n self.window_size = window_size\n\n def run(self):\n try:\n self._run()\n except KeyboardInterrupt:\n logger.info(\n \"%s interrupted after processing %d documents\",\n self.__class__.__name__, self.accumulator.num_docs)\n except Exception:\n logger.exception(\"worker encountered unexpected exception\")\n finally:\n self.reply_to_master()\n\n def _run(self):\n batch_num = -1\n n_docs = 0\n while True:\n batch_num += 1\n docs = self.input_q.get(block=True)\n if docs is None: # sentinel value\n logger.debug(\"observed sentinel value; terminating\")\n break\n\n self.accumulator.partial_accumulate(docs, self.window_size)\n n_docs += len(docs)\n logger.debug(\n \"completed batch %d; %d documents processed (%d virtual)\",\n batch_num, n_docs, self.accumulator.num_docs)\n\n logger.debug(\n \"finished all batches; %d 
documents processed (%d virtual)\",\n n_docs, self.accumulator.num_docs)\n\n def reply_to_master(self):\n logger.info(\"serializing accumulator to return to master...\")\n self.output_q.put(self.accumulator, block=False)\n logger.info(\"accumulator serialized\")\n\n\nclass WordVectorsAccumulator(UsesDictionary):\n \"\"\"Accumulate context vectors for words using word vector embeddings.\n\n Attributes\n ----------\n model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`)\n If None, a new Word2Vec model is trained on the given text corpus. Otherwise,\n it should be a pre-trained Word2Vec context vectors.\n model_kwargs:\n if model is None, these keyword arguments will be passed through to the Word2Vec constructor.\n \"\"\"\n\n def __init__(self, relevant_ids, dictionary, model=None, **model_kwargs):\n super(WordVectorsAccumulator, self).__init__(relevant_ids, dictionary)\n self.model = model\n self.model_kwargs = model_kwargs\n\n def not_in_vocab(self, words):\n uniq_words = set(utils.flatten(words))\n return set(word for word in uniq_words if word not in self.model.vocab)\n\n def get_occurrences(self, word):\n \"\"\"Return number of docs the word occurs in, once `accumulate` has been called.\"\"\"\n try:\n self.token2id[word] # is this a token or an id?\n except KeyError:\n word = self.dictionary.id2token[word]\n return self.model.vocab[word].count\n\n def get_co_occurrences(self, word1, word2):\n \"\"\"Return number of docs the words co-occur in, once `accumulate` has been called.\"\"\"\n raise NotImplementedError(\"Word2Vec model does not support co-occurrence counting\")\n\n def accumulate(self, texts, window_size):\n if self.model is not None:\n logger.debug(\"model is already trained; no accumulation necessary\")\n return self\n\n kwargs = self.model_kwargs.copy()\n if window_size is not None:\n kwargs['window'] = window_size\n kwargs['min_count'] = kwargs.get('min_count', 1)\n kwargs['sg'] = kwargs.get('sg', 1)\n kwargs['hs'] = kwargs.get('hw', 0)\n\n self.model = Word2Vec(**kwargs)\n self.model.build_vocab(texts)\n self.model.train(texts, total_examples=self.model.corpus_count, epochs=self.model.epochs)\n self.model = self.model.wv # retain KeyedVectors\n return self\n\n def ids_similarity(self, ids1, ids2):\n words1 = self._words_with_embeddings(ids1)\n words2 = self._words_with_embeddings(ids2)\n return self.model.n_similarity(words1, words2)\n\n def _words_with_embeddings(self, ids):\n if not hasattr(ids, '__iter__'):\n ids = [ids]\n\n words = [self.dictionary.id2token[word_id] for word_id in ids]\n return [word for word in words if word in self.model.vocab]\n"
] |
[
[
"numpy.exp2",
"numpy.zeros"
],
[
"numpy.mean"
],
[
"numpy.nonzero",
"numpy.unique",
"numpy.iinfo",
"numpy.zeros",
"scipy.sparse.lil_matrix"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
eaaskt/nlu
|
[
"77382be572ce59f15d8ea9c5cd653615c39891d1"
] |
[
"capsnet-arch/test.py"
] |
[
"import math\nimport os\n\nimport data_loader\nimport model_s2i\nimport flags\nimport util\nimport numpy as np\nimport tensorflow as tf\nfrom seqeval.metrics import accuracy_score\nfrom seqeval.metrics import f1_score\nfrom seqeval.metrics import precision_score\nfrom seqeval.metrics import recall_score\nfrom sklearn.metrics import accuracy_score as scikit_accuracy\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score as scikit_f1\nimport matplotlib.pyplot as plt\nimport html_report_generator\nimport conf_levels_generator\n\n\nINTENTS_ORDER = [\n 'aprindeLumina',\n 'stingeLumina',\n 'cresteIntensitateLumina',\n 'scadeIntensitateLumina',\n 'cresteTemperatura',\n 'scadeTemperatura',\n 'seteazaTemperatura',\n 'cresteIntensitateMuzica',\n 'scadeIntensitateMuzica',\n 'puneMuzica',\n 'opresteMuzica',\n 'pornesteTV',\n 'opresteTV',\n 'schimbaCanalTV',\n]\n\nINTENT_CLASSES = {\n 'aprindeLumina': 'lumina',\n 'cresteIntensitateLumina': 'lumina',\n 'cresteIntensitateMuzica': 'media',\n 'cresteTemperatura': 'temperatura',\n 'opresteMuzica': 'media',\n 'opresteTV': 'media',\n 'pornesteTV': 'media',\n 'puneMuzica': 'media',\n 'scadeIntensitateLumina': 'lumina',\n 'scadeIntensitateMuzica': 'media',\n 'scadeTemperatura': 'temperatura',\n 'schimbaCanalTV': 'media',\n 'schimbaIntensitateMuzica': 'media',\n 'seteazaTemperatura': 'temperatura',\n 'stingeLumina': 'lumina',\n 'x': 'x'\n}\n\nINTENT_TRANSLATIONS = {\n 'aprindeLumina': 'TurnOnLight',\n 'cresteIntensitateLumina': 'IncreaseLightIntensity',\n 'cresteIntensitateMuzica': 'IncreaseVolume',\n 'cresteTemperatura': 'IncreaseTemperature',\n 'opresteMuzica': 'StopMusic',\n 'opresteTV': 'StopTV',\n 'pornesteTV': 'StartTV',\n 'puneMuzica': 'PlayMusic',\n 'scadeIntensitateLumina': 'DecreaseLightIntensity',\n 'scadeIntensitateMuzica': 'DecreaseVolume',\n 'scadeTemperatura': 'DecreaseTemperature',\n 'schimbaCanalTV': 'ChangeTVChannel',\n 'schimbaIntensitateMuzica': 'ChangeVolume',\n 'seteazaTemperatura': 'SetTemperature',\n 'stingeLumina': 'TurnOffLight',\n 'x': 'x'\n}\n\nINTENT_CLASS_TRANSLATIONS = {\n 'lumina': 'light',\n 'temperatura': 'temperature',\n 'media': 'media'\n}\n\n\ndef plot_confusion_matrix(y_true, y_pred, labels,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues,\n numbers=False):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n Args:\n y_true: true slot labels\n y_pred: predicted slot labels\n labels: list of class labels, will be places on the axes\n title: title of plot\n cmap: colormap\n numbers: True if numbers should be shown inside the confusion matrix, if many classes it is recommended\n that this is set to False\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred, labels=labels)\n # Only use the labels that appear in the data\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=labels, yticklabels=labels,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation='vertical', ha='right',\n rotation_mode='anchor')\n plt.tight_layout()\n\n # Loop over data dimensions and create text annotations.\n if numbers:\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha='center', va='center',\n color='white' if cm[i, j] > thresh else 'black')\n # fig.tight_layout()\n return ax\n\n\ndef eval_seq_scores(y_true, y_pred):\n \"\"\" Performs sequence evaluation on slot labels\n Args:\n y_true: true slot labels\n y_pred: predicted slot labels\n Returns:\n scores: dict containing the evaluation scores: f1, accuracy, precision, recall\n \"\"\"\n scores = dict()\n scores['f1'] = f1_score(y_true, y_pred)\n scores['accuracy'] = accuracy_score(y_true, y_pred)\n scores['precision'] = precision_score(y_true, y_pred)\n scores['recall'] = recall_score(y_true, y_pred)\n return scores\n\n\ndef evaluate_test(capsnet, data, FLAGS, sess, log_errs=False, epoch=0, translate_eng=False):\n \"\"\" Evaluates the model on the test set\n Args:\n capsnet: CapsNet model\n data: test data dict\n FLAGS: TensorFlow flags\n sess: TensorFlow session\n log_errs: if True, the intent and slot errors will be logged to a error file and confusion matrices will\n be displayed\n epoch: current epoch\n translate_eng: whether the plots should show the English translations of the intents or not\n Returns:\n f_score: intent detection F1 score\n scores['f1']: slot filling F1 score\n \"\"\"\n x_te = data['x_te']\n sentences_length_te = data['sentences_len_te']\n y_intents_te = data['y_intents_te']\n y_slots_te = data['y_slots_te']\n slots_dict = data['slots_dict']\n intents_dict = data['intents_dict']\n one_hot_intents = data['one_hot_intents_te']\n one_hot_slots = data['one_hot_slots_te']\n if log_errs:\n x_text_te = data['x_text_te']\n\n total_intent_pred = []\n total_intent_conf_level = []\n total_slots_pred = []\n total_attention = []\n\n num_samples = len(x_te)\n batch_size = FLAGS.batch_size\n test_batch = int(math.ceil(num_samples / float(batch_size)))\n for i in range(test_batch):\n begin_index = i * batch_size\n end_index = min((i + 1) * batch_size, num_samples)\n batch_te = x_te[begin_index: end_index]\n batch_sentences_len = sentences_length_te[begin_index: end_index]\n batch_intents_one_hot = one_hot_intents[begin_index: end_index]\n batch_slots_one_hot = one_hot_slots[begin_index: end_index]\n batch_size = end_index - begin_index\n\n mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)\n\n if FLAGS.use_attention:\n [intent_outputs, slots_outputs, slot_weights_c, attention] = sess.run([\n capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c, capsnet.attention],\n feed_dict={capsnet.input_x: batch_te, capsnet.sentences_length: batch_sentences_len,\n capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,\n capsnet.keep_prob: 1.0,\n capsnet.attention_mask: mask})\n # attention is shaped ?, 5, 12\n total_attention += np.ndarray.tolist(attention)\n else:\n [intent_outputs, slots_outputs, slot_weights_c] = sess.run([\n capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c],\n feed_dict={capsnet.input_x: 
batch_te, capsnet.sentences_length: batch_sentences_len,\n capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,\n capsnet.keep_prob: 1.0})\n\n intent_outputs_reduced_dim = tf.squeeze(intent_outputs, axis=[1, 4])\n intent_outputs_norm = util.safe_norm(intent_outputs_reduced_dim)\n # intent_outputs_norm = tf.norm(intent_outputs_reduced_dim, ord='euclidean', axis=-1)\n slot_weights_c_reduced_dim = tf.squeeze(slot_weights_c, axis=[3, 4])\n\n [intent_predictions, slot_predictions] = sess.run([intent_outputs_norm, slot_weights_c_reduced_dim])\n\n te_batch_intent_pred = np.argmax(intent_predictions, axis=1)\n total_intent_conf_level += np.ndarray.tolist(intent_predictions)\n total_intent_pred += np.ndarray.tolist(te_batch_intent_pred)\n\n te_batch_slots_pred = np.argmax(slot_predictions, axis=2)\n total_slots_pred += (np.ndarray.tolist(te_batch_slots_pred))\n\n print(' TEST SET PERFORMANCE ')\n print('Intent detection')\n intents_acc = scikit_accuracy(y_intents_te, total_intent_pred)\n y_intents_true = np.ndarray.tolist(y_intents_te)\n y_intent_labels_true = [intents_dict[i] for i in y_intents_true]\n y_intent_labels_pred = [intents_dict[i] for i in total_intent_pred]\n intent_confidence_tuples = [[(intents_dict[x], conf[x]) for x in range(len(conf))] for conf in total_intent_conf_level]\n [conf.sort(key=lambda tup: tup[1], reverse=True) for conf in intent_confidence_tuples]\n intents_set = set(y_intent_labels_true)\n intents = [x for x in INTENTS_ORDER if x in intents_set]\n f_score = scikit_f1(y_intent_labels_true, y_intent_labels_pred, average='micro', labels=intents)\n print(classification_report(y_intent_labels_true, y_intent_labels_pred, digits=4))\n print('Intent accuracy %lf' % intents_acc)\n print('F score %lf' % f_score)\n\n y_slots_te_true = np.ndarray.tolist(y_slots_te)\n y_slot_labels_true = [[slots_dict[slot_idx] for slot_idx in ex] for ex in y_slots_te_true]\n y_slot_labels_pred = [[slots_dict[slot_idx] for slot_idx in ex] for ex in total_slots_pred]\n scores = eval_seq_scores(y_slot_labels_true, y_slot_labels_pred)\n print('Slot filling')\n print('F1 score: %lf' % scores['f1'])\n print('Accuracy: %lf' % scores['accuracy'])\n print('Precision: %lf' % scores['precision'])\n print('Recall: %lf' % scores['recall'])\n\n # Write errors to error log\n if log_errs:\n if FLAGS.scenario_num != '':\n errors_dir = FLAGS.errors_dir + 'scenario' + FLAGS.scenario_num + '/'\n if not os.path.exists(errors_dir):\n os.makedirs(errors_dir)\n else:\n errors_dir = FLAGS.errors_dir\n\n if translate_eng:\n y_intent_labels_true_conf = [INTENT_TRANSLATIONS[x] for x in y_intent_labels_true]\n y_intent_labels_pred_conf = [INTENT_TRANSLATIONS[x] for x in y_intent_labels_pred]\n intents_conf = [INTENT_TRANSLATIONS[x] for x in intents]\n else:\n y_intent_labels_true_conf = y_intent_labels_true\n y_intent_labels_pred_conf = y_intent_labels_pred\n intents_conf = intents\n\n plot_confusion_matrix(y_intent_labels_true_conf, y_intent_labels_pred_conf, labels=intents_conf,\n title='Confusion matrix', normalize=True, numbers=False)\n if translate_eng:\n fig_title = 'confusion_mats/conf_mat_eng_{}.png'.format(FLAGS.scenario_num)\n else:\n fig_title = 'confusion_mats/conf_mat_{}.png'.format(FLAGS.scenario_num)\n plt.savefig(fig_title)\n # plt.show()\n\n # For super-class confusion mat\n if 'x' in y_intent_labels_true or 'x' in y_intent_labels_pred:\n intent_classes_labels = ['lumina', 'temperatura', 'media', 'x']\n else:\n intent_classes_labels = ['lumina', 'temperatura', 
'media']\n if translate_eng:\n intent_classes_true = [INTENT_CLASS_TRANSLATIONS[INTENT_CLASSES[intent]] for intent in y_intent_labels_true]\n intent_classes_pred = [INTENT_CLASS_TRANSLATIONS[INTENT_CLASSES[intent]] for intent in y_intent_labels_pred]\n intent_classes_labels = [INTENT_CLASS_TRANSLATIONS[x] for x in intent_classes_labels]\n else:\n intent_classes_true = [INTENT_CLASSES[intent] for intent in y_intent_labels_true]\n intent_classes_pred = [INTENT_CLASSES[intent] for intent in y_intent_labels_pred]\n plot_confusion_matrix(intent_classes_true, intent_classes_pred, labels=intent_classes_labels,\n title='Confusion matrix', normalize=True, numbers=True)\n # plt.show()\n if translate_eng:\n superclass_fig_title = 'confusion_mats/conf_mat_eng_{}_superclasses.png'.format(FLAGS.scenario_num)\n else:\n superclass_fig_title = 'confusion_mats/conf_mat_{}_superclasses.png'.format(FLAGS.scenario_num)\n plt.savefig(superclass_fig_title)\n incorrect_intents = {}\n i = 0\n for t, pr in zip(y_intent_labels_true, y_intent_labels_pred):\n if t != pr:\n if t not in incorrect_intents:\n incorrect_intents[t] = []\n incorrect_intents[t].append((' '.join(x_text_te[i]), pr))\n i += 1\n\n with open(os.path.join(errors_dir, 'errors.txt'), 'w', encoding='utf-8') as f:\n f.write('INTENT ERRORS\\n')\n for k, v in incorrect_intents.items():\n f.write(k + '\\n')\n for intent in v:\n f.write('{} -> {}\\n'.format(intent[0], intent[1]))\n f.write('\\n')\n\n # View incorrect slot sequences\n f.write('SLOT ERRORS\\n')\n i = 0\n for v, pr in zip(y_slot_labels_true, y_slot_labels_pred):\n if v != pr:\n f.write(' '.join(x_text_te[i]) + '\\n')\n f.write(str(v) + '\\n')\n f.write(str(pr) + '\\n')\n f.write('\\n')\n i += 1\n\n conf_levels_generator.generate_conf_reports(FLAGS, y_intent_labels_true, y_intent_labels_pred,\n y_slot_labels_true, y_slot_labels_pred,\n x_text_te, intent_confidence_tuples)\n if FLAGS.use_attention:\n html_report_generator.generateHtmlReport(FLAGS, y_intent_labels_true, y_intent_labels_pred,\n y_slot_labels_true, y_slot_labels_pred,\n x_text_te, total_attention, intent_confidence_tuples)\n\n return f_score, scores['f1']\n\n\ndef test(model, data, FLAGS):\n\n # Testing\n test_data = dict()\n test_data['x_te'] = data['x_te']\n test_data['x_text_te'] = data['x_text_te']\n test_data['y_intents_te'] = data['y_intents_te']\n test_data['y_slots_te'] = data['y_slots_te']\n test_data['sentences_len_te'] = data['sentences_len_te']\n test_data['slots_dict'] = data['slots_dict']\n test_data['intents_dict'] = data['intents_dict']\n test_data['one_hot_intents_te'] = data['encoded_intents_te']\n test_data['one_hot_slots_te'] = data['encoded_slots_te']\n\n tf.reset_default_graph()\n config = tf.ConfigProto()\n with tf.Session(config=config) as sess:\n # Instantiate Model\n capsnet = model(FLAGS)\n if FLAGS.scenario_num != '':\n ckpt_dir = FLAGS.ckpt_dir + 'scenario' + FLAGS.scenario_num + '/'\n else:\n ckpt_dir = FLAGS.ckpt_dir\n if os.path.exists(ckpt_dir):\n print('Restoring Variables from Checkpoint for testing')\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))\n intent_f_score, slot_f_score = evaluate_test(capsnet, test_data, FLAGS, sess,\n log_errs=True, translate_eng=False)\n print('Intent F1: %lf' % intent_f_score)\n print('Slot F1: %lf' % slot_f_score)\n return intent_f_score, slot_f_score\n else:\n print('No trained model exists in checkpoint dir!')\n\n\ndef main():\n word2vec_path = '../../romanian_word_vecs/cleaned-vectors-diacritice-cc-100.vec'\n\n 
training_data_path = '../data-capsnets/diacritics/scenario33/train.txt'\n test_data_path = '../data-capsnets/diacritics/scenario33/test.txt'\n\n FLAGS = flags.define_app_flags('33-vec-fasttext-100')\n\n # Load data\n print('------------------load word2vec begin-------------------')\n w2v = data_loader.load_w2v(word2vec_path)\n print('------------------load word2vec end---------------------')\n\n # When using the new 100-dim word vec model (conll, not fasttext), the data should all be in lowercase\n isLowercase = False\n data = data_loader.read_datasets(w2v, training_data_path, test_data_path, test=True, lowercase=isLowercase)\n flags.set_data_flags(data)\n\n test(model_s2i.SemCapsNet, data, FLAGS)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.train.Saver",
"matplotlib.pyplot.tight_layout",
"tensorflow.train.latest_checkpoint",
"numpy.arange",
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix",
"tensorflow.squeeze",
"tensorflow.ConfigProto",
"matplotlib.pyplot.savefig",
"tensorflow.reset_default_graph",
"numpy.argmax",
"tensorflow.Session",
"numpy.ndarray.tolist",
"sklearn.metrics.f1_score",
"sklearn.metrics.classification_report",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
PACarniglia/Stone-Soup
|
[
"f190be0bb577650db26cb10d4c86b5c790224d4d"
] |
[
"stonesoup/mixturereducer/gaussianmixture.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom scipy.spatial import distance as dist\nimport uuid\n\nfrom ..base import Property\nfrom .base import MixtureReducer\nfrom ..types.state import TaggedWeightedGaussianState, WeightedGaussianState\nfrom operator import attrgetter\n\n\nclass GaussianMixtureReducer(MixtureReducer):\n \"\"\"\n Gaussian Mixture Reducer class:\n\n Reduces the number of components in a Gaussian mixture to increase\n computational efficiency. See [1] for details.\n Achieved in two ways: pruning and merging.\n Pruning is the act of removing low weight components from the mixture\n that fall below a pruning threshold.\n Merging is the act of combining similar components in the mixture\n that fall with a distance threshold into a single component.\n\n References\n ----------\n [1] B.-N. Vo and W.-K. Ma, “The Gaussian Mixture Probability Hypothesis\n Density Filter,” Signal Processing,IEEE Transactions on, vol. 54, no. 11,\n pp. 4091–4104, 2006..\n \"\"\"\n\n prune_threshold: float = Property(default=1e-9, doc=\"Threshold for pruning.\")\n merge_threshold: float = Property(default=16, doc='Threshold for merging')\n merging: bool = Property(default=True, doc='Flag for merging')\n pruning: bool = Property(default=True, doc='Flag for pruning')\n\n def reduce(self, components_list):\n \"\"\"\n Reduce the components of Gaussian Mixture :class:`list`\n through pruning and merging\n\n Parameters\n ----------\n components_list : :class:`~.list`\n The components of Gaussian Mixture\n\n Returns\n -------\n :class:`~.list`\n Reduced components\n\n \"\"\"\n if len(components_list) > 0:\n if self.pruning:\n components_list = self.prune(components_list)\n if len(components_list) > 1 & self.merging:\n components_list = self.merge(components_list)\n return components_list\n\n def prune(self, components_list):\n \"\"\"\n Pruning is the act of removing low weight components from the mixture\n that fall below a pruning threshold :attr:`prune_threshold`.\n\n Parameters\n ----------\n components_list : :class:`~.list`\n The components of Gaussian Mixture to be pruned\n\n Returns\n -------\n remaining_components : :class:`~.GaussianMixtureState`\n Components that remain after pruning\n\n \"\"\"\n # Prune low weight components\n pruned_weight_sum = 0\n for component in components_list:\n if component.weight < self.prune_threshold:\n pruned_weight_sum += component.weight\n\n remaining_components = [component for component in components_list\n if component.weight > self.prune_threshold]\n # Distribute pruned weights across remaining components\n for component in remaining_components:\n component.weight += \\\n pruned_weight_sum / len(remaining_components)\n return remaining_components\n\n def merge_components(self, component_1, component_2):\n \"\"\"\n Merge two similar components\n\n Parameters\n ----------\n component_1 : :class:`~.WeightedGaussianState`\n First component to be merged\n component_2 : :class:`~.WeightedGaussianState`\n Second component to be merged\n\n Returns\n -------\n merged_component : :class:`~.WeightedGaussianState`\n Merged Gaussian component\n\n \"\"\"\n weight_sum = component_1.weight+component_2.weight\n w1 = component_1.weight / weight_sum\n w2 = component_2.weight / weight_sum\n merged_mean = component_1.mean*w1 + component_2.mean*w2\n merged_covar = component_1.covar*w1 + component_2.covar*w2\n mu1_minus_m2 = component_1.mean - component_2.mean\n merged_covar = merged_covar + \\\n mu1_minus_m2*mu1_minus_m2.T*w1*w2\n merged_weight = component_1.weight + component_2.weight\n if 
merged_weight > 1:\n merged_weight = 1\n if isinstance(component_1, TaggedWeightedGaussianState):\n merged_component = TaggedWeightedGaussianState(\n state_vector=merged_mean,\n covar=merged_covar,\n weight=merged_weight,\n tag=component_1.tag,\n timestamp=component_1.timestamp\n )\n elif isinstance(component_1, WeightedGaussianState):\n merged_component = WeightedGaussianState(\n state_vector=merged_mean,\n covar=merged_covar,\n weight=merged_weight,\n timestamp=component_1.timestamp\n )\n\n return merged_component\n\n def merge(self, components_list):\n \"\"\"\n Merging is the act of combining similar components in the mixture\n that fall with a distance threshold :attr:`merge_threshold` into\n a single component.\n\n Parameters\n ----------\n components_list : :class:`~.list`\n Components of the Gaussian Mixture to be merged\n\n Returns\n -------\n :class:`~.list`\n Merged components\n\n \"\"\"\n # Sort components by weight\n remaining_components = sorted(\n components_list, key=attrgetter('weight'))\n\n merged_components = []\n final_merged_components = []\n while remaining_components:\n # Get highest weighted component\n best_component = remaining_components.pop()\n # Check for similar components\n # (modifying list in loop, so copy used)\n for component in remaining_components.copy():\n # Calculate distance between component and best component\n distance = dist.mahalanobis(\n best_component.mean, component.mean, best_component.covar)\n # Merge if similar\n if distance < self.merge_threshold:\n remaining_components.remove(component)\n best_component = self.merge_components(\n best_component, component\n )\n # Add potentially merged component to new mixture\n merged_components.append(best_component)\n if all(isinstance(component, TaggedWeightedGaussianState)\n for component in merged_components):\n # Check for duplicate tags\n components_tags = set(component.tag for component in merged_components)\n if len(components_tags) != len(merged_components):\n # There are duplicatze tags so assign\n # new tags to the lower weighted shared ones\n for shared_tag in components_tags:\n shared_components = sorted(\n (component for component in merged_components\n if component.tag == shared_tag),\n key=attrgetter('weight'),\n reverse=True)\n final_merged_components.append(shared_components[0])\n for component in shared_components[1:]:\n # Assign a new uuid\n component.tag = str(uuid.uuid4())\n final_merged_components.append(component)\n else:\n # No duplicates\n final_merged_components.extend(merged_components)\n else:\n # Just weighted components (no tags)\n final_merged_components.extend(merged_components)\n # Assign merged components to the mixture\n return final_merged_components\n"
] |
[
[
"scipy.spatial.distance.mahalanobis"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
CavitationDetection/XGBoost_ASFE
|
[
"a55bbf2d91b1b30f116bed9eddce62d78bf89b83"
] |
[
"feature_extration.py"
] |
[
"import os \nimport scipy.io as scio\nimport math\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport csv\n'''\n1. Central Trend Statistics:\n --- mean \n --- median\n --- low quartile\n --- upper quartile\n2. Dispersion Degree Statistics:\n --- minimum\n --- maximum\n --- inter quartile range\n --- standard deviation\n --- root mean square\n --- square root amplitude\n3. Distribution Shape Statistics\n --- kurtosis\n --- skewness\n --- shape factor\n --- clearance shape\n --- crest factor\n'''\n\npath = \"/TestSplitFFT\"\nfile_name = os.listdir(path)\nfile_name.sort(key=lambda x:int(x.split('.')[0]))\n\nfeature_list = []\nfor info in file_name:\n domain = os.path.abspath(path)\n info = os.path.join(domain,info)\n data = pd.read_csv(info,header=None)\n\n # central trend statistics\n data_mean = np.mean(data)\n data_median = data.median()\n data_quartile_025 = data.quantile(0.25)\n data_quartile_075 = data.quantile(0.75)\n\n # dispersion degree statistics\n data_Minimum = np.min(data)\n data_Maximum = np.max(data)\n data_quartile = data_quartile_075 - data_quartile_025\n data_std = np.std(data)\n data_rms = np.sqrt((np.mean(data**2)))\n data_sra = (np.sum(np.sqrt(np.abs(data)))/len(data))**2\n\n # distribution shape statistics\n data_kurtosis = data.kurt()\n data_skew = data.skew()\n\n data_avg = np.mean(np.abs(data))\n data_ff = data_rms / data_avg\n\n data_clf = np.max(np.abs(data)) / data_sra\n data_cf = np.max(np.abs(data)) / data_rms\n \n feature_list = [data_mean, data_median, data_quartile_025, data_quartile_075, data_Maximum, data_Minimum, data_quartile, data_std, data_rms, data_sra, data_kurtosis, data_skew, data_ff, data_clf, data_cf]\n feature_list = pd.DataFrame(data=feature_list).T\n feature_list.to_csv(\"./test_features.csv\",sep=',',mode='a',index=False,encoding='utf-8',header=None)\nprint(\"Work Done\")\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"numpy.abs",
"numpy.min",
"pandas.DataFrame",
"numpy.max",
"numpy.std",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ZendriXXX/predict-python
|
[
"fe0360b4888980421f8f91f158d6523729bfc5f7"
] |
[
"src/encoding/declare/declare_mining.py"
] |
[
"\"\"\"\nMain file for deviance mining\n\"\"\"\nimport numpy as np\n\nfrom src.encoding.declare.declare_templates import *\n\n\ndef apply_template_to_log(template, candidate, log):\n \"\"\"returns the log with template applied\n\n :param template:\n :param candidate:\n :param log:\n :return:\n \"\"\"\n results = []\n for trace in log:\n result, vacuity = apply_template(template, log[trace], candidate)\n\n results.append(result)\n\n return results\n\n\ndef find_if_satisfied_by_class(constraint_result, transformed_log, labels, support_true, support_false):\n \"\"\"returns two boolean variable show if class is trusted\n\n :param constraint_result:\n :param transformed_log:\n :param labels:\n :param support_true:\n :param support_false:\n :return:\n \"\"\"\n fulfill_true = 0\n fulfill_false = 0\n for i, trace in enumerate(transformed_log):\n ## TODO: Find if it is better to have > 0 or != 0.\n if constraint_result[i] > 0:\n #if constraint_result[i] != 0:\n if labels[trace] == 'false':\n fulfill_false += 1\n else:\n fulfill_true += 1\n\n true_pass = fulfill_true >= support_true\n false_pass = fulfill_false >= support_false\n\n return true_pass, false_pass\n\n\ndef generate_train_candidate_constraints(candidates, templates, transformed_log, labels, constraint_support_true,\n constraint_support_false, filter_t=True):\n \"\"\"returns the train-candidate's constraints\n\n :param candidates:\n :param templates:\n :param transformed_log:\n :param labels:\n :param constraint_support_true:\n :param constraint_support_false:\n :param filter_t:\n :return:\n \"\"\"\n all_results = {}\n for template in templates:\n print(\"Started working on {}\".format(template))\n for candidate in candidates:\n if len(candidate) == template_sizes[template]:\n candidate_name = template + \":\" + str(candidate)\n constraint_result = apply_template_to_log(template, candidate, transformed_log)\n satis_true, satis_false = find_if_satisfied_by_class(constraint_result, transformed_log, labels,\n constraint_support_true,\n constraint_support_false)\n\n if not filter_t or (satis_true or satis_false):\n all_results[candidate_name] = constraint_result\n\n return all_results\n\n\ndef transform_results_to_numpy(results, labels, transformed_log, cols):\n \"\"\"\n Transforms results structure into numpy arrays\n :param results:\n :param labels:\n :param transformed_log:\n :param cols:\n :return:\n \"\"\"\n labels = [labels[trace] for trace in transformed_log]\n trace_names = [trace for trace in transformed_log]\n matrix = []\n featurenames = []\n\n if cols is None:\n for feature, result in results.items():\n matrix.append(result)\n featurenames.append(feature)\n else:\n for c in cols:\n if c not in ['trace_id', 'label']:\n if c in results:\n matrix.append(results[c])\n else:\n matrix.append([0 for _ in range(len(transformed_log))])\n featurenames.append(c)\n\n nparray_data = np.array(matrix).T\n nparray_labels = np.array(labels)\n nparray_names = np.array(trace_names)\n return nparray_data, nparray_labels, featurenames, nparray_names\n\n\ndef filter_candidates_by_support(candidates, transformed_log, labels, support_true, support_false): #TODO JONAS, no idea what this does\n \"\"\"returns candidates filtered using given support_true and support_false\n\n :param candidates:\n :param transformed_log:\n :param labels:\n :param support_true:\n :param support_false:\n :return:\n \"\"\"\n filtered_candidates = []\n for candidate in candidates:\n count_false = 0\n count_true = 0\n for trace in transformed_log:\n ev_ct = 0\n for event in 
candidate:\n if event in [event for event in transformed_log[trace]]:\n ev_ct += 1\n else:\n break\n if ev_ct == len(candidate): # all candidate events in trace\n if labels[trace] == 'false':\n count_false += 1\n else:\n count_true += 1\n\n if count_false >= support_false or count_true >= support_true:\n filtered_candidates.append(candidate)\n break\n\n return filtered_candidates\n\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
|
[
"a59a5c36dd5d4ac04205627827e792322742462d",
"a59a5c36dd5d4ac04205627827e792322742462d"
] |
[
"Replication Python and R Codes/Figure_4/cMCA_UTAS2012_LDPDPJ_new2.py",
"Replication Python and R Codes/Figure_10/cMCA_UTAS2012_LDPDPJ_loading_2.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport prince\nfrom sklearn.cluster import DBSCAN\nimport itertools\nfrom cmca import CMCA\nfrom ccmca import CCMCA\nplt.style.use('ggplot')\n\nalpha = r'$ \\alpha $'\n\n## Set up color\ntableau10 = {\n 'blue': '#507AA6',\n 'orange': '#F08E39',\n 'red': '#DF585C',\n 'teal': '#78B7B2',\n 'green': '#5BA053',\n 'yellow': '#ECC854',\n 'purple': '#AF7BA1',\n 'pink': '#FD9EA9',\n 'brown': '#9A7460',\n 'gray': '#BAB0AC',\n 0: '#507AA6',\n 1: '#F08E39',\n 2: '#DF585C',\n 3: '#78B7B2',\n 4: '#5BA053',\n 5: '#ECC854',\n 6: '#AF7BA1',\n 7: '#FD9EA9',\n 8: '#9A7460',\n 9: '#BAB0AC',\n -1: '#BAB0AC',\n 'LDP': '#507AA6',\n 'DPJ': '#F08E39',\n \"JRP\": '#DF585C',\n}\n\n## Recode NA by data type\ndef fillna_based_on_dtype(df):\n for key in dict(df.dtypes).keys():\n if df.dtypes[key] == np.object:\n df[key] = df[key].fillna('na')\n else:\n df[key] = df[key].fillna(99)\n\n\n## Extract data by parties\ndef csv_to_mats(csv, rtype=\"v\", jrp=False):\n df = pd.read_csv(csv)\n if rtype == \"v\":\n df = df[df.cv != \"candidate\"]\n else:\n df = df[df.cv != \"voter\"]\n\n X = df.iloc[:,np.r_[3,7:12,14:df.shape[1]]]\n\n if jrp:\n X_ldp = X[X[\"psup_short\"] == \"LDP\"]\n X_dpj = X[X[\"psup_short\"] == \"DPJ\"]\n X_jrp = X[X[\"psup_short\"] == \"JRP\"]\n\n print(\"missing value ratio (LDP)\", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))\n print(\"missing value ratio (DPJ)\", X_dpj.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n print(\"missing value ratio (JRP)\", X_jrp.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n\n fillna_based_on_dtype(X_ldp)\n fillna_based_on_dtype(X_dpj)\n fillna_based_on_dtype(X_jrp)\n else:\n X_ldp = X[X[\"psup_short\"] == \"LDP\"]\n X_dpj = X[X[\"psup_short\"] == \"DPJ\"]\n\n print(\"missing value ratio (LDP)\", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))\n print(\"missing value ratio (DPJ)\", X_dpj.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n\n fillna_based_on_dtype(X_ldp)\n fillna_based_on_dtype(X_dpj)\n\n\n if jrp:\n return (X_ldp, X_dpj, X_jrp)\n else:\n return (X_ldp, X_dpj)\n\n## Load data\nX_ldp, X_dpj, X_jrp = csv_to_mats('./utas12_ooc.csv', rtype=\"v\", jrp=True)\nX_ldp['policy00'] = X_ldp['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX_dpj['policy00'] = X_dpj['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX_jrp['policy00'] = X_jrp['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX = pd.concat([X_ldp, X_dpj, X_jrp])\nprint(X_ldp.shape, X_dpj.shape, X_jrp.shape, X.shape)\n\n##Disctionay for Level and Party\nparty = {\"LDP\":\"LDP\", \"DPJ\":\"DPJ\", \"JRP\":\"JRP\"}\n\n##Fitting cMCA and export plots\ncmca = CMCA(n_components=2, copy=True, check_input=True)\ncmca = cmca.fit(fg=X_ldp.iloc[:,6:X.shape[1]], bg=X_dpj.iloc[:,6:X.shape[1]], alpha=1.5)\nanti_asia = list()\nfor u,v in zip(X_ldp['policy56'],X_ldp['policy54']):\n if u==5 or v==5:\n anti_asia.append(4)\n else:\n anti_asia.append(6)\n\nY_fg = np.array(cmca.transform(X_ldp.iloc[:,6:X.shape[1]]))\nY_bg = np.array(cmca.transform(X_dpj.iloc[:,6:X.shape[1]]))\n\nX_ldp[\"anti_asia\"] = anti_asia\n\nf = plt.figure()\nplt.xlim([-1.5, 2])\nplt.ylim([-1.5, 1.5])\nfor label in X_ldp['anti_asia'].unique():\n plt.scatter(Y_fg[X_ldp['anti_asia'] == label, 0], Y_fg[X_ldp['anti_asia'] == label, 1], c=tableau10[label], label=label, alpha=0.6, linewidths=0)\n#plt.scatter(Y_bg[:, 0], Y_bg[:, 1], c=tableau10[X_dpj[\"psup_short\"].iloc[0]], 
label=party[X_dpj[\"psup_short\"].iloc[0]], alpha=0.8, linewidths=0)\nhandles, labels = plt.gca().get_legend_handles_labels()\nhandles = [handles[1],handles[0]]#,handles[2]]\nlabels = [\"LDP_Anti\", \"LDP_Oth\"]#, \"DPJ\"]\nplt.legend(handles, labels, loc=\"lower right\", shadow=False, scatterpoints=1, fontsize=8)\nplt.xlabel('cPC1')\nplt.ylabel('cPC2')\nplt.title(\"cMCA (tg: LDP, bg: DPJ, \" + str(alpha) + \": 1.5)\")\nplt.show()\nf.savefig(\"cMCA_UTAS2012_ldpdpj_new2.pdf\", bbox_inches='tight')\n",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport prince\nfrom sklearn.cluster import DBSCAN\nimport itertools\nfrom cmca import CMCA\nfrom ccmca import CCMCA\nplt.style.use('ggplot')\n\nalpha = r'$ \\alpha $'\n\n## Set up color\ntableau10 = {\n 'orange': '#F08E39',\n 'blue': '#507AA6',\n 'red': '#DF585C',\n 'teal': '#78B7B2',\n 'green': '#5BA053',\n 'yellow': '#ECC854',\n 'purple': '#AF7BA1',\n 'pink': '#FD9EA9',\n 'brown': '#9A7460',\n 'gray': '#BAB0AC',\n 0: '#F08E39',\n 1: '#507AA6',\n 2: '#DF585C',\n 3: '#78B7B2',\n 4: '#5BA053',\n 5: '#ECC854',\n 6: '#AF7BA1',\n 7: '#FD9EA9',\n 8: '#9A7460',\n 9: '#800000',\n -1: '#BAB0AC',\n 'LDP': '#507AA6',\n 'DPJ': '#F08E39',\n \"JRP\": '#DF585C',\n 'DEM': '#507AA6',\n 'REP': '#F08E39',\n}\n\n## Recode NA by data type\ndef fillna_based_on_dtype(df):\n for key in dict(df.dtypes).keys():\n if df.dtypes[key] == np.object:\n df[key] = df[key].fillna('na')\n else:\n df[key] = df[key].fillna(99)\n\n\n## Extract data by parties\ndef csv_to_mats(csv, rtype=\"v\", jrp=False):\n df = pd.read_csv(csv)\n if rtype == \"v\":\n df = df[df.cv != \"candidate\"]\n else:\n df = df[df.cv != \"voter\"]\n\n X = df.iloc[:,np.r_[3,7:12,14:df.shape[1]]]\n\n if jrp:\n X_ldp = X[X[\"psup_short\"] == \"LDP\"]\n X_dpj = X[X[\"psup_short\"] == \"DPJ\"]\n X_jrp = X[X[\"psup_short\"] == \"JRP\"]\n\n print(\"missing value ratio (LDP)\", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))\n print(\"missing value ratio (DPJ)\", X_dpj.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n print(\"missing value ratio (JRP)\", X_jrp.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n\n fillna_based_on_dtype(X_ldp)\n fillna_based_on_dtype(X_dpj)\n fillna_based_on_dtype(X_jrp)\n else:\n X_ldp = X[X[\"psup_short\"] == \"LDP\"]\n X_dpj = X[X[\"psup_short\"] == \"DPJ\"]\n\n print(\"missing value ratio (LDP)\", X_ldp.isna().sum().sum() / (X_ldp.shape[0] * X_ldp.shape[1]))\n print(\"missing value ratio (DPJ)\", X_dpj.isna().sum().sum() / (X_dpj.shape[0] * X_dpj.shape[1]))\n\n fillna_based_on_dtype(X_ldp)\n fillna_based_on_dtype(X_dpj)\n\n\n if jrp:\n return (X_ldp, X_dpj, X_jrp)\n else:\n return (X_ldp, X_dpj)\n\n## Load data\nX_ldp, X_dpj, X_jrp = csv_to_mats('./utas12_ooc.csv', rtype=\"v\", jrp=True)\nX_ldp['policy00'] = X_ldp['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX_dpj['policy00'] = X_dpj['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX_jrp['policy00'] = X_jrp['policy00'].replace([0,3,4,5,6,7,8,9,10], [1,2,3,3,3,4,4,5,5])\nX = pd.concat([X_ldp, X_dpj, X_jrp])\nprint(X_ldp.shape, X_dpj.shape, X_jrp.shape, X.shape)\n\n##Disctionay for Level and Party\nparty = {\"LDP\":\"LDP\", \"DPJ\":\"DPJ\", \"JRP\":\"JRP\"}\n\n##Fitting cMCA and export plots\ncmca = CMCA(n_components=2, copy=True, check_input=True)\ncmca = cmca.fit(fg=X_ldp.iloc[:,6:X.shape[1]], bg=X_dpj.iloc[:,6:X.shape[1]], alpha=1.5)\nY_fg = np.array(cmca.transform(X_ldp.iloc[:,6:X.shape[1]]))\nY_bg = np.array(cmca.transform(X_dpj.iloc[:,6:X.shape[1]]))\nY_fg_col = np.array(cmca.transform(X_ldp.iloc[:,6:(X.shape[1])], axis='col'))\nprefix_to_info = cmca.gen_prefix_to_info()\n\nused_others_label = False\nf = plt.figure()\nfor key in prefix_to_info.keys():\n indices = prefix_to_info[key]['indices']\n rank_1 = prefix_to_info[key]['loading_ranks_norm_1']\n rank_1 = rank_1 if rank_1 < 9 else -1\n texts = [int(float(postfix)) for postfix in prefix_to_info[key]['postfixes']]\n label = key if rank_1 >= 0 else 'others'\n if label == 'others':\n if 
used_others_label:\n label = None\n else:\n used_others_label = True\n\n plt.scatter(cmca.loadings[indices, 0], cmca.loadings[indices, 1], c=tableau10[rank_1], label=label)\n for i, txt in enumerate(texts):\n plt.annotate(txt, (cmca.loadings[indices[i], 0], cmca.loadings[indices[i], 1]), fontsize=8)\nplt.title('Loadings (cPC2)')\nxpad = (cmca.loadings[:, 0].max() - cmca.loadings[:, 0].min()) * 0.1\nypad = (cmca.loadings[:, 1].max() - cmca.loadings[:, 1].min()) * 0.1\nplt.xlim([cmca.loadings[:, 0].min() - xpad, cmca.loadings[:, 0].max() + xpad])\nplt.ylim([cmca.loadings[:, 1].min() - ypad, cmca.loadings[:, 1].max() + ypad])\nplt.tight_layout()\nhandles, labels = plt.gca().get_legend_handles_labels()\nhandles = [handles[9], handles[8], handles[7], handles[5], handles[6], handles[2], handles[4], handles[3], handles[1], handles[0]]\nlabels = [labels[9], labels[8], labels[7], labels[5], labels[6], labels[2], labels[4], labels[3], labels[1], labels[0]]\nplt.xlabel('cPC1')\nplt.ylabel('cPC2')\nplt.legend(handles, labels, loc='best', shadow=False, scatterpoints=1, fontsize=8)\nplt.show()\nf.savefig(\"cMCA_UTAS2012_ldpdpj_loading_2.pdf\", bbox_inches='tight')\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
jaesik817/tf-gqn
|
[
"9ff2142f8817ee571dbc3fc99284311c44f4e48d"
] |
[
"tests/test_gqn_objective.py"
] |
[
"\"\"\"\nQuick test script to shape-check graph definition of full GQN model with ELBO\nobjective with random toy data.\n\"\"\"\n\nimport os\nimport sys\nSCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))\nTF_GQN_HOME = os.path.abspath(os.path.join(SCRIPT_PATH, '..'))\nsys.path.append(TF_GQN_HOME)\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom gqn.gqn_params import PARAMS\nfrom gqn.gqn_graph import gqn_draw\nfrom gqn.gqn_objective import gqn_draw_elbo\n\n# constants\n_BATCH_SIZE = 1\n_CONTEXT_SIZE = PARAMS.CONTEXT_SIZE\n_DIM_POSE = PARAMS.POSE_CHANNELS\n_DIM_H_IMG = PARAMS.IMG_HEIGHT\n_DIM_W_IMG = PARAMS.IMG_WIDTH\n_DIM_C_IMG = PARAMS.IMG_CHANNELS\n_SEQ_LENGTH = PARAMS.SEQ_LENGTH\n\n# input placeholders\nquery_pose = tf.placeholder(\n shape=[_BATCH_SIZE, _DIM_POSE], dtype=tf.float32)\ntarget_frame = tf.placeholder(\n shape=[_BATCH_SIZE, _DIM_H_IMG, _DIM_W_IMG, _DIM_C_IMG],\n dtype=tf.float32)\ncontext_poses = tf.placeholder(\n shape=[_BATCH_SIZE, _CONTEXT_SIZE, _DIM_POSE],\n dtype=tf.float32)\ncontext_frames = tf.placeholder(\n shape=[_BATCH_SIZE, _CONTEXT_SIZE, _DIM_H_IMG, _DIM_W_IMG, _DIM_C_IMG],\n dtype=tf.float32)\n\n# graph definition\nnet, ep_gqn = gqn_draw(\n query_pose=query_pose,\n target_frame=target_frame,\n context_poses=context_poses,\n context_frames=context_frames,\n model_params=PARAMS,\n is_training=True\n)\n\n# loss definition\nmu_target = net\nsigma_target = tf.constant( # additional parameter tuned during training\n value=1.0, dtype=tf.float32,\n shape=[_BATCH_SIZE, _DIM_H_IMG, _DIM_W_IMG, _DIM_C_IMG])\nmu_q, sigma_q, mu_pi, sigma_pi = [], [], [], []\n# collecting endpoints for ELBO computation\nfor i in range(_SEQ_LENGTH):\n mu_q.append(ep_gqn[\"mu_q_%d\" % i])\n sigma_q.append(ep_gqn[\"sigma_q_%d\" % i])\n mu_pi.append(ep_gqn[\"mu_pi_%d\" % i])\n sigma_pi.append(ep_gqn[\"sigma_pi_%d\" % i])\nelbo, ep_elbo = gqn_draw_elbo(\n mu_target, sigma_target,\n mu_q, sigma_q,\n mu_pi, sigma_pi,\n target_frame)\n\n# feed random input through the graph\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n loss = sess.run(\n elbo,\n feed_dict={\n query_pose : np.random.rand(_BATCH_SIZE, _DIM_POSE),\n target_frame : np.random.rand(_BATCH_SIZE, _DIM_H_IMG, _DIM_W_IMG, _DIM_C_IMG),\n context_poses : np.random.rand(_BATCH_SIZE, _CONTEXT_SIZE, _DIM_POSE),\n context_frames : np.random.rand(_BATCH_SIZE, _CONTEXT_SIZE, _DIM_H_IMG, _DIM_W_IMG, _DIM_C_IMG),\n })\n print(loss)\n print(loss.shape)\n for ep, t in ep_gqn.items():\n print(ep, t)\n\nprint(\"TEST PASSED!\")\n"
] |
[
[
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"tensorflow.Session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
JoanRosell/arviz
|
[
"0fde5058d5a09469b784972c2197f032eb27238a"
] |
[
"arviz/plots/backends/__init__.py"
] |
[
"# pylint: disable=no-member,invalid-name,redefined-outer-name\n\"\"\"ArviZ plotting backends.\"\"\"\nimport re\n\nimport numpy as np\nfrom pandas import DataFrame\n\nfrom ...rcparams import rcParams\n\n\ndef to_cds(\n data,\n var_names=None,\n groups=None,\n dimensions=None,\n group_info=True,\n var_name_format=None,\n index_origin=None,\n):\n \"\"\"Transform data to ColumnDataSource (CDS) compatible with Bokeh.\n\n Uses `_ARVIZ_GROUP_` and `_ARVIZ_CDS_SELECTION_`to separate var_name\n from group and dimensions in CDS columns.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_inference_data for details\n var_names : str or list of str, optional\n Variables to be processed, if None all variables are processed.\n groups : str or list of str, optional\n Select groups for CDS. Default groups are {\"posterior_groups\", \"prior_groups\",\n \"posterior_groups_warmup\"}\n - posterior_groups: posterior, posterior_predictive, sample_stats\n - prior_groups: prior, prior_predictive, sample_stats_prior\n - posterior_groups_warmup: warmup_posterior, warmup_posterior_predictive,\n warmup_sample_stats\n ignore_groups : str or list of str, optional\n Ignore specific groups from CDS.\n dimension : str, or list of str, optional\n Select dimensions along to slice the data. By default uses (\"chain\", \"draw\").\n group_info : bool\n Add group info for `var_name_format`\n var_name_format : str or tuple of tuple of string, optional\n Select column name format for non-scalar input.\n Predefined options are {\"brackets\", \"underscore\", \"cds\"}\n \"brackets\":\n - add_group_info == False: theta[0,0]\n - add_group_info == True: theta_posterior[0,0]\n \"underscore\":\n - add_group_info == False: theta_0_0\n - add_group_info == True: theta_posterior_0_0_\n \"cds\":\n - add_group_info == False: theta_ARVIZ_CDS_SELECTION_0_0\n - add_group_info == True: theta_ARVIZ_GROUP_posterior__ARVIZ_CDS_SELECTION_0_0\n tuple:\n Structure:\n tuple: (dim_info, group_info)\n dim_info: (str: `.join` separator,\n str: dim_separator_start,\n str: dim_separator_end)\n group_info: (str: group separator start, str: group separator end)\n Example: ((\",\", \"[\", \"]\"), (\"_\", \"\"))\n - add_group_info == False: theta[0,0]\n - add_group_info == True: theta_posterior[0,0]\n index_origin : int, optional\n Start parameter indices from `index_origin`. 
Either 0 or 1.\n\n Returns\n -------\n bokeh.models.ColumnDataSource object\n \"\"\"\n from ...utils import flatten_inference_data_to_dict\n\n if var_name_format is None:\n var_name_format = \"cds\"\n\n cds_dict = flatten_inference_data_to_dict(\n data=data,\n var_names=var_names,\n groups=groups,\n dimensions=dimensions,\n group_info=group_info,\n index_origin=index_origin,\n var_name_format=var_name_format,\n )\n cds_data = ColumnDataSource(DataFrame.from_dict(cds_dict, orient=\"columns\"))\n return cds_data\n\n\ndef output_notebook(*args, **kwargs):\n \"\"\"Wrap bokeh.plotting.output_notebook.\"\"\"\n import bokeh.plotting as bkp\n\n return bkp.output_notebook(*args, **kwargs)\n\n\ndef output_file(*args, **kwargs):\n \"\"\"Wrap bokeh.plotting.output_file.\"\"\"\n import bokeh.plotting as bkp\n\n return bkp.output_file(*args, **kwargs)\n\n\ndef ColumnDataSource(*args, **kwargs):\n \"\"\"Wrap bokeh.models.ColumnDataSource.\"\"\"\n from bokeh.models import ColumnDataSource\n\n return ColumnDataSource(*args, **kwargs)\n\n\ndef create_layout(ax, force_layout=False):\n \"\"\"Transform bokeh array of figures to layout.\"\"\"\n ax = np.atleast_2d(ax)\n subplot_order = rcParams[\"plot.bokeh.layout.order\"]\n if force_layout:\n from bokeh.layouts import gridplot as layout\n\n ax = ax.tolist()\n layout_args = {\n \"sizing_mode\": rcParams[\"plot.bokeh.layout.sizing_mode\"],\n \"toolbar_location\": rcParams[\"plot.bokeh.layout.toolbar_location\"],\n }\n elif any(item in subplot_order for item in (\"row\", \"column\")):\n # check number of rows\n match = re.match(r\"(\\d*)(row|column)\", subplot_order)\n n = int(match.group(1)) if match.group(1) is not None else 1\n subplot_order = match.group(2)\n # set up 1D list of axes\n ax = [item for item in ax.ravel().tolist() if item is not None]\n layout_args = {\"sizing_mode\": rcParams[\"plot.bokeh.layout.sizing_mode\"]}\n if subplot_order == \"row\" and n == 1:\n from bokeh.layouts import row as layout\n elif subplot_order == \"column\" and n == 1:\n from bokeh.layouts import column as layout\n else:\n from bokeh.layouts import layout\n\n if n != 1:\n ax = np.array(ax + [None for _ in range(int(np.ceil(len(ax) / n)) - len(ax))])\n if subplot_order == \"row\":\n ax = ax.reshape(n, -1)\n else:\n ax = ax.reshape(-1, n)\n ax = ax.tolist()\n else:\n if subplot_order in (\"square\", \"square_trimmed\"):\n ax = [item for item in ax.ravel().tolist() if item is not None]\n n = int(np.ceil(len(ax) ** 0.5))\n ax = ax + [None for _ in range(n ** 2 - len(ax))]\n ax = np.array(ax).reshape(n, n)\n ax = ax.tolist()\n if (subplot_order == \"square_trimmed\") and any(\n all(item is None for item in row) for row in ax\n ):\n from bokeh.layouts import layout\n\n ax = [row for row in ax if not all(item is None for item in row)]\n layout_args = {\"sizing_mode\": rcParams[\"plot.bokeh.layout.sizing_mode\"]}\n else:\n from bokeh.layouts import gridplot as layout\n\n layout_args = {\n \"sizing_mode\": rcParams[\"plot.bokeh.layout.sizing_mode\"],\n \"toolbar_location\": rcParams[\"plot.bokeh.layout.toolbar_location\"],\n }\n # ignore \"fixed\" sizing_mode without explicit width and height\n if layout_args.get(\"sizing_mode\", \"\") == \"fixed\":\n layout_args.pop(\"sizing_mode\")\n return layout(ax, **layout_args)\n\n\ndef show_layout(ax, show=True, force_layout=False):\n \"\"\"Create a layout and call bokeh show.\"\"\"\n if show is None:\n show = rcParams[\"plot.bokeh.show\"]\n if show:\n import bokeh.plotting as bkp\n\n layout = create_layout(ax, force_layout=force_layout)\n 
bkp.show(layout)\n\n\ndef _copy_docstring(lib, function):\n \"\"\"Extract docstring from function.\"\"\"\n import importlib\n\n try:\n module = importlib.import_module(lib)\n func = getattr(module, function)\n doc = func.__doc__\n except ImportError:\n doc = \"Failed to import function {} from {}\".format(function, lib)\n\n if not isinstance(doc, str):\n doc = \"\"\n return doc\n\n\noutput_notebook.__doc__ += \"\\n\\n\" + _copy_docstring(\"bokeh.plotting\", \"output_notebook\")\noutput_file.__doc__ += \"\\n\\n\" + _copy_docstring(\"bokeh.plotting\", \"output_file\")\nColumnDataSource.__doc__ += \"\\n\\n\" + _copy_docstring(\"bokeh.models\", \"ColumnDataSource\")\n"
] |
[
[
"numpy.atleast_2d",
"numpy.array",
"pandas.DataFrame.from_dict"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
quancs/pytorch-lightning
|
[
"c6478414ee2521d8768afba7dc6699326c056e17"
] |
[
"pytorch_lightning/overrides/distributed.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nfrom typing import Any, cast, Iterator, List, Optional, Sized, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.utils.data import BatchSampler, DistributedSampler, Sampler\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.overrides.base import _LightningModuleWrapperBase\n\n\nclass LightningDistributedModule(_LightningModuleWrapperBase):\n def __init__(self, pl_module: \"pl.LightningModule\") -> None:\n \"\"\"Wraps the user's LightningModule and redirects the forward call to the appropriate method, either\n ``training_step``, ``validation_step``, ``test_step`` or ``predict``. This class is used in combination\n with :class:`~torch.nn.parallel.DistributedDataParallel` as shown in the example.\n\n Example:\n\n ddp_model = torch.nn.parallel.DistributedDataParallel(\n module=LightningDistributedModule(lightning_module),\n device_ids=[local_rank],\n ...\n )\n\n Args:\n pl_module: the model to wrap\n \"\"\"\n super().__init__(pl_module)\n\n\ndef _find_tensors(\n obj: Union[Tensor, list, tuple, dict, Any]\n) -> Union[List[Tensor], itertools.chain]: # pragma: no-cover\n \"\"\"Recursively find all tensors contained in the specified object.\"\"\"\n if isinstance(obj, Tensor):\n return [obj]\n if isinstance(obj, (list, tuple)):\n return itertools.chain(*map(_find_tensors, obj))\n if isinstance(obj, dict):\n return itertools.chain(*map(_find_tensors, obj.values()))\n return []\n\n\n# In manual_optimization, we need to call reducer prepare_for_backward.\n# Note: Keep track of Pytorch DDP and update if there is a change\n# https://github.com/pytorch/pytorch/blob/v1.7.1/torch/nn/parallel/distributed.py#L626-L638\ndef prepare_for_backward(model: DistributedDataParallel, output: Any) -> None:\n # `prepare_for_backward` is `DistributedDataParallel` specific.\n if not isinstance(model, DistributedDataParallel):\n return\n if torch.is_grad_enabled() and model.require_backward_grad_sync:\n model.require_forward_param_sync = True # type: ignore[assignment]\n # We'll return the output object verbatim since it is a freeform\n # object. We need to find any tensors in this object, though,\n # because we need to figure out which parameters were used during\n # this forward pass, to ensure we short circuit reduction for any\n # unused parameters. Only if `find_unused_parameters` is set.\n args = list(_find_tensors(output)) if model.find_unused_parameters else []\n reducer = cast(torch._C._distributed_c10d.Reducer, model.reducer)\n reducer.prepare_for_backward(args)\n else:\n model.require_forward_param_sync = False # type: ignore[assignment]\n\n\nclass UnrepeatedDistributedSampler(DistributedSampler):\n \"\"\"A fork of the PyTorch DistributedSampler that doesn't repeat data, instead allowing the number of batches\n per process to be off-by-one from each other. 
This makes this sampler usable for predictions (it's\n deterministic and doesn't require shuffling). It is potentially unsafe to use this sampler for training,\n because during training the DistributedDataParallel syncs buffers on each forward pass, so it could freeze if\n one of the processes runs one fewer batch. During prediction, buffers are only synced on the first batch, so\n this is safe to use as long as each process runs at least one batch. We verify this in an assert.\n\n Taken from https://github.com/jpuigcerver/PyLaia/blob/v1.0.0/laia/data/unpadded_distributed_sampler.py\n and https://github.com/pytorch/pytorch/issues/25162#issuecomment-634146002\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n if not isinstance(self.dataset, Sized):\n raise TypeError(\"The given dataset must implement the `__len__` method.\")\n self.num_samples = len(range(self.rank, len(self.dataset), self.num_replicas))\n self.total_size = len(self.dataset)\n # If any process has at least one batch, every other process needs to\n # have at least one batch, or the DistributedDataParallel could lock up.\n assert self.num_samples >= 1 or self.total_size == 0\n\n def __iter__(self) -> Iterator[List[int]]:\n if not isinstance(self.dataset, Sized):\n raise TypeError(\"The given dataset must implement the `__len__` method.\")\n if self.shuffle:\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n\nclass IndexBatchSamplerWrapper:\n \"\"\"This class is used to wrap a :class:`torch.utils.data.BatchSampler` and capture its indices.\"\"\"\n\n def __init__(self, sampler: BatchSampler) -> None:\n self._sampler = sampler\n self.batch_indices: Optional[List[int]] = None\n\n def __iter__(self) -> Iterator[List[int]]:\n for batch in self._sampler:\n self.batch_indices = batch\n yield batch\n\n def __len__(self) -> int:\n return len(self._sampler)\n\n @property\n def drop_last(self) -> bool:\n return self._sampler.drop_last\n\n @property\n def batch_size(self) -> int:\n return self._sampler.batch_size\n\n @property\n def sampler(self) -> Sampler:\n return self._sampler.sampler\n"
] |
[
[
"torch.Generator",
"torch.is_grad_enabled"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ManoelRIos/sorting-algorithm
|
[
"eea85e2e3a084ac0f3ac4f872adfee3440b5f3f3"
] |
[
"bar.py"
] |
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nalist = [1,2,3,4,5,6,7,8,9,10]\nxlist = [1,2,3,4,5,6,7,8,9,10]\nplt.bar(alist, xlist)\nplt.show()"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MiWeiss/probability
|
[
"3da47c7c26a83ccb0734461836d127f938586abb",
"3da47c7c26a83ccb0734461836d127f938586abb",
"3da47c7c26a83ccb0734461836d127f938586abb"
] |
[
"tensorflow_probability/python/experimental/vi/surrogate_posteriors.py",
"tensorflow_probability/python/sts/structural_time_series.py",
"tensorflow_probability/python/sts/forecast_test.py"
] |
[
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Utilities for constructing surrogate posteriors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nimport functools\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import util as tfp_util\nfrom tensorflow_probability.python.bijectors import chain\nfrom tensorflow_probability.python.bijectors import identity\nfrom tensorflow_probability.python.bijectors import invert\nfrom tensorflow_probability.python.bijectors import joint_map\nfrom tensorflow_probability.python.bijectors import reshape\nfrom tensorflow_probability.python.bijectors import restructure\nfrom tensorflow_probability.python.bijectors import scale_matvec_linear_operator\nfrom tensorflow_probability.python.bijectors import shift\nfrom tensorflow_probability.python.bijectors import softplus\nfrom tensorflow_probability.python.bijectors import split\nfrom tensorflow_probability.python.distributions import batch_broadcast\nfrom tensorflow_probability.python.distributions import independent\nfrom tensorflow_probability.python.distributions import joint_distribution_util\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import sample\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.experimental.vi.util import trainable_linear_operators\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\n\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\ndef build_trainable_location_scale_distribution(initial_loc,\n initial_scale,\n event_ndims,\n distribution_fn=normal.Normal,\n validate_args=False,\n name=None):\n \"\"\"Builds a variational distribution from a location-scale family.\n\n Args:\n initial_loc: Float `Tensor` initial location.\n initial_scale: Float `Tensor` initial scale.\n event_ndims: Integer `Tensor` number of event dimensions in `initial_loc`.\n distribution_fn: Optional constructor for a `tfd.Distribution` instance\n in a location-scale family. This should have signature `dist =\n distribution_fn(loc, scale, validate_args)`.\n Default value: `tfd.Normal`.\n validate_args: Python `bool`. Whether to validate input with asserts. This\n imposes a runtime cost. 
If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e.,\n 'build_trainable_location_scale_distribution').\n\n Returns:\n posterior_dist: A `tfd.Distribution` instance.\n \"\"\"\n with tf.name_scope(name or 'build_trainable_location_scale_distribution'):\n dtype = dtype_util.common_dtype([initial_loc, initial_scale],\n dtype_hint=tf.float32)\n initial_loc = initial_loc * tf.ones(tf.shape(initial_scale), dtype=dtype)\n initial_scale = initial_scale * tf.ones_like(initial_loc)\n\n loc = tf.Variable(initial_value=initial_loc, name='loc')\n scale = tfp_util.TransformedVariable(\n initial_scale, softplus.Softplus(), name='scale')\n posterior_dist = distribution_fn(loc=loc, scale=scale,\n validate_args=validate_args)\n\n # Ensure the distribution has the desired number of event dimensions.\n static_event_ndims = tf.get_static_value(event_ndims)\n if static_event_ndims is None or static_event_ndims > 0:\n posterior_dist = independent.Independent(\n posterior_dist,\n reinterpreted_batch_ndims=event_ndims,\n validate_args=validate_args)\n\n return posterior_dist\n\n\ndef _get_event_shape_shallow_structure(event_shape):\n \"\"\"Gets shallow structure, treating lists of ints at the leaves as atomic.\"\"\"\n def _not_list_of_ints(s):\n if isinstance(s, list) or isinstance(s, tuple):\n return not all(isinstance(x, int) for x in s)\n return True\n\n return nest.get_traverse_shallow_structure(_not_list_of_ints, event_shape)\n\n\n# Default constructors for `build_factored_surrogate_posterior`.\n_sample_uniform_initial_loc = functools.partial(\n samplers.uniform, minval=-2., maxval=2., dtype=tf.float32)\n_build_trainable_normal_dist = functools.partial(\n build_trainable_location_scale_distribution,\n distribution_fn=normal.Normal)\n\n\[email protected](\n '2021-07-01',\n '`build_factored_surrogate_posterior` is deprecated. Use '\n '`build_affine_surrogate_posterior` with `operators=\"diag\"` instead.')\[email protected]_args(\n '2021-03-15',\n '`constraining_bijectors` is deprecated, use `bijector` instead',\n 'constraining_bijectors')\ndef build_factored_surrogate_posterior(\n event_shape=None,\n bijector=None,\n constraining_bijectors=None,\n initial_unconstrained_loc=_sample_uniform_initial_loc,\n initial_unconstrained_scale=1e-2,\n trainable_distribution_fn=_build_trainable_normal_dist,\n seed=None,\n validate_args=False,\n name=None):\n \"\"\"Builds a joint variational posterior that factors over model variables.\n\n By default, this method creates an independent trainable Normal distribution\n for each variable, transformed using a bijector (if provided) to\n match the support of that variable. This makes extremely strong\n assumptions about the posterior: that it is approximately normal (or\n transformed normal), and that all model variables are independent.\n\n Args:\n event_shape: `Tensor` shape, or nested structure of `Tensor` shapes,\n specifying the event shape(s) of the posterior variables.\n bijector: Optional `tfb.Bijector` instance, or nested structure of such\n instances, defining support(s) of the posterior variables. The structure\n must match that of `event_shape` and may contain `None` values. 
A\n posterior variable will be modeled as\n `tfd.TransformedDistribution(underlying_dist, bijector)` if a\n corresponding constraining bijector is specified, otherwise it is modeled\n as supported on the unconstrained real line.\n constraining_bijectors: Deprecated alias for `bijector`.\n initial_unconstrained_loc: Optional Python `callable` with signature\n `tensor = initial_unconstrained_loc(shape, seed)` used to sample\n real-valued initializations for the unconstrained representation of each\n variable. May alternately be a nested structure of\n `Tensor`s, giving specific initial locations for each variable; these\n must have structure matching `event_shape` and shapes determined by the\n inverse image of `event_shape` under `bijector`, which may optionally be\n prefixed with a common batch shape.\n Default value: `functools.partial(tf.random.stateless_uniform,\n minval=-2., maxval=2., dtype=tf.float32)`.\n initial_unconstrained_scale: Optional scalar float `Tensor` initial\n scale for the unconstrained distributions, or a nested structure of\n `Tensor` initial scales for each variable.\n Default value: `1e-2`.\n trainable_distribution_fn: Optional Python `callable` with signature\n `trainable_dist = trainable_distribution_fn(initial_loc, initial_scale,\n event_ndims, validate_args)`. This is called for each model variable to\n build the corresponding factor in the surrogate posterior. It is expected\n that the distribution returned is supported on unconstrained real values.\n Default value: `functools.partial(\n tfp.experimental.vi.build_trainable_location_scale_distribution,\n distribution_fn=tfd.Normal)`, i.e., a trainable Normal distribution.\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details. This is used\n only when `initial_loc` is not specified.\n validate_args: Python `bool`. Whether to validate input with asserts. This\n imposes a runtime cost. If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'build_factored_surrogate_posterior').\n\n Returns:\n surrogate_posterior: A `tfd.Distribution` instance whose samples have\n shape and structure matching that of `event_shape` or `initial_loc`.\n\n ### Examples\n\n Consider a Gamma model with unknown parameters, expressed as a joint\n Distribution:\n\n ```python\n Root = tfd.JointDistributionCoroutine.Root\n def model_fn():\n concentration = yield Root(tfd.Exponential(1.))\n rate = yield Root(tfd.Exponential(1.))\n y = yield tfd.Sample(tfd.Gamma(concentration=concentration, rate=rate),\n sample_shape=4)\n model = tfd.JointDistributionCoroutine(model_fn)\n ```\n\n Let's use variational inference to approximate the posterior over the\n data-generating parameters for some observed `y`. We'll build a\n surrogate posterior distribution by specifying the shapes of the latent\n `rate` and `concentration` parameters, and that both are constrained to\n be positive.\n\n ```python\n surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(\n event_shape=model.event_shape_tensor()[:-1], # Omit the observed `y`.\n bijector=[tfb.Softplus(), # Rate is positive.\n tfb.Softplus()]) # Concentration is positive.\n ```\n\n This creates a trainable joint distribution, defined by variables in\n `surrogate_posterior.trainable_variables`. 
We use `fit_surrogate_posterior`\n to fit this distribution by minimizing a divergence to the true posterior.\n\n ```python\n y = [0.2, 0.5, 0.3, 0.7]\n losses = tfp.vi.fit_surrogate_posterior(\n lambda rate, concentration: model.log_prob([rate, concentration, y]),\n surrogate_posterior=surrogate_posterior,\n num_steps=100,\n optimizer=tf.optimizers.Adam(0.1),\n sample_size=10)\n\n # After optimization, samples from the surrogate will approximate\n # samples from the true posterior.\n samples = surrogate_posterior.sample(100)\n posterior_mean = [tf.reduce_mean(x) for x in samples] # mean ~= [1.1, 2.1]\n posterior_std = [tf.math.reduce_std(x) for x in samples] # std ~= [0.3, 0.8]\n ```\n\n If we wanted to initialize the optimization at a specific location, we can\n specify one when we build the surrogate posterior. This function requires the\n initial location to be specified in *unconstrained* space; we do this by\n inverting the constraining bijectors (note this section also demonstrates the\n creation of a dict-structured model).\n\n ```python\n initial_loc = {'concentration': 0.4, 'rate': 0.2}\n bijector={'concentration': tfb.Softplus(), # Rate is positive.\n 'rate': tfb.Softplus()} # Concentration is positive.\n initial_unconstrained_loc = tf.nest.map_fn(\n lambda b, x: b.inverse(x) if b is not None else x, bijector, initial_loc)\n surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(\n event_shape=tf.nest.map_fn(tf.shape, initial_loc),\n bijector=bijector,\n initial_unconstrained_loc=initial_unconstrained_state,\n initial_unconstrained_scale=1e-4)\n ```\n\n \"\"\"\n\n with tf.name_scope(name or 'build_factored_surrogate_posterior'):\n bijector = deprecation.deprecated_argument_lookup(\n 'bijector', bijector, 'constraining_bijectors', constraining_bijectors)\n\n seed = tfp_util.SeedStream(seed, salt='build_factored_surrogate_posterior')\n\n # Convert event shapes to Tensors.\n shallow_structure = _get_event_shape_shallow_structure(event_shape)\n event_shape = nest.map_structure_up_to(\n shallow_structure, lambda s: tf.convert_to_tensor(s, dtype=tf.int32),\n event_shape)\n\n if nest.is_nested(bijector):\n bijector = nest.map_structure(\n lambda b: identity.Identity() if b is None else b,\n bijector)\n\n # Support mismatched nested structures for backwards compatibility (e.g.\n # non-nested `event_shape` and a single-element list of `bijector`s).\n bijector = nest.pack_sequence_as(event_shape, nest.flatten(bijector))\n\n event_space_bijector = joint_map.JointMap(\n bijector, validate_args=validate_args)\n else:\n event_space_bijector = bijector\n\n if event_space_bijector is None:\n unconstrained_event_shape = event_shape\n else:\n unconstrained_event_shape = (\n event_space_bijector.inverse_event_shape_tensor(event_shape))\n\n # Construct initial locations for the internal unconstrained dists.\n if callable(initial_unconstrained_loc): # Sample random initialization.\n initial_unconstrained_loc = nest.map_structure(\n lambda s: initial_unconstrained_loc(shape=s, seed=seed()),\n unconstrained_event_shape)\n\n if not nest.is_nested(initial_unconstrained_scale):\n initial_unconstrained_scale = nest.map_structure(\n lambda _: initial_unconstrained_scale,\n unconstrained_event_shape)\n\n # Extract the rank of each event, so that we build distributions with the\n # correct event shapes.\n unconstrained_event_ndims = nest.map_structure(\n ps.rank_from_shape,\n unconstrained_event_shape)\n\n # Build the component surrogate posteriors.\n unconstrained_distributions = 
nest.map_structure_up_to(\n unconstrained_event_shape,\n lambda loc, scale, ndims: trainable_distribution_fn( # pylint: disable=g-long-lambda\n loc, scale, ndims, validate_args=validate_args),\n initial_unconstrained_loc,\n initial_unconstrained_scale,\n unconstrained_event_ndims)\n\n base_distribution = (\n joint_distribution_util.independent_joint_distribution_from_structure(\n unconstrained_distributions, validate_args=validate_args))\n if event_space_bijector is None:\n return base_distribution\n return transformed_distribution.TransformedDistribution(\n base_distribution, event_space_bijector)\n\n\ndef build_affine_surrogate_posterior(\n event_shape,\n operators='diag',\n bijector=None,\n base_distribution=normal.Normal,\n dtype=tf.float32,\n batch_shape=(),\n seed=None,\n validate_args=False,\n name=None):\n \"\"\"Builds a joint variational posterior with a given `event_shape`.\n\n This function builds a surrogate posterior by applying a trainable\n transformation to a standard base distribution and constraining the samples\n with `bijector`. The surrogate posterior has event shape equal to\n the input `event_shape`.\n\n This function is a convenience wrapper around\n `build_affine_surrogate_posterior_from_base_distribution` that allows the\n user to pass in the desired posterior `event_shape` instead of\n pre-constructed base distributions (at the expense of full control over the\n base distribution types and parameterizations).\n\n Args:\n event_shape: (Nested) event shape of the posterior.\n operators: Either a string or a list/tuple containing `LinearOperator`\n subclasses, `LinearOperator` instances, or callables returning\n `LinearOperator` instances. Supported string values are \"diag\" (to create\n a mean-field surrogate posterior) and \"tril\" (to create a full-covariance\n surrogate posterior). A list/tuple may be passed to induce other\n posterior covariance structures. If the list is flat, a\n `tf.linalg.LinearOperatorBlockDiag` instance will be created and applied\n to the base distribution. Otherwise the list must be singly-nested and\n have a first element of length 1, second element of length 2, etc.; the\n elements of the outer list are interpreted as rows of a lower-triangular\n block structure, and a `tf.linalg.LinearOperatorBlockLowerTriangular`\n instance is created. For complete documentation and examples, see\n `tfp.experimental.vi.util.build_trainable_linear_operator_block`, which\n receives the `operators` arg if it is list-like.\n Default value: `\"diag\"`.\n bijector: `tfb.Bijector` instance, or nested structure of `tfb.Bijector`\n instances, that maps (nested) values in R^n to the support of the\n posterior. (This can be the `experimental_default_event_space_bijector` of\n the distribution over the prior latent variables.)\n Default value: `None` (i.e., the posterior is over R^n).\n base_distribution: A `tfd.Distribution` subclass parameterized by `loc` and\n `scale`. The base distribution of the transformed surrogate has `loc=0.`\n and `scale=1.`.\n Default value: `tfd.Normal`.\n dtype: The `dtype` of the surrogate posterior.\n Default value: `tf.float32`.\n batch_shape: Batch shape (Python tuple, list, or int) of the surrogate\n posterior, to enable parallel optimization from multiple initializations.\n Default value: `()`.\n seed: Python integer to seed the random number generator for initial values.\n Default value: `None`.\n validate_args: Python `bool`. Whether to validate input with asserts. This\n imposes a runtime cost. 
If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'build_affine_surrogate_posterior').\n\n Returns:\n surrogate_distribution: Trainable `tfd.Distribution` with event shape equal\n to `event_shape`.\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n tfb = tfp.bijectors\n\n # Define a joint probabilistic model.\n Root = tfd.JointDistributionCoroutine.Root\n def model_fn():\n concentration = yield Root(tfd.Exponential(1.))\n rate = yield Root(tfd.Exponential(1.))\n y = yield tfd.Sample(\n tfd.Gamma(concentration=concentration, rate=rate),\n sample_shape=4)\n model = tfd.JointDistributionCoroutine(model_fn)\n\n # Assume the `y` are observed, such that the posterior is a joint distribution\n # over `concentration` and `rate`. The posterior event shape is then equal to\n # the first two components of the model's event shape.\n posterior_event_shape = model.event_shape_tensor()[:-1]\n\n # Constrain the posterior values to be positive using the `Exp` bijector.\n bijector = [tfb.Exp(), tfb.Exp()]\n\n # Build a full-covariance surrogate posterior.\n surrogate_posterior = (\n tfp.experimental.vi.build_affine_surrogate_posterior(\n event_shape=posterior_event_shape,\n operators='tril',\n bijector=bijector))\n\n # For an example defining `'operators'` as a list to express an alternative\n # covariance structure, see\n # `build_affine_surrogate_posterior_from_base_distribution`.\n\n # Fit the model.\n y = [0.2, 0.5, 0.3, 0.7]\n target_model = model.experimental_pin(y=y)\n losses = tfp.vi.fit_surrogate_posterior(\n target_model.unnormalized_log_prob,\n surrogate_posterior,\n num_steps=100,\n optimizer=tf.optimizers.Adam(0.1),\n sample_size=10)\n ```\n \"\"\"\n with tf.name_scope(name or 'build_affine_surrogate_posterior'):\n\n event_shape = nest.map_structure_up_to(\n _get_event_shape_shallow_structure(event_shape),\n lambda s: tf.convert_to_tensor(s, dtype=tf.int32),\n event_shape)\n\n if nest.is_nested(bijector):\n bijector = joint_map.JointMap(\n nest.map_structure(\n lambda b: identity.Identity() if b is None else b,\n bijector), validate_args=validate_args)\n\n if bijector is None:\n unconstrained_event_shape = event_shape\n else:\n unconstrained_event_shape = (\n bijector.inverse_event_shape_tensor(event_shape))\n\n standard_base_distribution = nest.map_structure(\n lambda s: sample.Sample( # pylint: disable=g-long-lambda\n base_distribution(loc=tf.zeros([], dtype=dtype), scale=1.),\n sample_shape=s, validate_args=validate_args),\n unconstrained_event_shape)\n if batch_shape:\n standard_base_distribution = nest.map_structure(\n lambda d: batch_broadcast.BatchBroadcast( # pylint: disable=g-long-lambda\n d, to_shape=batch_shape, validate_args=validate_args),\n standard_base_distribution)\n\n return build_affine_surrogate_posterior_from_base_distribution(\n standard_base_distribution,\n operators=operators,\n bijector=bijector,\n seed=seed,\n validate_args=validate_args)\n\n\ndef build_affine_surrogate_posterior_from_base_distribution(\n base_distribution,\n operators='diag',\n bijector=None,\n initial_unconstrained_loc_fn=_sample_uniform_initial_loc,\n seed=None,\n validate_args=False,\n name=None):\n \"\"\"Builds a variational posterior by linearly transforming base distributions.\n\n This function builds a surrogate posterior by applying a trainable\n transformation to a base distribution (typically a 
`tfd.JointDistribution`) or\n nested structure of base distributions, and constraining the samples with\n `bijector`. Note that the distributions must have event shapes corresponding\n to the *pretransformed* surrogate posterior -- that is, if `bijector` contains\n a shape-changing bijector, then the corresponding base distribution event\n shape is the inverse event shape of the bijector applied to the desired\n surrogate posterior shape. The surrogate posterior is constucted as follows:\n\n 1. Flatten the base distribution event shapes to vectors, and pack the base\n distributions into a `tfd.JointDistribution`.\n 2. Apply a trainable blockwise LinearOperator bijector to the joint base\n distribution.\n 3. Apply the constraining bijectors and return the resulting trainable\n `tfd.TransformedDistribution` instance.\n\n Args:\n base_distribution: `tfd.Distribution` instance (typically a\n `tfd.JointDistribution`), or a nested structure of `tfd.Distribution`\n instances.\n operators: Either a string or a list/tuple containing `LinearOperator`\n subclasses, `LinearOperator` instances, or callables returning\n `LinearOperator` instances. Supported string values are \"diag\" (to create\n a mean-field surrogate posterior) and \"tril\" (to create a full-covariance\n surrogate posterior). A list/tuple may be passed to induce other\n posterior covariance structures. If the list is flat, a\n `tf.linalg.LinearOperatorBlockDiag` instance will be created and applied\n to the base distribution. Otherwise the list must be singly-nested and\n have a first element of length 1, second element of length 2, etc.; the\n elements of the outer list are interpreted as rows of a lower-triangular\n block structure, and a `tf.linalg.LinearOperatorBlockLowerTriangular`\n instance is created. For complete documentation and examples, see\n `tfp.experimental.vi.util.build_trainable_linear_operator_block`, which\n receives the `operators` arg if it is list-like.\n Default value: `\"diag\"`.\n bijector: `tfb.Bijector` instance, or nested structure of `tfb.Bijector`\n instances, that maps (nested) values in R^n to the support of the\n posterior. (This can be the `experimental_default_event_space_bijector` of\n the distribution over the prior latent variables.)\n Default value: `None` (i.e., the posterior is over R^n).\n initial_unconstrained_loc_fn: Optional Python `callable` with signature\n `initial_loc = initial_unconstrained_loc_fn(shape, dtype, seed)` used to\n sample real-valued initializations for the unconstrained location of\n each variable.\n Default value: `functools.partial(tf.random.stateless_uniform,\n minval=-2., maxval=2., dtype=tf.float32)`.\n seed: Python integer to seed the random number generator for initial values.\n Default value: `None`.\n validate_args: Python `bool`. Whether to validate input with asserts. This\n imposes a runtime cost. 
If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e.,\n 'build_affine_surrogate_posterior_from_base_distribution').\n\n Returns:\n surrogate_distribution: Trainable `tfd.JointDistribution` instance.\n Raises:\n NotImplementedError: Base distributions with mixed dtypes are not supported.\n\n #### Examples\n ```python\n tfd = tfp.distributions\n tfb = tfp.bijectors\n\n # Fit a multivariate Normal surrogate posterior on the Eight Schools model\n # [1].\n\n treatment_effects = [28., 8., -3., 7., -1., 1., 18., 12.]\n treatment_stddevs = [15., 10., 16., 11., 9., 11., 10., 18.]\n\n def model_fn():\n avg_effect = yield tfd.Normal(loc=0., scale=10., name='avg_effect')\n log_stddev = yield tfd.Normal(loc=5., scale=1., name='log_stddev')\n school_effects = yield tfd.Sample(\n tfd.Normal(loc=avg_effect, scale=tf.exp(log_stddev)),\n sample_shape=[8],\n name='school_effects')\n treatment_effects = yield tfd.Independent(\n tfd.Normal(loc=school_effects, scale=treatment_stddevs),\n reinterpreted_batch_ndims=1,\n name='treatment_effects')\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn)\n\n # Pin the observed values in the model.\n target_model = model.experimental_pin(treatment_effects=treatment_effects)\n\n # Define a lower triangular structure of `LinearOperator` subclasses that\n # models full covariance among latent variables except for the 8 dimensions\n # of `school_effect`, which are modeled as independent (using\n # `LinearOperatorDiag`).\n operators = [\n [tf.linalg.LinearOperatorLowerTriangular],\n [tf.linalg.LinearOperatorFullMatrix, LinearOperatorLowerTriangular],\n [tf.linalg.LinearOperatorFullMatrix, LinearOperatorFullMatrix,\n tf.linalg.LinearOperatorDiag]]\n\n\n # Constrain the posterior values to the support of the prior.\n bijector = target_model.experimental_default_event_space_bijector()\n\n # Build a full-covariance surrogate posterior.\n surrogate_posterior = (\n tfp.experimental.vi.build_affine_surrogate_posterior_from_base_distribution(\n base_distribution=base_distribution,\n operators=operators,\n bijector=bijector))\n\n # Fit the model.\n losses = tfp.vi.fit_surrogate_posterior(\n target_model.unnormalized_log_prob,\n surrogate_posterior,\n num_steps=100,\n optimizer=tf.optimizers.Adam(0.1),\n sample_size=10)\n ```\n\n #### References\n\n [1] Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and\n Donald Rubin. 
Bayesian Data Analysis, Third Edition.\n Chapman and Hall/CRC, 2013.\n\n \"\"\"\n with tf.name_scope(\n name or 'build_affine_surrogate_posterior_from_base_distribution'):\n\n if nest.is_nested(base_distribution):\n base_distribution = (\n joint_distribution_util.independent_joint_distribution_from_structure(\n base_distribution, validate_args=validate_args))\n\n if nest.is_nested(bijector):\n bijector = joint_map.JointMap(\n nest.map_structure(\n lambda b: identity.Identity() if b is None else b, bijector),\n validate_args=validate_args)\n\n batch_shape = base_distribution.batch_shape_tensor()\n if tf.nest.is_nested(batch_shape): # Base is a classic JointDistribution.\n batch_shape = functools.reduce(ps.broadcast_shape,\n tf.nest.flatten(batch_shape))\n event_shape = base_distribution.event_shape_tensor()\n flat_event_size = nest.flatten(\n nest.map_structure(ps.reduce_prod, event_shape))\n\n base_dtypes = set(nest.flatten(base_distribution.dtype))\n if len(base_dtypes) > 1:\n raise NotImplementedError(\n 'Base distributions with mixed dtype are not supported. Saw '\n 'components of dtype {}'.format(base_dtypes))\n base_dtype = list(base_dtypes)[0]\n\n num_components = len(flat_event_size)\n if operators == 'diag':\n operators = [tf.linalg.LinearOperatorDiag] * num_components\n elif operators == 'tril':\n operators = [\n [tf.linalg.LinearOperatorFullMatrix] * i\n + [tf.linalg.LinearOperatorLowerTriangular]\n for i in range(num_components)]\n elif isinstance(operators, str):\n raise ValueError(\n 'Unrecognized operator type {}. Valid operators are \"diag\", \"tril\", '\n 'or a structure that can be passed to '\n '`tfp.experimental.vi.util.build_trainable_linear_operator_block` as '\n 'the `operators` arg.'.format(operators))\n\n if nest.is_nested(operators):\n seed, operators_seed = samplers.split_seed(seed)\n operators = (\n trainable_linear_operators.build_trainable_linear_operator_block(\n operators,\n block_dims=flat_event_size,\n dtype=base_dtype,\n batch_shape=batch_shape,\n seed=operators_seed))\n\n linop_bijector = (\n scale_matvec_linear_operator.ScaleMatvecLinearOperatorBlock(\n scale=operators, validate_args=validate_args))\n loc_bijector = joint_map.JointMap(\n tf.nest.map_structure(\n lambda s, seed: shift.Shift( # pylint: disable=g-long-lambda\n tf.Variable(\n initial_unconstrained_loc_fn(\n ps.concat([batch_shape, [s]], axis=0),\n dtype=base_dtype,\n seed=seed))),\n flat_event_size,\n samplers.split_seed(seed, n=len(flat_event_size))),\n validate_args=validate_args)\n\n unflatten_and_reshape = chain.Chain(\n [joint_map.JointMap(\n nest.map_structure(reshape.Reshape, event_shape),\n validate_args=validate_args),\n restructure.Restructure(\n nest.pack_sequence_as(event_shape, range(num_components)))],\n validate_args=validate_args)\n\n bijectors = [] if bijector is None else [bijector]\n bijectors.extend(\n [unflatten_and_reshape,\n loc_bijector, # Allow the mean of the standard dist to shift from 0.\n linop_bijector]) # Apply LinOp to scale the standard dist.\n bijector = chain.Chain(bijectors, validate_args=validate_args)\n\n flat_base_distribution = invert.Invert(\n unflatten_and_reshape)(base_distribution)\n\n return transformed_distribution.TransformedDistribution(\n flat_base_distribution, bijector=bijector, validate_args=validate_args)\n\n\ndef build_split_flow_surrogate_posterior(\n event_shape,\n trainable_bijector,\n constraining_bijector=None,\n base_distribution=normal.Normal,\n batch_shape=(),\n dtype=tf.float32,\n validate_args=False,\n name=None):\n \"\"\"Builds 
a joint variational posterior by splitting a normalizing flow.\n\n Args:\n event_shape: (Nested) event shape of the surrogate posterior.\n trainable_bijector: A trainable `tfb.Bijector` instance that operates on\n `Tensor`s (not structures), e.g. `tfb.MaskedAutoregressiveFlow` or\n `tfb.RealNVP`. This bijector transforms the base distribution before it is\n split.\n constraining_bijector: `tfb.Bijector` instance, or nested structure of\n `tfb.Bijector` instances, that maps (nested) values in R^n to the support\n of the posterior. (This can be the\n `experimental_default_event_space_bijector` of the distribution over the\n prior latent variables.)\n Default value: `None` (i.e., the posterior is over R^n).\n base_distribution: A `tfd.Distribution` subclass parameterized by `loc` and\n `scale`. The base distribution for the transformed surrogate has `loc=0.`\n and `scale=1.`.\n Default value: `tfd.Normal`.\n batch_shape: The `batch_shape` of the output distribution.\n Default value: `()`.\n dtype: The `dtype` of the surrogate posterior.\n Default value: `tf.float32`.\n validate_args: Python `bool`. Whether to validate input with asserts. This\n imposes a runtime cost. If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n Default value: `False`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'build_split_flow_surrogate_posterior').\n\n Returns:\n surrogate_distribution: Trainable `tfd.TransformedDistribution` with event\n shape equal to `event_shape`.\n\n ### Examples\n ```python\n\n # Train a normalizing flow on the Eight Schools model [1].\n\n treatment_effects = [28., 8., -3., 7., -1., 1., 18., 12.]\n treatment_stddevs = [15., 10., 16., 11., 9., 11., 10., 18.]\n model = tfd.JointDistributionNamed({\n 'avg_effect':\n tfd.Normal(loc=0., scale=10., name='avg_effect'),\n 'log_stddev':\n tfd.Normal(loc=5., scale=1., name='log_stddev'),\n 'school_effects':\n lambda log_stddev, avg_effect: (\n tfd.Independent(\n tfd.Normal(\n loc=avg_effect[..., None] * tf.ones(8),\n scale=tf.exp(log_stddev[..., None]) * tf.ones(8),\n name='school_effects'),\n reinterpreted_batch_ndims=1)),\n 'treatment_effects': lambda school_effects: tfd.Independent(\n tfd.Normal(loc=school_effects, scale=treatment_stddevs),\n reinterpreted_batch_ndims=1)\n })\n\n # Pin the observed values in the model.\n target_model = model.experimental_pin(treatment_effects=treatment_effects)\n\n # Create a Masked Autoregressive Flow bijector.\n net = tfb.AutoregressiveNetwork(2, hidden_units=[16, 16], dtype=tf.float32)\n maf = tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=net)\n\n # Build and fit the surrogate posterior.\n surrogate_posterior = (\n tfp.experimental.vi.build_split_flow_surrogate_posterior(\n event_shape=target_model.event_shape_tensor(),\n trainable_bijector=maf,\n constraining_bijector=(\n target_model.experimental_default_event_space_bijector())))\n\n losses = tfp.vi.fit_surrogate_posterior(\n target_model.unnormalized_log_prob,\n surrogate_posterior,\n num_steps=100,\n optimizer=tf.optimizers.Adam(0.1),\n sample_size=10)\n ```\n\n #### References\n\n [1] Andrew Gelman, John Carlin, Hal Stern, David Dunson, Aki Vehtari, and\n Donald Rubin. 
Bayesian Data Analysis, Third Edition.\n Chapman and Hall/CRC, 2013.\n\n \"\"\"\n with tf.name_scope(name or 'build_split_flow_surrogate_posterior'):\n\n shallow_structure = _get_event_shape_shallow_structure(event_shape)\n event_shape = nest.map_structure_up_to(\n shallow_structure, ps.convert_to_shape_tensor, event_shape)\n\n if nest.is_nested(constraining_bijector):\n constraining_bijector = joint_map.JointMap(\n nest.map_structure(\n lambda b: identity.Identity() if b is None else b,\n constraining_bijector), validate_args=validate_args)\n\n if constraining_bijector is None:\n unconstrained_event_shape = event_shape\n else:\n unconstrained_event_shape = (\n constraining_bijector.inverse_event_shape_tensor(event_shape))\n\n flat_base_event_shape = nest.flatten(unconstrained_event_shape)\n flat_base_event_size = nest.map_structure(\n tf.reduce_prod, flat_base_event_shape)\n event_size = tf.reduce_sum(flat_base_event_size)\n\n base_distribution = sample.Sample(\n base_distribution(tf.zeros(batch_shape, dtype=dtype), scale=1.),\n [event_size])\n\n # After transforming base distribution samples with `trainable_bijector`,\n # split them into vector-valued components.\n split_bijector = split.Split(\n flat_base_event_size, validate_args=validate_args)\n\n # Reshape the vectors to the correct posterior event shape.\n event_reshape = joint_map.JointMap(\n nest.map_structure(reshape.Reshape, unconstrained_event_shape),\n validate_args=validate_args)\n\n # Restructure the flat list of components to the correct posterior\n # structure.\n event_unflatten = restructure.Restructure(\n nest.pack_sequence_as(\n unconstrained_event_shape, range(len(flat_base_event_shape))))\n\n bijectors = [] if constraining_bijector is None else [constraining_bijector]\n bijectors.extend(\n [event_reshape, event_unflatten, split_bijector, trainable_bijector])\n bijector = chain.Chain(bijectors, validate_args=validate_args)\n\n return transformed_distribution.TransformedDistribution(\n base_distribution, bijector=bijector, validate_args=validate_args)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Structural Time Series base class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n# Dependency imports\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import util as tfp_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.sts.internal import util as sts_util\n\ntfl = tf.linalg\n\nParameter = collections.namedtuple('Parameter', ['name', 'prior', 'bijector'])\n\n\nclass StructuralTimeSeries(object):\n \"\"\"Base class for structural time series models.\n\n A StructuralTimeSeries object represents a declarative specification of a\n structural time series model, including priors on model parameters.\n It implements a joint probability model\n `p(params, y) = p(params) p(y | params)`,\n where `params` denotes a list of real-valued parameters specified by the child\n class, and `p(y | params)` is a linear Gaussian state space model with\n structure determined by the child class.\n \"\"\"\n\n def __init__(self, parameters, latent_size, name='StructuralTimeSeries'):\n \"\"\"Construct a specification for a structural time series model.\n\n Args:\n parameters: list of Parameter namedtuples, each specifying the name\n and prior distribution of a model parameter along with a\n bijective transformation from an unconstrained space to the support\n of that parameter. The order of this list determines the canonical\n parameter ordering used by fitting and inference algorithms.\n latent_size: Python `int` specifying the dimensionality of the latent\n state space for this model.\n name: Python `str` name for this model component.\n \"\"\"\n\n self._parameters = parameters\n self._latent_size = latent_size\n self._name = name\n\n @property\n def parameters(self):\n \"\"\"List of Parameter(name, prior, bijector) namedtuples for this model.\"\"\"\n return self._parameters\n\n @property\n def latent_size(self):\n \"\"\"Python `int` dimensionality of the latent space in this model.\"\"\"\n return self._latent_size\n\n @property\n def name(self):\n \"\"\"Name of this model component.\"\"\"\n return self._name\n\n @property\n def batch_shape(self):\n \"\"\"Static batch shape of models represented by this component.\n\n Returns:\n batch_shape: A `tf.TensorShape` giving the broadcast batch shape of\n all model parameters. This should match the batch shape of\n derived state space models, i.e.,\n `self.make_state_space_model(...).batch_shape`. 
It may be partially\n defined or unknown.\n \"\"\"\n batch_shape = tf.TensorShape([])\n for param in self.parameters:\n batch_shape = tf.broadcast_static_shape(\n batch_shape, param.prior.batch_shape)\n return batch_shape\n\n def batch_shape_tensor(self):\n \"\"\"Runtime batch shape of models represented by this component.\n\n Returns:\n batch_shape: `int` `Tensor` giving the broadcast batch shape of\n all model parameters. This should match the batch shape of\n derived state space models, i.e.,\n `self.make_state_space_model(...).batch_shape_tensor()`.\n \"\"\"\n batch_shape = tf.constant([], dtype=tf.int32)\n for param in self.parameters:\n batch_shape = tf.broadcast_dynamic_shape(\n batch_shape, param.prior.batch_shape_tensor())\n return batch_shape\n\n def _canonicalize_param_vals_as_map(self, param_vals):\n \"\"\"If given an ordered list of parameter values, build a name:value map.\n\n This is a utility method that allows parameter values to be specified as\n either lists or dicts, by transforming lists to a canonical dict\n representation.\n\n Args:\n param_vals: Python list (or other `iterable`) of `Tensor` values\n corresponding to the parameters listed in `self.parameters`,\n OR a map (Python `dict`) of parameter names to values.\n\n Returns:\n param_map: Python `dict` mapping from the names given in `self.parameters`\n to the specified parameter values.\n \"\"\"\n if hasattr(param_vals, 'keys'):\n param_map = param_vals\n else:\n param_map = {p.name: v for (p, v) in zip(self.parameters, param_vals)}\n\n return param_map\n\n def make_state_space_model(self,\n num_timesteps,\n param_vals,\n initial_state_prior=None,\n initial_step=0,\n **linear_gaussian_ssm_kwargs):\n \"\"\"Instantiate this model as a Distribution over specified `num_timesteps`.\n\n Args:\n num_timesteps: Python `int` number of timesteps to model.\n param_vals: a list of `Tensor` parameter values in order corresponding to\n `self.parameters`, or a dict mapping from parameter names to values.\n initial_state_prior: an optional `Distribution` instance overriding the\n default prior on the model's initial state. 
This is used in forecasting\n (\"today's prior is yesterday's posterior\").\n initial_step: optional `int` specifying the initial timestep to model.\n This is relevant when the model contains time-varying components,\n e.g., holidays or seasonality.\n **linear_gaussian_ssm_kwargs: Optional additional keyword arguments to\n to the base `tfd.LinearGaussianStateSpaceModel` constructor.\n\n Returns:\n dist: a `LinearGaussianStateSpaceModel` Distribution object.\n \"\"\"\n return self._make_state_space_model(\n num_timesteps=num_timesteps,\n param_map=self._canonicalize_param_vals_as_map(param_vals),\n initial_state_prior=initial_state_prior,\n initial_step=initial_step,\n **linear_gaussian_ssm_kwargs)\n\n def prior_sample(self,\n num_timesteps,\n initial_step=0,\n params_sample_shape=(),\n trajectories_sample_shape=(),\n seed=None):\n \"\"\"Sample from the joint prior over model parameters and trajectories.\n\n Args:\n num_timesteps: Scalar `int` `Tensor` number of timesteps to model.\n initial_step: Optional scalar `int` `Tensor` specifying the starting\n timestep.\n Default value: 0.\n params_sample_shape: Number of possible worlds to sample iid from the\n parameter prior, or more generally, `Tensor` `int` shape to fill with\n iid samples.\n Default value: `[]` (i.e., draw a single sample and don't expand the\n shape).\n trajectories_sample_shape: For each sampled set of parameters, number\n of trajectories to sample, or more generally, `Tensor` `int` shape to\n fill with iid samples.\n Default value: `[]` (i.e., draw a single sample and don't expand the\n shape).\n seed: PRNG seed; see `tfp.random.sanitize_seed` for details.\n\n Returns:\n trajectories: `float` `Tensor` of shape\n `trajectories_sample_shape + params_sample_shape + [num_timesteps, 1]`\n containing all sampled trajectories.\n param_samples: list of sampled parameter value `Tensor`s, in order\n corresponding to `self.parameters`, each of shape\n `params_sample_shape + prior.batch_shape + prior.event_shape`.\n \"\"\"\n\n seed = tfp_util.SeedStream(\n seed, salt='StructuralTimeSeries_prior_sample')\n\n with tf.name_scope('prior_sample'):\n param_samples = [\n p.prior.sample(params_sample_shape, seed=seed(), name=p.name)\n for p in self.parameters\n ]\n model = self.make_state_space_model(\n num_timesteps=num_timesteps,\n initial_step=initial_step,\n param_vals=param_samples)\n return model.sample(trajectories_sample_shape, seed=seed()), param_samples\n\n def joint_log_prob(self, observed_time_series):\n \"\"\"Build the joint density `log p(params) + log p(y|params)` as a callable.\n\n Args:\n observed_time_series: Observed `Tensor` trajectories of shape\n `sample_shape + batch_shape + [num_timesteps, 1]` (the trailing\n `1` dimension is optional if `num_timesteps > 1`), where\n `batch_shape` should match `self.batch_shape` (the broadcast batch\n shape of all priors on parameters for this structural time series\n model). Any `NaN`s are interpreted as missing observations; missingness\n may be also be explicitly specified by passing a\n `tfp.sts.MaskedTimeSeries` instance.\n\n Returns:\n log_joint_fn: A function taking a `Tensor` argument for each model\n parameter, in canonical order, and returning a `Tensor` log probability\n of shape `batch_shape`. Note that, *unlike* `tfp.Distributions`\n `log_prob` methods, the `log_joint` sums over the `sample_shape` from y,\n so that `sample_shape` does not appear in the output log_prob. 
This\n corresponds to viewing multiple samples in `y` as iid observations from a\n single model, which is typically the desired behavior for parameter\n inference.\n \"\"\"\n\n with tf.name_scope('joint_log_prob'):\n [\n observed_time_series,\n mask\n ] = sts_util.canonicalize_observed_time_series_with_mask(\n observed_time_series)\n\n num_timesteps = distribution_util.prefer_static_value(\n tf.shape(observed_time_series))[-2]\n\n def log_joint_fn(*param_vals, **param_kwargs):\n \"\"\"Generated log-density function.\"\"\"\n\n if param_kwargs:\n if param_vals: raise ValueError(\n 'log_joint_fn saw both positional args ({}) and named args ({}). '\n 'This is not supported: you have to choose!'.format(\n param_vals, param_kwargs))\n param_vals = [param_kwargs[p.name] for p in self.parameters]\n\n # Sum the log_prob values from parameter priors.\n param_lp = sum([\n param.prior.log_prob(param_val)\n for (param, param_val) in zip(self.parameters, param_vals)\n ])\n\n # Build a linear Gaussian state space model and evaluate the marginal\n # log_prob on observations.\n lgssm = self.make_state_space_model(\n param_vals=param_vals, num_timesteps=num_timesteps)\n observation_lp = lgssm.log_prob(observed_time_series, mask=mask)\n\n # Sum over likelihoods from iid observations. Without this sum,\n # adding `param_lp + observation_lp` would broadcast the param priors\n # over the sample shape, which incorrectly multi-counts the param\n # priors.\n sample_ndims = tf.maximum(0,\n tf.rank(observation_lp) - tf.rank(param_lp))\n observation_lp = tf.reduce_sum(\n observation_lp, axis=tf.range(sample_ndims))\n\n return param_lp + observation_lp\n\n return log_joint_fn\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for STS forecasting methods.\"\"\"\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\nclass _ForecastTest(object):\n\n def _build_model(self, observed_time_series,\n prior_batch_shape=(),\n initial_effect_prior_scale=1.,\n constant_offset=None):\n seasonal = tfp.sts.Seasonal(\n num_seasons=4,\n observed_time_series=observed_time_series,\n initial_effect_prior=tfd.Normal(\n loc=self._build_tensor(np.zeros(prior_batch_shape)),\n scale=self._build_tensor(initial_effect_prior_scale)),\n constrain_mean_effect_to_zero=False, # Simplifies analysis.\n name='seasonal')\n return tfp.sts.Sum(components=[seasonal],\n constant_offset=constant_offset,\n observed_time_series=observed_time_series)\n\n def test_one_step_predictive_correctness(self):\n observed_time_series_ = np.array([1., -1., -3., 4., 0.5, 2., 1., 3.])\n observed_time_series = self._build_tensor(observed_time_series_)\n model = self._build_model(\n observed_time_series,\n constant_offset=0.) # Simplifies analytic calculations.\n\n drift_scale = 0.1\n observation_noise_scale = 0.01\n params = {'seasonal/_drift_scale': self._build_tensor([drift_scale]),\n 'observation_noise_scale': self._build_tensor(\n [observation_noise_scale])}\n\n onestep_dist = tfp.sts.one_step_predictive(model, observed_time_series,\n parameter_samples=params)\n onestep_mean_, onestep_scale_ = self.evaluate(\n (onestep_dist.mean(), onestep_dist.stddev()))\n\n # Since Seasonal is just a set of interleaved random walks, it's\n # straightforward to compute the forecast analytically.\n # For the first (num_seasons - 1) steps, the one-step-ahead\n # forecast mean/scale are just the prior Normal(0., 1.). After that,\n # the predicted `n`th step depends on the posterior from the\n # `n - num_seasons` step.\n num_seasons = 4\n effect_posterior_precision = 1. 
+ 1/observation_noise_scale**2\n effect_posterior_means = (\n (observed_time_series_[:num_seasons] / observation_noise_scale**2)\n / effect_posterior_precision)\n effect_posterior_variance = 1/effect_posterior_precision\n observation_predictive_variance = (\n effect_posterior_variance + drift_scale**2 + observation_noise_scale**2)\n\n expected_onestep_mean = np.concatenate([np.zeros(4),\n effect_posterior_means])\n expected_onestep_scale = np.concatenate([\n [np.sqrt(1.**2 + observation_noise_scale**2)] * 4,\n [np.sqrt(observation_predictive_variance)] * 4])\n self.assertAllClose(onestep_mean_, expected_onestep_mean)\n self.assertAllClose(onestep_scale_, expected_onestep_scale)\n\n def test_one_step_predictive_with_batch_shape(self):\n num_param_samples = 5\n num_timesteps = 4\n batch_shape = [3, 2]\n observed_time_series = self._build_tensor(np.random.randn(\n *(batch_shape + [num_timesteps])))\n model = self._build_model(observed_time_series,\n prior_batch_shape=batch_shape[1:])\n prior_samples = [param.prior.sample(num_param_samples)\n for param in model.parameters]\n\n onestep_dist = tfp.sts.one_step_predictive(model, observed_time_series,\n parameter_samples=prior_samples)\n\n self.evaluate(tf1.global_variables_initializer())\n if self.use_static_shape:\n self.assertAllEqual(onestep_dist.batch_shape.as_list(), batch_shape)\n else:\n self.assertAllEqual(self.evaluate(onestep_dist.batch_shape_tensor()),\n batch_shape)\n onestep_mean_ = self.evaluate(onestep_dist.mean())\n self.assertAllEqual(onestep_mean_.shape, batch_shape + [num_timesteps])\n\n def test_forecast_correctness(self):\n observed_time_series_ = np.array([1., -1., -3., 4.])\n observed_time_series = self._build_tensor(observed_time_series_)\n model = self._build_model(\n observed_time_series,\n constant_offset=0.) # Simplifies analytic calculations.\n\n drift_scale = 0.1\n observation_noise_scale = 0.01\n params = {'seasonal/_drift_scale': self._build_tensor([drift_scale]),\n 'observation_noise_scale': self._build_tensor(\n [observation_noise_scale])}\n\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=params,\n num_steps_forecast=8,\n include_observation_noise=True)\n forecast_mean = forecast_dist.mean()[..., 0]\n forecast_scale = forecast_dist.stddev()[..., 0]\n forecast_mean_, forecast_scale_ = self.evaluate(\n (forecast_mean, forecast_scale))\n\n # Since Seasonal is just a set of interleaved random walks, it's\n # straightforward to compute the forecast analytically.\n effect_posterior_precision = 1. 
+ 1/observation_noise_scale**2\n effect_posterior_means = (\n (observed_time_series_ / observation_noise_scale**2)\n / effect_posterior_precision)\n effect_posterior_variance = 1/effect_posterior_precision\n observation_predictive_variance = (\n effect_posterior_variance + drift_scale**2 + observation_noise_scale**2)\n\n expected_forecast_mean = np.concatenate([effect_posterior_means,\n effect_posterior_means])\n expected_forecast_scale = np.concatenate([\n [np.sqrt(observation_predictive_variance)] * 4,\n [np.sqrt(observation_predictive_variance + drift_scale**2)] * 4])\n self.assertAllClose(forecast_mean_, expected_forecast_mean)\n self.assertAllClose(forecast_scale_, expected_forecast_scale)\n\n # Also test forecasting the noise-free function.\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=params,\n num_steps_forecast=8,\n include_observation_noise=False)\n forecast_mean = forecast_dist.mean()[..., 0]\n forecast_scale = forecast_dist.stddev()[..., 0]\n forecast_mean_, forecast_scale_ = self.evaluate(\n (forecast_mean, forecast_scale))\n\n noiseless_predictive_variance = (effect_posterior_variance + drift_scale**2)\n expected_forecast_scale = np.concatenate([\n [np.sqrt(noiseless_predictive_variance)] * 4,\n [np.sqrt(noiseless_predictive_variance + drift_scale**2)] * 4])\n self.assertAllClose(forecast_mean_, expected_forecast_mean)\n self.assertAllClose(forecast_scale_, expected_forecast_scale)\n\n def test_forecast_from_hmc(self):\n # test that we can directly plug in the output of an HMC chain as\n # the input to `forecast`, as done in the example, with no `sess.run` call.\n num_results = 5\n num_timesteps = 4\n num_steps_forecast = 3\n batch_shape = [1, 2]\n observed_time_series = self._build_tensor(np.random.randn(\n *(batch_shape + [num_timesteps])))\n model = self._build_model(observed_time_series)\n samples, _ = tfp.sts.fit_with_hmc(\n model, observed_time_series,\n num_results=num_results,\n num_warmup_steps=2,\n num_variational_steps=2)\n\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=samples,\n num_steps_forecast=num_steps_forecast)\n\n forecast_mean = forecast_dist.mean()[..., 0]\n forecast_scale = forecast_dist.stddev()[..., 0]\n\n sample_shape = [10]\n forecast_samples = forecast_dist.sample(sample_shape)[..., 0]\n\n self.evaluate(tf1.global_variables_initializer())\n forecast_mean_, forecast_scale_, forecast_samples_ = self.evaluate(\n (forecast_mean, forecast_scale, forecast_samples))\n self.assertAllEqual(forecast_mean_.shape,\n batch_shape + [num_steps_forecast])\n self.assertAllEqual(forecast_scale_.shape,\n batch_shape + [num_steps_forecast])\n self.assertAllEqual(forecast_samples_.shape,\n sample_shape + batch_shape + [num_steps_forecast])\n\n def test_forecast_with_batch_shape(self):\n num_param_samples = 5\n num_timesteps = 4\n num_steps_forecast = 6\n batch_shape = [3, 2]\n observed_time_series = self._build_tensor(np.random.randn(\n *(batch_shape + [num_timesteps])))\n\n # By not passing a constant offset, we test that the default behavior\n # (setting constant offset to the observed mean) works when the observed\n # time series has batch shape.\n model = self._build_model(observed_time_series,\n prior_batch_shape=batch_shape[1:])\n prior_samples = [param.prior.sample(num_param_samples)\n for param in model.parameters]\n\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=prior_samples,\n num_steps_forecast=num_steps_forecast)\n\n 
self.evaluate(tf1.global_variables_initializer())\n if self.use_static_shape:\n self.assertAllEqual(forecast_dist.batch_shape.as_list(), batch_shape)\n else:\n self.assertAllEqual(self.evaluate(forecast_dist.batch_shape_tensor()),\n batch_shape)\n forecast_mean = forecast_dist.mean()[..., 0]\n forecast_mean_ = self.evaluate(forecast_mean)\n self.assertAllEqual(forecast_mean_.shape,\n batch_shape + [num_steps_forecast])\n\n def test_methods_handle_masked_inputs(self):\n num_param_samples = 5\n num_timesteps = 4\n num_steps_forecast = 2\n\n # Build a time series with `NaN`s that will propagate if not properly\n # masked.\n observed_time_series_ = np.random.randn(num_timesteps)\n is_missing_ = np.random.randn(num_timesteps) > 0\n observed_time_series_[is_missing_] = np.nan\n observed_time_series = tfp.sts.MaskedTimeSeries(\n self._build_tensor(observed_time_series_),\n is_missing=self._build_tensor(is_missing_, dtype=np.bool_))\n\n model = self._build_model(observed_time_series)\n prior_samples = [param.prior.sample(num_param_samples)\n for param in model.parameters]\n\n forecast_dist = tfp.sts.forecast(model, observed_time_series,\n parameter_samples=prior_samples,\n num_steps_forecast=num_steps_forecast)\n\n forecast_mean_, forecast_stddev_ = self.evaluate((\n forecast_dist.mean(),\n forecast_dist.stddev()))\n self.assertTrue(np.all(np.isfinite(forecast_mean_)))\n self.assertTrue(np.all(np.isfinite(forecast_stddev_)))\n\n onestep_dist = tfp.sts.one_step_predictive(\n model, observed_time_series,\n parameter_samples=prior_samples)\n onestep_mean_, onestep_stddev_ = self.evaluate((\n onestep_dist.mean(),\n onestep_dist.stddev()))\n self.assertTrue(np.all(np.isfinite(onestep_mean_)))\n self.assertTrue(np.all(np.isfinite(onestep_stddev_)))\n\n def test_impute_missing(self):\n time_series_with_nans = self._build_tensor(\n [-1., 1., np.nan, 2.4, np.nan, np.nan, 2.])\n observed_time_series = tfp.sts.MaskedTimeSeries(\n time_series=time_series_with_nans,\n is_missing=tf.math.is_nan(time_series_with_nans))\n\n # Build model with a near-uniform prior on the initial effect. 
In principle\n # we should use a large scale like 1e8 here, but we use 1e2 because\n # increasing the scale triggers numerical issues with Kalman smoothing\n # described in b/138414045.\n model = self._build_model(observed_time_series,\n initial_effect_prior_scale=1e2)\n\n # Impute values using manually-set parameters, which will allow us to\n # compute the expected results analytically.\n drift_scale = 1.0\n noise_scale = 0.1\n parameter_samples = {'observation_noise_scale': [noise_scale],\n 'seasonal/_drift_scale': [drift_scale]}\n imputed_series_dist = tfp.sts.impute_missing_values(\n model, observed_time_series, parameter_samples)\n imputed_noisy_series_dist = tfp.sts.impute_missing_values(\n model, observed_time_series, parameter_samples,\n include_observation_noise=True)\n\n # Compare imputed mean to expected mean.\n mean_, stddev_ = self.evaluate([imputed_series_dist.mean(),\n imputed_series_dist.stddev()])\n noisy_mean_, noisy_stddev_ = self.evaluate([\n imputed_noisy_series_dist.mean(),\n imputed_noisy_series_dist.stddev()])\n self.assertAllClose(mean_, [-1., 1., 2., 2.4, -1., 1., 2.], atol=1e-2)\n self.assertAllClose(mean_, noisy_mean_, atol=1e-2)\n\n # Compare imputed stddevs to expected stddevs.\n drift_plus_noise_scale = np.sqrt(noise_scale**2 + drift_scale**2)\n expected_stddev = np.array([noise_scale,\n noise_scale,\n drift_plus_noise_scale,\n noise_scale,\n drift_plus_noise_scale,\n drift_plus_noise_scale,\n noise_scale])\n self.assertAllClose(stddev_, expected_stddev, atol=1e-2)\n self.assertAllClose(noisy_stddev_,\n np.sqrt(stddev_**2 + noise_scale**2), atol=1e-2)\n\n def _build_tensor(self, ndarray, dtype=None):\n \"\"\"Convert a numpy array to a TF placeholder.\n\n Args:\n ndarray: any object convertible to a numpy array via `np.asarray()`.\n dtype: optional `dtype`.\n\n Returns:\n placeholder: a TensorFlow `placeholder` with default value given by the\n provided `ndarray`, dtype given by `self.dtype` (if not specified), and\n shape specified statically only if `self.use_static_shape` is `True`.\n \"\"\"\n\n ndarray = np.asarray(ndarray).astype(self.dtype if dtype is None else dtype)\n return tf1.placeholder_with_default(\n ndarray, shape=ndarray.shape if self.use_static_shape else None)\n\n\n@test_util.test_all_tf_execution_regimes\nclass ForecastTestStatic32(test_util.TestCase, _ForecastTest):\n dtype = np.float32\n use_static_shape = True\n\n\n# Run in graph mode only to reduce test weight.\nclass ForecastTestDynamic32(test_util.TestCase, _ForecastTest):\n dtype = np.float32\n use_static_shape = False\n\n\n# Run in graph mode only to reduce test weight.\nclass ForecastTestStatic64(test_util.TestCase, _ForecastTest):\n dtype = np.float64\n use_static_shape = True\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.python.util.nest.flatten",
"tensorflow.compat.v2.name_scope",
"tensorflow.python.util.nest.map_structure_up_to",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.python.util.nest.is_nested",
"tensorflow.compat.v2.zeros",
"tensorflow.python.util.nest.get_traverse_shallow_structure",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.util.deprecation.deprecated_argument_lookup"
],
[
"tensorflow.compat.v2.rank",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.constant"
],
[
"numpy.sqrt",
"tensorflow.compat.v2.test.main",
"numpy.isfinite",
"tensorflow.compat.v2.math.is_nan",
"numpy.asarray",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.concatenate",
"numpy.random.randn",
"numpy.array",
"tensorflow.compat.v1.placeholder_with_default",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
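The row above bundles TensorFlow Probability's StructuralTimeSeries base class with its forecasting tests. Below is a minimal, hedged sketch (not taken from the row) of the workflow those files exercise: build a Sum-of-Seasonal model, sample parameters from their priors, evaluate the joint log-density via joint_log_prob(), and forecast with tfp.sts.forecast(). The series values and sample counts are made-up illustration inputs.

import numpy as np
import tensorflow_probability as tfp

# Placeholder data: 100 observed timesteps (illustration only).
observed_time_series = np.random.randn(100).astype(np.float32)

# Mirror the test's _build_model(): a Sum model with one Seasonal component.
seasonal = tfp.sts.Seasonal(num_seasons=4,
                            observed_time_series=observed_time_series,
                            name='seasonal')
model = tfp.sts.Sum(components=[seasonal],
                    observed_time_series=observed_time_series)

# joint_log_prob() returns a callable over parameters in canonical order,
# summing log p(params) and the marginal log p(y | params).
param_samples = [p.prior.sample() for p in model.parameters]
log_joint_fn = model.joint_log_prob(observed_time_series)
print(log_joint_fn(*param_samples))

# forecast() consumes per-parameter sample batches, as in the tests above.
forecast_dist = tfp.sts.forecast(
    model, observed_time_series,
    parameter_samples=[p.prior.sample(5) for p in model.parameters],
    num_steps_forecast=3)
print(forecast_dist.mean().shape)  # trailing dims: [num_steps_forecast, 1]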
gildas-ev/Game-of-Life
|
[
"5d101b5f63c2ef201442d2b78909b1ed048dba36"
] |
[
"file.py"
] |
[
"# Imports\r\nimport numpy as np\r\nfrom scipy.ndimage import convolve\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import animation\r\nimport pickle\r\nimport time\r\n\r\nshape = (300, 300) # Shape of the grid (width x height)\r\ndensity = 0.30 # ~Living cells density at t0\r\nn_steps = 250 # Number of steps\r\ndelay = 0.1 # Delay between each frame in s\r\n\r\ndef gen(shape, density): # Generate the grid\r\n space = np.random.uniform(size=(shape)) # Random float array (between 0 and 1)\r\n space = np.where(space < density, 1, 0) # Where space < density cells are living\r\n return space\r\n\r\ndef step(space): # Compute the next step of space\r\n initial_shape = space.shape # Save initial shape\r\n pattern = np.array([[1,1,1], [1,0,1], [1,1,1]])\r\n neighbours = convolve(space, pattern, mode='constant') # Convolve sum the neighbours of each cells, according to the pattern\r\n space, neighbours = space.flatten(), neighbours.flatten() # From 2d to 1d array\r\n def new_value(space, neighbours): # Return the new cell state\r\n if neighbours == 3: # If there are 3 neighbours, the cell will be alive\r\n return 1\r\n elif neighbours == 2: # If there are 2 neighbours, the state of the cell will not change\r\n return space\r\n return 0 # In all other cases, the cell will die\r\n new = np.array(list(map(new_value, space, neighbours))) # Compute next step array by mapping with new_value function\r\n return new.reshape(initial_shape) # Return the reshaped new array\r\n\r\n# Initialize space (create a random grid or import a pattern)\r\nspace = gen(shape, density) # Create a random grid\r\n#with open('patterns/pentadecathlon.pickle', 'rb') as handle: # Import a pattern\r\n #space = pickle.load(handle)\r\n\r\n# Compute all steps\r\nsnapshots = [space]\r\nfor loop in range(n_steps-1):\r\n snapshots.append(step(snapshots[-1]))\r\n\r\n# Create figure\r\nfig = plt.figure()\r\nim = plt.imshow(space, interpolation='none', aspect='auto', vmin=0, vmax=1)\r\n\r\ndef animate_func(i): # Return each step\r\n im.set_array(snapshots[i])\r\n return [im]\r\n\r\nanim = animation.FuncAnimation(fig,\r\n animate_func,\r\n frames = n_steps,\r\n interval = (delay*1000)) # Animation\r\n\r\nanim.save(f'{int(time.time())}.html') # Save the animation in dir\r\nplt.show()\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"scipy.ndimage.convolve",
"matplotlib.animation.FuncAnimation",
"numpy.random.uniform",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
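The Game-of-Life row above computes neighbour counts with scipy.ndimage.convolve but then applies the update rule through a Python-level map(). As a hedged aside (not part of the repository), the same rule can be applied fully vectorized with np.where; the blinker grid below is an illustration input.

import numpy as np
from scipy.ndimage import convolve

PATTERN = np.array([[1, 1, 1],
                    [1, 0, 1],
                    [1, 1, 1]])

def step_vectorized(space):
    # Count the 8 neighbours of every cell (zero padding at the borders).
    neighbours = convolve(space, PATTERN, mode='constant')
    # Alive next step if exactly 3 neighbours, or 2 neighbours and currently alive.
    return np.where((neighbours == 3) | ((neighbours == 2) & (space == 1)), 1, 0)

# Quick check against a blinker (period-2 oscillator).
blinker = np.zeros((5, 5), dtype=int)
blinker[2, 1:4] = 1
assert np.array_equal(step_vectorized(step_vectorized(blinker)), blinker)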
isabella232/group_testing
|
[
"b1023442d3b8ef98c04dd1c406d5cc8349d37da7"
] |
[
"group_testing/group_selectors/random.py"
] |
[
"# coding=utf-8\n# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Defines several random group selection strategies.\"\"\"\n\nimport itertools\nfrom typing import Tuple\n\nimport gin\nfrom group_testing import utils\nfrom group_testing.group_selectors import group_selector\nimport jax\nimport jax.numpy as np\nimport numpy as onp\nimport scipy.special\n\n\n\[email protected]\nclass RandomSelector(group_selector.GroupSelector):\n \"\"\"Selects groups randomly.\"\"\"\n\n def get_groups(self, rng, state):\n shape = (state.extra_tests_needed, state.num_patients)\n threshold = state.max_group_size / state.num_patients\n return jax.random.uniform(rng, shape=shape) < threshold\n\n\[email protected]\nclass Mezard(group_selector.GroupSelector):\n \"\"\"Selects groups randomly with predefined/adaptive-to-infection-rate size.\"\"\"\n\n def __init__(self, group_size: int = None):\n super().__init__()\n self.group_size = group_size\n\n def get_groups(self, rng, state):\n \"\"\"Produces random design matrix fixed number of 1s per line.\n\n Args:\n rng: np.ndarray<int>[2]: the random key.\n state: the current state.State of the system.\n\n Returns:\n A np.array<bool>[num_groups, patients].\n \"\"\"\n if self.group_size is None:\n if np.size(state.prior_infection_rate) == 1:\n # candidate group sizes\n group_sizes = np.arange(state.max_group_size) + 1\n sensitivity = utils.select_from_sizes(state.prior_sensitivity,\n group_sizes)\n specificity = utils.select_from_sizes(state.prior_specificity,\n group_sizes)\n rho = specificity + sensitivity - 1\n utility_size_groups = (sensitivity - rho *\n (1 - state.prior_infection_rate)**group_sizes -\n 0.5)**2\n group_size = group_sizes[np.argmin(utility_size_groups)]\n else:\n group_size = state.max_group_size\n else:\n group_size = self.group_size\n\n group_size = int(np.squeeze(group_size))\n new_groups = np.empty((0, state.num_patients), dtype=bool)\n for _ in range(state.extra_tests_needed):\n rng, rng_shuffle = jax.random.split(rng, 2)\n vec = np.zeros((1, state.num_patients), dtype=bool)\n idx = jax.random.permutation(rng_shuffle, np.arange(state.num_patients))\n vec = jax.ops.index_update(vec, [0, idx[0:group_size]], True)\n new_groups = np.concatenate((new_groups, vec), axis=0)\n return new_groups\n\n\ndef macula_matrix(d: int, k: int, n: int):\n \"\"\"Produces d-separable design matrix.\"\"\"\n # https://core.ac.uk/download/pdf/82758506.pdf\n n_groups = int(scipy.special.comb(n, d))\n n_cols = int(scipy.special.comb(n, k))\n new_groups = np.zeros((n_groups, n_cols), dtype=bool)\n comb_groups = itertools.combinations(range(n), d)\n comb_cols = itertools.combinations(range(n), k)\n d_vec = np.zeros((n_groups, n), dtype=bool)\n k_vec = np.zeros((n_cols, n), dtype=bool)\n for i, comb_g in enumerate(comb_groups):\n d_vec[i, comb_g] = True\n for j, comb_c in enumerate(comb_cols):\n k_vec[j, comb_c] = True\n\n for i in range(n_groups):\n for j in range(n_cols):\n new_groups[i, j] = np.all(\n 
np.logical_or(np.logical_not(d_vec[i, :]), k_vec[j, :]))\n return new_groups\n\n\ndef sample_groups_of_size(shape: Tuple[int], group_size: int):\n \"\"\"Creates a matrix with k rows, of size n with g ones randomly placed.\"\"\"\n random_groups = onp.zeros(shape, dtype=bool)\n num_tests, num_patients = shape\n for i in range(num_tests):\n random_subset = onp.random.choice(num_patients, group_size, replace=False)\n random_groups[i, random_subset] = True\n return random_groups\n\n\[email protected]\ndef count_mean(values_arr: onp.ndarray):\n return onp.mean(values_arr)\n\n\[email protected]\ndef count_min(values_arr: onp.ndarray):\n return onp.min(values_arr)\n\n\ndef eval_disjoint(mat: onp.ndarray, d: int, count_fn):\n \"\"\"Evaluates how disjoint a matrix is based on count_fn.\"\"\"\n num_rows, num_cols = mat.shape\n count = 0\n for s in itertools.combinations(range(num_cols), d):\n boolean_sum_subset = onp.sum(mat[:, s], axis=1) > 0\n boolean_sum_mat = onp.broadcast_to(boolean_sum_subset[:, None],\n (num_rows, num_cols - d))\n complement_subset_mat = onp.delete(mat, s, axis=1)\n ng_diag = onp.amax(complement_subset_mat > boolean_sum_mat, axis=0)\n count += count_fn(1 * ng_diag)\n return count\n\n\ndef is_disjoint(mat: onp.ndarray, d: int):\n \"\"\"Checks that a matrix is d-disjoint.\"\"\"\n num_rows, num_cols = mat.shape\n for s in itertools.combinations(range(num_cols), d):\n boolean_sum_subset = onp.sum(mat[:, s], axis=1) > 0\n boolean_sum_mat = onp.broadcast_to(boolean_sum_subset[:, None],\n (num_rows, num_cols - d))\n complement_subset_mat = onp.delete(mat, s, axis=1)\n ng_diag = onp.amax(complement_subset_mat > boolean_sum_mat, axis=0)\n if not min(ng_diag):\n return False\n return True\n\n\ndef sample_disjoint_matrix(num_cols: int,\n num_rows: int,\n n_max_test: int,\n d: int,\n max_tries=1e2):\n \"\"\"Samples matrix without replacement until disjoint check passes.\"\"\"\n attempt = 0\n while attempt < max_tries:\n attempt += 1\n groups = sample_groups_of_size((num_rows, num_cols), n_max_test)\n if is_disjoint(groups, d):\n return groups\n return None\n\n\ndef sample_maxeval_disjoint_matrix(num_cols: int,\n num_rows: int,\n n_max_test: int,\n d: int,\n max_tries=100,\n count_fn=count_mean):\n \"\"\"Samples matrices, returns the most count-disjoint matrix.\"\"\"\n random_groups = sample_groups_of_size((num_rows, num_cols), n_max_test)\n count = eval_disjoint(random_groups, d, count_fn=count_fn)\n max_count = int(scipy.special.comb(num_cols, d))\n attempt = 0\n while attempt < max_tries and count < max_count:\n attempt += 1\n random_groups_iter = sample_groups_of_size((num_rows, num_cols), n_max_test)\n count_iter = eval_disjoint(random_groups, d, count_fn=count_fn)\n if count_iter > count:\n count = count_iter\n random_groups = random_groups_iter\n if count == max_count:\n return random_groups, count\n return random_groups, count\n\n\[email protected]\nclass RandomDisjoint(group_selector.GroupSelector):\n \"\"\"Selects groups randomly with predefined size.\"\"\"\n\n def __init__(self, max_iter=1, method='count', count_fn=count_mean):\n super().__init__()\n self.max_iter = max_iter\n self.method = method\n self.count_fn = count_fn\n\n def get_groups(self, state):\n \"\"\"Produces random design matrix with nmax 1s per line.\n\n Args:\n state: the current state.State of the system.\n\n Returns:\n A np.array<bool>[num_groups, patients].\n \"\"\"\n if self.method == 'single':\n new_groups = sample_groups_of_size(\n (state.num_patients, state.extra_tests_needed), state.max_group_size)\n # 
if prior_infection_rate is a scalar take average otherwise sum\n if np.size(self.prior_infection_rate) == 1:\n max_infected = int(\n np.ceil(self.prior_infection_rate * state.num_patients))\n elif np.size(self.prior_infection_rate) == state.num_patients:\n max_infected = int(np.sum(self.prior_infection_rate))\n\n if self.method == 'disjoint':\n new_groups = sample_disjoint_matrix(state.num_patients,\n state.extra_tests_needed,\n state.max_group_size, max_infected,\n self.max_iter)\n if new_groups is None:\n raise ValueError('No satisfying matrix found after max iterations')\n if self.method == 'count':\n new_groups, _ = sample_maxeval_disjoint_matrix(\n state.num_patients, state.extra_tests_needed, state.max_group_size,\n max_infected, self.max_iter, self.count_fn)\n\n return np.array(new_groups)\n"
] |
[
[
"numpy.amax",
"numpy.random.choice",
"numpy.min",
"numpy.delete",
"numpy.mean",
"numpy.broadcast_to",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
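The group_testing row above builds random pooled-testing design matrices and checks d-disjointness. The self-contained sketch below (an assumption-laden illustration, not an import of group_testing.group_selectors.random) mirrors its sample_groups_of_size() helper and the brute-force idea behind is_disjoint(); the matrix sizes are made-up values.

import itertools
import numpy as np

def sample_groups_of_size(shape, group_size, seed=0):
    # Boolean design matrix: each row (test) pools `group_size` random patients.
    rng = np.random.default_rng(seed)
    num_tests, num_patients = shape
    groups = np.zeros(shape, dtype=bool)
    for i in range(num_tests):
        groups[i, rng.choice(num_patients, group_size, replace=False)] = True
    return groups

def is_disjoint(mat, d):
    # d-disjoint: no column is covered by the boolean OR of any d other columns.
    num_cols = mat.shape[1]
    for s in itertools.combinations(range(num_cols), d):
        covered = mat[:, s].any(axis=1)      # boolean sum of the d chosen columns
        others = np.delete(mat, s, axis=1)   # remaining columns
        # Every remaining column needs a row where it is True but `covered` is not.
        if not (others & ~covered[:, None]).any(axis=0).all():
            return False
    return True

groups = sample_groups_of_size((8, 12), group_size=3)
print(groups.astype(int))
print('1-disjoint:', is_disjoint(groups, d=1))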
sassystacks/TissueMechanicsLab
|
[
"0f881a57ebf7cbadfeb2041daabd4e4b79b25b91",
"0f881a57ebf7cbadfeb2041daabd4e4b79b25b91",
"0f881a57ebf7cbadfeb2041daabd4e4b79b25b91"
] |
[
"Analysis/CardioVascularLab/Testing/BiaxDataBinning/BinRawBiaxData.py",
"Analysis/CardioVascularLab/ExVivo/Analyzer/TransitionProperties.py",
"Analysis/CardioVascularLab/ExVivo/uniaxanalysis/getproperties.py"
] |
[
"import os\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass ReadParams:\n\n def __init__(self):\n self.DotFileHeaders = [ 'FrameTime', 'X0', 'Y0', 'X1', 'Y1', 'X2', 'Y2',\n 'X3', 'Y3', 'X4', 'Y4', 'E11', 'E22', 'E12',\n 'PS1', 'SA1', 'PS2', 'SA2']\n self.Directions = ['X0', 'Y0', 'X1', 'Y1', 'X2', 'Y2',\n 'X3', 'Y3', 'X4', 'Y4']\n self.HeadersToPop = ['E11', 'E22', 'E12', 'PS1', 'SA1', 'PS2', 'SA2']\n\ndef _plotXYPairs(df,params):\n\n\n fig, axes = plt.subplots(nrows=int(len(params)/2), ncols=2)\n fig.subplots_adjust(hspace=0.5)\n fig.suptitle('Each point vs time')\n # ax = axes.flatten()\n time = df['FrameTime']\n\n\n for ax, direction in zip(axes.flatten(),df[params]):\n\n ax.scatter(time,df[direction])\n ax.set(title=direction.upper(), xlabel='time')\n\n plt.show()\n\n\ndef _BuildFileList(f):\n\n if os.path.isdir(f): # if f is a directory\n topDir = f\n t_fileList = os.listdir(f)\n fileList = [os.path.join(topDir,file) for file in t_fileList]\n\n elif os.path.isfile(f): # if f is just a file\n fileList = [f]\n\n else:\n fileList = [] # if f isn't either a file or a directory\n print(\"This isn't a file or a directory\")\n\n return fileList\n\ndef _ConvertTimeToFloatStartingZero(df):\n # Convert the time to time delta type\n df['FrameTime'] = pd.to_timedelta(df['FrameTime'])\n df = df.assign(FrameTime = [x.seconds * 1.0 for x in df['FrameTime']] )\n df['FrameTime'] = df['FrameTime'] - df['FrameTime'][0]\n\n return df\n\ndef _main(f):\n\n files = _BuildFileList(f)\n\n try:\n # Read the csv\n df = pd.read_csv(f,skiprows=1,header=None)\n # Set the column names from\n df.columns = ReadParams().DotFileHeaders\n # Remove all the columns that aren't position or time\n [df.pop(x) for x in ReadParams().HeadersToPop]\n\n df = _ConvertTimeToFloatStartingZero(df)\n\n # _plotXYPairs(df,['Y0','Y1','Y2','Y3'])\n _plotXYPairs(df,['X0','X1','X2','X3'])\n\n\n except Exception as e:\n print(e)\n\n\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-fo','--folder',help=\"Enter an entire folder to have the data binned\")\n group.add_argument('-f','--file',help=\"Enter a single file to have the data binned\")\n\n args = parser.parse_args()\n if args.folder:\n f = args.folder\n else:\n f = args.file\n\n _main(f)\n #\n # df = pd.read_csv(f)\n #\n # df = df[['Sample','Specimen','Width','Thickness']]\n # df = df.iloc[::2,:]\n #\n # df.to_csv(\"AAAMissingDimensions_Jan2018.csv\",index=False)\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom rdp import rdp\n\n'''\nthis class is used in conjunction with the rdp algorithm where the values\nfrom the rdp are used to identify specific points in the graph.\ninputs:\n stress_strain = numpy array\n'''\nclass ProcessTransitionProperties:\n\n def __init__(self, stress_strain=np.array([]), stress_strain_norm=np.array([]), identifier='', eps=0.01):\n\n # send in the data as a n x 2 numpy array\n self.eps = eps\n self.stress_strain = stress_strain\n self.stress_strain_norm = stress_strain_norm\n\n self.identifier = identifier\n\n self.transition_index_start = None\n self.transition_index_end = None\n\n self.mtm_low = None\n self.mtm_high = None\n self.transition_stress_strain_start = [None,None]\n self.transition_stress_strain_end = [None,None]\n self.max_stress = None\n self.max_stress_indx = None\n self._runTransitionProps()\n # run the RDP algorithm on the normalized data\n\n\n def _setStressStrain(self,array1,array2):\n\n if array1.shape[1] == 2 and array2.shape[1] == 2:\n self.stress_strain = array1\n self.stress_strain_norm = array2\n else:\n print(\"The stress strain data must be a 2 dimensional numpy array\")\n\n def _invNorm(self, array_norm, array_orig):\n #separate arrays into X and Y components\n x_unzip1, y_unzip1 = map(list,zip(*array_norm))\n x_unzip, y_unzip = map(list,zip(*array_orig))\n\n\n xdata = np.array(x_unzip)\n ydata = np.array(y_unzip)\n\n x_norm = np.array(x_unzip1)\n y_norm = np.array(y_unzip1)\n\n\n #unnormalize data\n x_inv = x_norm * (np.max(xdata) - np.min(xdata)) + np.min(xdata)\n y_inv = y_norm * (np.max(ydata) - np.min(ydata)) + np.min(ydata)\n\n # combine arrays together\n unscaledArray = np.stack((x_inv, y_inv), axis = -1)\n\n return unscaledArray\n\n def _runTransitionProps(self):\n if self.stress_strain.size:\n self.rdp_norm = rdp(self.stress_strain_norm,epsilon=self.eps) #\n self.rdp = self._invNorm(self.rdp_norm, self.stress_strain)\n\n # Filter it to remove lines that are artifacts of the test\n self._filterRDP()\n\n self._setAllValues()\n else:\n self.rdp = np.array([])\n\n def _normalizeData(self):\n\n if self.stress_strain.size:\n pass\n\n def _outputAllValues(self,outputDict=None):\n\n # This is to check if there was a dictionary input to append the values\n # to. 
If there wasn't then create a new one.\n if outputDict is None:\n outputDict = {}\n\n outputDict['MTMLow_' + self.identifier] = self.mtm_low\n outputDict['MTMhigh_' + self.identifier] = self.mtm_high\n outputDict['MaxStress_' + self.identifier] = self.max_stress\n outputDict['T_Stress_Start_' + self.identifier] = \\\n self.transition_stress_strain_start[1]\n outputDict['T_Strain_Start_' + self.identifier] = \\\n self.transition_stress_strain_start[0]\n outputDict['T_Stress_End_' + self.identifier] = \\\n self.transition_stress_strain_end[1]\n outputDict['T_Strain_End_' + self.identifier] = \\\n self.transition_stress_strain_end[0]\n outputDict['Elbow_Region_' + self.identifier] = self.elbow\n\n return outputDict\n\n def _setAllValues(self):\n #print(self.rdp)\n self._setMaxStress()\n self._setRDP()\n if self.rdp.any():\n self._setTransitionIndexStart(self.rdp[1])\n self._setMTMLow(self.rdp[0],self.rdp[1])\n self.elbow = False\n\n # Use this to check if the slope of the lines is increasing\n\n if len(self.rdp) > 2:\n # mtmhighpoint = self._fitLineForMTMHigh(self.rdp[-2],\n # self.rdp[-1])\n self._setTransitionStressStrainStart()\n self._setMTMHigh(self.rdp[-2],self.rdp[-1]) #get MTMhigh for no elbow as well\n\n if len(self.rdp) > 3:\n self._setTransitionIndexEnd(self.rdp[-2])\n self._setTransitionStressStrainEnd()\n self.elbow = True\n\n if len(self.rdp) < 4: # clear transition stress and strain for no elbow\n #print(self.transition_stress_strain_end)\n empty = np.empty(self.transition_stress_strain_end.size)\n empty[:]=np.NaN\n self.transition_stress_strain_end = empty\n self.transition_stress_strain_start = empty\n\n # self._testPlotter([self.stress_strain,self.rdp])\n\n\n def _setTransitionIndexEnd(self,p):\n\n self.transition_index_end = \\\n self._getIndexAtPoint(self.stress_strain[...,[0]],\n p[0])[0][0]\n\n def _setTransitionIndexStart(self,p):\n\n self.transition_index_start = \\\n self._getIndexAtPoint(self.stress_strain[...,[0]],p[0])[0][0]\n\n def _getIndexAtPoint(self,data,value):\n data_round = np.around(data, 3)\n value = round(value, 3)\n\n return np.where(data_round == value)\n\n def _slopeFrom2Points(self,p1,p2):\n slope = 0\n if not np.array_equal(p1,p2):\n slope = (p2[1] - p1[1])/(p2[0] - p1[0])\n return slope\n\n def _setMTMLow(self,p1,p2):\n # This is the first line identified by RDP algorithm\n self.mtm_low = self._slopeFrom2Points(p1,p2)\n\n def _setMTMHigh(self,p1,p2):\n # If there are more than 2 lines returned by the RDP then\n # there is an elbow and a second mtm. 
The furthest point is identified,\n # from the the last line segment and a line segment is fit to that point.\n self.mtm_high = self._slopeFrom2Points(p1,p2)\n\n def _setRDP(self):\n self.RDP = self.rdp\n # def _setElbow(self):\n # #If there is an elbow set as true\n # self.elbow = True\n\n def _setTransitionStressStrainEnd(self):\n # Stress at the end of the non linear portion of curve.\n # if there are more than 3 lines desribing the curve, this is the stress\n # that is at the beginning of the mtm high\n\n self.transition_stress_strain_end = \\\n self.stress_strain[self.transition_index_end]\n\n def _setTransitionStressStrainStart(self):\n # Stress at the onset of the non linear portion of curve.\n # if there are more than 2 lines desribing the curve, this is the stress\n # that is at the end of the MTMLow\n\n self.transition_stress_strain_start = \\\n self.stress_strain[self.transition_index_start]\n\n def _setMaxStress(self):\n # This is the maximum stress value in the data.\n self.max_stress = max(self.stress_strain[...,1])\n self.max_stress_indx = np.argmax(self.stress_strain[...,1])\n\n def _distancesFromLineDefinedByTwoPoints(self, p_line1, p_line2, points):\n lineDistance = np.sqrt((p_line2[1] - p_line1[1])** \\\n + (p_line2[0] - p_line1[0]))\n distances = np.abs((p_line2[1] - p_line2[1])*points[...,0] \\\n -(p_line2[0] - p_line1[0])*points[...,1] \\\n + p_line2[0]*p_line1[1] - p_line2[1]*p_line1[0]) \\\n / lineDistance\n return distances\n\n def _testPlotter(self,datas):\n fig, ax = plt.subplots(figsize=(10,20))\n\n for data in datas:\n if len(data) > 1:\n ax.plot(data[...,0], data[...,1])\n else:\n ax.scatter(data[0],data[1])\n plt.show()\n\n def _fitLineForMTMHigh(self,p1,p2):\n\n index = self._getIndexAtPoint(self.stress_strain, p1)[0][0]\n\n pointDistances = self._distancesFromLineDefinedByTwoPoints(p1, p2,\n self.stress_strain[index:])\n\n\n subIndex = np.argmax(pointDistances)\n\n fullIndex = index + subIndex\n\n\n return self.stress_strain[fullIndex]\n\n def _fitLineForMTM(self):\n\n self.LTM_line = np.concatenate((self.rdp[0], self.rdp[1]), axis = 0)\n self.HTM_line = np.concatenate((self.rdp[-2], self.rdp[-1]), axis = 0)\n\n return self.LTM_line, self.HTM_line\n\n def _filterRDP(self):\n\n # Make sure that the second slope isn't less than the first. If\n # it is that means it's not a transition zone\n # import pdb; pdb.set_trace()\n t_rdp = []\n count = 0\n\n # t_rdp.append(self.rdp[count + 1])\n m1 = 0\n m2 = self._slopeFrom2Points(self.rdp[count], self.rdp[count + 1])\n # count += 1\n # m2 = self._slopeFrom2Points(self.rdp[count], self.rdp[count + 1])\n t_rdp.append(self.rdp[count])\n count += 1\n t_rdp.append(self.rdp[count])\n\n while (count + 1) < len(self.rdp) :\n\n m1 = m2\n # m1 = self._slopeFrom2Points(self.rdp[0], self.rdp[1])\n m2 = self._slopeFrom2Points(self.rdp[count], self.rdp[count + 1])\n if m2 > m1:\n count += 1\n t_rdp.append(self.rdp[count])\n else:\n break\n\n self.rdp = np.array(t_rdp)\n # if m2 > m1:\n # self.checkTransition = 1\n # return self.checkTransitioncount += 1\n",
"\n'''\nThis class inherits from the parsecsv module to extract properties\nin the uniax data. It also converts the force Displacement data into\nStress and strain. The program finds a linear region before the point of Failure\na neohookean fit, and a slope of a tangent line at 15 percent of the strain.\n\nTo Do:\n\n-Make 15 percent tangent function\n-Make NeoHookean fit function\n-vary targetVal in reduceData()\n-Plot Fig function\n\nNotes:\n-Turned off print\n'''\n\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport uniaxanalysis.parsecsv # homemade module\nfrom scipy import signal\n\n\nclass getproperties(object):\n\n def __init__(self, fileDimslist, stresstype='cauchy', straintype='engineering', targetReduction=1000, **kwargs):\n # Inherit from parsecsv to be able to extract data\n # super(getproperties,self).__init__(**kwargs)\n\n # These variables are specific to the getproperties() class\n self.smooth_width = kwargs.get('smooth_width', 101) # for smoothing the curve\n self.chkderivative = kwargs.get('chkderivate', .05)\n self.timeStep = kwargs.get(\"timestep\", None)\n self.stresstype = stresstype\n self.straintype = straintype\n\n # The standard deviation for the windowed gaussian function\n self.std = kwargs.get('std', 7)\n\n # Parse fileDimslist for values needed. input format for fileDimslist:\n # [\"Sample_Specimen\",\"Filename\",\"Width\",\"Thickness\",\"G-G\"]\n self.sample = fileDimslist[0]\n self.fname = fileDimslist[1]\n\n # Only needed for a single calculation each\n width = fileDimslist[2]\n thickness = fileDimslist[3]\n initialLength = fileDimslist[4]\n\n [force, displacement, time] = self.getForceDisplacement(skip=5) # Get from CSV\n if targetReduction is not None:\n [displacement, force] = self.reduceData(\n targetReduction, displacement, force) # reduce number of data points\n\n self.calcStrain(displacement,initialLength)\n\n self.calcStress(force, width, thickness, displacement, initialLength)\n\n\n # use moving average to smooth data\n self.stress = self.applySavgol(self.stress)\n\n #self.strain = self.movingAverage(self.strain)\n\n # Index where failure occurs\n self.failIndx = self.findFailure(self.stress)\n\n # Get the second derivative of the gaussian convolution of the curve\n self.secondDer = self.convolveWithGauss(self.stress[:self.failIndx], smooth_width=101, bounds=3)\n\n # returns a list of 2 numbers linearRange[0] is the start of the linear range\n # linearRange[1] is the end of the linear range\n\n self.linearRange = self.getlinearRange(self.secondDer)\n start = self.linearRange[0]\n end = self.linearRange[1]\n\n # This is a little messy but returns the function of the line, and the coefficients\n func,linearCoefficients = self.fitlineToData(self.strain[start:end],self.stress[start:end])\n\n # build the line for the linear region\n self.xline = np.linspace(self.strain[start],self.strain[end],100)\n self.yline = self.yValuesFromPolyFit(self.xline, func)\n\n # get the slope of the line for the stiffness\n self.stiffness = linearCoefficients[0]\n\n # get the failure point and set as the strength\n self.strength = self.stress[self.failIndx]\n\n #find maximum\n #self.maxInd = self._findMax(self.stress, index=10)\n\n # truncate data\n # self.strain, self.stress = self.truncData(self.strain, self.stress, self.maxInd)\n\n #normalize\n self.stress_norm, self.strain_norm = self._normalizeData(self.stress, self.strain, self.failIndx)\n\n # Use the Ramer-Douglas-Peucker Algorithm to create a linear representaiton\n # 
self.rdpOutput = self.testRDP(self.stress[:self.failIndx], .1)\n\n\n def testRDP(self,data,eps):\n from .rdp import rdp\n\n output = rdp(data, eps)\n\n return output\n\n def getlinearRange(self,data,derivativecutoff=0.1,closeToZero=0.003):\n\n\n # find the most minimum point on the second derivative curve\n # step back to a point that is at some cuttoff\n\n\n index2 = np.argmin(data) - 1\n val = data[index2]\n\n while abs(val) < closeToZero:\n index2 -= 1\n val = data[index2]\n\n index2 += 1\n der = 10000\n\n t_index = index2 - 1\n\n while abs(der) > closeToZero:\n t_index -= 1\n der = (data[t_index] - data[t_index-1])\n\n # set the minimum index to a quarter of the data set\n minIndex = int(t_index*.25)\n maxDer = 0\n\n # step backwards until the minimum index in the range is reached\n while t_index > minIndex:\n\n der = (data[t_index] - data[t_index-1])\n # if der < 0:\n # print(\"The index is {0}... the derivative is... {1} and the current minimum is... {2}.\".format(t_index,der,Minder))\n # check if the change in values is steeper than the previous\n if abs(der) > maxDer:\n\n maxDer = der\n index1 = t_index\n\n t_index -= 1\n\n index1 = self.stepTillMax(data, index1)\n\n return [index1,index2]\n\n def stepTillMax(self,data,index):\n currentValue = data[index]\n nextValue= data[index+1]\n\n while currentValue < nextValue:\n index += 1\n currentValue = data[index]\n nextValue = data[index + 1]\n\n return index -1\n\n def fitlineToData(self,x,y):\n\n # fit a line from the first index to the last\n coefficients = np.polyfit(x, y, 1)\n f = np.poly1d(coefficients)\n\n return f,coefficients\n\n def yValuesFromPolyFit(self,x,f):\n\n return np.polyval(f,x)\n\n\n def applySavgol(self,data,winlen=71,porder=2):\n from scipy import signal\n # smooth the data\n data = signal.savgol_filter(data, winlen, porder, deriv=0,\n delta=1.0, axis=-1, mode='interp', cval=0.0)\n return data\n\n def getForceDisplacement(self, skip=None, *args):\n # get the Force and Displacement Data from csv\n # Read csv dimensions and populate list of patient and specimen concantenated\n\n df = pd.read_csv(self.fname, index_col=False, skiprows=skip) # CSV to full pandas dataframe\n\n # Convert dataframe to numpy arrays\n try: # Use this if the columns have labels\n force = df.loc[:, \"Force\"].values # extract force from full pandas dataframe\n displacement = df.loc[:, \"Displacement\"].values # extract Displacement\n time = df.loc[:, \"Time\"].values # extract Displacement\n\n # then try this.... 
columns need to be time,displacement, force in that order\n except KeyError:\n\n force = df.iloc[:, 2].values # extract force from full pandas dataframe\n displacement = df.iloc[:, 1].values # extract Displacement\n time = df.iloc[:, 0].values # extract Displacement\n\n return force, displacement, time\n\n def reduceData(self, target, x, y):\n\n # reduce the data to a target number of points\n\n reduceData = int(np.ceil(len(y)/target))\n\n # Reduce the data by a number so every nth reduceData\n x = x[::reduceData]\n y = y[::reduceData]\n\n return x, y\n\n def calcStress(self, force, width, thickness,displacement,initialLength):\n\n # Calulate the cauchy stress if specified defaults to the 1st Piola-Kirchoff\n # aka engineering stress\n if self.stresstype == \"cauchy\":\n stretch = (displacement - displacement[0])/initialLength + 1\n self.stress = force/(width*thickness)*stretch\n\n elif self.stresstype == \"firstpiola\":\n self.stress = force/(width*thickness)\n\n else:\n # 1st Piola stress\n print(\"hmmm need to pass in (firstpiola or cauchy)\")\n\n def calcStrain(self, disp, g_g):\n\n disp = disp-disp[0] # Zero the displacement for the first measurement\n strain = disp/g_g # engineering strain\n\n if self.straintype == 'engineering':\n self.strain = strain # Get strain\n elif self.straintype == 'stretch':\n self.strain = strain + 1\n else:\n print(\"What the fuck are you doing.... (engineering or stretch)\")\n\n def _findMax(self, data, index):\n\n currVal = data[index]\n nextVal = data[index + 1]\n\n while currVal < nextVal:\n index += 1\n\n currVal = data[index]\n nextVal = data[index + 1]\n\n maxInd = index\n\n return maxInd\n\n def _normalizeData(self, ydata, xdata,ind):\n '''\n This function normalizes x and y data between 0 and 1.\n Inputs: numpy arrays (x and y) and starting index.\n Returns: normalized data (as 2 separate arrays)\n '''\n\n x_norm = (xdata - np.min(xdata[:ind])) / (np.max(xdata[:ind]) - np.min(xdata[:ind]))\n y_norm = (ydata - np.min(ydata[:ind])) / (np.max(ydata[:ind]) - np.min(ydata[:ind]))\n\n return y_norm, x_norm\n\n def fitRange(self, *args):\n\n minRange = args[0]\n maxRange = args[1]\n\n fitRange = np.logical_and(self.epsilon > minRange, self.epsilon <\n maxRange) # Get range to fit (logical array)\n\n xFit = self.epsilon[fitRange] # range of strain between min and max range\n yFit = self.sigma[fitRange] # range of stress for the the same range\n\n return xFit, yFit\n\n def firstDerivative15percent(self, *args):\n pass\n\n def fitCurve15percent(self, *args):\n pass\n # Fits a 2nd order olynomial between 10 an 10 percent strain then finds the linear\n # tangent from derivative at 15 percent\n xFit, yFit = self.fitRange(0.10, 0.20) # find values in the range\n coeff = np.polyfit(xFit, yFit, 2) # return coeffiecient of line from derivative\n d_dx = 2*coeff[0]*xFit - coeff[1]\n\n return xFit\n\n def fitNeoHookean(self, *args):\n from scipy.optimize import curve_fit\n # Fit the neohookean model sigman = 2*c*(lamda-1/lamda^2) calls fitRange function\n\n xFit, yFit = self.fitRange(0.05, 0.15)\n lamda = xFit + 1 # convert strain to stretch\n\n # Fit the data to the nonlinear curve_fit\n popt, pcov = curve_fit(neoHookeanCurve, xFit, yFit)\n # get the values of stress for coresponding parameters\n yNH = self.neoHookeanCurve(xFit, *popt)\n\n return popt, xFit, yNH # return the c_0 parameter and the values of fit\n\n def neoHookeanCurve(self, x, c):\n # The model to be fit to the Data\n\n return 2*c*(x-x ^ -2)\n\n def calcDerivative(self, indx, y, h):\n # Calculates a 
simple numerical derivative based on a single stepbackward\n # inputs :\n # - indx the point to start the numberical\n # - y is the array that represents the values of the function\n # ` - h is the step size in the numberical derivative`\n\n der = (y[indx]-y[indx-1])/h # numerical derivative\n\n return der\n\n def convolveWithGauss(self, data, smooth_width=101, bounds=3):\n\n padder = int(smooth_width/2)\n\n # pad the data\n paddedData = np.pad(data, (padder, padder), 'edge')\n\n # convolve the data with the second derivative of a gaussian distribution\n\n # Create the x values to apply to the gaussian distribution\n x1 = np.linspace(-bounds, bounds, smooth_width)\n norm = np.sum(np.exp(-x1**2)) * (x1[1]-x1[0]) # ad hoc normalization\n\n y1 = (4*x1**2 - 2) * np.exp(-x1**2) / smooth_width * 8 # norm*(x1[1]-x1[0])\n\n # # Create a gaussian distribution to convolve the data with\n # window = signal.gaussian(201, std=3)\n\n # calculate second order deriv.\n y_convPadded = np.convolve(paddedData, y1, mode=\"same\")\n\n y_conv = y_convPadded[padder:-padder]\n\n return y_conv\n\n def findLinear(self, disp, force, minimumSlope=0.05):\n\n\n # create convolution kernel for calculating\n # the smoothed second order derivative\n\n # inputs -\n # disp = displacement array\n # force = force array\n\n indx = np.argmax(self.stress)\n x = self.strain[:indx]\n y = self.stress[:indx]\n\n # smooth_width = self.smooth_width\n # x1 = np.linspace(-3, 3, smooth_width)\n # norm = np.sum(np.exp(-x1**2)) * (x1[1]-x1[0]) # ad hoc normalization\n #\n # y1 = (4*x1**2 - 2) * np.exp(-x1**2) / smooth_width * 8 # norm*(x1[1]-x1[0])\n\n # Create a gaussian distribution to convolve the data with\n window = signal.gaussian(self.smooth_width, std=self.std)\n\n # calculate second order deriv.\n y_conv = np.convolve(y, window, mode=\"same\")\n\n # convolve the 2nd derivative curve to determine points where the curve changes\n conv_array = np.convolve(y_conv, [-1, 0, 1])\n\n # Get the points where the convolution changes signs.... this is when it does to linear\n zero_crossings = np.where(np.diff(np.sign(conv_array)))[0]\n\n while minimumSlope > 0.0001:\n\n for num, crossing in enumerate(zero_crossings):\n\n if conv_array[zero_crossings[num]-1] < 0 and x[zero_crossings[num]] > 0.2:\n\n # set the minimum point in the range as the first maxima\n minrange = zero_crossings[num]\n\n # set the minimum point in the range as the first minima\n maxrange = zero_crossings[num+1]\n\n # Find the slope of the line at the local maxima\n a = self.calcDerivative(minrange, y_conv, x[1])\n\n if a > minimumSlope: # if the slope is greater than this value break the loop\n\n # get the stress where the loop broke\n stressAtstart = self.stress[zero_crossings[num]]\n stressAtstart = np.around(stressAtstart, 3)\n print (\"The derivate for {} was {} at {} MPa\".format(\n self.sample, np.around(a, 3), stressAtstart)) # print the stress at that point\n break\n minimumSlope /= 10 # reduce slope criteria by one order if magnitude if it has not convergedd\n\n # calculate polynomial\n x_fit = x[minrange:maxrange]\n y_fit = y[minrange:maxrange]\n\n plt.plot(x_fit, y_fit, color=\"b\", linewidth=8, label=\"stuff that fits\")\n plt.plot(x, y, color=\"r\")\n plt.show()\n\n print (\"Linear region starts at ... {} MPa\".format(np.around(self.stress[minrange], 3)))\n print (\"Linear region ends at ... 
{} MPa\".format(np.around(self.stress[maxrange], 3)))\n print (\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n z = np.polyfit(x_fit, y_fit, 1)\n f = np.poly1d(z)\n\n # calculate new x's and y's\n self.xline = np.linspace(x[minrange-100], x[maxrange+100], 50) # test\n # self.xline = np.linspace(x_fit[0], x_fit[-1], 50) # original\n self.yline = f(self.xline)\n\n return x, y_conv\n\n def plotTitle(self, nameparts):\n\n import os\n import re\n\n a = os.path.basename(nameparts)\n a = re.split(r'[ ,_]', a)\n title = a[0] + \"_\" + a[1]\n\n return title\n\n def findFailure(self, data):\n\n # convolve the data with kernel\n # pick out the points where the sign changes\n\n indxArray = signal.argrelmax(data, axis=0, order=1, mode='clip')\n # return index of the point of failure\n\n if indxArray[0].any():\n indx = indxArray[0][0]\n else:\n indx = np.argmax(data)\n return indx\n\n def movingAverage(self, a, n=3):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n\n def truncData(self, xdata, ydata, index):\n # Truncate the data based on the maximum value in ydata\n\n\n xTrunc = xdata[:(index+1)]\n yTrunc = ydata[:(index+1)]\n\n return xTrunc, yTrunc\n\n def get_closest(self, data, val):\n # function used to find the closest values given to the values in the data\n # set\n # minus the x value from the data set\n minValIndex = np.argmin(abs(data - val))\n # return the value\n return minValIndex\n\n def sort_data(self, xdata, ydata, xvals=[], yvals=[]):\n # Takes 2 inputs and 2 arrays of data and finds the range that\n # is within the 2 input values\n\n # get the indices of the data to values to use, x data is used to create the range\n Index = [self.get_closest(xdata, val) for val in xvals]\n\n # sort the array in ascending order\n Index = np.sort(np.array(Index))\n\n # define the data from the indices provided\n xFit = xdata[Index[0]:Index[1]]\n yFit = ydata[Index[0]:Index[1]]\n\n return xFit, yFit\n\n def manual_max(self, xdata, ydata, xvals=[], yvals=[]):\n # finds the maximum value in an array from a larger data set given 2 indices\n # to mark the beginning and end of a range\n\n # Call class method sort_data to return the data set from minimum to\n # maximum values\n xFit, yFit = self.sort_data(xdata, ydata, xvals, yvals)\n\n # Find the indes of the maximum value in the data set\n maxIndx = np.argmax(yFit)\n\n # Define the maximum value in y and x\n xAtMax = xFit[maxIndx]\n yAtMax = yFit[maxIndx]\n\n # set the class variable of strength as the max from user input\n self.strength = yAtMax\n\n return xAtMax, yAtMax\n\n def manual_linear(self, xdata, ydata, xvals=[], yvals=[]):\n # fit a line between the two points in the data\n\n # Call class method sort_data to return the data set from minimum to\n # maximum values\n xFit, yFit = self.sort_data(xdata, ydata, xvals, yvals)\n\n # fit a line from the first index to the last\n z = np.polyfit(xFit, yFit, 1)\n f = np.poly1d(z)\n\n # get the y values that correspond to the line\n yLine = np.polyval(f, xFit)\n\n # get the value of the slope of the linea\n slope = f[1]\n\n # Set the class variable of stiffness to the slope of the line\n self.stiffness = slope\n\n return slope, [xFit, yLine] # return the x and y values of the line\n\n def return_XYdata(self, *args):\n\n return {'x': self.x, 'y': self.y, 'xMax': self.xMax,\n 'yMax': self.yMax, 'title': self.pltTitle,\n 'xline': self.xline, 'yline': self.yline}\n"
] |
[
[
"pandas.to_timedelta",
"matplotlib.pyplot.show",
"pandas.read_csv"
],
[
"numpy.sqrt",
"numpy.array_equal",
"numpy.min",
"numpy.abs",
"numpy.around",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.empty",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.show"
],
[
"numpy.polyfit",
"numpy.poly1d",
"numpy.linspace",
"numpy.around",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.argmin",
"numpy.exp",
"scipy.optimize.curve_fit",
"numpy.polyval",
"scipy.signal.savgol_filter",
"pandas.read_csv",
"numpy.pad",
"numpy.argmax",
"numpy.min",
"scipy.signal.argrelmax",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.logical_and",
"numpy.convolve",
"numpy.sign",
"scipy.signal.gaussian"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.14",
"0.15",
"0.19",
"0.18",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
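The stress-strain record above fits a Neo-Hookean model (sigma = 2c(λ − λ⁻²)) with `scipy.optimize.curve_fit`, but its `neoHookeanCurve` uses `x ^ -2` (bitwise XOR) where the stated model needs the power operator, and it passes strain rather than the stretch it computes. A minimal, self-contained sketch of that fit follows; the function name `neo_hookean` and the synthetic data are assumptions, while the 0.05-0.15 strain window is taken from the record.

```python
import numpy as np
from scipy.optimize import curve_fit

def neo_hookean(stretch, c):
    # incompressible Neo-Hookean uniaxial stress: sigma = 2*c*(lambda - lambda**-2)
    return 2.0 * c * (stretch - stretch**-2)

# hypothetical data: engineering strain in the fit window and a noisy "measured" stress
strain = np.linspace(0.05, 0.15, 20)
stretch = strain + 1.0                                   # convert strain to stretch
stress = neo_hookean(stretch, 0.8) + np.random.normal(0.0, 0.01, strain.size)

popt, pcov = curve_fit(neo_hookean, stretch, stress)     # fit the single parameter c
print("fitted c =", popt[0])
```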
AIpioneer/paddle2onnx
|
[
"ba7388bf472347b1cf7de5d79edf8aad5f5c7d47"
] |
[
"paddle2onnx/op_mapper/tensor.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport numpy as np\nfrom paddle2onnx.constant import dtypes\nfrom paddle2onnx.op_mapper import OpMapper as op_mapper\nfrom paddle2onnx.op_mapper import mapper_helper\n\n\n@op_mapper('concat')\nclass Concat():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n node = graph.make_node(\n 'Concat',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n axis=node.attr('axis'))\n\n\n@op_mapper('expand_as_v2')\nclass ExpandV2():\n support_opset_verison_range = (8, 12)\n\n @classmethod\n def opset_8(cls, graph, node, **kw):\n target_shape = graph.make_node(\n 'Shape', inputs=node.input('target_tensor'))\n\n node = graph.make_node(\n 'Expand',\n inputs=[node.input('X', 0), target_shape],\n outputs=node.output('Out'))\n\n\n@op_mapper('shape')\nclass Shape():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Shape', inputs=node.input('Input'), outputs=node.output('Out'))\n\n\n@op_mapper('split')\nclass Split():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n sections = node.attr('sections')\n if len(sections) > 0:\n graph.make_node(\n 'Split',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n axis=node.attr('axis'),\n split=sections)\n else:\n graph.make_node(\n 'Split',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n axis=node.attr('axis'))\n\n\n@op_mapper('slice')\nclass Slice():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n axes = node.attr('axes')\n starts = node.attr('starts')\n ends = node.attr('ends')\n graph.make_node(\n \"Slice\",\n inputs=[node.input('Input')[0]],\n outputs=node.output('Out'),\n axes=axes,\n starts=starts,\n ends=ends)\n\n @classmethod\n def opset_10(cls, graph, node, **kw):\n axes = node.attr('axes')\n starts = node.attr('starts')\n ends = node.attr('ends')\n\n axes_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': axes})\n starts_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': starts})\n ends_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': ends})\n graph.make_node(\n \"Slice\",\n inputs=[node.input('Input')[0], starts_node, ends_node, axes_node],\n outputs=node.output('Out'))\n\n\n@op_mapper('fill_constant')\nclass Constant():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n value = node.attr('value')\n dtype = node.attr('dtype')\n shape = node.attr('shape')\n value = np.ones(shape) * value\n value = value.astype(dtypes.DTYPE_PADDLE_NUMPY_MAP[dtype])\n graph.make_node(\n 'Constant',\n inputs=[],\n outputs=node.output('Out'),\n attrs={\n 'dims': shape,\n 'dtype': dtypes.DTYPE_PADDLE_ONNX_MAP[dtype],\n 'value': value\n 
})\n\n\n@op_mapper('fill_any_like')\nclass FullLike():\n '''\n fill_any_like is kernel for paddle op::full_like & ones_like\n '''\n support_opset_verison_range = (9, 12)\n\n @classmethod\n def opset_9(cls, graph, node, **kw):\n shape_node = graph.make_node('Shape', inputs=node.input('X'))\n value = node.attr('value')\n dtype = node.attr('dtype')\n input_dtype = node.input_var('X', 0).dtype\n if dtype is None:\n dtype = input_dtype\n dtype = dtypes.DTYPE_PADDLE_ONNX_MAP[dtype]\n graph.make_node(\n 'ConstantOfShape',\n inputs=[shape_node],\n outputs=node.output('Out'),\n dims=[1],\n dtype=dtype,\n value=value)\n\n\n@op_mapper('gather')\nclass Gather():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n if len(node.input_shape('Index', 0)) == 1:\n # gather\n graph.make_node(\n 'Gather',\n inputs=[node.input('X', 0), node.input('Index', 0)],\n outputs=node.output('Out'))\n else:\n raise Exception(\n \"please try to convert OP:gather(indices's rank >1) with opset_version >= 11.\"\n )\n\n @classmethod\n def opset_11(cls, graph, node, **kw):\n if len(node.input_shape('Index', 0)) == 1:\n # gather\n graph.make_node(\n 'Gather',\n inputs=[node.input('X', 0), node.input('Index', 0)],\n outputs=node.output('Out'))\n else:\n # gather_nd \n graph.make_node(\n 'GatherND',\n inputs=[node.input('X', 0), node.input('Index', 0)],\n outputs=node.output('Out'))\n\n\n@op_mapper('squeeze2')\nclass Squeeze():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n axes = node.attr('axes')\n graph.make_node(\n 'Squeeze',\n inputs=[node.input('X', 0)],\n outputs=node.output('Out'),\n axes=axes)\n\n\n@op_mapper('assign_value')\nclass Assign():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n if len(node.input_names) > 0:\n graph.make_node(\n 'Identity', inputs=node.input('X'), outputs=node.output('Out'))\n else:\n value = np.array(node.attr('fp32_values'))\n if value is None:\n value = np.array(node.attr('int32_values'))\n parameter = {\n 'data': value,\n 'dtype': node.attr('dtype'),\n 'shape': node.attr('shape')\n }\n graph.parameters[node.output('Out', 0)] = parameter\n\n\n@op_mapper('transpose2')\nclass Transpose():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Transpose',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n perm=node.attr('axis'))\n\n\n@op_mapper('flatten2')\nclass Flatten():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Flatten',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n axis=node.attr('axis'))\n\n\n@op_mapper('flatten_contiguous_range')\nclass FlattenContiguousRange():\n support_opset_verison_range = (5, 12)\n\n @classmethod\n def opset_5(cls, graph, node, **kw):\n dims = len(node.input_shape('X', 0))\n start_axis = node.attr('start_axis')\n end_axis = node.attr('stop_axis')\n shape_node = graph.make_node('Shape', inputs=node.input('X'))\n slice1 = mapper_helper.slice_helper(\n graph, shape_node, axes=[0], starts=[0], ends=[start_axis])\n slices = [\n slice1, graph.make_node(\n 'Constant', value=[-1], dtype=dtypes.ONNX.INT64)\n ]\n if end_axis < dims - 1:\n slice3 = mapper_helper.slice_helper(\n graph, shape_node, axes=[0], starts=[end_axis + 1],\n ends=[dims])\n slices = [\n slice1, graph.make_node(\n 'Constant', value=[-1], dtype=dtypes.ONNX.INT64), slice3\n ]\n final_shape = 
graph.make_node('Concat', inputs=slices, axis=0)\n graph.make_node(\n 'Reshape',\n inputs=[node.input('X')[0], final_shape],\n outputs=node.output('Out'))\n\n\n@op_mapper('reshape2')\nclass Reshape():\n support_opset_verison_range = (5, 12)\n\n @classmethod\n def opset_5(cls, graph, node, **kw):\n if len(node.input('ShapeTensor')) > 1:\n cast_shape_nodes = []\n for i in range(len(node.input('ShapeTensor'))):\n dim = node.input('ShapeTensor')[i]\n cast_node = graph.make_node(\n 'Cast', inputs=[dim], to=dtypes.ONNX.INT64)\n cast_shape_nodes.append(cast_node)\n shape_node = graph.make_node(\n 'Concat', inputs=cast_shape_nodes, axis=-1)\n graph.make_node(\n 'Reshape',\n inputs=[node.input('X')[0], shape_node],\n outputs=node.output('Out'))\n elif len(node.input('ShapeTensor')) == 1:\n cast_shape_node = graph.make_node(\n 'Cast', inputs=node.input('ShapeTensor'), to=dtypes.ONNX.INT64)\n graph.make_node(\n 'Reshape',\n inputs=[node.input('X')[0], cast_shape_node],\n outputs=node.output('Out'))\n elif node.attr('shape') is not None and len(node.attr('shape')) > 0:\n shape_node = graph.make_node(\n 'Constant',\n attrs={\n 'dtype': dtypes.ONNX.INT64,\n 'value': node.attr('shape')\n })\n reshape_node = graph.make_node(\n 'Reshape',\n inputs=[node.input('X')[0], shape_node],\n outputs=node.output('Out'))\n\n\n@op_mapper('unsqueeze2')\nclass Unsqueeze():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Unsqueeze',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n axes=node.attr('axes'))\n\n\n@op_mapper('reciprocal')\nclass Reciprocal():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Reciprocal', inputs=node.input('X'), outputs=node.output('Out'))\n\n\n@op_mapper('cast')\nclass Cast():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'Cast',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n to=dtypes.DTYPE_PADDLE_ONNX_MAP[node.attr('out_dtype')])\n\n\n@op_mapper('clip')\nclass Clip():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n min_value = node.attr('min')\n max_value = node.attr('max')\n mapper_helper.clip_helper(graph,\n node.input('X', 0), max_value, min_value,\n node.output('Out', 0))\n\n\n@op_mapper(['pad2d', 'pad3d'])\nclass Pad():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n pads = cls.convert_padding(node, **kw)\n value = None\n if node.attr('pad_value') is not None:\n value = node.attr('pad_value')\n elif node.attr('value') is not None:\n value = node.attr('value')\n graph.make_node(\n 'Pad',\n inputs=node.input('X'),\n outputs=node.output('Out'),\n mode=node.attr('mode'),\n value=value,\n pads=pads)\n\n @classmethod\n def opset_11(cls, graph, node, **kw):\n pads = cls.convert_padding(node, **kw)\n pads_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': pads})\n value = None\n if node.attr('pad_value') is not None:\n value = node.attr('pad_value')\n elif node.attr('value') is not None:\n value = node.attr('value')\n value_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.FLOAT,\n 'value': value})\n\n graph.make_node(\n 'Pad',\n inputs=node.input('X') + [pads_node, value_node],\n outputs=node.output('Out'),\n mode=node.attr('mode'))\n\n @classmethod\n def convert_padding(cls, node, **kw):\n x_shape = node.input_shape('X', 
0)\n paddings = node.attr('paddings')\n onnx_paddings = None\n #TODO support pads is Variable\n if node.attr('data_format') == 'NCHW':\n onnx_paddings = [\n 0, 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3]\n ]\n elif node.attr('data_format') == 'NHWC':\n onnx_paddings = [\n 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3], 0\n ]\n elif node.attr('data_format') == 'NCDHW':\n onnx_paddings = [\n 0, 0, paddings[4], paddings[2], paddings[0], 0, 0, paddings[5],\n paddings[3], paddings[1]\n ]\n return onnx_paddings\n\n\n@op_mapper('uniform_random_batch_size_like')\nclass UniformRandom():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n graph.make_node(\n 'RandomUniformLike',\n inputs=node.input('Input'),\n outputs=node.output('Out'),\n high=node.attr('max'),\n dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.attr('dtype')],\n low=node.attr('min'),\n seed=float(node.attr('seed')), )\n\n\n@op_mapper('uniform_random')\nclass UniformRandom():\n support_opset_verison_range = (1, 12)\n\n @classmethod\n def opset_1(cls, graph, node, **kw):\n shape = node.output_shape('Out', 0)\n graph.make_node(\n 'RandomUniform',\n outputs=node.output('Out'),\n high=node.attr('max'),\n dtype=dtypes.DTYPE_PADDLE_ONNX_MAP[node.attr('dtype')],\n low=node.attr('min'),\n seed=float(node.attr('seed')),\n shape=shape)\n\n\n@op_mapper(\n [\n 'bilinear_interp', 'nearest_interp', 'bilinear_interp_v2',\n 'nearest_interp_v2'\n ],\n mapper_dict={\n 'bilinear_interp': 'linear',\n 'nearest_interp': 'nearest',\n 'bilinear_interp_v2': 'linear',\n 'nearest_interp_v2': 'nearest'\n })\nclass Resize():\n support_opset_verison_range = (9, 12)\n\n @classmethod\n def opset_9(cls, graph, node, **kw):\n inputs = [node.input('X')[0]]\n resize_type = kw['mapper_dict'][node.type]\n if node.attr('align_corners') or node.attr('align_mode') == 0:\n raise Exception(\n \"Resize in onnx(opset<=10) only support coordinate_transformation_mode: \" \\\n \"'asymmetric', Try converting with opset_version 11\"\n )\n if len(node.input('OutSize')) > 0 or len(node.input('SizeTensor')) > 0:\n in_shape, out_shape = cls.compute_output_shape(\n graph, node, opset_version=9)\n cast_shape_node2 = graph.make_node(\n 'Cast', inputs=[out_shape], to=dtypes.ONNX.FLOAT)\n cast_shape_node0 = graph.make_node(\n 'Cast', inputs=[in_shape], to=dtypes.ONNX.FLOAT)\n node_h_w_scales = graph.make_node(\n 'Div', inputs=[cast_shape_node2, cast_shape_node0])\n inputs.append(node_h_w_scales)\n elif 'Scale' in node.inputs and len(node.input('Scale')) > 0:\n scale = node.input('Scale')[0]\n inputs.append(out_shape)\n else:\n out_shape = [node.attr('out_h'), node.attr('out_w')]\n scale = node.attr('scale')\n if out_shape.count(-1) > 0:\n scale_node = graph.make_node(\n 'Constant',\n attrs={\n 'dtype': dtypes.ONNX.FLOAT,\n 'value': [1, 1, scale, scale]\n })\n inputs.append(scale_node)\n else:\n raise Exception(\"Unexpected situation happend\")\n graph.make_node(\n 'Upsample',\n inputs=inputs,\n outputs=node.output('Out'),\n mode=resize_type)\n\n @classmethod\n def opset_10(cls, graph, node, **kw):\n inputs = [node.input('X')[0]]\n resize_type = kw['mapper_dict'][node.type]\n if node.attr('align_corners') or node.attr('align_mode') == 0:\n raise Exception(\n \"Resize in onnx(opset<=10) only support coordinate_transformation_mode:\" \\\n \" 'asymmetric', Try converting with opset_version 11\"\n )\n if len(node.input('OutSize')) > 0 or len(node.input('SizeTensor')) > 0:\n in_shape, out_shape = cls.compute_output_shape(graph, 
node)\n cast_shape_node2 = graph.make_node(\n 'Cast', inputs=[out_shape], to=dtypes.ONNX.FLOAT)\n cast_shape_node0 = graph.make_node(\n 'Cast', inputs=[in_shape], to=dtypes.ONNX.FLOAT)\n node_h_w_scales = graph.make_node(\n 'Div', inputs=[cast_shape_node2, cast_shape_node0])\n inputs.append(node_h_w_scales)\n elif 'Scale' in node.inputs and len(node.input('Scale')) > 0:\n scale = node.input('Scale')[0]\n inputs.append(scale)\n else:\n out_shape = [node.attr('out_h'), node.attr('out_w')]\n scale = node.attr('scale')\n if isinstance(scale, float):\n scale = [1, 1, scale, scale]\n else:\n scale = [1, 1] + scale\n if out_shape.count(-1) > 0:\n scale_node = graph.make_node(\n 'Constant',\n attrs={'dtype': dtypes.ONNX.FLOAT,\n 'value': scale})\n inputs.append(scale_node)\n else:\n raise Exception(\"Unexpected situation happend\")\n graph.make_node(\n 'Resize',\n inputs=inputs,\n outputs=node.output('Out'),\n mode=resize_type)\n\n @classmethod\n def opset_11(cls, graph, node, **kw):\n node_lists = []\n resize_type = kw['mapper_dict'][node.type]\n coordinate_transformation_mode = ''\n if node.attr('align_corners'):\n coordinate_transformation_mode = 'align_corners'\n elif node.type == 'nearest_interp':\n coordinate_transformation_mode = 'half_pixel'\n else:\n if node.attr('align_mode') == 1:\n coordinate_transformation_mode = 'asymmetric'\n else:\n coordinate_transformation_mode = 'half_pixel'\n roi_node = graph.make_node(\n 'Constant',\n attrs={\n 'dtype': dtypes.ONNX.FLOAT,\n 'value': [1, 1, 1, 1, 1, 1, 1, 1]\n })\n inputs = [node.input('X')[0], roi_node]\n node_lists.append(roi_node)\n if len(node.input('OutSize')) > 0 or len(node.input('SizeTensor')) > 0:\n empty_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.FLOAT,\n 'value': []})\n inputs.append(empty_node)\n _, out_shape = cls.compute_output_shape(graph, node)\n inputs.append(out_shape)\n elif len(node.input('Scale')) > 0:\n scale = node.input('Scale')[0]\n inputs.append(scale)\n else:\n out_shape = [node.attr('out_h'), node.attr('out_w')]\n scale = node.attr('scale')\n if isinstance(scale, float):\n scale = [1, 1, scale, scale]\n else:\n scale = [1, 1] + scale\n\n if out_shape.count(-1) > 0:\n scale_node = graph.make_node(\n 'Constant',\n attrs={'dtype': dtypes.ONNX.FLOAT,\n 'value': scale})\n inputs.append(scale_node)\n else:\n empty_node = graph.make_node(\n 'Constant',\n attrs={'dtype': dtypes.ONNX.FLOAT,\n 'value': []})\n in_shape, out_shape = cls.compute_output_shape_by_size(graph,\n node)\n inputs += [empty_node, out_shape]\n graph.make_node(\n 'Resize',\n inputs=inputs,\n outputs=node.output('Out'),\n mode=resize_type,\n coordinate_transformation_mode=coordinate_transformation_mode)\n\n @classmethod\n def compute_output_shape(cls, graph, node, opset_version=10):\n shape_node0 = graph.make_node('Shape', inputs=node.input('X'))\n if opset_version < 10:\n shape_node1 = graph.make_node(\n 'Slice', inputs=[shape_node0], starts=[0], ends=[2])\n else:\n starts_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': [0]})\n ends_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': [2]})\n shape_node1 = graph.make_node(\n 'Slice', inputs=[shape_node0, starts_node, ends_node])\n if len(node.input('OutSize')) > 0:\n cast_shape_node = graph.make_node(\n 'Cast', inputs=node.input('OutSize'), to=dtypes.ONNX.INT64)\n else:\n concat_shape_node = graph.make_node(\n \"Concat\", inputs=node.input('SizeTensor'), axis=0)\n cast_shape_node = graph.make_node(\n 'Cast', 
inputs=[concat_shape_node], to=dtypes.ONNX.INT64)\n shape_node2 = graph.make_node(\n 'Concat', inputs=[shape_node1, cast_shape_node], axis=0)\n return shape_node0, shape_node2\n\n @classmethod\n def compute_output_shape_by_size(cls, graph, node, opset_version=10):\n shape_node0 = graph.make_node('Shape', inputs=node.input('X'))\n if opset_version < 10:\n shape_node1 = graph.make_node(\n 'Slice', inputs=[shape_node0], starts=[0], ends=[2])\n else:\n starts_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': [0]})\n ends_node = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': [2]})\n shape_node1 = graph.make_node(\n 'Slice', inputs=[shape_node0, starts_node, ends_node])\n out_shape = [node.attr('out_h'), node.attr('out_w')]\n shape_node2 = graph.make_node(\n 'Constant', attrs={'dtype': dtypes.ONNX.INT64,\n 'value': out_shape})\n shape_node3 = graph.make_node(\n 'Concat', inputs=[shape_node1, shape_node2], axis=0)\n return shape_node0, shape_node3\n"
] |
[
[
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ThijsHrm/nxviz
|
[
"2bfdbebd7313b629677aeb91aebf48cd851a05dd"
] |
[
"examples/matrix/barbell.py"
] |
[
"\"\"\"\nDisplays a NetworkX barbell graph to screen using a CircosPlot.\n\nFeatures of this example:\n- MatrixPlot\n- Styling matrix plot with different colormap.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom nxviz.plots import MatrixPlot\n\nG = nx.barbell_graph(m1=10, m2=3)\n\n# Instantiate a MatrixPlot with no custom styling.\nm = MatrixPlot(G)\n\n# Change the cmap prior to drawing.\nm.cmap = plt.cm.get_cmap('Greens')\nm.draw()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.cm.get_cmap"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
magnuel14/test-face
|
[
"5f3e6016cc7a83aeb96c4ef003760315a448e317"
] |
[
"utils/label_map_util.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Label map utility functions.\"\"\"\n\nimport logging\n\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom protos import string_int_label_map_pb2\n\n\ndef _validate_label_map(label_map):\n \"\"\"Checks if a label map is valid.\n\n Args:\n label_map: StringIntLabelMap to validate.\n\n Raises:\n ValueError: if label map is invalid.\n \"\"\"\n for item in label_map.item:\n if item.id < 1:\n raise ValueError('Label map ids should be >= 1.')\n\n\ndef create_category_index(categories):\n \"\"\"Creates dictionary of COCO compatible categories keyed by category id.\n\n Args:\n categories: a list of dicts, each of which has the following keys:\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name\n e.g., 'cat', 'dog', 'pizza'.\n\n Returns:\n category_index: a dict containing the same entries as categories, but keyed\n by the 'id' field of each category.\n \"\"\"\n category_index = {}\n for cat in categories:\n category_index[cat['id']] = cat\n return category_index\n\n\ndef convert_label_map_to_categories(label_map,\n max_num_classes,\n use_display_name=True):\n \"\"\"Loads label map proto and returns categories list compatible with eval.\n\n This function loads a label map and returns a list of dicts, each of which\n has the following keys:\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name\n e.g., 'cat', 'dog', 'pizza'.\n We only allow class into the list if its id-label_id_offset is\n between 0 (inclusive) and max_num_classes (exclusive).\n If there are several items mapping to the same id in the label map,\n we will only keep the first one in the categories list.\n\n Args:\n label_map: a StringIntLabelMapProto or None. If None, a default categories\n list is created with max_num_classes categories.\n max_num_classes: maximum number of (consecutive) label indices to include.\n use_display_name: (boolean) choose whether to load 'display_name' field\n as category name. 
If False or if the display_name field does not exist,\n uses 'name' field as category names instead.\n Returns:\n categories: a list of dictionaries representing all possible categories.\n \"\"\"\n categories = []\n list_of_ids_already_added = []\n if not label_map:\n label_id_offset = 1\n for class_id in range(max_num_classes):\n categories.append({\n 'id': class_id + label_id_offset,\n 'name': 'category_{}'.format(class_id + label_id_offset)\n })\n return categories\n for item in label_map.item:\n if not 0 < item.id <= max_num_classes:\n logging.info('Ignore item %d since it falls outside of requested '\n 'label range.', item.id)\n continue\n if use_display_name and item.HasField('display_name'):\n name = item.display_name\n else:\n name = item.name\n if item.id not in list_of_ids_already_added:\n list_of_ids_already_added.append(item.id)\n categories.append({'id': item.id, 'name': name})\n return categories\n\n\ndef load_labelmap(path):\n \"\"\"Loads label map proto.\n\n Args:\n path: path to StringIntLabelMap proto text file.\n Returns:\n a StringIntLabelMapProto\n \"\"\"\n #with tf.gfile.GFile(path, 'r') as fid:\n with tf.io.gfile.GFile(path, 'r') as fid:\n label_map_string = fid.read()\n label_map = string_int_label_map_pb2.StringIntLabelMap()\n try:\n text_format.Merge(label_map_string, label_map)\n except text_format.ParseError:\n label_map.ParseFromString(label_map_string)\n _validate_label_map(label_map)\n return label_map\n\n\ndef get_label_map_dict(label_map_path, use_display_name=False):\n \"\"\"Reads a label map and returns a dictionary of label names to id.\n\n Args:\n label_map_path: path to label_map.\n use_display_name: whether to use the label map items' display names as keys.\n\n Returns:\n A dictionary mapping label names to id.\n \"\"\"\n label_map = load_labelmap(label_map_path)\n label_map_dict = {}\n for item in label_map.item:\n if use_display_name:\n label_map_dict[item.display_name] = item.id\n else:\n label_map_dict[item.name] = item.id\n return label_map_dict\n\n\ndef create_category_index_from_labelmap(label_map_path):\n \"\"\"Reads a label map and returns a category index.\n\n Args:\n label_map_path: Path to `StringIntLabelMap` proto text file.\n\n Returns:\n A category index, which is a dictionary that maps integer ids to dicts\n containing categories, e.g.\n {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}\n \"\"\"\n label_map = load_labelmap(label_map_path)\n max_num_classes = max(item.id for item in label_map.item)\n categories = convert_label_map_to_categories(label_map, max_num_classes)\n return create_category_index(categories)\n\n\ndef create_class_agnostic_category_index():\n \"\"\"Creates a category index with a single `object` class.\"\"\"\n return {1: {'id': 1, 'name': 'object'}}\n"
] |
[
[
"tensorflow.io.gfile.GFile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IlyaOrson/control_neuralode
|
[
"4fb10aae2315ace5565f59dbfc04e23efd140f53"
] |
[
"plots/plots.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nplt.style.use(\"seaborn-colorblind\") # \"ggplot\"\n\npalette = plt.cm.Dark2.colors\n\nfont = {\"family\": \"STIXGeneral\", \"size\": 16}\nsavefig = {\"dpi\": 600, \"bbox\": \"tight\"}\nlines = {\"linewidth\": 4}\nfigure = {\"figsize\": (8, 4)}\naxes = {\"prop_cycle\": mpl.cycler(color=palette)}\nlegend = {\"fontsize\": \"x-large\"} # medium for presentations, x-large for papers\n\nmpl.rc(\"font\", **font)\nmpl.rc(\"savefig\", **savefig)\nmpl.rc(\"lines\", **lines)\nmpl.rc(\"figure\", **figure)\nmpl.rc(\"axes\", **axes)\nmpl.rc(\"legend\", **legend)\n\n# # van der Pol unconstrained\n# data = pd.read_csv(\"./plots/van der Pol unconstrained.csv\")\n\n# fig = data.plot(\"t\", [\"x1\", \"x2\", \"u\"])\n# fig.set_xlabel(\"time\")\n# plt.savefig(\"./plots/van_der_pol_unconstrained.pdf\")\n# plt.savefig(\"./plots/van_der_pol_unconstrained.svg\")\n\n# # van der Pol constrained\n# data = pd.read_csv(\"./plots/van der Pol constrained.csv\")\n\n# fig = data.plot(\"t\", [\"x1\", \"x2\", \"u\"])\n# fig.axhline(y=-0.4, zorder=100, color= plt.gca().lines[0].get_color(), ls=\"--\", alpha=0.7)\n# fig.set_xlabel(\"time\")\n# plt.savefig(\"./plots/van_der_pol_constrained.pdf\")\n# plt.savefig(\"./plots/van_der_pol_constrained.svg\")\n\n# # photo production\n# data = pd.read_csv(\"./plots/photo production.csv\")\n\n# fig = data.plot(\"t\", [\"y1\", \"y2\"])\n# fig.set_xlabel(\"time\")\n# plt.savefig(\"./plots/photo_production_states.pdf\")\n# plt.savefig(\"./plots/photo_production_states.svg\")\n\n# fig = data.plot(\"t\", [\"u1\", \"u2\"])\n# fig.set_xlabel(\"time\")\n# plt.savefig(\"./plots/photo_production_controls.pdf\")\n# plt.savefig(\"./plots/photo_production_controls.svg\")\n\n\n#################################### bioreactor ####################################\n\n# 2021-04-20T15_37_02.048 # 2021-04-27T15_26_07.084\nresults_dir = Path(\"./data/bioreactor.jl/2021-05-01T14_04_16.822/\")\n\ndata_paths = list(results_dir.glob('*.csv'))\ndef extract_delta(path):\n return float(path.stem.rsplit(\"_\", 1)[1])\ndeltas = [extract_delta(path) for path in data_paths]\ndeltas.sort(reverse=True)\nordered_paths = sorted(data_paths, key=lambda path: extract_delta(path), reverse=True)\ncolors = {\n \"Purples\": mpl.cm.Purples(np.linspace(0.2,1,len(deltas))),\n \"Blues\": mpl.cm.Blues(np.linspace(0.2,1,len(deltas))),\n \"Greens\": mpl.cm.Greens(np.linspace(0.2,1,len(deltas))),\n \"Oranges\": mpl.cm.Oranges(np.linspace(0.2,1,len(deltas))),\n \"Reds\": mpl.cm.Reds(np.linspace(0.2,1,len(deltas))),\n}\n\n\nfig = plt.plot((0,240), (800, 800), zorder=110, color=\"orange\", alpha=0.7, ls=\"--\")\nplt.plot((235,245), (150, 150), zorder=110, color=\"orange\", alpha=0.7, ls=\"--\")\nfor i, filepath in enumerate(ordered_paths):\n\n data = pd.read_csv(filepath)\n data.plot(\"t\", \"x2\", ax=fig[0].axes, color=colors[\"Purples\"][i], label=f\"δ={deltas[i]}\", legend=False, alpha=0.7)\n\nplt.legend(fontsize=\"x-small\", loc='center left', bbox_to_anchor=(1.02, 0.5))\nplt.title(r\"$x_2$\")\nplt.xlabel(\"time\")\nplt.savefig(\"./plots/bioreactor_constraints_x2.pdf\")\nplt.savefig(\"./plots/bioreactor_constraints_x2.svg\")\nplt.show()\n\n\nfig = plt.plot((0,240), (0.03,0.03), zorder=100, color=\"orange\", ls=\"--\")\nfor i, filepath in enumerate(ordered_paths):\n\n data = pd.read_csv(filepath)\n data[\"g_x1_x3\"] = 0.011*data[\"x1\"] - data[\"x3\"]\n data.plot(\"t\", \"g_x1_x3\", ax=fig[0].axes, 
color=colors[\"Blues\"][i], label=f\"δ={deltas[i]}\", legend=False, alpha=0.7)\n\nplt.legend(fontsize=\"x-small\", loc='center left', bbox_to_anchor=(1.02, 0.5))\nplt.title(r\"$0.011 x_1 - x_3$\")\nplt.xlabel(\"time\")\nplt.savefig(\"./plots/bioreactor_constraints_x1_x3.pdf\")\nplt.savefig(\"./plots/bioreactor_constraints_x1_x3.svg\")\nplt.show()\n\n\ndelta = deltas[-1]\ndata = pd.read_csv(results_dir / f\"delta_{delta}.csv\")\n\n# fig, axs = plt.subplots(\n# 4, 1,\n# sharex=True,\n# constrained_layout=True,\n# squeeze=True,\n# figsize=(8, 4*4)\n# )\n# data.plot(\"t\", \"x1\", ax=axs[0], color=palette[0], label=r\"$C_X$\")\n# axs[0].legend(loc='lower right')\n# data.plot(\"t\", \"x3\", ax=axs[1], color=palette[1], label=r\"$C_{q_c}$\")\n# axs[1].legend(loc='lower right')\n# data.plot(\"t\", \"c1\", ax=axs[2], color=palette[2], label=r\"$I$\")\n# axs[2].legend(loc='lower right')\n# # axs[2].ticklabel_format(useMathText=True)\n# data.plot(\"t\", \"c2\", ax=axs[3], color=palette[3], label=r\"$F_N$\")\n# axs[3].legend(loc='lower right')\n# # axs[3].ticklabel_format(useMathText=True)\n# plt.setp(axs[3], xlabel=\"time\")\n# plt.savefig(\"./plots/bioreactor_x1_x3_c1_c2.pdf\")\n# plt.savefig(\"./plots/bioreactor_x1_x3_c1_c2.svg\")\n# plt.show()\n\ndef two_axis(data, cols, labels=None, refs=None, colors=None, ref_colors=None, alpha=None, ref_alpha=None, saveas=None):\n assert len(cols) == 2\n fig, ax1 = plt.subplots(\n # constrained_layout=True, # incompatible with subplots_adjust and or tight_layout\n squeeze=True,\n )\n ax2 = ax1.twinx()\n axs = [ax1, ax2]\n if not labels:\n labels = cols\n if not colors:\n colors = palette\n if not ref_colors:\n ref_colors = palette\n\n for i in range(len(cols)):\n data.plot(\"t\", cols[i], ax=axs[i], color=colors[i], label=labels[i], legend=False, alpha=alpha)\n # plt.setp(axs[i].spines.values(), color=palette[i]) # colors full box :(\n plt.setp(axs[i].spines[\"right\"], color=colors[i])\n axs[i].tick_params(axis=\"y\", colors=colors[i])\n if refs and refs[i]:\n axs[i].axhline(y=refs[i], zorder=100, color=ref_colors[i], ls=\"dashdot\", alpha=ref_alpha)\n\n ax1.set_xlabel('time')\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n fig.legend(bbox_to_anchor=(0.92,0.22), loc=\"lower right\", fontsize=\"medium\")\n plt.savefig(saveas, bbox_inches=\"tight\")\n plt.show()\n\ndef four_axis(data, cols, labels=None, refs=None, alpha=None, saveas=None):\n assert len(cols) < 5\n fig, ax = plt.subplots(\n # constrained_layout=True, # incompatible with subplots_adjust and or tight_layout\n squeeze=True,\n )\n axs = [ax]\n if len(cols) > 1:\n for _ in range(len(cols)-1):\n axs.append(ax.twinx())\n fig.subplots_adjust(right=0.8)\n # plt.setp(axs[0].spines[\"left\"], color=palette[0]) # https://stackoverflow.com/a/20371140/6313433\n\n if len(axs) > 1:\n axs[1].spines[\"right\"].set_position((\"axes\", -0.15))\n axs[1].tick_params(axis=\"y\", colors=palette[1], direction=\"in\", pad=-35)\n if len(axs) > 3:\n axs[3].spines[\"right\"].set_position((\"axes\", 1.15))\n\n if labels:\n assert len(labels) == len(cols)\n else:\n labels = cols\n for i in range(len(cols)):\n data.plot(\"t\", cols[i], ax=axs[i], color=palette[i], label=labels[i], legend=False, alpha=alpha)\n # plt.setp(axs[i].spines.values(), color=palette[i]) # colors full box :(\n plt.setp(axs[i].spines[\"right\"], color=palette[i])\n axs[i].tick_params(axis=\"y\", colors=palette[i])\n if refs and refs[i]:\n axs[i].axhline(y=refs[i], zorder=100, color=palette[i], ls=\"--\", alpha=alpha)\n\n 
plt.setp(axs[-1].spines[\"left\"], color=palette[0]) # https://stackoverflow.com/a/20371140/6313433\n fig.legend(bbox_to_anchor=(0.8,0.1), loc=\"lower right\", fontsize=\"medium\")\n ax.set_xlabel(\"time\")\n plt.savefig(saveas, bbox_inches=\"tight\")\n plt.show()\n\n# four_axis(\n# data,\n# cols=[\"x1\", \"x3\", \"c1\", \"c2\"],\n# labels=[r\"$C_X$\", r\"$C_{q_c}$\", r\"$I$\", r\"$F_N$\"],\n# saveas=\"./plots/bioreactor_x1_x3_c1_c2.pdf\"\n# )\n# four_axis(\n# data,\n# cols=[\"x1\", \"x3\", \"c1\", \"c2\"],\n# labels=[r\"$C_X$\", r\"$C_{q_c}$\", r\"$I$\", r\"$F_N$\"],\n# saveas=\"./plots/bioreactor_x1_x3_c1_c2.svg\"\n# )\n\n#################################### semibatch_reactor ####################################\n\nresults_dir = Path(\"./data/semibatch_reactor.jl/2021-04-20T13_42_03.829/\") # 2021-04-20T13_42_03.829, 2021-04-20T15_18_03.731\ndata = pd.read_csv(results_dir / \"data.csv\")\n\nfig = data.plot(\"t\", [\"x1\",\"x2\",\"x3\"], label=[r\"$C_A$\", r\"$C_B$\", r\"$C_C$\"])\nfig.legend(fontsize=\"medium\")\nfig.set_xlabel(\"time\")\nplt.savefig(\"./plots/semibatch_x1_x2_x3.pdf\")\nplt.savefig(\"./plots/semibatch_x1_x2_x3.svg\")\nplt.show()\n\nfig, axs = plt.subplots(1, 1, sharex=True, constrained_layout=True, squeeze=True, figsize=(8, 4*1))\ndata.plot(\"t\", \"x4\", ax=axs, color=palette[3], label=r\"$T$\")\naxs.axhline(y=420, zorder=100, color=\"orange\", ls=\"--\", alpha=0.7)\naxs.legend(loc='center right')\naxs.set_xlabel(\"time\")\nplt.savefig(\"./plots/semibatch_x4.pdf\")\nplt.savefig(\"./plots/semibatch_x4.svg\")\nplt.show()\n\nfig, axs = plt.subplots(1, 1, sharex=True, constrained_layout=True, squeeze=True, figsize=(8, 4*1))\ndata.plot(\"t\", \"x5\", ax=axs, color=palette[4], label=r\"$V$\")\naxs.axhline(y=200, zorder=100, color=\"orange\", ls=\"--\", alpha=0.7)\naxs.legend(loc='center right')\naxs.set_xlabel(\"time\")\nplt.savefig(\"./plots/semibatch_x5.pdf\")\nplt.savefig(\"./plots/semibatch_x5.svg\")\nplt.show()\n\ntwo_axis(\n data,\n cols=[\"c1\", \"c2\"],\n colors=[palette[6], palette[7]],\n labels=[r\"$F$\", r\"$T_a$\"],\n saveas=\"./plots/semibatch_c1_c2.pdf\"\n)\ntwo_axis(\n data,\n cols=[\"c1\", \"c2\"],\n colors=[palette[6], palette[7]],\n labels=[r\"$F$\", r\"$T_a$\"],\n saveas=\"./plots/semibatch_c1_c2.svg\"\n)\n\n# these do not work out-of-the-box due to the overlapping horizontal line reference\n\n# two_axis(\n# data,\n# cols=[\"x4\", \"x5\"],\n# labels=[r\"$T$\", r\"$V$\"],\n# refs=[420, 200],\n# ref_alpha=0.6,\n# saveas=\"./plots/semibatch_x4_x5.svg\"\n# )\n\n# four_axis(\n# data,\n# cols=[\"x4\", \"x5\", \"c1\", \"c2\"],\n# labels=[r\"$T$\", r\"$V$\", r\"$F$\", r\"$T_a$\"],\n# refs=[420, 200, None, None],\n# saveas=\"./plots/semibatch_x4_x5_c1_c2_alt.svg\"\n# )\n\n\n#################################### set-point tracking ####################################\n'''\ndef ref_track(data, reversible, saveas=None):\n assert reversible is not None\n if reversible:\n y1s = 0.408126\n y2s = 3.29763\n us = 370\n else:\n y1s = 0.433848\n y2s = 0.659684\n us = 3.234\n\n fig, axs = plt.subplots(3, 1, sharex=True, constrained_layout=True, squeeze=True, figsize=(8, 3*4))\n # plt.xlabel(\"time\")\n data.plot(\"t\", \"x1\", ax=axs[0], color=palette[0], label=r\"$x_1$\")\n axs[0].axhline(y=y1s, zorder=100, color=\"orange\", ls=\"--\", alpha=0.7)\n axs[0].legend(loc='center right')\n data.plot(\"t\", \"x2\", ax=axs[1], color=palette[1], label=r\"$x_2$\")\n axs[1].axhline(y=y2s, zorder=100, color=\"orange\", ls=\"--\", alpha=0.7)\n axs[1].legend(loc='center right')\n 
data.plot(\"t\", \"c1\", ax=axs[2], color=palette[2], label=r\"$c_1$\")\n axs[2].axhline(y=us, zorder=100, color=\"orange\", ls=\"--\", alpha=0.7)\n axs[2].legend(loc='center right')\n axs[2].set_xlabel(\"time\")\n if saveas:\n plt.savefig(saveas)\n plt.show()\n\n# case 1\nresults_dir = Path(\"./data/reference_tracking.jl/2021-04-20T16_20_04.482/\")\ndata = pd.read_csv(results_dir / \"data.csv\")\n\nref_track(data, reversible=True, saveas=\"./plots/reftrack_case1.pdf\")\nref_track(data, reversible=True, saveas=\"./plots/reftrack_case1.svg\")\n\n# case 4\nresults_dir = Path(\"./data/reference_tracking.jl/2021-04-21T17_58_04.656/\")\ndata = pd.read_csv(results_dir / \"data.csv\")\n\nref_track(data, reversible=False, saveas=\"./plots/reftrack_case4.pdf\")\nref_track(data, reversible=False, saveas=\"./plots/reftrack_case4.svg\")\n\n# custom case\n'''"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.setp",
"matplotlib.cycler",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.rc"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
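The plotting record above pairs two series on a shared time axis with independent y axes via `ax.twinx()` (its `two_axis` helper). A minimal sketch of that pattern with hypothetical data; the names `t`, `c1`, `c2` and the specific colors are assumptions, not values from the record.

```python
import matplotlib.pyplot as plt
import numpy as np

t = np.linspace(0, 240, 200)                 # hypothetical time grid
c1 = np.sin(t / 40.0)                        # hypothetical control/state series
c2 = 300.0 + 20.0 * np.cos(t / 60.0)

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()                            # second y axis sharing the same x axis
ax1.plot(t, c1, color="tab:blue", label="c1")
ax2.plot(t, c2, color="tab:orange", label="c2")
ax1.set_xlabel("time")
ax1.tick_params(axis="y", colors="tab:blue")     # color each y axis like its series
ax2.tick_params(axis="y", colors="tab:orange")
fig.legend(loc="lower right", fontsize="medium")
plt.show()
```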
KonstantinKlepikov/scikit-fda
|
[
"93c4ad80aaba8739b4f90932a2a759d6f5960387"
] |
[
"tests/test_oneway_anova.py"
] |
[
"import unittest\nimport numpy as np\nimport pytest\n\nfrom skfda.representation import FDataGrid\nfrom skfda.representation.basis import Fourier\nfrom skfda.datasets import fetch_gait\nfrom skfda.inference.anova import oneway_anova, v_asymptotic_stat, \\\n v_sample_stat\n\n\nclass OnewayAnovaTests(unittest.TestCase):\n\n def test_oneway_anova_args(self):\n with self.assertRaises(ValueError):\n oneway_anova()\n with self.assertRaises(ValueError):\n oneway_anova(1, '2')\n with self.assertRaises(ValueError):\n oneway_anova(FDataGrid([0]), n_reps=-2)\n\n def test_v_stats_args(self):\n with self.assertRaises(ValueError):\n v_sample_stat(1, [1])\n with self.assertRaises(ValueError):\n v_sample_stat(FDataGrid([0]), [0, 1])\n with self.assertRaises(ValueError):\n v_asymptotic_stat(1, [1])\n with self.assertRaises(ValueError):\n v_asymptotic_stat(FDataGrid([0]), [0, 1])\n with self.assertRaises(ValueError):\n v_asymptotic_stat(FDataGrid([[1, 1, 1], [1, 1, 1]]), [0, 0])\n\n\n def test_v_stats(self):\n n_features = 50\n weights = [1, 2, 3]\n t = np.linspace(0, 1, n_features)\n m1 = [1 for _ in range(n_features)]\n m2 = [2 for _ in range(n_features)]\n m3 = [3 for _ in range(n_features)]\n fd = FDataGrid([m1, m2, m3], sample_points=t)\n self.assertEqual(v_sample_stat(fd, weights), 7.0)\n self.assertAlmostEqual(v_sample_stat(fd.to_basis(Fourier(n_basis=5)),\n weights), 7.0)\n res = (1 - 2 * np.sqrt(1 / 2)) ** 2 + (1 - 3 * np.sqrt(1 / 3)) ** 2 \\\n + (2 - 3 * np.sqrt(2 / 3)) ** 2\n self.assertAlmostEqual(v_asymptotic_stat(fd, weights), res)\n self.assertAlmostEqual(v_asymptotic_stat(fd.to_basis(Fourier(\n n_basis=5)), weights), res)\n\n def test_asymptotic_behaviour(self):\n dataset = fetch_gait()\n fd = dataset['data'].coordinates[1]\n fd1 = fd[0:5]\n fd2 = fd[5:10]\n fd3 = fd[10:15]\n\n n_little_sim = 10\n\n sims = np.array([oneway_anova(fd1, fd2, fd3, n_reps=500)[1] for _ in\n range(n_little_sim)])\n little_sim = np.mean(sims)\n big_sim = oneway_anova(fd1, fd2, fd3, n_reps=2000)[1]\n self.assertAlmostEqual(little_sim, big_sim, delta=0.05)\n\n fd = fd.to_basis(Fourier(n_basis=5))\n fd1 = fd[0:5]\n fd2 = fd[5:10]\n\n sims = np.array([oneway_anova(fd1, fd2, n_reps=500)[1] for _ in\n range(n_little_sim)])\n little_sim = np.mean(sims)\n big_sim = oneway_anova(fd1, fd2, n_reps=2000)[1]\n self.assertAlmostEqual(little_sim, big_sim, delta=0.05)\n\n\nif __name__ == '__main__':\n print()\n unittest.main()\n"
] |
[
[
"numpy.mean",
"numpy.sqrt",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
declanmillar/neural-networks-and-deep-learning
|
[
"e84209e73ea2cbed4b86ff46b4f5676a65a381a9"
] |
[
"src/network2.py"
] |
[
"\"\"\"\r\nnetwork2.py\r\n~~~~~~~~~~~~~~\r\nAn improved version of network.py, implementing the stochastic\r\ngradient descent learning algorithm for a feedforward neural network.\r\nImprovements include the addition of the cross-entropy cost function,\r\nregularization, and better initialization of network weights. Note\r\nthat I have focused on making the code simple, easily readable, and\r\neasily modifiable. It is not optimized, and omits many desirable\r\nfeatures.\r\n\"\"\"\r\n\r\n#### Libraries\r\n# Standard library\r\nimport json\r\nimport random\r\nimport sys\r\n\r\n# Third-party libraries\r\nimport numpy as np\r\n\r\n\r\n#### Define the quadratic and cross-entropy cost functions\r\n\r\n\r\nclass QuadraticCost(object):\r\n @staticmethod\r\n def fn(a, y):\r\n \"\"\"\r\n Return the cost associated with an output ``a`` and desired output ``y``.\r\n \"\"\"\r\n return 0.5 * np.linalg.norm(a - y) ** 2\r\n\r\n @staticmethod\r\n def delta(z, a, y):\r\n \"\"\"Return the error delta from the output layer.\"\"\"\r\n return (a - y) * sigmoid_prime(z)\r\n\r\n\r\nclass CrossEntropyCost(object):\r\n @staticmethod\r\n def fn(a, y):\r\n \"\"\"Return the cost associated with an output ``a`` and desired output\r\n ``y``. Note that np.nan_to_num is used to ensure numerical\r\n stability. In particular, if both ``a`` and ``y`` have a 1.0\r\n in the same slot, then the expression (1-y)*np.log(1-a)\r\n returns nan. The np.nan_to_num ensures that that is converted\r\n to the correct value (0.0).\r\n\r\n \"\"\"\r\n return np.sum(np.nan_to_num(-y * np.log(a) - (1 - y) * np.log(1 - a)))\r\n\r\n @staticmethod\r\n def delta(z, a, y):\r\n \"\"\"Return the error delta from the output layer. Note that the\r\n parameter ``z`` is not used by the method. It is included in\r\n the method's parameters in order to make the interface\r\n consistent with the delta method for other cost classes.\r\n\r\n \"\"\"\r\n return a - y\r\n\r\n\r\n#### Main Network class\r\nclass Network(object):\r\n def __init__(self, sizes, cost=CrossEntropyCost):\r\n \"\"\"\r\n The list ``sizes`` contains the number of neurons in the respective\r\n layers of the network. For example, if the list was [2, 3, 1]\r\n then it would be a three-layer network, with the first layer\r\n containing 2 neurons, the second layer 3 neurons, and the\r\n third layer 1 neuron. The biases and weights for the network\r\n are initialized randomly, using\r\n ``self.default_weight_initializer`` (see docstring for that\r\n method).\r\n \"\"\"\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n self.default_weight_initializer()\r\n self.cost = cost\r\n\r\n def default_weight_initializer(self):\r\n \"\"\"Initialize each weight using a Gaussian distribution with mean 0\r\n and standard deviation 1 over the square root of the number of\r\n weights connecting to the same neuron. Initialize the biases\r\n using a Gaussian distribution with mean 0 and standard\r\n deviation 1.\r\n\r\n Note that the first layer is assumed to be an input layer, and\r\n by convention we won't set any biases for those neurons, since\r\n biases are only ever used in computing the outputs from later\r\n layers.\r\n\r\n \"\"\"\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x) / np.sqrt(x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n def large_weight_initializer(self):\r\n \"\"\"Initialize the weights using a Gaussian distribution with mean 0\r\n and standard deviation 1. 
Initialize the biases using a\r\n Gaussian distribution with mean 0 and standard deviation 1.\r\n\r\n Note that the first layer is assumed to be an input layer, and\r\n by convention we won't set any biases for those neurons, since\r\n biases are only ever used in computing the outputs from later\r\n layers.\r\n\r\n This weight and bias initializer uses the same approach as in\r\n Chapter 1, and is included for purposes of comparison. It\r\n will usually be better to use the default weight initializer\r\n instead.\r\n\r\n \"\"\"\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n def feedforward(self, a):\r\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\r\n for b, w in zip(self.biases, self.weights):\r\n a = sigmoid(np.dot(w, a) + b)\r\n return a\r\n\r\n def SGD(\r\n self,\r\n training_data,\r\n epochs,\r\n mini_batch_size,\r\n eta,\r\n lmbda=0.0,\r\n evaluation_data=None,\r\n monitor_evaluation_cost=False,\r\n monitor_evaluation_accuracy=False,\r\n monitor_training_cost=False,\r\n monitor_training_accuracy=False,\r\n ):\r\n \"\"\"Train the neural network using mini-batch stochastic gradient\r\n descent. The ``training_data`` is a list of tuples ``(x, y)``\r\n representing the training inputs and the desired outputs. The\r\n other non-optional parameters are self-explanatory, as is the\r\n regularization parameter ``lmbda``. The method also accepts\r\n ``evaluation_data``, usually either the validation or test\r\n data. We can monitor the cost and accuracy on either the\r\n evaluation data or the training data, by setting the\r\n appropriate flags. The method returns a tuple containing four\r\n lists: the (per-epoch) costs on the evaluation data, the\r\n accuracies on the evaluation data, the costs on the training\r\n data, and the accuracies on the training data. All values are\r\n evaluated at the end of each training epoch. So, for example,\r\n if we train for 30 epochs, then the first element of the tuple\r\n will be a 30-element list containing the cost on the\r\n evaluation data at the end of each epoch. 
Note that the lists\r\n are empty if the corresponding flag is not set.\r\n\r\n \"\"\"\r\n if evaluation_data:\r\n n_data = len(evaluation_data)\r\n n = len(training_data)\r\n evaluation_cost, evaluation_accuracy = [], []\r\n training_cost, training_accuracy = [], []\r\n for j in range(epochs):\r\n random.shuffle(training_data)\r\n mini_batches = [training_data[k : k + mini_batch_size] for k in range(0, n, mini_batch_size)]\r\n for mini_batch in mini_batches:\r\n self.update_mini_batch(mini_batch, eta, lmbda, len(training_data))\r\n print(\"Epoch %s training complete\" % j)\r\n if monitor_training_cost:\r\n cost = self.total_cost(training_data, lmbda)\r\n training_cost.append(cost)\r\n print(\"Cost on training data: {}\".format(cost))\r\n if monitor_training_accuracy:\r\n accuracy = self.accuracy(training_data, convert=True)\r\n training_accuracy.append(accuracy)\r\n print(\"Accuracy on training data: {} / {}\".format(accuracy, n))\r\n if monitor_evaluation_cost:\r\n cost = self.total_cost(evaluation_data, lmbda, convert=True)\r\n evaluation_cost.append(cost)\r\n print(\"Cost on evaluation data: {}\".format(cost))\r\n if monitor_evaluation_accuracy:\r\n accuracy = self.accuracy(evaluation_data)\r\n evaluation_accuracy.append(accuracy)\r\n print(\"Accuracy on evaluation data: {} / {}\".format(self.accuracy(evaluation_data), n_data))\r\n print\r\n return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy\r\n\r\n def update_mini_batch(self, mini_batch, eta, lmbda, n):\r\n \"\"\"Update the network's weights and biases by applying gradient\r\n descent using backpropagation to a single mini batch. The\r\n ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the\r\n learning rate, ``lmbda`` is the regularization parameter, and\r\n ``n`` is the total size of the training data set.\r\n\r\n \"\"\"\r\n nabla_b = [np.zeros(b.shape) for b in self.biases]\r\n nabla_w = [np.zeros(w.shape) for w in self.weights]\r\n for x, y in mini_batch:\r\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\r\n nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\r\n nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\r\n self.weights = [(1 - eta * (lmbda / n)) * w - (eta / len(mini_batch)) * nw for w, nw in zip(self.weights, nabla_w)]\r\n self.biases = [b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, nabla_b)]\r\n\r\n def backprop(self, x, y):\r\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\r\n gradient for the cost function C_x. ``nabla_b`` and\r\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\r\n to ``self.biases`` and ``self.weights``.\"\"\"\r\n nabla_b = [np.zeros(b.shape) for b in self.biases]\r\n nabla_w = [np.zeros(w.shape) for w in self.weights]\r\n # feedforward\r\n activation = x\r\n activations = [x] # list to store all the activations, layer by layer\r\n zs = [] # list to store all the z vectors, layer by layer\r\n for b, w in zip(self.biases, self.weights):\r\n z = np.dot(w, activation) + b\r\n zs.append(z)\r\n activation = sigmoid(z)\r\n activations.append(activation)\r\n # backward pass\r\n delta = (self.cost).delta(zs[-1], activations[-1], y)\r\n nabla_b[-1] = delta\r\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\r\n # Note that the variable l in the loop below is used a little\r\n # differently to the notation in Chapter 2 of the book. Here,\r\n # l = 1 means the last layer of neurons, l = 2 is the\r\n # second-last layer, and so on. 
It's a renumbering of the\r\n # scheme in the book, used here to take advantage of the fact\r\n # that Python can use negative indices in lists.\r\n for l in range(2, self.num_layers):\r\n z = zs[-l]\r\n sp = sigmoid_prime(z)\r\n delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp\r\n nabla_b[-l] = delta\r\n nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())\r\n return (nabla_b, nabla_w)\r\n\r\n def accuracy(self, data, convert=False):\r\n \"\"\"Return the number of inputs in ``data`` for which the neural\r\n network outputs the correct result. The neural network's\r\n output is assumed to be the index of whichever neuron in the\r\n final layer has the highest activation.\r\n\r\n The flag ``convert`` should be set to False if the data set is\r\n validation or test data (the usual case), and to True if the\r\n data set is the training data. The need for this flag arises\r\n due to differences in the way the results ``y`` are\r\n represented in the different data sets. In particular, it\r\n flags whether we need to convert between the different\r\n representations. It may seem strange to use different\r\n representations for the different data sets. Why not use the\r\n same representation for all three data sets? It's done for\r\n efficiency reasons -- the program usually evaluates the cost\r\n on the training data and the accuracy on other data sets.\r\n These are different types of computations, and using different\r\n representations speeds things up. More details on the\r\n representations can be found in\r\n mnist_loader.load_data_wrapper.\r\n\r\n \"\"\"\r\n if convert:\r\n results = [(np.argmax(self.feedforward(x)), np.argmax(y)) for (x, y) in data]\r\n else:\r\n results = [(np.argmax(self.feedforward(x)), y) for (x, y) in data]\r\n return sum(int(x == y) for (x, y) in results)\r\n\r\n def total_cost(self, data, lmbda, convert=False):\r\n \"\"\"Return the total cost for the data set ``data``. The flag\r\n ``convert`` should be set to False if the data set is the\r\n training data (the usual case), and to True if the data set is\r\n the validation or test data. See comments on the similar (but\r\n reversed) convention for the ``accuracy`` method, above.\r\n \"\"\"\r\n cost = 0.0\r\n for x, y in data:\r\n a = self.feedforward(x)\r\n if convert:\r\n y = vectorized_result(y)\r\n cost += self.cost.fn(a, y) / len(data)\r\n cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w) ** 2 for w in self.weights)\r\n return cost\r\n\r\n def save(self, filename):\r\n \"\"\"Save the neural network to the file ``filename``.\"\"\"\r\n data = {\r\n \"sizes\": self.sizes,\r\n \"weights\": [w.tolist() for w in self.weights],\r\n \"biases\": [b.tolist() for b in self.biases],\r\n \"cost\": str(self.cost.__name__),\r\n }\r\n f = open(filename, \"w\")\r\n json.dump(data, f)\r\n f.close()\r\n\r\n\r\n#### Loading a Network\r\ndef load(filename):\r\n \"\"\"Load a neural network from the file ``filename``. Returns an\r\n instance of Network.\r\n\r\n \"\"\"\r\n f = open(filename, \"r\")\r\n data = json.load(f)\r\n f.close()\r\n cost = getattr(sys.modules[__name__], data[\"cost\"])\r\n net = Network(data[\"sizes\"], cost=cost)\r\n net.weights = [np.array(w) for w in data[\"weights\"]]\r\n net.biases = [np.array(b) for b in data[\"biases\"]]\r\n return net\r\n\r\n\r\n#### Miscellaneous functions\r\ndef vectorized_result(j):\r\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the j'th position\r\n and zeroes elsewhere. 
This is used to convert a digit (0...9)\r\n into a corresponding desired output from the neural network.\r\n\r\n \"\"\"\r\n e = np.zeros((10, 1))\r\n e[j] = 1.0\r\n return e\r\n\r\n\r\ndef sigmoid(z):\r\n \"\"\"The sigmoid function.\"\"\"\r\n return 1.0 / (1.0 + np.exp(-z))\r\n\r\n\r\ndef sigmoid_prime(z):\r\n \"\"\"Derivative of the sigmoid function.\"\"\"\r\n return sigmoid(z) * (1 - sigmoid(z))\r\n\r\n"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.random.randn",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juliusf/Genetic-SRCPSP
|
[
"0ab31ff9e929576a922eaab4f8d6618cf02e8ce8"
] |
[
"deepThought/stats/phase_type.py"
] |
[
"__author__ = 'jules'\n\nimport numpy as np\n\nfrom scipy.stats import uniform\n\nfrom customDistributions import Hyperexp2_gen, Erlangk_gen, Gamma, BranchingErlang, TruncatedErlangk_gen, MixtureNormUni, TruncatedHyperexp2\n\nimport math\n\ndef infer_distribution(job):\n\n X = np.array(job.execution_history)\n mean = np.mean(X)\n cx = np.sqrt(np.var(X)) / mean\n cxsqrd = cx * cx\n\n if cxsqrd >= 1:\n return approximate_truncated_hyperexp2(cxsqrd, mean, job)\n #return approximate_hyperexp2(cxsqrd, mean)\n\n elif cxsqrd >= 0.5:\n return approximate_truncated_erlangk(cxsqrd, mean, job)\n #return approximate_erlangk(cxsqrd, mean)\n\n else:\n\n #return approximate_gamma(cxsqrd, mean)\n #return approximate_truncated_erlangk(cxsqrd, mean, job)\n #return approximate_erlangk(cxsqrd, mean)\n #return approximate_mix_gaussian_uniform(X)\n return approximate_uniform(cxsqrd, mean)\n\ndef infer_inverted_cdf(task):\n cx = task.deviation / task.mean\n cxsqrd = cx * cx\n\n #truncated distribution\n if task.execution_time_limit != -1:\n if cxsqrd >= 1:\n return approximate_truncated_hyperexp2(cxsqrd, task.mean, task)\n else:\n return approximate_truncated_erlangk(cxsqrd, task.mean, task)\n else:\n if cxsqrd >=1:\n return approximate_hyperexp2(cxsqrd,task.mean)\n else:\n return approximate_erlangk(cxsqrd, task.mean)\n\n\ndef approximate_mix_gaussian_uniform(X):\n mu = np.mean(X)\n sigma = np.var(X)\n p = 0.1\n a = 0.0\n b = (2 * mu) * (1-p) #check whether this could be approximated otherwise\n\n mu1 = mu / p\n mu2 = (1.0/2.0) * (a+b)\n sigma2 = np.sqrt( (1.0/12.0) * ((b-a)**2) )\n #sigma1 = np.sqrt( -(mu1)**2 + (mu2**2) - ((mu2**2)/p) + (sigma2**2) + (sigma /p) - (sigma2**2)/p )\n sigma1 = np.sqrt(-mu1**2 - sigma2 ** 2 + (sigma2 / p) + (mu2/p) + (sigma/p)) # produces complex results! 
can't be handled by np.sqrt()\n dist = MixtureNormUni(p, sigma1, mu, a, b)\n dist.name = \"gaussian uniform mixture\"\n return dist\n\n\ndef approximate_uniform(cxsqrd, mean):\n b = 2 * mean\n dist = uniform(scale=b)\n dist.name = \"uniform\"\n return dist\n\ndef approximate_branching_erlang(cxsqrd, mean):\n k =int(math.ceil(1.0/cxsqrd))\n a = ( 2*k*cxsqrd + (k - 2) - np.sqrt( (k**2) + 4 - 4 * k * cxsqrd ))/( 2*(k-1)*(cxsqrd+1) )\n mu = (k - a * (k-1))/mean\n dist = BranchingErlang(a,mu,k)\n dist.name = \"Branching Erlang\"\n return dist\n\ndef approximate_lognorm(cxsqrd, mean):\n pass\n #dist = lognorm([sigma], loc=mu)\n #dist.name = \"lognorm\"\n #return dist\n\ndef approximate_erlangk(cxsqrd, mean):\n k = 2\n while True: #solve it like a hacker!\n if ( (1.0/k) <= cxsqrd) and ( 1.0 / (k -1.0)) >= cxsqrd:\n break\n else:\n k +=1\n\n #p = (1.0/ (1 + cxsqrd)) * ( k * cxsqrd - (k * (1 + cxsqrd) - (k ** 2) * cxsqrd)**(1.0/2.0) )\n p = (1.0/ (1 + cxsqrd)) * ( k * cxsqrd -np.sqrt(k*(1+cxsqrd) -k*k*cxsqrd ))\n mu = (k - p) / mean\n dist =Erlangk_gen(\"Erlang_k-1,k\", mu=mu, p=p, k=k)\n dist.name = \"Erlangk, k-1\"\n return dist\n\ndef approximate_truncated_erlangk(cxsqrd, mean, job):\n a = 0\n if job.execution_time_limit == -1:\n b = 86400 # 24h\n else:\n b = job.execution_time_limit\n\n k = 2\n while True: #solve it like a hacker!\n if ( (1.0/k) <= cxsqrd) and ( 1.0 / (k -1.0)) >= cxsqrd:\n break\n else:\n k +=1\n\n\n p = (1.0/ (1 + cxsqrd)) * ( k * cxsqrd -np.sqrt(k*(1+cxsqrd) -k*k*cxsqrd ))\n mu = (k - p) / mean\n dist =TruncatedErlangk_gen(\"Erlang_k-1,k\", mu=mu, p=p, k=k, a=a,b=b)\n dist.name = \"Truncated Erlangk, k-1\"\n return dist\n\ndef approximate_hyperexp2(cxsqrd, mean):\n p1 = (1.0/2.0) * (1.0 + np.sqrt( ( cxsqrd - 1.0) / (cxsqrd + 1.0)) )\n p2 = 1.0 - p1\n\n mu1 = (2.0 * p1) / mean\n mu2 = (2.0 * p2) / mean\n dist= Hyperexp2_gen(\"bar\", lambda1=mu1, lambda2=mu2, p=p1)\n dist.name = \"Hyperexp2\"\n return dist\n\ndef approximate_truncated_hyperexp2(cxsqrd, mean, job):\n a = 0\n if job.execution_time_limit == -1:\n b = 86400 # 24h\n else:\n b = job.execution_time_limit\n\n p1 = (1.0/2.0) * (1.0 + np.sqrt( ( cxsqrd - 1.0) / (cxsqrd + 1.0)) )\n p2 = 1.0 - p1\n\n mu1 = (2.0 * p1) / mean\n mu2 = (2.0 * p2) / mean\n dist= TruncatedHyperexp2(\"bar\", lambda1=mu1, lambda2=mu2, p=p1, a=a, b=b)\n dist.name = \"truncated Hyperexp2\"\n return dist\ndef approximate_gamma(cxsqrd, mean):\n mu = 1 / mean\n alpha = 1 / cxsqrd\n dist = Gamma(alpha, mu)\n dist.name = \"Gamma\"\n return dist\n"
] |
[
[
"numpy.sqrt",
"scipy.stats.uniform",
"numpy.mean",
"numpy.var",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dfface/DoctorKG
|
[
"6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8",
"6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8"
] |
[
"tools/deepke/relation_extraction/few_shot/lit_models/util.py",
"tools/deepke/relation_extraction/multimodal/modules/dataset.py"
] |
[
"import numpy as np \r\n\r\n\r\ndef dialog_f1_eval(logits, labels):\r\n def getpred(result, T1=0.5, T2=0.4):\r\n # 使用阈值得到preds, result = logits\r\n # T2 表示如果都低于T2 那么就是 no relation, 否则选取一个最大的\r\n ret = []\r\n for i in range(len(result)):\r\n r = []\r\n maxl, maxj = -1, -1\r\n for j in range(len(result[i])):\r\n if result[i][j] > T1:\r\n r += [j]\r\n if result[i][j] > maxl:\r\n maxl = result[i][j]\r\n maxj = j\r\n if len(r) == 0:\r\n if maxl <= T2:\r\n r = [36]\r\n else:\r\n r += [maxj]\r\n ret.append(r)\r\n return ret\r\n\r\n def geteval(devp, data):\r\n correct_sys, all_sys = 0, 0\r\n correct_gt = 0\r\n\r\n for i in range(len(data)):\r\n # 每一个样本 都是[1,4,...,20] 表示有1,4,20 是1, 如果没有就是[36]\r\n for id in data[i]:\r\n if id != 36:\r\n # 标签中 1 的个数\r\n correct_gt += 1\r\n if id in devp[i]:\r\n # 预测正确\r\n correct_sys += 1\r\n\r\n for id in devp[i]:\r\n if id != 36:\r\n all_sys += 1\r\n\r\n precision = 1 if all_sys == 0 else correct_sys / all_sys\r\n recall = 0 if correct_gt == 0 else correct_sys / correct_gt\r\n f_1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0\r\n return f_1\r\n\r\n logits = np.asarray(logits)\r\n logits = list(1 / (1 + np.exp(-logits)))\r\n\r\n temp_labels = []\r\n for l in labels:\r\n t = []\r\n for i in range(36):\r\n if l[i] == 1:\r\n t += [i]\r\n if len(t) == 0:\r\n t = [36]\r\n temp_labels.append(t)\r\n assert (len(labels) == len(logits))\r\n labels = temp_labels\r\n\r\n bestT2 = bestf_1 = 0\r\n for T2 in range(51):\r\n devp = getpred(logits, T2=T2 / 100.)\r\n f_1 = geteval(devp, labels)\r\n if f_1 > bestf_1:\r\n bestf_1 = f_1\r\n bestT2 = T2 / 100.\r\n\r\n return dict(f1=bestf_1, T2=bestT2)\r\n\r\n\r\n\r\ndef f1_eval(logits, labels):\r\n def getpred(result, T1 = 0.5, T2 = 0.4) :\r\n # 使用阈值得到preds, result = logits\r\n # T2 表示如果都低于T2 那么就是 no relation, 否则选取一个最大的\r\n ret = []\r\n for i in range(len(result)):\r\n r = []\r\n maxl, maxj = -1, -1\r\n for j in range(len(result[i])):\r\n if result[i][j] > T1:\r\n r += [j]\r\n if result[i][j] > maxl:\r\n maxl = result[i][j]\r\n maxj = j\r\n if len(r) == 0:\r\n if maxl <= T2:\r\n r = [36]\r\n else:\r\n r += [maxj]\r\n ret.append(r)\r\n return ret\r\n\r\n def geteval(devp, data):\r\n correct_sys, all_sys = 0, 0\r\n correct_gt = 0\r\n \r\n for i in range(len(data)):\r\n # 每一个样本 都是[1,4,...,20] 表示有1,4,20 是1, 如果没有就是[36]\r\n for id in data[i]:\r\n if id != 36:\r\n # 标签中 1 的个数\r\n correct_gt += 1\r\n if id in devp[i]:\r\n # 预测正确\r\n correct_sys += 1\r\n\r\n for id in devp[i]:\r\n if id != 36:\r\n all_sys += 1\r\n\r\n precision = 1 if all_sys == 0 else correct_sys/all_sys\r\n recall = 0 if correct_gt == 0 else correct_sys/correct_gt\r\n f_1 = 2*precision*recall/(precision+recall) if precision+recall != 0 else 0\r\n return f_1\r\n\r\n logits = np.asarray(logits)\r\n logits = list(1 / (1 + np.exp(-logits)))\r\n\r\n temp_labels = []\r\n for l in labels:\r\n t = []\r\n for i in range(36):\r\n if l[i] == 1:\r\n t += [i]\r\n if len(t) == 0:\r\n t = [36]\r\n temp_labels.append(t)\r\n assert(len(labels) == len(logits))\r\n labels = temp_labels\r\n \r\n bestT2 = bestf_1 = 0\r\n for T2 in range(51):\r\n devp = getpred(logits, T2=T2/100.)\r\n f_1 = geteval(devp, labels)\r\n if f_1 > bestf_1:\r\n bestf_1 = f_1\r\n bestT2 = T2/100.\r\n\r\n return bestf_1, bestT2\r\n\r\n\r\n\r\ndef compute_f1(logits, labels):\r\n n_gold = n_pred = n_correct = 0\r\n preds = np.argmax(logits, axis=-1)\r\n for pred, label in zip(preds, labels):\r\n if pred != 0:\r\n n_pred += 1\r\n if label != 0:\r\n n_gold += 1\r\n if pred != 0 and label != 
0 and (pred == label):\r\n n_correct += 1\r\n if n_correct == 0:\r\n return {'precision': 0.0, 'recall': 0.0, 'f1': 0.0}\r\n else:\r\n prec = n_correct * 1.0 / n_pred\r\n recall = n_correct * 1.0 / n_gold\r\n if prec + recall > 0:\r\n f1 = 2.0 * prec * recall / (prec + recall)\r\n else:\r\n f1 = 0.0\r\n return {'precision': prec, 'recall': recall, 'f1': f1}\r\n\r\n\r\ndef acc(logits, labels):\r\n preds = np.argmax(logits, axis=-1)\r\n return (preds == labels).mean()\r\n\r\nfrom collections import Counter\r\ndef f1_score(output, label, rel_num=42, na_num=13):\r\n correct_by_relation = Counter()\r\n guess_by_relation = Counter()\r\n gold_by_relation = Counter()\r\n output = np.argmax(output, axis=-1)\r\n\r\n for i in range(len(output)):\r\n guess = output[i]\r\n gold = label[i]\r\n\r\n if guess == na_num:\r\n guess = 0\r\n elif guess < na_num:\r\n guess += 1\r\n\r\n if gold == na_num:\r\n gold = 0\r\n elif gold < na_num:\r\n gold += 1\r\n\r\n if gold == 0 and guess == 0:\r\n continue\r\n if gold == 0 and guess != 0:\r\n guess_by_relation[guess] += 1\r\n if gold != 0 and guess == 0:\r\n gold_by_relation[gold] += 1\r\n if gold != 0 and guess != 0:\r\n guess_by_relation[guess] += 1\r\n gold_by_relation[gold] += 1\r\n if gold == guess:\r\n correct_by_relation[gold] += 1\r\n \r\n f1_by_relation = Counter()\r\n recall_by_relation = Counter()\r\n prec_by_relation = Counter()\r\n for i in range(1, rel_num):\r\n recall = 0\r\n if gold_by_relation[i] > 0:\r\n recall = correct_by_relation[i] / gold_by_relation[i]\r\n precision = 0\r\n if guess_by_relation[i] > 0:\r\n precision = correct_by_relation[i] / guess_by_relation[i]\r\n if recall + precision > 0 :\r\n f1_by_relation[i] = 2 * recall * precision / (recall + precision)\r\n recall_by_relation[i] = recall\r\n prec_by_relation[i] = precision\r\n\r\n micro_f1 = 0\r\n if sum(guess_by_relation.values()) != 0 and sum(correct_by_relation.values()) != 0:\r\n recall = sum(correct_by_relation.values()) / sum(gold_by_relation.values())\r\n prec = sum(correct_by_relation.values()) / sum(guess_by_relation.values()) \r\n micro_f1 = 2 * recall * prec / (recall+prec)\r\n\r\n return dict(f1=micro_f1)",
"import random\r\nimport os\r\nimport torch\r\nimport json\r\nimport ast\r\nfrom PIL import Image\r\nfrom torch.utils.data import Dataset\r\nfrom transformers import BertTokenizer\r\nfrom ..models.clip.processing_clip import CLIPProcessor\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass MMREProcessor(object):\r\n def __init__(self, data_path, re_path, args):\r\n self.args = args\r\n self.data_path = data_path\r\n self.re_path = re_path\r\n self.tokenizer = BertTokenizer.from_pretrained(args.bert_name, do_lower_case=True)\r\n self.tokenizer.add_special_tokens({'additional_special_tokens':['<s>', '</s>', '<o>', '</o>']})\r\n\r\n self.clip_processor = CLIPProcessor.from_pretrained(args.vit_name)\r\n self.aux_processor = CLIPProcessor.from_pretrained(args.vit_name)\r\n self.aux_processor.feature_extractor.size, self.aux_processor.feature_extractor.crop_size = args.aux_size, args.aux_size\r\n self.rcnn_processor = CLIPProcessor.from_pretrained(args.vit_name)\r\n self.rcnn_processor.feature_extractor.size, self.rcnn_processor.feature_extractor.crop_size = args.rcnn_size, args.rcnn_size\r\n\r\n\r\n def load_from_file(self, mode=\"train\"):\r\n load_file = self.data_path[mode]\r\n logger.info(\"Loading data from {}\".format(load_file))\r\n with open(load_file, \"r\", encoding=\"utf-8\") as f:\r\n lines = f.readlines()\r\n words, relations, heads, tails, imgids, dataid = [], [], [], [], [], []\r\n for i, line in enumerate(lines):\r\n line = ast.literal_eval(line) # str to dict\r\n words.append(line['token'])\r\n relations.append(line['relation'])\r\n heads.append(line['h']) # {name, pos}\r\n tails.append(line['t'])\r\n imgids.append(line['img_id'])\r\n dataid.append(i)\r\n\r\n assert len(words) == len(relations) == len(heads) == len(tails) == (len(imgids))\r\n\r\n # 辅助图像\r\n aux_imgs = None\r\n # if not self.use_clip_vit:\r\n aux_path = self.data_path[mode+\"_auximgs\"]\r\n aux_imgs = torch.load(aux_path)\r\n rcnn_imgs = torch.load(self.data_path[mode+'_img2crop'])\r\n return {'words':words, 'relations':relations, 'heads':heads, 'tails':tails, 'imgids': imgids, 'dataid': dataid, 'aux_imgs':aux_imgs, \"rcnn_imgs\":rcnn_imgs}\r\n\r\n\r\n def get_relation_dict(self):\r\n with open(self.re_path, 'r', encoding=\"utf-8\") as f:\r\n line = f.readlines()[0]\r\n re_dict = json.loads(line)\r\n return re_dict\r\n\r\n def get_rel2id(self, train_path):\r\n with open(self.re_path, 'r', encoding=\"utf-8\") as f:\r\n line = f.readlines()[0]\r\n re_dict = json.loads(line)\r\n re2id = {key:[] for key in re_dict.keys()}\r\n with open(train_path, \"r\", encoding=\"utf-8\") as f:\r\n lines = f.readlines()\r\n for i, line in enumerate(lines):\r\n line = ast.literal_eval(line) # str to dict\r\n assert line['relation'] in re2id\r\n re2id[line['relation']].append(i)\r\n return re2id\r\n\r\n\r\nclass MMREDataset(Dataset):\r\n def __init__(self, processor, transform, img_path=None, aux_img_path=None, mode=\"train\") -> None:\r\n self.processor = processor\r\n self.args = self.processor.args\r\n self.transform = transform\r\n self.max_seq = self.args.max_seq\r\n self.img_path = img_path[mode] if img_path is not None else img_path\r\n self.aux_img_path = aux_img_path[mode] if aux_img_path is not None else aux_img_path\r\n self.rcnn_img_path = 'data'\r\n self.mode = mode\r\n self.data_dict = self.processor.load_from_file(mode)\r\n self.re_dict = self.processor.get_relation_dict()\r\n self.tokenizer = self.processor.tokenizer\r\n self.aux_size = self.args.aux_size\r\n self.rcnn_size = 
self.args.rcnn_size\r\n \r\n def __len__(self):\r\n return len(self.data_dict['words'])\r\n\r\n def __getitem__(self, idx):\r\n word_list, relation, head_d, tail_d, imgid = self.data_dict['words'][idx], self.data_dict['relations'][idx], self.data_dict['heads'][idx], self.data_dict['tails'][idx], self.data_dict['imgids'][idx]\r\n item_id = self.data_dict['dataid'][idx]\r\n # [CLS] ... <s> head </s> ... <o> tail <o/> .. [SEP]\r\n head_pos, tail_pos = head_d['pos'], tail_d['pos']\r\n # insert <s> <s/> <o> <o/>\r\n extend_word_list = []\r\n for i in range(len(word_list)):\r\n if i == head_pos[0]:\r\n extend_word_list.append('<s>')\r\n if i == head_pos[1]:\r\n extend_word_list.append('</s>')\r\n if i == tail_pos[0]:\r\n extend_word_list.append('<o>')\r\n if i == tail_pos[1]:\r\n extend_word_list.append('</o>')\r\n extend_word_list.append(word_list[i])\r\n extend_word_list = \" \".join(extend_word_list) # list不会进行子词分词\r\n encode_dict = self.tokenizer.encode_plus(text=extend_word_list, max_length=self.max_seq, truncation=True, padding='max_length')\r\n input_ids, token_type_ids, attention_mask = encode_dict['input_ids'], encode_dict['token_type_ids'], encode_dict['attention_mask']\r\n input_ids, token_type_ids, attention_mask = torch.tensor(input_ids), torch.tensor(token_type_ids), torch.tensor(attention_mask)\r\n \r\n re_label = self.re_dict[relation] # label to id\r\n\r\n # image process\r\n if self.img_path is not None:\r\n try:\r\n img_path = os.path.join(self.img_path, imgid)\r\n img_path = img_path.replace('test', 'train')\r\n image = Image.open(img_path).convert('RGB')\r\n image = self.processor.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()\r\n except:\r\n img_path = os.path.join(self.img_path, 'inf.png')\r\n image = Image.open(img_path).convert('RGB')\r\n image = self.processor.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()\r\n if self.aux_img_path is not None:\r\n # 辅助图像\r\n aux_imgs = []\r\n aux_img_paths = []\r\n imgid = imgid.split(\".\")[0]\r\n if item_id in self.data_dict['aux_imgs']:\r\n aux_img_paths = self.data_dict['aux_imgs'][item_id]\r\n aux_img_paths = [os.path.join(self.aux_img_path, path) for path in aux_img_paths]\r\n # 大于3需要舍弃\r\n for i in range(min(3, len(aux_img_paths))):\r\n aux_img = Image.open(aux_img_paths[i]).convert('RGB')\r\n aux_img = self.processor.aux_processor(images=aux_img, return_tensors='pt')['pixel_values'].squeeze()\r\n aux_imgs.append(aux_img)\r\n\r\n #小于3需要加padding-0\r\n for i in range(3-len(aux_imgs)):\r\n aux_imgs.append(torch.zeros((3, self.aux_size, self.aux_size))) \r\n\r\n aux_imgs = torch.stack(aux_imgs, dim=0)\r\n assert len(aux_imgs) == 3\r\n\r\n if self.rcnn_img_path is not None:\r\n rcnn_imgs = []\r\n rcnn_img_paths = []\r\n if imgid in self.data_dict['rcnn_imgs']:\r\n rcnn_img_paths = self.data_dict['rcnn_imgs'][imgid]\r\n rcnn_img_paths = [os.path.join(self.rcnn_img_path, path) for path in rcnn_img_paths]\r\n # 大于3需要舍弃\r\n for i in range(min(3, len(rcnn_img_paths))):\r\n rcnn_img = Image.open(rcnn_img_paths[i]).convert('RGB')\r\n rcnn_img = self.processor.rcnn_processor(images=rcnn_img, return_tensors='pt')['pixel_values'].squeeze()\r\n rcnn_imgs.append(rcnn_img)\r\n #小于3需要加padding-0\r\n for i in range(3-len(rcnn_imgs)):\r\n rcnn_imgs.append(torch.zeros((3, self.rcnn_size, self.rcnn_size))) \r\n\r\n rcnn_imgs = torch.stack(rcnn_imgs, dim=0)\r\n assert len(rcnn_imgs) == 3\r\n return input_ids, token_type_ids, attention_mask, torch.tensor(re_label), image, aux_imgs, 
rcnn_imgs\r\n\r\n return input_ids, token_type_ids, attention_mask, torch.tensor(re_label), image, aux_imgs\r\n \r\n return input_ids, token_type_ids, attention_mask, torch.tensor(re_label)\r\n \r\n\r\n"
] |
[
[
"numpy.asarray",
"numpy.argmax",
"numpy.exp"
],
[
"torch.stack",
"torch.tensor",
"torch.zeros",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
carlov93/predictive_maintenance
|
[
"eb00b82bde02668387d0308571296a82f78abef6"
] |
[
"src/ai/utils/data_preperator.py"
] |
[
"import pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom keras.preprocessing.sequence import TimeseriesGenerator\nimport torch\n\n\nclass DataPreperator:\n \"\"\"\n This class provides methods for pre-processing the sensor data so it can\n be used for training the LSTM network.\n \"\"\"\n def __init__(self, path, ignored_features, stake_training_data,\n features_not_to_scale, first_order_difference=False):\n self.path = path\n self.dataset = self.load_data()\n self.scaler = StandardScaler()\n self.first_order_difference = first_order_difference\n self.ignored_features = ignored_features\n self.features_not_to_scale = features_not_to_scale\n self.stake = stake_training_data\n \n def load_data(self):\n return pd.read_csv(self.path)\n \n def scale_data(self, train_data, val_data):\n \"\"\"\n This method scales the data by removing the mean and scaling to unit variance.\n If features_not_to_scale is not empty, those features are not scaled.\n \"\"\"\n if len(self.features_not_to_scale) == 0:\n self.scaler.fit(train_data)\n train_scaled = self.scaler.transform(train_data)\n val_scaled = self.scaler.transform(val_data)\n \n else:\n categorical_features_train = train_data.loc[:, self.features_not_to_scale]\n continous_features_train = train_data.drop(labels=self.features_not_to_scale, axis=1)\n categorical_features_val = val_data.loc[:, self.features_not_to_scale]\n continous_features_val = val_data.drop(labels=self.features_not_to_scale, axis=1)\n\n self.scaler.fit(continous_features_train)\n continous_train_scaled = self.scaler.transform(continous_features_train)\n continous_val_scaled = self.scaler.transform(continous_features_val)\n\n # Combine categorical and scaled features \n train_scaled = np.concatenate((continous_train_scaled,\n categorical_features_train), axis=1)\n val_scaled = np.concatenate((continous_val_scaled,\n categorical_features_val), axis=1)\n return train_scaled, val_scaled\n \n def drop_features(self):\n self.dataset = self.dataset.drop(labels=self.ignored_features, axis=1)\n \n def first_order_difference(self):\n \"\"\"\n This method calculates the 1-th order discrete difference along the given axis\n for removing a trend.\n \"\"\"\n self.dataset = self.dataset.diff(periods=1)\n self.dataset = self.dataset.dropna()\n \n def provide_statistics(self):\n return self.scaler.mean_, self.scaler.var_\n \n def prepare_data(self):\n \"\"\"\n This function wraps the pre-processing methods and split the data into train\n and validation data.\n :return: Training and val data with dimension [batch, sequence_length, features]\n \"\"\"\n self.drop_features()\n if self.first_order_difference:\n self.first_order_difference()\n amount_training_data = round(len(self.dataset)*self.stake)\n train_data = self.dataset.iloc[0:amount_training_data, :]\n val_data = self.dataset.iloc[amount_training_data:, :]\n train_preprocessed, val_preporcessed = self.scale_data(train_data, val_data)\n return train_preprocessed, val_preporcessed\n\n\nclass DataPreperatorPrediction:\n \"\"\"\n This class provides methods for scaling the sensor data accordingly to the mean\n and variance of the training data.\n The first column of the csv file has to be the ID of each sample!\n \"\"\"\n def __init__(self, path, ignored_features, mean_training_data,\n var_training_data, first_order_difference=False):\n self.path = path\n self.dataset = self.load_data()\n self.mean_training_data = mean_training_data\n self.var_training_data = 
var_training_data\n self.first_order_difference = first_order_difference\n self.ignored_features = ignored_features\n \n def load_data(self):\n return pd.read_csv(self.path)\n \n def drop_features(self):\n for feature in self.ignored_features:\n self.dataset = self.dataset.drop(labels=feature, axis=1)\n \n def first_order_difference(self):\n \"\"\"\n This method calculates the 1-th order discrete difference along the given axis\n for removing a trend.\n \"\"\"\n self.dataset = self.dataset.diff(periods=1)\n self.dataset = self.dataset.dropna()\n \n def scale_data(self):\n data_numpy = self.dataset.values\n data_scaled = np.zeros(shape=(len(data_numpy[:, 0]), data_numpy.shape[1]))\n \n # Copy ID of each sample\n data_scaled[:, 0] = data_numpy[:, 0]\n \n i = 1 # because first (i=0) feature is ID\n for mean, var in zip(self.mean_training_data, self.var_training_data):\n data_scaled[:, i] = np.subtract(data_numpy[:, i], mean)\n data_scaled[:, i] = np.divide(data_scaled[:, i], np.sqrt(var))\n i += 1\n return data_scaled\n \n def prepare_data(self):\n \"\"\"\n This function wraps the pre-processing methods.\n :return:\n \"\"\"\n self.drop_features()\n if self.first_order_difference:\n self.first_order_difference()\n preprocessed_data = self.scale_data()\n return preprocessed_data \n"
] |
[
[
"pandas.read_csv",
"numpy.sqrt",
"numpy.subtract",
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
markcx/pecan-dataport-DB
|
[
"2b3f628ea9e202bb736d75c49a3ffa50db93729d"
] |
[
"module/convert_yaml_to_hdf5.py"
] |
[
"from __future__ import print_function, division\n\nfrom copy import deepcopy\nfrom os import listdir\nfrom os.path import isdir, isfile, join, splitext\nfrom sys import stderr\n\nimport pandas as pd\nimport yaml\nfrom six import iteritems\n\nfrom .object_concatenation import get_appliance_types\n# from meta_info import feed_mapping\n\n# table columns #\nfeed_mapping = {\n 'air1': {'type': 'air conditioner'},\n 'air2': {'type': 'air conditioner'},\n 'air3': {'type': 'air conditioner'},\n 'airwindowunit1': {'type': 'air conditioner'},\n 'aquarium1': {'type': 'appliance'},\n 'bathroom1': {'type': 'sockets', 'room': 'bathroom'},\n 'bathroom2': {'type': 'sockets', 'room': 'bathroom'},\n 'bedroom1': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom2': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom3': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom4': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom5': {'type': 'sockets', 'room': 'bedroom'},\n 'battery1': {}, # new field, need mapping\n 'car1': {'type': 'electric vehicle'},\n 'circpump1': {}, # new field, need mapping\n 'clotheswasher1': {'type': 'washing machine'},\n 'clotheswasher_dryg1': {'type': 'washer dryer'},\n 'diningroom1': {'type': 'sockets', 'room': 'dining room'},\n 'diningroom2': {'type': 'sockets', 'room': 'dining room'},\n 'dishwasher1': {'type': 'dish washer'},\n 'disposal1': {'type': 'waste disposal unit'},\n 'drye1': {'type': 'spin dryer'},\n 'dryg1': {'type': 'spin dryer'},\n 'freezer1': {'type': 'freezer'},\n 'furnace1': {'type': 'electric furnace'},\n 'furnace2': {'type': 'electric furnace'},\n 'garage1': {'type': 'sockets', 'room': 'dining room'},\n 'garage2': {'type': 'sockets', 'room': 'dining room'},\n 'grid': {},\n 'heater1': {'type': 'electric space heater'},\n 'heater2': {'type': 'electric space heater'},\n 'heater3': {'type': 'electric space heater'},\n 'housefan1': {'type': 'electric space heater'},\n 'icemaker1': {'type': 'appliance'},\n 'jacuzzi1': {'type': 'electric hot tub heater'},\n 'kitchen1': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchen2': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},\n 'lights_plugs1': {'type': 'light'},\n 'lights_plugs2': {'type': 'light'},\n 'lights_plugs3': {'type': 'light'},\n 'lights_plugs4': {'type': 'light'},\n 'lights_plugs5': {'type': 'light'},\n 'lights_plugs6': {'type': 'light'},\n 'livingroom1': {'type': 'sockets', 'room': 'living room'},\n 'livingroom2': {'type': 'sockets', 'room': 'living room'},\n 'microwave1': {'type': 'microwave'},\n 'office1': {'type': 'sockets', 'room': 'office'},\n 'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},\n 'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},\n 'oven1': {'type': 'oven'},\n 'oven2': {'type': 'oven'},\n 'pool1': {'type': 'electric swimming pool heater'},\n 'pool2': {'type': 'electric swimming pool heater'},\n 'poollight1': {'type': 'light'},\n 'poolpump1': {'type': 'electric swimming pool heater'},\n 'pump1': {'type': 'appliance'},\n 'range1': {'type': 'stove'},\n 'refrigerator1': {'type': 'fridge'},\n 'refrigerator2': {'type': 'fridge'},\n 'security1': {'type': 'security alarm'},\n 'sewerpump1': {}, # new field, need mapping\n 'shed1': {'type': 'sockets', 'room': 'shed'},\n 'solar': {'type': 'solar'},\n 'solar2': {'type': 'solar'},\n 'sprinkler1': {'type': 'appliance'},\n 'sumppump1': {}, # new field, need mapping\n 'utilityroom1': {'type': 'sockets', 'room': 'utility room'},\n 
'venthood1': {'type': 'appliance'},\n 'waterheater1': {'type': 'electric water heating appliance'},\n 'waterheater2': {'type': 'electric water heating appliance'},\n 'wellpump1': {}, # new field, need mapping\n 'winecooler1': {'type': 'appliance'},\n 'leg1v': {},\n 'leg2v': {},\n 'grid_l1': { 'type': 'grid l1'}, # new field\n 'grid_l2': { 'type': 'grid l2'} # new field\n}\n\nfeed_ignore = ['solar', 'solar2', 'grid', 'leg1v', 'leg2v', 'battery1', 'circpump1',\n 'sewerpump1', 'sumppump1', 'wellpump1']\n\nLEVEL_NAMES = ['physical_quantity', 'type']\n\n\ndef fetch_appliance_typedict():\n appliance_types = []\n for k, v in feed_mapping.items():\n if bool(v):\n appliance_types.append(v['type'])\n return appliance_types\n\n\nclass NilmMetadataError(Exception):\n pass\n\n\ndef convert_yaml_to_hdf5(yaml_dir, hdf_filename):\n \"\"\"Converts a NILM Metadata YAML instance to HDF5.\n\n Also does a set of sanity checks on the metadata.\n\n Parameters\n ----------\n yaml_dir : str\n Directory path of all *.YAML files describing this dataset.\n hdf_filename : str\n Filename and path of output HDF5 file. If file exists then will\n attempt to append metadata to file. If file does not exist then\n will create it.\n \"\"\"\n\n assert isdir(yaml_dir)\n store = pd.HDFStore(hdf_filename, 'a')\n\n # Load Dataset and MeterDevice metadata\n metadata = _load_file(yaml_dir, 'dataset.yaml')\n meter_devices = _load_file(yaml_dir, 'meter_devices.yaml')\n metadata['meter_devices'] = meter_devices\n store.root._v_attrs.metadata = metadata\n\n # Load buildings\n building_filenames = [fname for fname in listdir(yaml_dir)\n if fname.startswith('building')\n and fname.endswith('.yaml')]\n\n for fname in building_filenames:\n building = splitext(fname)[0] # e.g. 'building1'\n try:\n group = store._handle.create_group('/', building)\n except:\n group = store._handle.get_node('/' + building)\n building_metadata = _load_file(yaml_dir, fname)\n elec_meters = building_metadata['elec_meters']\n _deep_copy_meters(elec_meters)\n _set_data_location(elec_meters, building)\n _sanity_check_meters(elec_meters, meter_devices)\n _sanity_check_appliances(building_metadata)\n group._f_setattr('metadata', building_metadata)\n\n store.close()\n print(\"Done converting YAML metadata to HDF5!\")\n\n\ndef save_yaml_to_datastore(yaml_dir, store):\n \"\"\"Saves a NILM Metadata YAML instance to a NILMTK datastore.\n\n Parameters\n ----------\n yaml_dir : str\n Directory path of all *.YAML files describing this dataset.\n store : DataStore\n DataStore object\n \"\"\"\n\n assert isdir(yaml_dir)\n\n # Load Dataset and MeterDevice metadata\n metadata = _load_file(yaml_dir, 'dataset.yaml')\n print(\"Loaded metadata\")\n meter_devices = _load_file(yaml_dir, 'meter_devices.yaml')\n metadata['meter_devices'] = meter_devices\n store.save_metadata('/', metadata)\n\n # Load buildings\n building_filenames = [fname for fname in listdir(yaml_dir)\n if fname.startswith('building')\n and fname.endswith('.yaml')]\n\n for fname in building_filenames:\n building = splitext(fname)[0] # e.g. 
'building1'\n building_metadata = _load_file(yaml_dir, fname)\n elec_meters = building_metadata['elec_meters']\n _deep_copy_meters(elec_meters)\n _set_data_location(elec_meters, building)\n _sanity_check_meters(elec_meters, meter_devices)\n _sanity_check_appliances(building_metadata)\n store.save_metadata('/' + building, building_metadata)\n\n store.close()\n print(\"Done converting YAML metadata to HDF5!\")\n\n\ndef _load_file(yaml_dir, yaml_filename):\n yaml_full_filename = join(yaml_dir, yaml_filename)\n if isfile(yaml_full_filename):\n with open(yaml_full_filename, 'rb') as fh:\n return yaml.safe_load(fh)\n else:\n print(yaml_full_filename, \"not found.\", file=stderr)\n\n\ndef _deep_copy_meters(elec_meters):\n for meter_instance, meter in iteritems(elec_meters):\n elec_meters[meter_instance] = deepcopy(meter)\n\n\ndef _set_data_location(elec_meters, building):\n \"\"\"Goes through each ElecMeter in elec_meters and sets `data_location`.\n Modifies `elec_meters` in place.\n\n Parameters\n ----------\n elec_meters : dict of dicts\n building : string e.g. 'building1'\n \"\"\"\n for meter_instance in elec_meters:\n data_location = '/{:s}/elec/meter{:d}'.format(building, meter_instance)\n elec_meters[meter_instance]['data_location'] = data_location\n\n\ndef _sanity_check_meters(meters, meter_devices):\n \"\"\"\n Checks:\n * Make sure all meter devices map to meter_device keys\n * Makes sure all IDs are unique\n \"\"\"\n if len(meters) != len(set(meters)):\n raise NilmMetadataError(\"elec_meters not unique\")\n\n for meter_instance, meter in iteritems(meters):\n assert meter['device_model'] in meter_devices\n\n\ndef _sanity_check_appliances(building_metadata):\n \"\"\"\n Checks:\n * Make sure we use proper NILM Metadata names.\n * Make sure there aren't multiple appliance types with same instance\n \"\"\"\n appliances = building_metadata['appliances']\n appliance_types = get_appliance_types()\n building_instance = building_metadata['instance']\n REQUIRED_KEYS = ['type', 'instance', 'meters']\n\n if bool(appliance_types) is False:\n appliance_types = fetch_appliance_typedict()\n # print(appliance_types)\n # raise NotImplementedError(appliances)\n for appliance in appliances:\n if not isinstance(appliance, dict):\n raise NilmMetadataError(\n \"Appliance '{}' is {} when it should be a dict.\"\n .format(appliance, type(appliance)))\n\n # Generate string for specifying which is the problematic\n # appliance for error messages:\n appl_string = (\"ApplianceType '{}', instance '{}', in building {:d}\"\n .format(appliance.get('type'),\n appliance.get('instance'),\n building_instance))\n\n # Check required keys are all present\n for key in REQUIRED_KEYS:\n if key not in appliance:\n raise NilmMetadataError(\"key '{}' missing for {}\"\n .format(key, appl_string))\n\n appl_type = appliance['type']\n\n # check all appliance names are valid\n if appl_type not in appliance_types:\n raise NilmMetadataError(\n appl_string + \" not in appliance_types.\"\n \" In other words, '{}' is not a recognised appliance type.\"\n .format(appl_type))\n\n # Check appliance references valid meters\n meters = appliance['meters']\n if len(meters) != len(set(meters)):\n msg = \"In {}, meters '{}' not unique.\".format(appl_string, meters)\n raise NilmMetadataError(msg)\n\n for meter in meters:\n if meter != 0 and meter not in building_metadata['elec_meters']:\n msg = (\"In ({}), meter '{:d}' is not in\"\n \" this building's 'elec_meters'\"\n .format(appl_string, meter))\n raise NilmMetadataError(msg)\n\n # Check list of 
instances for each appliance is valid.\n appliance_instances = {}\n for appliance in appliances:\n appl_type = appliance['type']\n instances = appliance_instances.setdefault(appl_type, [])\n instances.append(appliance['instance'])\n\n for appliance_type, instances in iteritems(appliance_instances):\n instances.sort()\n correct_instances = list(range(1, len(instances) + 1))\n if instances != correct_instances:\n msg = (\"In building {:d}, appliance '{}' appears {:d} time(s).\"\n \" Yet the list of instances is '{}'. The list of instances\"\n \" should be '{}'.\"\n .format(building_metadata['instance'], appliance_type,\n len(instances), instances, correct_instances))\n raise NilmMetadataError(msg)\n"
] |
[
[
"pandas.HDFStore"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mitchellvitez/tensorflow
|
[
"675799d1056fd05acc61ba85bdf8db9b86be0139"
] |
[
"tensorflow/python/ops/math_ops.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Math Operations.\n\nNote: Functions taking `Tensor` arguments can also take anything accepted by\n`tf.convert_to_tensor`.\n\nNote: Elementwise binary operations in TensorFlow follow [numpy-style\nbroadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n\nTensorFlow provides a variety of math functions including:\n\n* Basic arithmetic operators and trigonometric functions.\n* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)\n* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)\n* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)\n* Segment functions (like: `tf.math.segment_sum`)\n\nSee: `tf.linalg` for matrix and tensor functions.\n\n<a id=Segmentation></a>\n\n## About Segmentation\n\nTensorFlow provides several operations that you can use to perform common\nmath computations on tensor segments.\nHere a segmentation is a partitioning of a tensor along\nthe first dimension, i.e. it defines a mapping from the first dimension onto\n`segment_ids`. The `segment_ids` tensor should be the size of\nthe first dimension, `d0`, with consecutive IDs in the range `0` to `k`,\nwhere `k<d0`.\nIn particular, a segmentation of a matrix tensor is a mapping of rows to\nsegments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.segment_sum(c, tf.constant([0, 0, 1]))\n# ==> [[0 0 0 0]\n# [5 6 7 8]]\n```\n\nThe standard `segment_*` functions assert that the segment indices are sorted.\nIf you have unsorted indices use the equivalent `unsorted_segment_` function.\nThese functions take an additional argument `num_segments` so that the output\ntensor can be efficiently allocated.\n\n``` python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\ntf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 6, 8, 10, 12],\n# [-1, -2, -3, -4]]\n```\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nfrom six.moves import builtins\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import gen_sparse_ops\n# go/tf-wildcard-import\n# pylint: 
disable=wildcard-import\nfrom tensorflow.python.ops.gen_math_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Aliases for some automatically-generated names.\nlinspace = gen_math_ops.lin_space\nnextafter = gen_math_ops.next_after\n\narg_max = deprecation.deprecated(None, \"Use `tf.math.argmax` instead\")(arg_max) # pylint: disable=used-before-assignment\narg_min = deprecation.deprecated(None, \"Use `tf.math.argmin` instead\")(arg_min) # pylint: disable=used-before-assignment\ntf_export(v1=[\"arg_max\"])(arg_max)\ntf_export(v1=[\"arg_min\"])(arg_min)\n\n\n# This is set by resource_variable_ops.py. It is included in this way since\n# there is a circular dependency between math_ops and resource_variable_ops\n_resource_variable_type = None\n\n\ndef _set_doc(doc):\n\n def _decorator(func):\n func.__doc__ = doc\n return func\n\n return _decorator\n\n\n# pylint: disable=redefined-builtin\n@tf_export(v1=[\"math.argmax\", \"argmax\"])\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_max.__doc__.replace(\"dimensions\",\n \"axes\").replace(\"dimension\", \"axis\"))\ndef argmax(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dimension\",\n dimension)\n return argmax_v2(input, axis, output_type, name)\n\n\n@tf_export(\"math.argmax\", \"argmax\", v1=[])\ndef argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):\n \"\"\"Returns the index with the largest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n For example:\n\n >>> A = tf.constant([2, 20, 30, 3, 6])\n >>> tf.math.argmax(A) # A[2] is maximum in tensor A\n <tf.Tensor: shape=(), dtype=int64, numpy=2>\n >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],\n ... [14, 45, 23, 5, 27]])\n >>> tf.math.argmax(B, 0)\n <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>\n >>> tf.math.argmax(B, 1)\n <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>\n\n Args:\n input: A `Tensor`.\n axis: An integer, the axis to reduce across. Default to 0.\n output_type: An optional output dtype (`tf.int32` or `tf.int64`). 
Defaults\n to `tf.int64`.\n name: An optional name for the operation.\n\n Returns:\n A `Tensor` of type `output_type`.\n \"\"\"\n if axis is None:\n axis = 0\n return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)\n\n\n@tf_export(v1=[\"math.argmin\", \"argmin\"])\[email protected]_args(None, \"Use the `axis` argument instead\",\n \"dimension\")\n@_set_doc(\n gen_math_ops.arg_min.__doc__.replace(\"dimensions\",\n \"axes\").replace(\"dimension\", \"axis\"))\ndef argmin(input,\n axis=None,\n name=None,\n dimension=None,\n output_type=dtypes.int64):\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dimension\",\n dimension)\n return argmin_v2(input, axis, output_type, name)\n\n\n@tf_export(\"math.argmin\", \"argmin\", v1=[])\ndef argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):\n \"\"\"Returns the index with the smallest value across axes of a tensor.\n\n Note that in case of ties the identity of the return value is not guaranteed.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,\n `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,\n `uint64`.\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n int32 or int64, must be in the range `-rank(input), rank(input))`.\n Describes which axis of the input Tensor to reduce across. For vectors,\n use axis = 0.\n output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to\n `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_type`.\n\n Usage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmin(input = a)\n c = tf.keras.backend.eval(b)\n # c = 0\n # here a[0] = 1 which is the smallest element of a across axis 0\n ```\n \"\"\"\n if axis is None:\n axis = 0\n return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)\n\n\n# pylint: enable=redefined-builtin\n\n\n# pylint: disable=anomalous-backslash-in-string,protected-access\n# pylint: disable=g-docstring-has-escape\n@tf_export(\"math.abs\", \"abs\")\[email protected]_dispatch_support\ndef abs(x, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes the absolute value of a tensor.\n\n Given a tensor of integer or floating-point values, this operation returns a\n tensor of the same type, where each element contains the absolute value of the\n corresponding element in the input.\n\n Given a tensor `x` of complex numbers, this operation returns a tensor of type\n `float32` or `float64` that is the absolute value of each element in `x`. For\n a complex number \\\\(a + bj\\\\), its absolute value is computed as \\\\(\\sqrt{a^2\n + b^2}\\\\). For example:\n\n >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n >>> tf.abs(x)\n <tf.Tensor: shape=(2, 1), dtype=float64, numpy=\n array([[5.25594901],\n [6.60492241]])>\n\n Args:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,\n with absolute values. 
Note, for `complex64` or `complex128` input, the\n returned `Tensor` will be of type `float32` or `float64`, respectively.\n \"\"\"\n with ops.name_scope(name, \"Abs\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex:\n return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)\n return gen_math_ops._abs(x, name=name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n# pylint: disable=redefined-builtin\ndef _bucketize(input, boundaries, name=None):\n return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)\n\n\n# pylint: enable=redefined-builtin\n\n\nclass DivideDelegateWithName(object):\n \"\"\"Use Python2/Python3 division delegation to implement divide for tensors.\"\"\"\n\n def __init__(self, x, name):\n \"\"\"Construct DivideDelegateWithName.\n\n Args:\n x: Tensor to use as left operand in operator overloads\n name: The name that is preferred for the op created.\n \"\"\"\n self.x = x\n self.name = name\n\n def __truediv__(self, y):\n return _truediv_python3(self.x, y, self.name)\n\n def __floordiv__(self, y):\n return floordiv(self.x, y, self.name)\n\n def __div__(self, y):\n return _div_python2(self.x, y, self.name)\n\n\n@tf_export(\"math.divide\", \"divide\")\[email protected]_dispatch_support\ndef divide(x, y, name=None):\n \"\"\"Computes Python style division of `x` by `y`.\n\n For example:\n\n >>> x = tf.constant([16, 12, 11])\n >>> y = tf.constant([4, 6, 2])\n >>> tf.divide(x,y)\n <tf.Tensor: shape=(3,), dtype=float64,\n numpy=array([4. , 2. , 5.5])>\n\n Args:\n x: A `Tensor`\n y: A `Tensor`\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with same shape as input\n \"\"\"\n\n if name is not None:\n # Cannot use tensors operator overload, because it has no way to track\n # override names. Use a dummy class to track the runtime division behavior\n return DivideDelegateWithName(x, name) / y\n else:\n return x / y\n\n\n@tf_export(\"math.multiply\", \"multiply\")\[email protected]_dispatch_support\ndef multiply(x, y, name=None):\n \"\"\"Returns an element-wise x * y.\n\n For example:\n\n >>> x = tf.constant(([1, 2, 3, 4]))\n >>> tf.math.multiply(x, x)\n <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1, 4, 9, 16], dtype=int32)>\n\n Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also\n pass in non-`Tensor` arguments:\n\n >>> tf.math.multiply(7,6)\n <tf.Tensor: shape=(), dtype=int32, numpy=42>\n\n If `x.shape` is not thes same as `y.shape`, they will be broadcast to a\n compatible shape. (More about broadcasting\n [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)\n\n For example:\n\n >>> x = tf.ones([1, 2]);\n >>> y = tf.ones([2, 1]);\n >>> x * y # Taking advantage of operator overriding\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[1., 1.],\n [1., 1.]], dtype=float32)>\n\n Args:\n x: A Tensor. Must be one of the following types: `bfloat16`,\n `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,\n `int16`, `int32`, `int64`, `complex64`, `complex128`.\n y: A `Tensor`. Must have the same type as `x`.\n name: A name for the operation (optional).\n\n Returns:\n\n A `Tensor`. 
Has the same type as `x`.\n\n Raises:\n\n * InvalidArgumentError: When `x` and `y` have incomptatible shapes or types.\n \"\"\"\n\n return gen_math_ops.mul(x, y, name)\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\[email protected](\n \"2016-12-30\",\n \"`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`\")\ndef _mul(x, y, name=None):\n return gen_math_ops.mul(x, y, name)\n\n\n_mul.__doc__ = (\n gen_math_ops.mul.__doc__ + (\"\" if _mul.__doc__ is None else _mul.__doc__))\n\n\n@tf_export(\"math.subtract\", \"subtract\")\[email protected]_dispatch_support\ndef subtract(x, y, name=None):\n return gen_math_ops.sub(x, y, name)\n\n\nsubtract.__doc__ = gen_math_ops.sub.__doc__.replace(\"`Sub`\", \"`tf.subtract`\")\n\n\n# TODO(aselle): put deprecation in after another round of global code changes\[email protected](\n \"2016-12-30\",\n \"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`\")\ndef _sub(x, y, name=None):\n return gen_math_ops.sub(x, y, name)\n\n\n_sub.__doc__ = (\n gen_math_ops.sub.__doc__ + (\"\" if _sub.__doc__ is None else _sub.__doc__))\n\nnegative = gen_math_ops.neg\n\n\n# pylint: disable=g-docstring-has-escape\[email protected](\n \"2016-12-30\",\n \"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`\")\ndef _neg(x, name=None):\n \"\"\"Computes numerical negative value element-wise.\n\n I.e., \\\\(y = -x\\\\).\n\n Args:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n \"\"\"\n return negative(x, name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n@tf_export(v1=[\"math.scalar_mul\", \"scalar_mul\"])\ndef scalar_mul(scalar, x, name=None):\n \"\"\"Multiplies a scalar times a `Tensor` or `IndexedSlices` object.\n\n Intended for use in gradient code which might deal with `IndexedSlices`\n objects, which are easy to multiply by a scalar but more expensive to\n multiply with arbitrary tensors.\n\n Args:\n scalar: A 0-D scalar `Tensor`. Must have known shape.\n x: A `Tensor` or `IndexedSlices` to be scaled.\n name: A name for the operation (optional).\n\n Returns:\n `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.\n\n Raises:\n ValueError: if scalar is not a 0-D `scalar`.\n \"\"\"\n scalar = ops.convert_to_tensor(\n scalar, dtype=x.dtype.base_dtype, name=\"scalar\")\n shape = scalar.get_shape()\n if shape.ndims == 0:\n if isinstance(x, ops.IndexedSlices):\n return ops.IndexedSlices(\n gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)\n else:\n return gen_math_ops.mul(scalar, x, name)\n else:\n raise ValueError(\"Only scalar multiply works, got shape %s\" % shape)\n\n\n@tf_export(\"math.scalar_mul\", \"scalar_mul\", v1=[])\n@_set_doc(scalar_mul.__doc__)\ndef scalar_mul_v2(scalar, x, name=None):\n with ops.name_scope(name, \"scalar_mul\", [x]) as name:\n return scalar_mul(scalar, x, name)\n\n\n@tf_export(\"math.pow\", \"pow\")\[email protected]_dispatch_support\ndef pow(x, y, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes the power of one value to another.\n\n Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n corresponding elements in `x` and `y`. 
For example:\n\n ```python\n x = tf.constant([[2, 2], [3, 3]])\n y = tf.constant([[8, 16], [2, 3]])\n tf.pow(x, y) # [[256, 65536], [9, 27]]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n `complex64`, or `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"Pow\", [x]) as name:\n return gen_math_ops._pow(x, y, name=name)\n\n\n# pylint: disable=redefined-builtin,redefined-outer-name\n@tf_export(\"dtypes.complex\", \"complex\")\[email protected]_dispatch_support\ndef complex(real, imag, name=None):\n r\"\"\"Converts two real numbers to a complex number.\n\n Given a tensor `real` representing the real part of a complex number, and a\n tensor `imag` representing the imaginary part of a complex number, this\n operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n *a* represents the `real` part and *b* represents the `imag` part.\n\n The input tensors `real` and `imag` must have the same shape.\n\n For example:\n\n ```python\n real = tf.constant([2.25, 3.25])\n imag = tf.constant([4.75, 5.75])\n tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n ```\n\n Args:\n real: A `Tensor`. Must be one of the following types: `float32`, `float64`.\n imag: A `Tensor`. Must have the same type as `real`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `complex64` or `complex128`.\n\n Raises:\n TypeError: Real and imag must be correct types\n \"\"\"\n real = ops.convert_to_tensor(real, name=\"real\")\n imag = ops.convert_to_tensor(imag, name=\"imag\")\n with ops.name_scope(name, \"Complex\", [real, imag]) as name:\n input_types = (real.dtype, imag.dtype)\n if input_types == (dtypes.float64, dtypes.float64):\n Tout = dtypes.complex128\n elif input_types == (dtypes.float32, dtypes.float32):\n Tout = dtypes.complex64\n else:\n raise TypeError(\"real and imag have incorrect types: \"\n \"{} {}\".format(real.dtype.name, imag.dtype.name))\n return gen_math_ops._complex(real, imag, Tout=Tout, name=name)\n\n\n@tf_export(\"math.sign\", \"sign\")\[email protected]_dispatch_support\ndef sign(x, name=None):\n \"\"\"Returns an element-wise indication of the sign of a number.\n\n y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0.\n\n For complex numbers, y = sign(x) = x / |x| if x != 0, otherwise y = 0.\n\n Example usage:\n\n >>> tf.math.sign([0., 2., -3.])\n <tf.Tensor: ... numpy=array([ 0., 1., -1.], dtype=float32)>\n\n Args:\n x: A Tensor. Must be one of the following types: bfloat16, half, float32,\n float64, int32, int64, complex64, complex128.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor. 
Has the same type as x.\n\n If x is a SparseTensor, returns SparseTensor(x.indices,\n tf.math.sign(x.values, ...), x.dense_shape).\n \"\"\"\n x = ops.convert_to_tensor(x)\n if x.dtype in (dtypes.complex64, dtypes.complex128):\n return gen_math_ops.div_no_nan(\n x,\n cast(\n gen_math_ops.complex_abs(\n x,\n Tout=dtypes.float32\n if x.dtype == dtypes.complex64 else dtypes.float64),\n dtype=x.dtype),\n name=name)\n return gen_math_ops.sign(x, name=name)\n\n\n@tf_export(\"math.real\", v1=[\"math.real\", \"real\"])\[email protected]_endpoints(\"real\")\[email protected]_dispatch_support\ndef real(input, name=None):\n r\"\"\"Returns the real part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the real part of each element in `input` considered as a complex number.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.math.real(x) # [-2.25, 3.25]\n ```\n\n If `input` is already real, it is returned unchanged.\n\n Args:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Real\", [input]) as name:\n input = ops.convert_to_tensor(input, name=\"input\")\n if input.dtype.is_complex:\n real_dtype = input.dtype.real_dtype\n return gen_math_ops.real(input, Tout=real_dtype, name=name)\n else:\n return input\n\n\n@tf_export(\"math.imag\", v1=[\"math.imag\", \"imag\"])\[email protected]_endpoints(\"imag\")\[email protected]_dispatch_support\ndef imag(input, name=None):\n r\"\"\"Returns the imaginary part of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the imaginary part of each element in `input` considered as a complex\n number. If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```python\n x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\n tf.math.imag(x) # [4.75, 5.75]\n ```\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Imag\", [input]) as name:\n input = ops.convert_to_tensor(input, name=\"input\")\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\n@tf_export(\"math.angle\", v1=[\"math.angle\", \"angle\"])\[email protected]_endpoints(\"angle\")\[email protected]_dispatch_support\ndef angle(input, name=None):\n r\"\"\"Returns the element-wise argument of a complex (or real) tensor.\n\n Given a tensor `input`, this operation returns a tensor of type `float` that\n is the argument of each element in `input` considered as a complex number.\n\n The elements in `input` are considered to be complex numbers of the form\n \\\\(a + bj\\\\), where *a* is the real part and *b* is the imaginary part.\n If `input` is real then *b* is zero by definition.\n\n The argument returned by this function is of the form \\\\(atan2(b, a)\\\\).\n If `input` is real, a tensor of all zeros is returned.\n\n For example:\n\n ```\n input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)\n tf.math.angle(input).numpy()\n # ==> array([2.0131705, 1.056345 ], dtype=float32)\n ```\n\n Args:\n input: A `Tensor`. 
Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `float32` or `float64`.\n \"\"\"\n with ops.name_scope(name, \"Angle\", [input]) as name:\n input = ops.convert_to_tensor(input, name=\"input\")\n if input.dtype.is_complex:\n return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)\n\n\n# pylint: enable=redefined-outer-name,redefined-builtin\n\n\n@tf_export(\"math.round\", \"round\")\[email protected]_dispatch_support\ndef round(x, name=None): # pylint: disable=redefined-builtin\n \"\"\"Rounds the values of a tensor to the nearest integer, element-wise.\n\n Rounds half to even. Also known as bankers rounding. If you want to round\n according to the current system rounding mode use tf::cint.\n For example:\n\n ```python\n x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])\n tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n \"\"\"\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_integer:\n return x\n else:\n return gen_math_ops.round(x, name=name)\n\n\n@tf_export(\"cast\", \"dtypes.cast\")\[email protected]_dispatch_support\ndef cast(x, dtype, name=None):\n \"\"\"Casts a tensor to a new type.\n\n The operation casts `x` (in case of `Tensor`) or `x.values`\n (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.\n\n For example:\n\n >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)\n >>> tf.dtypes.cast(x, tf.int32)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>\n\n The operation supports data types (for `x` and `dtype`) of\n `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,\n `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.\n In case of casting from complex types (`complex64`, `complex128`) to real\n types, only the real part of `x` is returned. In case of casting from real\n types to complex types (`complex64`, `complex128`), the imaginary part of the\n returned value is set to `0`. The handling of complex types here matches the\n behavior of numpy.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could\n be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,\n `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,\n `bfloat16`.\n dtype: The destination type. The list of supported dtypes is the same as\n `x`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and\n same type as `dtype`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `dtype`.\n \"\"\"\n base_type = dtypes.as_dtype(dtype).base_dtype\n if isinstance(x,\n (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:\n return x\n with ops.name_scope(name, \"Cast\", [x]) as name:\n if isinstance(x, sparse_tensor.SparseTensor):\n values_cast = cast(x.values, base_type, name=name)\n x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)\n elif isinstance(x, ops.IndexedSlices):\n values_cast = cast(x.values, base_type, name=name)\n x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)\n else:\n # TODO(josh11b): If x is not already a Tensor, we could return\n # ops.convert_to_tensor(x, dtype=dtype, ...) 
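# A small sketch of two behaviors described above, assuming TensorFlow 2.x as
# `tf`: `angle` returns zeros for real (non-complex) input, and `round` rounds
# halves to even and passes integer tensors through unchanged.
import tensorflow as tf

print(tf.math.angle(tf.constant([-1.0, 2.0])))   # [0., 0.] for a real dtype
print(tf.round(tf.constant([0.5, 1.5, 2.5])))    # [0., 2., 2.] (half to even)
print(tf.round(tf.constant([3, 4])))             # integers are returned as-is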
here, but that\n # allows some conversions that cast() can't do, e.g. casting numbers to\n # strings.\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.base_dtype != base_type:\n x = gen_math_ops.cast(x, base_type, name=name)\n if x.dtype.is_complex and base_type.is_floating:\n logging.warn(\"Casting complex to real discards imaginary part.\")\n return x\n\n\n@tf_export(\"dtypes.saturate_cast\", \"saturate_cast\")\[email protected]_dispatch_support\ndef saturate_cast(value, dtype, name=None):\n \"\"\"Performs a safe saturating cast of `value` to `dtype`.\n\n This function casts the input to `dtype` without applying any scaling. If\n there is a danger that values would over or underflow in the cast, this op\n applies the appropriate clamping before the cast.\n\n Args:\n value: A `Tensor`.\n dtype: The desired output `DType`.\n name: A name for the operation (optional).\n\n Returns:\n `value` safely cast to `dtype`.\n \"\"\"\n # When casting to a type with smaller representable range, clamp.\n # Note that this covers casting to unsigned types as well.\n with ops.name_scope(name, \"saturate_cast\", [value]) as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n dtype = dtypes.as_dtype(dtype).base_dtype\n if value.dtype.min < dtype.min:\n value = gen_math_ops.maximum(\n value,\n ops.convert_to_tensor(dtype.min, dtype=value.dtype, name=\"min\"))\n if value.dtype.max > dtype.max:\n value = gen_math_ops.minimum(\n value,\n ops.convert_to_tensor(dtype.max, dtype=value.dtype, name=\"max\"))\n return cast(value, dtype, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_float\"])\ndef to_float(x, name=\"ToFloat\"):\n \"\"\"Casts a tensor to type `float32`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `float32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float32`.\n \"\"\"\n return cast(x, dtypes.float32, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_double\"])\ndef to_double(x, name=\"ToDouble\"):\n \"\"\"Casts a tensor to type `float64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `float64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `float64`.\n \"\"\"\n return cast(x, dtypes.float64, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_int32\"])\ndef to_int32(x, name=\"ToInt32\"):\n \"\"\"Casts a tensor to type `int32`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `int32`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `int32`.\n \"\"\"\n return cast(x, dtypes.int32, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_int64\"])\ndef to_int64(x, name=\"ToInt64\"):\n \"\"\"Casts a tensor to type `int64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `int64`.\n\n Raises:\n TypeError: If `x` cannot be 
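# A minimal sketch of `saturate_cast` versus a plain `cast`, assuming
# TensorFlow 2.x as `tf`; what a plain cast does with out-of-range values is
# not guaranteed, which is exactly what the clamping above avoids.
import tensorflow as tf

x = tf.constant([-1.5, 300.0], dtype=tf.float32)
print(tf.dtypes.saturate_cast(x, tf.uint8))   # [0, 255]: clamped to the uint8 range

# Casting complex to real keeps only the real part (and logs the warning above).
z = tf.constant([1.0 + 2.0j], dtype=tf.complex64)
print(tf.cast(z, tf.float32))                 # [1.]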
cast to the `int64`.\n \"\"\"\n return cast(x, dtypes.int64, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_bfloat16\"])\ndef to_bfloat16(x, name=\"ToBFloat16\"):\n \"\"\"Casts a tensor to type `bfloat16`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `bfloat16`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `bfloat16`.\n \"\"\"\n return cast(x, dtypes.bfloat16, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_complex64\"])\ndef to_complex64(x, name=\"ToComplex64\"):\n \"\"\"Casts a tensor to type `complex64`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `complex64`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `complex64`.\n \"\"\"\n return cast(x, dtypes.complex64, name=name)\n\n\[email protected](date=None, instructions=\"Use `tf.cast` instead.\")\n@tf_export(v1=[\"to_complex128\"])\ndef to_complex128(x, name=\"ToComplex128\"):\n \"\"\"Casts a tensor to type `complex128`.\n\n Args:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `complex128`.\n\n Raises:\n TypeError: If `x` cannot be cast to the `complex128`.\n \"\"\"\n return cast(x, dtypes.complex128, name=name)\n\n\nops.Tensor._override_operator(\"__neg__\", gen_math_ops.neg)\nops.Tensor._override_operator(\"__abs__\", abs)\n# __invert__ corresponds to the ~ operator. Here we follow the numpy convention\n# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean\n# tensors and will throw a TypeError if used on nonboolean arrays\nops.Tensor._override_operator(\"__invert__\", gen_math_ops.logical_not)\n\n\ndef _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):\n \"\"\"Register operators with different tensor and scalar versions.\n\n If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,\n sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.\n\n Args:\n func: the operator\n op_name: name of the operator being overridden\n clazz_object: class to override for. 
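# A brief sketch, assuming TensorFlow 2.x as `tf`: the deprecated `to_*`
# helpers are thin wrappers around `cast`, and the `__invert__` override above
# makes `~` an elementwise logical NOT for boolean tensors only.
import tensorflow as tf

x = tf.constant([1, 2, 3])
print(tf.compat.v1.to_float(x))   # equivalent to tf.cast(x, tf.float32)

b = tf.constant([True, False])
print(~b)                         # [False, True]; non-boolean input raises an error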
Either `Tensor` or `SparseTensor`.\n \"\"\"\n\n def binary_op_wrapper(x, y):\n with ops.name_scope(None, op_name, [x, y]) as name:\n if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):\n return func(x, y, name=name)\n elif not isinstance(y, sparse_tensor.SparseTensor):\n try:\n y = ops.convert_to_tensor_v2(\n y, dtype_hint=x.dtype.base_dtype, name=\"y\")\n except TypeError:\n # If the RHS is not a tensor, it might be a tensor aware object\n # that can implement the operator with knowledge of itself\n # and the tensor.\n if hasattr(type(y), \"__r%s__\" % op_name):\n return NotImplemented\n else:\n raise\n return func(x, y, name=name)\n\n def binary_op_wrapper_sparse(sp_x, y):\n with ops.name_scope(None, op_name, [sp_x, y]) as name:\n y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name=\"y\")\n return sparse_tensor.SparseTensor(\n sp_x.indices,\n func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),\n sp_x.dense_shape)\n\n def r_binary_op_wrapper(y, x):\n with ops.name_scope(None, op_name, [x, y]) as name:\n x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name=\"x\")\n return func(x, y, name=name)\n\n # Propagate func.__doc__ to the wrappers\n try:\n doc = func.__doc__\n except AttributeError:\n doc = None\n binary_op_wrapper.__doc__ = doc\n r_binary_op_wrapper.__doc__ = doc\n binary_op_wrapper_sparse.__doc__ = doc\n\n if clazz_object is ops.Tensor:\n clazz_object._override_operator(\"__%s__\" % op_name, binary_op_wrapper)\n del binary_op_wrapper\n clazz_object._override_operator(\"__r%s__\" % op_name, r_binary_op_wrapper)\n del r_binary_op_wrapper\n else:\n clazz_object._override_operator(\"__%s__\" % op_name,\n binary_op_wrapper_sparse)\n del binary_op_wrapper_sparse\n\n\n# Conversion table for __truediv__. None entries mean no conversion required.\n_TRUEDIV_TABLE = {\n dtypes.uint8: dtypes.float32,\n dtypes.int8: dtypes.float32,\n dtypes.uint16: dtypes.float32,\n dtypes.int16: dtypes.float32,\n dtypes.int32: dtypes.float64,\n dtypes.int64: dtypes.float64,\n dtypes.bfloat16: None,\n dtypes.float16: None,\n dtypes.float32: None,\n dtypes.float64: None,\n dtypes.complex64: None,\n dtypes.complex128: None,\n}\n\n\n# NOTE: the support of \"sparse (true)div dense\" is currently not baked in into\n# \"tf.(true_)div()\". 
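# A minimal illustration of the reversed-operator wrappers registered by
# `_OverrideBinaryOperatorHelper` above, assuming TensorFlow 2.x as `tf`: a
# Python scalar on the left-hand side still dispatches to the TensorFlow op.
import tensorflow as tf

t = tf.constant([1.0, 3.0])
print(2.0 - t)    # [1., -1.]   via the __rsub__ wrapper
print(12.0 / t)   # [12., 4.]   via the __rtruediv__ wrapper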
Until such an API decision is made, the supported usage is\n# to explicitly use the \"/\" operator to invoke either truediv or div.\ndef _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):\n \"\"\"Internal helper function for 'sp_t / dense_t'.\"\"\"\n with ops.name_scope(name, \"truediv\",\n [sp_indices, sp_values, sp_shape, y]) as name:\n sp_values = ops.convert_to_tensor(sp_values, name=\"sp_values\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = sp_values.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n sp_values = cast(sp_values, dtype)\n y = cast(y, dtype)\n return gen_sparse_ops.sparse_dense_cwise_div(\n sp_indices, sp_values, sp_shape, y, name=name)\n\n\ndef _truediv_python3(x, y, name=None):\n with ops.name_scope(name, \"truediv\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\")\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n try:\n dtype = _TRUEDIV_TABLE[x_dtype]\n except KeyError:\n raise TypeError(\"Invalid dtype %r in __truediv__\" % x_dtype)\n if dtype is not None:\n x = cast(x, dtype)\n y = cast(y, dtype)\n return gen_math_ops.real_div(x, y, name=name)\n\n\ndef _div_python2(x, y, name=None):\n \"\"\"Divide two values using Python 2 semantics.\n\n Used for Tensor.__div__.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n\n with ops.name_scope(name, \"div\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n if x_dtype.is_floating or x_dtype.is_complex:\n return gen_math_ops.real_div(x, y, name=name)\n else:\n return gen_math_ops.floor_div(x, y, name=name)\n\n\n@tf_export(\"math.truediv\", \"truediv\")\[email protected]_dispatch_support\ndef truediv(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 3 division operator semantics).\n\n NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n division operator semantics.\n\n This function forces Python 3 division operator semantics where all integer\n arguments are cast to floating types first. This op is generated by normal\n `x / y` division in Python 3 and in Python 2.7 with\n `from __future__ import division`. If you want integer division that rounds\n down, use `x // y` or `tf.math.floordiv`.\n\n `x` and `y` must have the same numeric type. If the inputs are floating\n point, the output will have the same type. 
If the inputs are integral, the\n inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n and `int64` (matching the behavior of Numpy).\n\n Args:\n x: `Tensor` numerator of numeric type.\n y: `Tensor` denominator of numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` evaluated in floating point.\n\n Raises:\n TypeError: If `x` and `y` have different dtypes.\n \"\"\"\n return _truediv_python3(x, y, name)\n\n\[email protected](\n date=None,\n instructions=\"Deprecated in favor of operator or tf.math.divide.\")\n@tf_export(v1=[\"div\"])\ndef div(x, y, name=None):\n \"\"\"Divides x / y elementwise (using Python 2 division operator semantics).\n\n NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n 3 division operator semantics.\n\n This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\n and `y` are both integers then the result will be an integer. This is in\n contrast to Python 3, where division with `/` is always a float while division\n with `//` is always an integer.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` returns the quotient of x and y.\n \"\"\"\n return _div_python2(x, y, name)\n\n\n@tf_export(\"math.divide_no_nan\", v1=[\"math.divide_no_nan\", \"div_no_nan\"])\[email protected]_endpoints(\"div_no_nan\")\[email protected]_dispatch_support\ndef div_no_nan(x, y, name=None):\n \"\"\"Computes a safe divide which returns 0 if the y is zero.\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`.\n y: A `Tensor` whose dtype is compatible with `x`.\n name: A name for the operation (optional).\n\n Returns:\n The element-wise value of the x divided by y.\n \"\"\"\n\n with ops.name_scope(name, \"div_no_nan\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n return gen_math_ops.div_no_nan(x, y, name=name)\n\n\n@tf_export(\"math.multiply_no_nan\")\[email protected]_dispatch_support\ndef multiply_no_nan(x, y, name=None):\n \"\"\"Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.\n\n Args:\n x: A `Tensor`. 
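# A small sketch, assuming TensorFlow 2.x as `tf`, of the division variants
# defined above: `truediv` promotes integers per `_TRUEDIV_TABLE`, the
# deprecated `tf.compat.v1.div` keeps Python 2 integer semantics, and
# `divide_no_nan` maps division by zero to 0.
import tensorflow as tf

a = tf.constant(7)
b = tf.constant(2)
print(tf.math.truediv(a, b))        # 3.5, dtype float64 (int32 promotes to float64)
print(tf.compat.v1.div(a, b))       # 3, dtype int32 (floor division for integers)

x = tf.constant([3.0, 4.0])
y = tf.constant([2.0, 0.0])
print(tf.math.divide_no_nan(x, y))  # [1.5, 0.]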
Must be one of the following types: `float32`, `float64`.\n y: A `Tensor` whose dtype is compatible with `x`.\n name: A name for the operation (optional).\n\n Returns:\n The element-wise value of the x times y.\n \"\"\"\n\n with ops.name_scope(name, \"multiply_no_nan\", [x, y]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n y = ops.convert_to_tensor(y, name=\"y\", dtype=x.dtype.base_dtype)\n x_dtype = x.dtype.base_dtype\n y_dtype = y.dtype.base_dtype\n if x_dtype != y_dtype:\n raise TypeError(\"x and y must have the same dtype, got %r != %r\" %\n (x_dtype, y_dtype))\n return gen_math_ops.mul_no_nan(x, y, name=name)\n\n\n# TODO(aselle): This should be removed\nmod = gen_math_ops.floor_mod\n\n\n# TODO(aselle): Deprecate this once all internal functionality uses\n# tf.truncatediv\n@tf_export(\"math.floordiv\", v1=[\"math.floordiv\", \"floordiv\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"floordiv\")\ndef floordiv(x, y, name=None):\n \"\"\"Divides `x / y` elementwise, rounding toward the most negative integer.\n\n The same as `tf.compat.v1.div(x,y)` for integers, but uses\n `tf.floor(tf.compat.v1.div(x,y))` for\n floating point arguments so that the result is always an integer (though\n possibly an integer represented as floating point). This op is generated by\n `x // y` floor division in Python 3 and in Python 2.7 with\n `from __future__ import division`.\n\n `x` and `y` must have the same type, and the result will have the same type\n as well.\n\n Args:\n x: `Tensor` numerator of real numeric type.\n y: `Tensor` denominator of real numeric type.\n name: A name for the operation (optional).\n\n Returns:\n `x / y` rounded down.\n\n Raises:\n TypeError: If the inputs are complex.\n \"\"\"\n with ops.name_scope(name, \"floordiv\", [x, y]) as name:\n return gen_math_ops.floor_div(x, y, name=name)\n\n\nrealdiv = gen_math_ops.real_div\ntruncatediv = gen_math_ops.truncate_div\n# TODO(aselle): Rename this to floordiv when we can.\nfloor_div = gen_math_ops.floor_div\ntruncatemod = gen_math_ops.truncate_mod\nfloormod = gen_math_ops.floor_mod\n\n\ndef _add_dispatch(x, y, name=None):\n \"\"\"Dispatches to add for strings and add_v2 for all other types.\"\"\"\n if x.dtype == dtypes.string:\n return gen_math_ops.add(x, y, name=name)\n else:\n return gen_math_ops.add_v2(x, y, name=name)\n\n\ndef _mul_dispatch(x, y, name=None):\n \"\"\"Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\"\"\"\n is_tensor_y = isinstance(y, ops.Tensor)\n if is_tensor_y:\n return gen_math_ops.mul(x, y, name=name)\n else:\n assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.\n new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,\n y.dense_shape, x, name)\n return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)\n\n\n# NOTE(aselle): When integer division is added for sparse_dense_cwise,\n# div, truediv, and floordiv should be delegated appropriately for\n# Python sematnics, analogous to dense cwise tensor operations.\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, \"div\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(_sparse_dense_truediv, \"truediv\",\n sparse_tensor.SparseTensor)\n_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, \"mul\",\n sparse_tensor.SparseTensor)\n\n_OverrideBinaryOperatorHelper(_add_dispatch, \"add\")\n_OverrideBinaryOperatorHelper(gen_math_ops.sub, \"sub\")\n_OverrideBinaryOperatorHelper(_mul_dispatch, \"mul\")\n_OverrideBinaryOperatorHelper(_div_python2, 
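# A brief sketch, assuming TensorFlow 2.x as `tf`, of `multiply_no_nan` and of
# the floor-division/modulo pair aliased above: the quotient rounds toward the
# most negative integer, and x == y * floordiv(x, y) + floormod(x, y) holds.
import tensorflow as tf

print(tf.math.multiply_no_nan(tf.constant([float("inf"), 2.0]),
                              tf.constant([0.0, 3.0])))   # [0., 6.]

x = tf.constant(7)
y = tf.constant(-2)
print(tf.math.floordiv(x, y))   # -4, matching Python's 7 // -2
print(tf.math.floormod(x, y))   # -1, taking the sign of the divisor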
\"div\")\n_OverrideBinaryOperatorHelper(_truediv_python3, \"truediv\")\n_OverrideBinaryOperatorHelper(floordiv, \"floordiv\")\n_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, \"mod\")\n_OverrideBinaryOperatorHelper(pow, \"pow\")\n\n\n@tf_export(\"math.logical_xor\", v1=[\"math.logical_xor\", \"logical_xor\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"logical_xor\")\ndef logical_xor(x, y, name=\"LogicalXor\"):\n \"\"\"Logical XOR function.\n\n x ^ y = (x | y) & ~(x & y)\n\n The operation works for the following input types:\n\n - Two single elements of type `bool`\n - One `tf.Tensor` of type `bool` and one single `bool`, where the result will\n be calculated by applying logical XOR with the single element to each\n element in the larger Tensor.\n - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,\n the result will be the element-wise logical XOR of the two input tensors.\n\n Usage:\n\n >>> a = tf.constant([True])\n >>> b = tf.constant([False])\n >>> tf.math.logical_xor(a, b)\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>\n\n >>> c = tf.constant([True])\n >>> x = tf.constant([False, True, True, False])\n >>> tf.math.logical_xor(c, x)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False, True])>\n\n >>> y = tf.constant([False, False, True, True])\n >>> z = tf.constant([False, True, False, True])\n >>> tf.math.logical_xor(y, z)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>\n\n Args:\n x: A `tf.Tensor` type bool.\n y: A `tf.Tensor` of type bool.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the same size as that of x or y.\n \"\"\"\n # TODO(alemi) Make this a cwise op if people end up relying on it.\n return gen_math_ops.logical_and(\n gen_math_ops.logical_or(x, y),\n gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),\n name=name)\n\n\n@tf_export(\"math.logical_and\", \"logical_and\")\[email protected]_dispatch_support\ndef logical_and(x, y, name=None):\n \"\"\"Logical AND function.\n\n The operation works for the following input types:\n\n - Two single elements of type `bool`\n - One `tf.Tensor` of type `bool` and one single `bool`, where the result will\n be calculated by applying logical AND with the single element to each\n element in the larger Tensor.\n - Two `tf.Tensor` objects of type `bool` of the same shape. 
In this case,\n the result will be the element-wise logical AND of the two input tensors.\n\n Usage:\n\n >>> a = tf.constant([True])\n >>> b = tf.constant([False])\n >>> tf.math.logical_and(a, b)\n <tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>\n\n >>> c = tf.constant([True])\n >>> x = tf.constant([False, True, True, False])\n >>> tf.math.logical_and(c, x)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>\n\n >>> y = tf.constant([False, False, True, True])\n >>> z = tf.constant([False, True, False, True])\n >>> tf.math.logical_and(y, z)\n <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False, True])>\n\n Args:\n x: A `tf.Tensor` type bool.\n y: A `tf.Tensor` of type bool.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the same size as that of x or y.\n \"\"\"\n return gen_math_ops.logical_and(x, y, name)\n\n\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, \"and\")\n_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, \"or\")\n_OverrideBinaryOperatorHelper(logical_xor, \"xor\")\n\nops.Tensor._override_operator(\"__lt__\", gen_math_ops.less)\nops.Tensor._override_operator(\"__le__\", gen_math_ops.less_equal)\nops.Tensor._override_operator(\"__gt__\", gen_math_ops.greater)\nops.Tensor._override_operator(\"__ge__\", gen_math_ops.greater_equal)\n\n\n@tf_export(\"math.equal\", \"equal\")\[email protected]_dispatch_support\ndef equal(x, y, name=None):\n \"\"\"Returns the truth value of (x == y) element-wise.\n\n Performs a [broadcast](\n https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the\n arguments and then an element-wise equality comparison, returning a Tensor of\n boolean values.\n\n For example:\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant(2)\n >>> tf.math.equal(x, y)\n <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant([2, 4])\n >>> tf.math.equal(x, y)\n <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>\n\n Args:\n x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.\n y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the same size as that of x or y.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible\n \"\"\"\n return gen_math_ops.equal(x, y, name=name)\n\n\n@tf_export(\"math.not_equal\", \"not_equal\")\[email protected]_dispatch_support\ndef not_equal(x, y, name=None):\n \"\"\"Returns the truth value of (x != y) element-wise.\n\n Performs a [broadcast](\n https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the\n arguments and then an element-wise inequality comparison, returning a Tensor\n of boolean values.\n\n For example:\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant(2)\n >>> tf.math.not_equal(x, y)\n <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])>\n\n >>> x = tf.constant([2, 4])\n >>> y = tf.constant([2, 4])\n >>> tf.math.not_equal(x, y)\n <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>\n\n Args:\n x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.\n y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of type bool with the same size as that of x or y.\n\n Raises:\n `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible\n \"\"\"\n return 
gen_math_ops.not_equal(x, y, name=name)\n\n\ndef tensor_equals(self, other):\n \"\"\"Compares two tensors element-wise for equality.\"\"\"\n if other is None:\n return False\n g = getattr(self, \"graph\", None)\n if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and\n (g is None or g.building_function)):\n return gen_math_ops.equal(self, other, incompatible_shape_error=False)\n else:\n # In legacy graph mode, tensor equality is object equality\n return self is other\n\n\ndef tensor_not_equals(self, other):\n \"\"\"Compares two tensors element-wise for equality.\"\"\"\n if other is None:\n return True\n if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():\n return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)\n else:\n # In legacy graph mode, tensor equality is object equality\n return self is not other\n\n\nops.Tensor._override_operator(\"__eq__\", tensor_equals)\nops.Tensor._override_operator(\"__ne__\", tensor_not_equals)\n\n\n@tf_export(\"range\")\ndef range(start, limit=None, delta=1, dtype=None, name=\"range\"): # pylint: disable=redefined-builtin\n \"\"\"Creates a sequence of numbers.\n\n Creates a sequence of numbers that begins at `start` and extends by\n increments of `delta` up to but not including `limit`.\n\n The dtype of the resulting tensor is inferred from the inputs unless\n it is provided explicitly.\n\n Like the Python builtin `range`, `start` defaults to 0, so that\n `range(n) = range(0, n)`.\n\n For example:\n\n >>> start = 3\n >>> limit = 18\n >>> delta = 3\n >>> tf.range(start, limit, delta)\n <tf.Tensor: shape=(5,), dtype=int32,\n numpy=array([ 3, 6, 9, 12, 15], dtype=int32)>\n\n >>> start = 3\n >>> limit = 1\n >>> delta = -0.5\n >>> tf.range(start, limit, delta)\n <tf.Tensor: shape=(4,), dtype=float32,\n numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>\n\n >>> limit = 5\n >>> tf.range(limit)\n <tf.Tensor: shape=(5,), dtype=int32,\n numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n\n Args:\n start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`\n is not None; otherwise, acts as range limit and first entry defaults to 0.\n limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,\n defaults to the value of `start` while the first entry of the range\n defaults to 0.\n delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to\n 1.\n dtype: The type of the elements of the resulting tensor.\n name: A name for the operation. 
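# A minimal sketch, assuming TensorFlow 2.x running eagerly as `tf`, of the
# `__eq__`/`__ne__` overrides above: equality is elementwise when tensor
# equality is enabled, and comparing against None short-circuits to a bool.
import tensorflow as tf

t = tf.constant([1, 2, 3])
u = tf.constant([1, 0, 3])
print(t == u)      # elementwise: [ True False  True]
print(t != u)      # elementwise: [False  True False]
print(t == None)   # False (plain Python bool, no op is built)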
Defaults to \"range\".\n\n Returns:\n An 1-D `Tensor` of type `dtype`.\n\n @compatibility(numpy)\n Equivalent to np.arange\n @end_compatibility\n \"\"\"\n if limit is None:\n start, limit = 0, start\n\n with ops.name_scope(name, \"Range\", [start, limit, delta]) as name:\n if not isinstance(start, ops.Tensor):\n start = ops.convert_to_tensor(start, dtype=dtype, name=\"start\")\n if not isinstance(limit, ops.Tensor):\n limit = ops.convert_to_tensor(limit, dtype=dtype, name=\"limit\")\n if not isinstance(delta, ops.Tensor):\n delta = ops.convert_to_tensor(delta, dtype=dtype, name=\"delta\")\n\n # infer dtype if not explicitly provided\n if dtype is None:\n dtype_hierarchy = [\n dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64\n ]\n assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])\n inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],\n key=dtype_hierarchy.index)\n else:\n inferred_dtype = dtype\n # Always try perform a cast even start/limit/delta are already tensors.\n # This will revole the case where start/limit/delta's original's dtype\n # is different from provided dtype.\n start = cast(start, inferred_dtype)\n limit = cast(limit, inferred_dtype)\n delta = cast(delta, inferred_dtype)\n\n return gen_math_ops._range(start, limit, delta, name=name)\n\n\ndef _range_tensor_conversion_function(value, dtype=None, name=None,\n as_ref=False):\n del as_ref\n return range(value.start, value.stop, value.step, dtype=dtype, name=name)\n\n\nif not six.PY2:\n ops.register_tensor_conversion_function(builtins.range,\n _range_tensor_conversion_function)\n\n# Reduction operations\ndef _ReductionDims(x, axis, reduction_indices=None): # pylint: disable=invalid-name\n \"\"\"Returns range(0, rank(x)) if reduction_indices is None.\"\"\"\n # TODO(aselle): Remove this after deprecation\n if reduction_indices is not None:\n if axis is not None:\n raise ValueError(\"Can't specify both axis' and 'reduction_indices'.\")\n axis = reduction_indices\n if axis is not None:\n return axis\n else:\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if isinstance(x, ops.Tensor):\n rank = x.shape.rank\n if rank is not None:\n return constant_op.constant(np.arange(rank, dtype=np.int32))\n elif (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.shape.is_fully_defined()):\n rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(rank, dtype=np.int32))\n\n # Otherwise, we rely on Range and Rank to do the right thing at run-time.\n return range(0, array_ops.rank(x))\n\n\ndef _has_fully_defined_shape(tensor):\n \"\"\"Returns true if tensor has a fully defined shape.\"\"\"\n return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()\n\n\ndef _may_reduce_to_scalar(keepdims, axis, output):\n \"\"\"Set a reduction's output shape to be a scalar if we are certain.\"\"\"\n if not _has_fully_defined_shape(output) and (not keepdims) and (\n axis is None):\n output.set_shape(())\n return output\n\n\n@tf_export(v1=[\"math.reduce_sum\", \"reduce_sum\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_sum_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. 
If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 1, 1], [1, 1, 1]])\n tf.reduce_sum(x) # 6\n tf.reduce_sum(x, 0) # [2, 2, 2]\n tf.reduce_sum(x, 1) # [3, 3]\n tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]\n tf.reduce_sum(x, [0, 1]) # 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to\n int64 while tensorflow returns the same dtype as the input.\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_sum(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_sum\", \"reduce_sum\", v1=[])\[email protected]_dispatch_support\ndef reduce_sum(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the sum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 1, 1], [1, 1, 1]])\n tf.reduce_sum(x) # 6\n tf.reduce_sum(x, 0) # [2, 2, 2]\n tf.reduce_sum(x, 1) # [3, 3]\n tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]\n tf.reduce_sum(x, [0, 1]) # 6\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to\n int64 while tensorflow returns the same dtype as the input.\n @end_compatibility\n \"\"\"\n\n return reduce_sum_with_dims(input_tensor, axis, keepdims, name,\n _ReductionDims(input_tensor, axis))\n\n\ndef reduce_sum_with_dims(input_tensor,\n axis=None,\n keepdims=False,\n name=None,\n dims=None):\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._sum(input_tensor, dims, keepdims, name=name))\n\n\n@tf_export(\"math.reduce_euclidean_norm\")\ndef reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the Euclidean norm of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [1, 1, 1]])\n tf.reduce_euclidean_norm(x) # sqrt(17)\n tf.reduce_euclidean_norm(x, 0) # [sqrt(2), sqrt(5), sqrt(10)]\n tf.reduce_euclidean_norm(x, 1) # [sqrt(14), sqrt(3)]\n tf.reduce_euclidean_norm(x, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]]\n tf.reduce_euclidean_norm(x, [0, 1]) # sqrt(17)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n \"\"\"\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops.euclidean_norm(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.count_nonzero\", \"count_nonzero\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\[email protected]_args(\n None, \"reduction_indices is deprecated, use axis instead\",\n \"reduction_indices\")\ndef count_nonzero(input_tensor=None,\n axis=None,\n keepdims=None,\n dtype=dtypes.int64,\n name=None,\n reduction_indices=None,\n keep_dims=None,\n input=None): # pylint: disable=redefined-builtin\n \"\"\"Computes number of nonzero elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n **NOTE** Floating point comparison to zero is done by exact floating point\n equality check. 
Small values are **not** rounded to zero for purposes of\n the nonzero check.\n\n For example:\n\n ```python\n x = tf.constant([[0, 1, 0], [1, 1, 0]])\n tf.math.count_nonzero(x) # 3\n tf.math.count_nonzero(x, 0) # [1, 2, 0]\n tf.math.count_nonzero(x, 1) # [1, 2]\n tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]\n tf.math.count_nonzero(x, [0, 1]) # 3\n ```\n\n **NOTE** Strings are compared against zero-length empty string `\"\"`. Any\n string with a size greater than zero is already considered as nonzero.\n\n For example:\n ```python\n x = tf.constant([\"\", \"a\", \" \", \"b\", \"\"])\n tf.math.count_nonzero(x) # 3, with \"a\", \" \", and \"b\" as nonzero strings.\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or\n `string`.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n dtype: The output dtype; defaults to `tf.int64`.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n input: Overrides input_tensor. For compatibility.\n\n Returns:\n The reduced tensor (number of nonzero values).\n \"\"\"\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n input_tensor = deprecation.deprecated_argument_lookup(\"input\", input,\n \"input_tensor\",\n input_tensor)\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n\n return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)\n\n\n@tf_export(\"math.count_nonzero\", v1=[])\ndef count_nonzero_v2(\n input, # pylint: disable=redefined-builtin\n axis=None,\n keepdims=None,\n dtype=dtypes.int64,\n name=None):\n \"\"\"Computes number of nonzero elements across dimensions of a tensor.\n\n Reduces `input` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n **NOTE** Floating point comparison to zero is done by exact floating point\n equality check. Small values are **not** rounded to zero for purposes of\n the nonzero check.\n\n For example:\n\n ```python\n x = tf.constant([[0, 1, 0], [1, 1, 0]])\n tf.math.count_nonzero(x) # 3\n tf.math.count_nonzero(x, 0) # [1, 2, 0]\n tf.math.count_nonzero(x, 1) # [1, 2]\n tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]\n tf.math.count_nonzero(x, [0, 1]) # 3\n ```\n\n **NOTE** Strings are compared against zero-length empty string `\"\"`. Any\n string with a size greater than zero is already considered as nonzero.\n\n For example:\n ```python\n x = tf.constant([\"\", \"a\", \" \", \"b\", \"\"])\n tf.math.count_nonzero(x) # 3, with \"a\", \" \", and \"b\" as nonzero strings.\n ```\n\n Args:\n input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input), rank(input))`.\n keepdims: If true, retains reduced dimensions with length 1.\n dtype: The output dtype; defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor (number of nonzero values).\n \"\"\"\n if keepdims is None:\n keepdims = False\n with ops.name_scope(name, \"count_nonzero\", [input]):\n input = ops.convert_to_tensor(input, name=\"input\")\n # A scalar of 'zero' is enough as `not_equal` will broadcast.\n zero = array_ops.zeros([], dtype=input.dtype)\n return cast(\n reduce_sum(\n # int64 reduction happens on GPU\n cast(gen_math_ops.not_equal(input, zero), dtypes.int64),\n axis=axis,\n keepdims=keepdims),\n dtype=dtype)\n\n\n@tf_export(v1=[\"math.reduce_mean\", \"reduce_mean\"])\ndef reduce_mean_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis` by computing the\n mean of elements across the dimensions in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a tensor with a single\n element is returned.\n\n For example:\n\n >>> x = tf.constant([[1., 1.], [2., 2.]])\n >>> tf.reduce_mean(x)\n <tf.Tensor: shape=(), dtype=float32, numpy=1.5>\n >>> tf.reduce_mean(x, 0)\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>\n >>> tf.reduce_mean(x, 1)\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n >>> x = tf.constant([1, 0, 1, 0])\n >>> tf.reduce_mean(x)\n <tf.Tensor: shape=(), dtype=int32, numpy=0>\n >>> y = tf.constant([1., 0., 1., 0.])\n >>> tf.reduce_mean(y)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.5>\n\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_mean(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_mean\", \"reduce_mean\", v1=[])\[email protected]_dispatch_support\ndef reduce_mean(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the mean of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis` by computing the\n mean of elements across the dimensions in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. 
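# A small sketch, assuming TensorFlow 2.x as `tf`, of the reduction performed
# by `count_nonzero_v2` above: it sums a cast of the elementwise
# "is nonzero" mask.
import tensorflow as tf

x = tf.constant([[0, 1, 0], [1, 1, 0]])
print(tf.math.count_nonzero(x))                              # 3
print(tf.reduce_sum(tf.cast(tf.not_equal(x, 0), tf.int64)))  # 3, the same reduction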
If `keepdims` is true, the reduced dimensions are retained\n with length 1.\n\n If `axis` is None, all dimensions are reduced, and a tensor with a single\n element is returned.\n\n For example:\n\n >>> x = tf.constant([[1., 1.], [2., 2.]])\n >>> tf.reduce_mean(x)\n <tf.Tensor: shape=(), dtype=float32, numpy=1.5>\n >>> tf.reduce_mean(x, 0)\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>\n >>> tf.reduce_mean(x, 1)\n <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.mean\n\n Please note that `np.mean` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,\n for example:\n\n >>> x = tf.constant([1, 0, 1, 0])\n >>> tf.reduce_mean(x)\n <tf.Tensor: shape=(), dtype=int32, numpy=0>\n >>> y = tf.constant([1., 0., 1., 0.])\n >>> tf.reduce_mean(y)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.5>\n\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops.mean(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(\"math.reduce_variance\")\ndef reduce_variance(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the variance of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 2.], [3., 4.]])\n tf.reduce_variance(x) # 1.25\n tf.reduce_variance(x, 0) # [1., 1.]\n tf.reduce_variance(x, 1) # [0.25, 0.25]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name scope for the associated operations (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.var\n\n Please note that `np.var` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. 
On the other\n hand, `tf.reduce_variance` has an aggressive type inference from\n `input_tensor`,\n @end_compatibility\n \"\"\"\n name = name if name else \"reduce_variance\"\n with ops.name_scope(name):\n means = reduce_mean(input_tensor, axis=axis, keepdims=True)\n squared_deviations = gen_math_ops.square(input_tensor - means)\n return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)\n\n\n@tf_export(\"math.reduce_std\")\ndef reduce_std(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the standard deviation of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[1., 2.], [3., 4.]])\n tf.reduce_std(x) # 1.1180339887498949\n tf.reduce_std(x, 0) # [1., 1.]\n tf.reduce_std(x, 1) # [0.5, 0.5]\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name scope for the associated operations (optional).\n\n Returns:\n The reduced tensor, of the same dtype as the input_tensor.\n\n @compatibility(numpy)\n Equivalent to np.std\n\n Please note that `np.std` has a `dtype` parameter that could be used to\n specify the output type. By default this is `dtype=float64`. On the other\n hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`,\n @end_compatibility\n \"\"\"\n name = name if name else \"reduce_std\"\n with ops.name_scope(name):\n variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)\n return gen_math_ops.sqrt(variance)\n\n\n@tf_export(\"math.reduce_prod\", \"reduce_prod\", v1=[])\[email protected]_dispatch_support\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
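# A brief sketch, assuming TensorFlow 2.x as `tf`, of the relationship used by
# `reduce_std` above: it is the square root of `reduce_variance`.
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
print(tf.math.reduce_variance(x))           # 1.25
print(tf.math.reduce_std(x))                # ~1.118
print(tf.sqrt(tf.math.reduce_variance(x)))  # ~1.118, matching reduce_std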
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops.prod(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_prod\", \"reduce_prod\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_prod_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the product of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.prod\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_prod(input_tensor, axis, keepdims, name)\n\n\n@tf_export(v1=[\"math.reduce_min\", \"reduce_min\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_min_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
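# A short sketch, assuming TensorFlow 2.x as `tf`, since `reduce_prod` above
# ships without a worked example: it multiplies elements along the given axes.
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
print(tf.reduce_prod(x))      # 24.
print(tf.reduce_prod(x, 0))   # [3., 8.]
print(tf.reduce_prod(x, 1))   # [2., 12.]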
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_min(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_min\", \"reduce_min\", v1=[])\[email protected]_dispatch_support\ndef reduce_min(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the minimum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n For example:\n >>> a = tf.constant([[1, 2], [3, 4]])\n >>> tf.reduce_min(a)\n <tf.Tensor: shape=(), dtype=int32, numpy=1>\n\n @compatibility(numpy)\n Equivalent to np.min\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._min(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_max\", \"reduce_max\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_max_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.max\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_max(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_max\", \"reduce_max\", v1=[])\[email protected]_dispatch_support\ndef reduce_max(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the maximum of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n Usage example:\n\n >>> x = tf.constant([5, 1, 2, 4])\n >>> print(tf.reduce_max(x))\n tf.Tensor(5, shape=(), dtype=int32)\n >>> x = tf.constant([-5, -1, -2, -4])\n >>> print(tf.reduce_max(x))\n tf.Tensor(-1, shape=(), dtype=int32)\n >>> x = tf.constant([4, float('nan')])\n >>> print(tf.reduce_max(x))\n tf.Tensor(4.0, shape=(), dtype=float32)\n >>> x = tf.constant([float('nan'), float('nan')])\n >>> print(tf.reduce_max(x))\n tf.Tensor(-inf, shape=(), dtype=float32)\n >>> x = tf.constant([float('-inf'), float('inf')])\n >>> print(tf.reduce_max(x))\n tf.Tensor(inf, shape=(), dtype=float32)\n\n See the numpy docs for `np.amax` and `np.nanmax` behavior.\n\n Args:\n input_tensor: The tensor to reduce. Should have real numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n return reduce_max_with_dims(input_tensor, axis, keepdims, name,\n _ReductionDims(input_tensor, axis))\n\n\ndef reduce_max_with_dims(input_tensor,\n axis=None,\n keepdims=False,\n name=None,\n dims=None):\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._max(input_tensor, dims, keepdims, name=name))\n\n\n@tf_export(v1=[\"math.reduce_all\", \"reduce_all\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_all_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. 
If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_all(x) # False\n tf.reduce_all(x, 0) # [False, False]\n tf.reduce_all(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_all(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"reduce_all\", \"math.reduce_all\", v1=[])\[email protected]_dispatch_support\ndef reduce_all(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the \"logical and\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_all(x) # False\n tf.reduce_all(x, 0) # [False, False]\n tf.reduce_all(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.all\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._all(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_any\", \"reduce_any\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_any_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_any(x) # True\n tf.reduce_any(x, 0) # [True, True]\n tf.reduce_any(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. 
If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_any(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_any\", \"reduce_any\", v1=[])\[email protected]_dispatch_support\ndef reduce_any(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes the \"logical or\" of elements across dimensions of a tensor.\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` is None, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n For example:\n\n ```python\n x = tf.constant([[True, True], [False, False]])\n tf.reduce_any(x) # True\n tf.reduce_any(x, 0) # [True, True]\n tf.reduce_any(x, 1) # [True, False]\n ```\n\n Args:\n input_tensor: The boolean tensor to reduce.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n\n @compatibility(numpy)\n Equivalent to np.any\n @end_compatibility\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n return _may_reduce_to_scalar(\n keepdims, axis,\n gen_math_ops._any(\n input_tensor, _ReductionDims(input_tensor, axis), keepdims,\n name=name))\n\n\n@tf_export(v1=[\"math.reduce_logsumexp\", \"reduce_logsumexp\"])\[email protected]_args(None,\n \"keep_dims is deprecated, use keepdims instead\",\n \"keep_dims\")\ndef reduce_logsumexp_v1(input_tensor,\n axis=None,\n keepdims=None,\n name=None,\n reduction_indices=None,\n keep_dims=None):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. 
Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n reduction_indices: The old (deprecated) name for axis.\n keep_dims: Deprecated alias for `keepdims`.\n\n Returns:\n The reduced tensor.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis,\n \"reduction_indices\",\n reduction_indices)\n keepdims = deprecation.deprecated_argument_lookup(\"keepdims\", keepdims,\n \"keep_dims\", keep_dims)\n return reduce_logsumexp(input_tensor, axis, keepdims, name)\n\n\n@tf_export(\"math.reduce_logsumexp\", \"reduce_logsumexp\", v1=[])\ndef reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n Reduces `input_tensor` along the dimensions given in `axis`.\n Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each\n entry in `axis`. If `keepdims` is true, the reduced dimensions\n are retained with length 1.\n\n If `axis` has no entries, all dimensions are reduced, and a\n tensor with a single element is returned.\n\n This function is more numerically stable than log(sum(exp(input))). It avoids\n overflows caused by taking the exp of large inputs and underflows caused by\n taking the log of small inputs.\n\n For example:\n\n ```python\n x = tf.constant([[0., 0., 0.], [0., 0., 0.]])\n tf.reduce_logsumexp(x) # log(6)\n tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]\n tf.reduce_logsumexp(x, 1) # [log(3), log(3)]\n tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]\n tf.reduce_logsumexp(x, [0, 1]) # log(6)\n ```\n\n Args:\n input_tensor: The tensor to reduce. Should have numeric type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name for the operation (optional).\n\n Returns:\n The reduced tensor.\n \"\"\"\n keepdims = False if keepdims is None else keepdims\n input_tensor = ops.convert_to_tensor(input_tensor)\n with ops.name_scope(name, \"ReduceLogSumExp\", [input_tensor]) as name:\n reduce_dim = _ReductionDims(input_tensor, axis)\n raw_max = reduce_max_with_dims(\n input_tensor, axis=axis, keepdims=True, dims=reduce_dim)\n my_max = array_ops.stop_gradient(\n gen_math_ops.select(\n gen_math_ops.is_finite(raw_max), raw_max,\n gen_array_ops.zeros_like(raw_max)))\n result = gen_math_ops.log(\n reduce_sum_with_dims(\n gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),\n axis=axis,\n keepdims=keepdims,\n dims=reduce_dim))\n if not keepdims:\n my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))\n result = gen_math_ops.add(result, my_max)\n return _may_reduce_to_scalar(keepdims, axis, result)\n\n\n@tf_export(\"linalg.trace\", v1=[\"linalg.trace\", \"trace\"])\[email protected]_endpoints(\"trace\")\[email protected]_dispatch_support\ndef trace(x, name=None):\n \"\"\"Compute the trace of a tensor `x`.\n\n `trace(x)` returns the sum along the main diagonal of each inner-most matrix\n in x. 
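(Equivalently, for a batch of matrices this is the sum of `tf.linalg.diag_part(x)` over the last axis, which is how the op is implemented below.)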
If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output\n is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where\n\n `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])`\n\n For example:\n\n ```python\n x = tf.constant([[1, 2], [3, 4]])\n tf.linalg.trace(x) # 5\n\n x = tf.constant([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n tf.linalg.trace(x) # 15\n\n x = tf.constant([[[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]],\n [[-1, -2, -3],\n [-4, -5, -6],\n [-7, -8, -9]]])\n tf.linalg.trace(x) # [15, -15]\n ```\n\n Args:\n x: tensor.\n name: A name for the operation (optional).\n\n Returns:\n The trace of input tensor.\n \"\"\"\n with ops.name_scope(name, \"Trace\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)\n\n\n@tf_export(\"linalg.matmul\", \"matmul\")\[email protected]_dispatch_support\ndef matmul(a,\n b,\n transpose_a=False,\n transpose_b=False,\n adjoint_a=False,\n adjoint_b=False,\n a_is_sparse=False,\n b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n\n The inputs must, following any transpositions, be tensors of rank >= 2\n where the inner 2 dimensions specify valid matrix multiplication dimensions,\n and any further outer dimensions specify matching batch size.\n\n Both matrices must be of the same type. The supported types are:\n `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n Either matrix can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flag to `True`. These are `False`\n by default.\n\n If one or both of the matrices contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n This optimization is only available for plain matrices (rank-2 tensors) with\n datatypes `bfloat16` or `float32`.\n\n A simple 2-D tensor matrix multiplication:\n\n >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n >>> a # 2-D tensor\n <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n array([[1, 2, 3],\n [4, 5, 6]], dtype=int32)>\n >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n >>> b # 2-D tensor\n <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n array([[ 7, 8],\n [ 9, 10],\n [11, 12]], dtype=int32)>\n >>> c = tf.matmul(a, b)\n >>> c # `a` * `b`\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[ 58, 64],\n [139, 154]], dtype=int32)>\n\n A batch matrix multiplication with batch shape [2]:\n\n >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n >>> a # 3-D tensor\n <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=\n array([[[ 1, 2, 3],\n [ 4, 5, 6]],\n [[ 7, 8, 9],\n [10, 11, 12]]], dtype=int32)>\n >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n >>> b # 3-D tensor\n <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n array([[[13, 14],\n [15, 16],\n [17, 18]],\n [[19, 20],\n [21, 22],\n [23, 24]]], dtype=int32)>\n >>> c = tf.matmul(a, b)\n >>> c # `a` * `b`\n <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n array([[[ 94, 100],\n [229, 244]],\n [[508, 532],\n [697, 730]]], dtype=int32)>\n\n Since python >= 3.5 the @ operator is supported\n (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). 
In TensorFlow,\n it simply calls the `tf.matmul()` function, so the following lines are\n equivalent:\n\n >>> d = a @ b @ [[10], [11]]\n >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n\n Args:\n a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n `complex64`, `complex128` and rank > 1.\n b: `tf.Tensor` with same type and rank as `a`.\n transpose_a: If `True`, `a` is transposed before multiplication.\n transpose_b: If `True`, `b` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n adjoint_b: If `True`, `b` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this\n **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n that assume most values in `a` are zero.\n See `tf.sparse.sparse_dense_matmul`\n for some support for `tf.SparseTensor` multiplication.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this\n **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n that assume most values in `a` are zero.\n See `tf.sparse.sparse_dense_matmul`\n for some support for `tf.SparseTensor` multiplication.\n name: Name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n is the product of the corresponding matrices in `a` and `b`, e.g. if all\n transpose or adjoint attributes are `False`:\n\n `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n for all indices `i`, `j`.\n\n Note: This is matrix product, not element-wise product.\n\n\n Raises:\n ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n `adjoint_b` are both set to `True`.\n \"\"\"\n with ops.name_scope(name, \"MatMul\", [a, b]) as name:\n if transpose_a and adjoint_a:\n raise ValueError(\"Only one of transpose_a and adjoint_a can be True.\")\n if transpose_b and adjoint_b:\n raise ValueError(\"Only one of transpose_b and adjoint_b can be True.\")\n\n if context.executing_eagerly():\n if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):\n a = ops.convert_to_tensor(a, name=\"a\")\n if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):\n b = ops.convert_to_tensor(b, name=\"b\")\n else:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n\n # TODO(apassos) remove _shape_tuple here when it is not needed.\n a_shape = a._shape_tuple() # pylint: disable=protected-access\n b_shape = b._shape_tuple() # pylint: disable=protected-access\n\n output_may_have_non_empty_batch_shape = (\n (a_shape is None or len(a_shape) > 2) or\n (b_shape is None or len(b_shape) > 2))\n\n if (not a_is_sparse and\n not b_is_sparse) and output_may_have_non_empty_batch_shape:\n # BatchMatmul does not support transpose, so we conjugate the matrix and\n # use adjoint instead. Conj() is a noop for real matrices.\n if transpose_a:\n a = conj(a)\n adjoint_a = True\n if transpose_b:\n b = conj(b)\n adjoint_b = True\n return gen_math_ops.batch_mat_mul_v2(\n a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)\n\n # Neither matmul nor sparse_matmul support adjoint, so we conjugate\n # the matrix and use transpose instead. 
Conj() is a noop for real\n # matrices.\n if adjoint_a:\n a = conj(a)\n transpose_a = True\n if adjoint_b:\n b = conj(b)\n transpose_b = True\n\n use_sparse_matmul = False\n if a_is_sparse or b_is_sparse:\n sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]\n use_sparse_matmul = (\n a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)\n if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and\n a.dtype != b.dtype):\n # matmul currently doesn't handle mixed-precision inputs.\n use_sparse_matmul = True\n if use_sparse_matmul:\n ret = sparse_matmul(\n a,\n b,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse,\n name=name)\n # sparse_matmul always returns float32, even with\n # bfloat16 inputs. This prevents us from configuring bfloat16 training.\n # casting to bfloat16 also matches non-sparse matmul behavior better.\n if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:\n ret = cast(ret, dtypes.bfloat16)\n return ret\n else:\n return gen_math_ops.mat_mul(\n a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)\n\n\n@tf_export(\"linalg.matvec\")\ndef matvec(a,\n b,\n transpose_a=False,\n adjoint_a=False,\n a_is_sparse=False,\n b_is_sparse=False,\n name=None):\n \"\"\"Multiplies matrix `a` by vector `b`, producing `a` * `b`.\n\n The matrix `a` must, following any transpositions, be a tensor of rank >= 2,\n with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast\n with `shape(b)[:-1]`.\n\n Both `a` and `b` must be of the same type. The supported types are:\n `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n\n Matrix `a` can be transposed or adjointed (conjugated and transposed) on\n the fly by setting one of the corresponding flag to `True`. These are `False`\n by default.\n\n If one or both of the inputs contain a lot of zeros, a more efficient\n multiplication algorithm can be used by setting the corresponding\n `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n This optimization is only available for plain matrices/vectors (rank-2/1\n tensors) with datatypes `bfloat16` or `float32`.\n\n For example:\n\n ```python\n # 2-D tensor `a`\n # [[1, 2, 3],\n # [4, 5, 6]]\n a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n\n # 1-D tensor `b`\n # [7, 9, 11]\n b = tf.constant([7, 9, 11], shape=[3])\n\n # `a` * `b`\n # [ 58, 64]\n c = tf.linalg.matvec(a, b)\n\n\n # 3-D tensor `a`\n # [[[ 1, 2, 3],\n # [ 4, 5, 6]],\n # [[ 7, 8, 9],\n # [10, 11, 12]]]\n a = tf.constant(np.arange(1, 13, dtype=np.int32),\n shape=[2, 2, 3])\n\n # 2-D tensor `b`\n # [[13, 14, 15],\n # [16, 17, 18]]\n b = tf.constant(np.arange(13, 19, dtype=np.int32),\n shape=[2, 3])\n\n # `a` * `b`\n # [[ 86, 212],\n # [410, 563]]\n c = tf.linalg.matvec(a, b)\n ```\n\n Args:\n a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,\n `complex128` and rank > 1.\n b: `Tensor` with same type as `a` and compatible dimensions.\n transpose_a: If `True`, `a` is transposed before multiplication.\n adjoint_a: If `True`, `a` is conjugated and transposed before\n multiplication.\n a_is_sparse: If `True`, `a` is treated as a sparse matrix.\n b_is_sparse: If `True`, `b` is treated as a sparse matrix.\n name: Name for the operation (optional).\n\n Returns:\n A `Tensor` of the same type as `a` and `b` where each inner-most vector is\n the product of the corresponding matrices in `a` and vectors in `b`, e.g. 
if\n all transpose or adjoint attributes are `False`:\n\n `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.\n\n Note: This is matrix-vector product, not element-wise product.\n\n\n Raises:\n ValueError: If transpose_a and adjoint_a are both set to True.\n \"\"\"\n with ops.name_scope(name, \"MatVec\", [a, b]) as name:\n output = matmul(\n a,\n array_ops.expand_dims(b, axis=-1),\n transpose_a=transpose_a,\n adjoint_a=adjoint_a,\n a_is_sparse=a_is_sparse,\n b_is_sparse=b_is_sparse)\n return array_ops.squeeze(output, axis=-1)\n\n\n_OverrideBinaryOperatorHelper(matmul, \"matmul\")\n\nsparse_matmul = deprecation.deprecated(None, \"Use `tf.linalg.matmul` instead\")(\n gen_math_ops.sparse_mat_mul)\ntf_export(v1=[\"sparse_matmul\"])(sparse_matmul)\n\n\[email protected](\"MatMul\", \"flops\")\ndef _calc_mat_mul_flops(graph, node):\n \"\"\"Calculates the compute resources needed for MatMul.\"\"\"\n transpose_a = node.attr[\"transpose_a\"].b\n a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n a_shape.assert_is_fully_defined()\n if transpose_a:\n k = int(a_shape[0])\n else:\n k = int(a_shape[1])\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (k * output_count * 2))\n\n\[email protected](\"BatchMatMul\", \"flops\")\[email protected](\"BatchMatMulV2\", \"flops\")\ndef _calc_batch_mat_mul_flops(graph, node):\n \"\"\"Calculates the compute resources needed for BatchMatMul.\"\"\"\n transpose_a = node.attr[\"transpose_a\"].b\n a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n a_shape.assert_is_fully_defined()\n if transpose_a:\n k = int(a_shape[-2])\n else:\n k = int(a_shape[-1])\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (k * output_count * 2))\n\n\ndef _as_indexed_slices(x, optimize=True):\n \"\"\"Convert 'x' to IndexedSlices.\n\n Convert a dense Tensor to a block-sparse IndexedSlices.\n\n Args:\n x: Either a Tensor object, or an IndexedSlices object.\n optimize: if true, attempt to optimize the conversion of 'x'.\n\n Returns:\n An IndexedSlices object.\n\n Raises:\n TypeError: If 'x' is not a Tensor or an IndexedSlices object.\n \"\"\"\n # TODO(touts): op_scope\n if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\"Not a Tensor or IndexedSlices: %s\" % type(x))\n if isinstance(x, ops.IndexedSlices):\n return x\n x_shape = array_ops.shape_internal(x, optimize=optimize)\n return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)\n\n\ndef _as_indexed_slices_list(inputs, optimize=True):\n \"\"\"Convert all elements of 'inputs' to IndexedSlices.\n\n Additionally, homogenize the types of all the indices to\n either int32 or int64.\n\n Args:\n inputs: List containing either Tensor or IndexedSlices objects.\n optimize: if true, attempt to optimize the conversion of each input.\n\n Returns:\n A list of IndexedSlices objects.\n\n Raises:\n TypeError: If 'inputs' is not a list or a tuple.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(\"Expected a list or tuple, not a %s\" % type(inputs))\n outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]\n with_int32_index = [\n o.indices for o in outputs if o.indices.dtype == dtypes.int32\n ]\n if not with_int32_index or 
len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == dtypes.int32:\n casted_outputs.append(\n ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),\n o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs\n\n\n@tf_export(\"math.add_n\", \"add_n\")\[email protected]_dispatch_support\ndef add_n(inputs, name=None):\n \"\"\"Adds all input tensors element-wise.\n\n `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it\n waits for all of its inputs to be ready before beginning to sum.\n This buffering can result in higher memory consumption when inputs are ready\n at different times, since the minimum temporary storage required is\n proportional to the input size rather than the output size.\n\n This op does not [broadcast](\n https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)\n its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)\n instead.\n\n For example:\n\n >>> a = tf.constant([[3, 5], [4, 8]])\n >>> b = tf.constant([[1, 6], [2, 9]])\n >>> tf.math.add_n([a, b, a])\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[ 7, 16],\n [10, 25]], dtype=int32)>\n\n Args:\n inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the\n same shape and type. `tf.IndexedSlices` objects will be converted into\n dense tensors prior to adding.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of the same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise ValueError(\"inputs must be a list of at least one \"\n \"Tensor/IndexedSlices with the same dtype and shape\")\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):\n raise ValueError(\"inputs must be a list of at least one \"\n \"Tensor/IndexedSlices with the same dtype and shape\")\n\n if len(inputs) == 1:\n if isinstance(inputs[0], ops.IndexedSlices):\n values = ops.convert_to_tensor(inputs[0])\n else:\n values = inputs[0]\n if name:\n return array_ops.identity(values, name=name)\n return values\n return gen_math_ops.add_n(inputs, name=name)\n\n\n@tf_export(\"math.accumulate_n\", v1=[\"math.accumulate_n\", \"accumulate_n\"])\[email protected]_endpoints(\"accumulate_n\")\ndef accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):\n \"\"\"Returns the element-wise sum of a list of tensors.\n\n Optionally, pass `shape` and `tensor_dtype` for shape and type checking,\n otherwise, these are inferred.\n\n `accumulate_n` performs the same operation as `tf.math.add_n`.\n\n For example:\n\n ```python\n a = tf.constant([[1, 2], [3, 4]])\n b = tf.constant([[5, 0], [0, 6]])\n tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]\n\n # Explicitly pass shape and type\n tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)\n # [[7, 4],\n # [6, 14]]\n ```\n\n Args:\n inputs: A list of `Tensor` objects, each with same shape and type.\n shape: Expected shape of elements of `inputs` (optional). Also controls the\n output shape of this op, which may affect type inference in other ops. A\n value of `None` means \"infer the input shape from the shapes in `inputs`\".\n tensor_dtype: Expected data type of `inputs` (optional). 
A value of `None`\n means \"infer the input dtype from `inputs[0]`\".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as the elements of `inputs`.\n\n Raises:\n ValueError: If `inputs` don't all have same shape and dtype or the shape\n cannot be inferred.\n \"\"\"\n\n def _input_error():\n return ValueError(\"inputs must be a list of at least one Tensor with the \"\n \"same dtype and shape\")\n\n if not inputs or not isinstance(inputs, (list, tuple)):\n raise _input_error()\n inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)\n if not all(isinstance(x, ops.Tensor) for x in inputs):\n raise _input_error()\n if not all(x.dtype == inputs[0].dtype for x in inputs):\n raise _input_error()\n if shape is not None:\n shape = tensor_shape.as_shape(shape)\n else:\n shape = tensor_shape.unknown_shape()\n for input_tensor in inputs:\n if isinstance(input_tensor, ops.Tensor):\n shape = shape.merge_with(input_tensor.get_shape())\n\n # tensor_dtype is for safety only; operator's output type computed in C++\n if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:\n raise TypeError(\"tensor_dtype is {}, but input is of type {}\".format(\n tensor_dtype, inputs[0].dtype))\n\n if len(inputs) == 1 and name is None:\n return inputs[0]\n elif len(inputs) == 1 and name is not None:\n return array_ops.identity(inputs[0], name=name)\n return add_n(inputs, name=name)\n\n\[email protected](\"AccumulateNV2\")\ndef _accumulate_n_grad(op, grad):\n \"\"\"Same as gradient for AddN. Copies the gradient to all inputs.\"\"\"\n # Not broadcasting.\n return [grad] * len(op.inputs)\n\n\n@tf_export(\"math.sigmoid\", \"nn.sigmoid\", \"sigmoid\")\ndef sigmoid(x, name=None):\n r\"\"\"Computes sigmoid of `x` element-wise.\n\n Formula for calculating sigmoid(x): `y = 1 / (1 + exp(-x))`.\n\n For x \\in (-inf, inf) => sigmoid(x) \\in (0, 1)\n\n Example Usage:\n\n If a positive number is large, then its sigmoid will approach to 1 since the\n formula will be `y = <large_num> / (1 + <large_num>)`\n\n >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])\n >>> tf.math.sigmoid(x)\n <tf.Tensor: shape=(4,), dtype=float32,\n numpy=array([0.5 , 0.7310586, 1. , 1. ], dtype=float32)>\n\n If a negative number is large, its sigmoid will approach to 0 since the\n formula will be `y = 1 / (1 + <large_num>)`\n\n >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])\n >>> tf.math.sigmoid(x)\n <tf.Tensor: shape=(4,), dtype=float32, numpy=\n array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],\n dtype=float32)>\n\n Args:\n x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or\n `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n \n Usage Example:\n \n >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)\n >>> tf.sigmoid(x)\n <tf.Tensor: shape=(3,), dtype=float32,\n numpy=array([0. , 0.5, 1. ], dtype=float32)>\n\n @compatibility(scipy)\n Equivalent to scipy.special.expit\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, \"Sigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.sigmoid(x, name=name)\n\n\n@tf_export(\"math.log_sigmoid\", v1=[\"math.log_sigmoid\", \"log_sigmoid\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"log_sigmoid\")\ndef log_sigmoid(x, name=None):\n \"\"\"Computes log sigmoid of `x` element-wise.\n\n Specifically, `y = log(1 / (1 + exp(-x)))`. 
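A direct evaluation of that formula overflows for large negative `x`, because `exp(-x)` exceeds the float32 range. For example (values are approximate):\n\n ```python\n x = tf.constant([-1.0, 0.0, 1.0])\n tf.math.log_sigmoid(x) # ~[-1.3133, -0.6931, -0.3133]\n ```\n\n 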
For numerical stability,\n we use `y = -tf.nn.softplus(-x)`.\n\n Args:\n x: A Tensor with type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A Tensor with the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"LogSigmoid\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)\n\n\n@tf_export(\"math.bincount\", v1=[])\ndef bincount(arr,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32,\n name=None):\n \"\"\"Counts the number of occurrences of each value in an integer array.\n\n If `minlength` and `maxlength` are not given, returns a vector with length\n `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n ```python\n values = tf.constant([1,1,2,3,2,4,4,5])\n tf.math.bincount(values) #[0 2 2 1 2 1]\n ```\n Vector length = Maximum element in vector `values` is 5. Adding 1, which is 6\n will be the vector length.\n\n Each bin value in the output indicates number of occurrences of the particular\n index. Here, index 1 in output has a value 2. This indicates value 1 occurs\n two times in `values`.\n\n ```python\n values = tf.constant([1,1,2,3,2,4,4,5])\n weights = tf.constant([1,5,0,1,0,5,4,5])\n tf.math.bincount(values, weights=weights) #[0 6 0 1 9 5]\n ```\n Bin will be incremented by the corresponding weight instead of 1.\n Here, index 1 in output has a value 6. This is the summation of weights\n corresponding to the value in `values`.\n\n Args:\n arr: An int32 tensor of non-negative values.\n weights: If non-None, must be the same shape as arr. For each value in\n `arr`, the bin will be incremented by the corresponding weight instead of\n 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n dtype: If `weights` is None, determines the type of the output bins.\n name: A name scope for the associated operations (optional).\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. 
The bin\n values.\n\n Raises:\n `InvalidArgumentError` if negative values are provided as an input.\n\n \"\"\"\n name = \"bincount\" if name is None else name\n with ops.name_scope(name):\n arr = ops.convert_to_tensor(arr, name=\"arr\", dtype=dtypes.int32)\n array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0\n output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)\n if minlength is not None:\n minlength = ops.convert_to_tensor(\n minlength, name=\"minlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.maximum(minlength, output_size)\n if maxlength is not None:\n maxlength = ops.convert_to_tensor(\n maxlength, name=\"maxlength\", dtype=dtypes.int32)\n output_size = gen_math_ops.minimum(maxlength, output_size)\n if weights is not None:\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)\n weights = constant_op.constant([], dtype)\n return gen_math_ops.bincount(arr, output_size, weights)\n\n\n@tf_export(v1=[\"math.bincount\", \"bincount\"])\[email protected]_endpoints(\"bincount\")\ndef bincount_v1(arr,\n weights=None,\n minlength=None,\n maxlength=None,\n dtype=dtypes.int32):\n \"\"\"Counts the number of occurrences of each value in an integer array.\n\n If `minlength` and `maxlength` are not given, returns a vector with length\n `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.\n If `weights` are non-None, then index `i` of the output stores the sum of the\n value in `weights` at each index where the corresponding value in `arr` is\n `i`.\n\n Args:\n arr: An int32 tensor of non-negative values.\n weights: If non-None, must be the same shape as arr. For each value in\n `arr`, the bin will be incremented by the corresponding weight instead of\n 1.\n minlength: If given, ensures the output has length at least `minlength`,\n padding with zeros at the end if necessary.\n maxlength: If given, skips values in `arr` that are equal or greater than\n `maxlength`, ensuring that the output has length at most `maxlength`.\n dtype: If `weights` is None, determines the type of the output bins.\n\n Returns:\n A vector with the same dtype as `weights` or the given `dtype`. 
The bin\n values.\n \"\"\"\n return bincount(arr, weights, minlength, maxlength, dtype)\n\n\n@tf_export(\"math.cumsum\", \"cumsum\")\ndef cumsum(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative sum of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumsum, which means that the first\n element of the input is identical to the first element of the output:\n For example:\n\n >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x)\n <tf.Tensor: shape=(4,), dtype=int32,\n numpy=array([ 2, 6, 12, 20], dtype=int32)>\n\n >>> # using varying `axis` values\n >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])\n >>> tf.cumsum(y, axis=0)\n <tf.Tensor: shape=(2, 4), dtype=int32, numpy=\n array([[ 2, 4, 6, 8],\n [ 3, 7, 11, 15]], dtype=int32)>\n >>> tf.cumsum(y, axis=1)\n <tf.Tensor: shape=(2, 4), dtype=int32, numpy=\n array([[ 2, 6, 12, 20],\n [ 1, 4, 9, 16]], dtype=int32)>\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed\n instead:\n\n >>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, exclusive=True)\n <tf.Tensor: shape=(4,), dtype=int32,\n numpy=array([ 0, 2, 6, 12], dtype=int32)>\n\n By setting the `reverse` kwarg to `True`, the cumsum is performed in the\n opposite direction:\n\n >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, reverse=True)\n <tf.Tensor: shape=(4,), dtype=int32,\n numpy=array([20, 18, 14, 8], dtype=int32)>\n\n This is more efficient than using separate `tf.reverse` ops.\n The `reverse` and `exclusive` kwargs can also be combined:\n\n >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]\n >>> x = tf.constant([2, 4, 6, 8])\n >>> tf.cumsum(x, exclusive=True, reverse=True)\n <tf.Tensor: shape=(4,), dtype=int32,\n numpy=array([18, 14, 8, 0], dtype=int32)>\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumsum.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumsum\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumsum(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\n@tf_export(\"math.cumprod\", v1=[\"math.cumprod\", \"cumprod\"])\[email protected]_endpoints(\"cumprod\")\ndef cumprod(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative product of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumprod, which means that the\n first element of the input is identical to the first element of the output:\n\n ```python\n tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]\n ```\n\n By setting the `exclusive` kwarg to `True`, an exclusive cumprod is\n performed\n instead:\n\n ```python\n tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]\n ```\n\n By setting the `reverse` kwarg to `True`, the cumprod is performed in the\n opposite direction:\n\n ```python\n tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]\n ```\n\n This is more efficient than using separate `tf.reverse` ops.\n The `reverse` and `exclusive` kwargs can also be combined:\n\n ```python\n tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]\n ```\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n `complex128`, `qint8`, `quint8`, `qint32`, `half`.\n axis: A `Tensor` of type `int32` (default: 0). Must be in the range\n `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumprod.\n reverse: A `bool` (default: False).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with ops.name_scope(name, \"Cumprod\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumprod(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\n@tf_export(\"math.cumulative_logsumexp\", v1=[\"math.cumulative_logsumexp\"])\ndef cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):\n \"\"\"Compute the cumulative log-sum-exp of the tensor `x` along `axis`.\n\n By default, this op performs an inclusive cumulative log-sum-exp, which means\n that the first element of the input is identical to the first element of\n the output.\n\n This operation is significantly more numerically stable than the equivalent\n tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although\n computes the same result given infinite numerical precision. However, note\n that in some cases, it may be less stable than `tf.math.reduce_logsumexp`\n for a given element, as it applies the \"log-sum-exp trick\" in a different\n way.\n\n More precisely, where `tf.math.reduce_logsumexp` uses the following trick:\n\n ```\n log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)\n ```\n\n it cannot be directly used here as there is no fast way of applying it\n to each prefix `x[:i]`. Instead, this function implements a prefix\n scan using pairwise log-add-exp, which is a commutative and associative\n (up to floating point precision) operator:\n\n ```\n log_add_exp(x, y) = log(exp(x) + exp(y))\n = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)\n ```\n\n However, reducing using the above operator leads to a different computation\n tree (logs are taken repeatedly instead of only at the end), and the maximum\n is only computed pairwise instead of over the entire prefix. 
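(For instance, `log_add_exp(1000., 1000.)` evaluates to `1000. + log(2.)` instead of overflowing, because only the difference `min(x, y) - max(x, y)` is ever exponentiated.)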
In general, this\n leads to a different and slightly less precise computation.\n\n Args:\n x: A `Tensor`. Must be one of the following types: `float16`, `float32`,\n `float64`.\n axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the\n range `[-rank(x), rank(x))`.\n exclusive: If `True`, perform exclusive cumulative log-sum-exp.\n reverse: If `True`, performs the cumulative log-sum-exp in the reverse\n direction.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same shape and type as `x`.\n \"\"\"\n with ops.name_scope(name, \"CumulativeLogsumexp\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n return gen_math_ops.cumulative_logsumexp(\n x, axis, exclusive=exclusive, reverse=reverse, name=name)\n\n\n@tf_export(\"math.conj\", v1=[\"math.conj\", \"conj\"])\[email protected]_dispatch_support\[email protected]_endpoints(\"conj\")\ndef conj(x, name=None):\n r\"\"\"Returns the complex conjugate of a complex number.\n\n Given a tensor `input` of complex numbers, this operation returns a tensor of\n complex numbers that are the complex conjugate of each element in `input`. The\n complex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\n real part and *b* is the imaginary part.\n\n The complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\n For example:\n\n # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\n tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n\n If `x` is real, it is returned unchanged.\n\n Args:\n x: `Tensor` to conjugate. Must have numeric or variant type.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that is the conjugate of `x` (with the same type).\n\n Raises:\n TypeError: If `x` is not a numeric tensor.\n \"\"\"\n if isinstance(x, ops.Tensor):\n dt = x.dtype\n if dt.is_floating or dt.is_integer:\n return x\n with ops.name_scope(name, \"Conj\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if x.dtype.is_complex or x.dtype == dtypes.variant:\n return gen_math_ops.conj(x, name=name)\n elif x.dtype.is_floating or x.dtype.is_integer:\n return x\n else:\n raise TypeError(\"Expected numeric or variant tensor, got dtype %r\" %\n x.dtype)\n\n\ndef reduced_shape(input_shape, axes):\n \"\"\"Helper function for reduction ops.\n\n Args:\n input_shape: 1-D Tensor, the shape of the Tensor being reduced.\n axes: 1-D Tensor, the reduction axes.\n\n Returns:\n A 1-D Tensor, the output shape as if keepdims were set to True.\n \"\"\"\n if context.executing_eagerly():\n input_shape = input_shape.numpy()\n axes = axes.numpy()\n input_shape[axes] = 1\n return input_shape\n\n # Example:\n # cast needed for SparseTensor reductions\n input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7]\n axes = cast(axes, dtypes.int32) # [1, 2]\n\n input_rank = array_ops.size(input_shape) # 4\n axes = (axes + input_rank) % input_rank\n axes_shape = array_ops.shape(axes) # [2]\n return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]\n [\n range(input_rank), # [0, 1, 2, 3]\n axes\n ], # [1, 2]\n [\n input_shape, # [2, 3, 5, 7]\n array_ops.fill(axes_shape, 1)\n ]) # [1, 1]\n\n\ndef _unsorted_segment_N(data, segment_ids, num_segments):\n \"\"\" Helper function for unsorted_segment_mean/_sqrtN.\n\n Computes the number\n of segment entries with 0-entries set to 1 to allow division by N.\n \"\"\"\n num_segments = ops.convert_to_tensor(num_segments)\n # bincount doesn't support negative indices so we use unsorted_segment_sum\n segment_ids_shape = 
array_ops.shape_internal(segment_ids)\n ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)\n n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)\n # add dimensions for all non-reduced axes\n broadcastable_shape = array_ops.concat(\n [num_segments[array_ops.newaxis],\n array_ops.ones([array_ops.rank(data)\n - array_ops.rank(segment_ids)],\n dtype=num_segments.dtype)],\n axis=0)\n n = array_ops.reshape(n, broadcastable_shape)\n return gen_math_ops.maximum(n, 1)\n\n\n@tf_export(\n \"math.unsorted_segment_mean\",\n v1=[\"math.unsorted_segment_mean\", \"unsorted_segment_mean\"])\[email protected]_endpoints(\"unsorted_segment_mean\")\[email protected]_dispatch_support\ndef unsorted_segment_mean(data, segment_ids, num_segments, name=None):\n r\"\"\"Computes the mean along segments of a tensor.\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n This operator is similar to the unsorted segment sum operator found\n [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\n Instead of computing the sum over segments, it computes the mean of all\n entries belonging to a segment such that:\n\n \\\\(output_i = 1/N_i \\sum_{j...} data[j...]\\\\) where the sum is over tuples\n `j...` such that `segment_ids[j...] == i` with \\\\N_i\\\\ being the number of\n occurrences of id \\\\i\\\\.\n\n If there is no entry for a given segment ID `i`, it outputs 0.\n\n If the given segment ID `i` is negative, the value is dropped and will not\n be added to the sum of the segment.\n\n Args:\n data: A `Tensor` with floating point or complex dtype.\n segment_ids: An integer tensor whose shape is a prefix of `data.shape`.\n num_segments: An integer scalar `Tensor`. The number of distinct segment\n IDs.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`\n dimensions, which are replaced with a single dimension which has size\n `num_segments`.\n \"\"\"\n with ops.name_scope(name, \"UnsortedSegmentMean\"):\n data = ops.convert_to_tensor(data)\n segment_ids = ops.convert_to_tensor(segment_ids)\n N = _unsorted_segment_N(data, segment_ids, num_segments)\n summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)\n return summed / N\n\n\n@tf_export(\n \"math.unsorted_segment_sqrt_n\",\n v1=[\"math.unsorted_segment_sqrt_n\", \"unsorted_segment_sqrt_n\"])\[email protected]_endpoints(\"unsorted_segment_sqrt_n\")\[email protected]_dispatch_support\ndef unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):\n r\"\"\"Computes the sum along segments of a tensor divided by the sqrt(N).\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n This operator is similar to the unsorted segment sum operator found\n [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\n Additionally to computing the sum over segments, it divides the results by\n sqrt(N).\n\n \\\\(output_i = 1/sqrt(N_i) \\sum_{j...} data[j...]\\\\) where the sum is over\n tuples `j...` such that `segment_ids[j...] 
== i` with \\\\N_i\\\\ being the\n number of occurrences of id \\\\i\\\\.\n\n If there is no entry for a given segment ID `i`, it outputs 0.\n\n Note that this op only supports floating point and complex dtypes,\n due to tf.sqrt only supporting these types.\n\n If the given segment ID `i` is negative, the value is dropped and will not\n be added to the sum of the segment.\n\n Args:\n data: A `Tensor` with floating point or complex dtype.\n segment_ids: An integer tensor whose shape is a prefix of `data.shape`.\n num_segments: An integer scalar `Tensor`. The number of distinct segment\n IDs.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`\n dimensions, which are replaced with a single dimension which has size\n `num_segments`.\n \"\"\"\n with ops.name_scope(name, \"UnsortedSegmentSqrtN\"):\n data = ops.convert_to_tensor(data)\n segment_ids = ops.convert_to_tensor(segment_ids)\n N = _unsorted_segment_N(data, segment_ids, num_segments)\n summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)\n return summed / gen_math_ops.sqrt(N)\n\n\n@tf_export(v1=[\"sparse.segment_sum\", \"sparse_segment_sum\"])\[email protected]_endpoints(\"sparse_segment_sum\")\ndef sparse_segment_sum(data,\n indices,\n segment_ids,\n name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s\n first dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n For example:\n\n ```python\n c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n # Select two rows, one segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n # => [[0 0 0 0]]\n\n # Select two rows, two segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n # => [[ 1 2 3 4]\n # [-1 -2 -3 -4]]\n\n # With missing segment ids.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),\n num_segments=4)\n # => [[ 1 2 3 4]\n # [ 0 0 0 0]\n # [-1 -2 -3 -4]\n # [ 0 0 0 0]]\n\n # Select all rows, two segments.\n tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n # => [[0 0 0 0]\n # [5 6 7 8]]\n\n # Which is equivalent to:\n tf.math.segment_sum(c, tf.constant([0, 0, 1]))\n ```\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sum_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sum(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_sum\", v1=[])\ndef sparse_segment_sum_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n r\"\"\"Computes the sum along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s\n first dimension, selecting a subset of dimension 0, specified by `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n For example:\n\n ```python\n c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n # Select two rows, one segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n # => [[0 0 0 0]]\n\n # Select two rows, two segment.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n # => [[ 1 2 3 4]\n # [-1 -2 -3 -4]]\n\n # With missing segment ids.\n tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),\n num_segments=4)\n # => [[ 1 2 3 4]\n # [ 0 0 0 0]\n # [-1 -2 -3 -4]\n # [ 0 0 0 0]]\n\n # Select all rows, two segments.\n tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n # => [[0 0 0 0]\n # [5 6 7 8]]\n\n # Which is equivalent to:\n tf.math.segment_sum(c, tf.constant([0, 0, 1]))\n ```\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n return sparse_segment_sum(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(v1=[\"sparse.segment_mean\", \"sparse_segment_mean\"])\[email protected]_endpoints(\"sparse_segment_mean\")\ndef sparse_segment_mean(data,\n indices,\n segment_ids,\n name=None,\n num_segments=None):\n r\"\"\"Computes the mean along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n Like `tf.math.segment_mean`, but `segment_ids` can have rank less than\n `data`'s first dimension, selecting a subset of dimension 0, specified by\n `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. 
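(For example, with `segment_ids=[0, 0, 2]` the id `1` is missing, so row 1 of the output is all zeros.)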
In those cases `num_segments` is used to determine\n the size of the output.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_mean_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_mean(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_mean\", v1=[])\ndef sparse_segment_mean_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n r\"\"\"Computes the mean along sparse segments of a tensor.\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n Like `tf.math.segment_mean`, but `segment_ids` can have rank less than\n `data`'s first dimension, selecting a subset of dimension 0, specified by\n `indices`.\n `segment_ids` is allowed to have missing ids, in which case the output will\n be zeros at those indices. In those cases `num_segments` is used to determine\n the size of the output.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n return sparse_segment_mean(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(v1=[\"sparse.segment_sqrt_n\", \"sparse_segment_sqrt_n\"])\[email protected]_endpoints(\"sparse_segment_sqrt_n\")\ndef sparse_segment_sqrt_n(data,\n indices,\n segment_ids,\n name=None,\n num_segments=None):\n r\"\"\"Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n `N` is the size of the segment being reduced.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n name: A name for the operation (optional).\n num_segments: An optional int32 scalar. 
Indicates the size of the output\n `Tensor`.\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n if num_segments is not None:\n return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(\n data=data,\n indices=indices,\n segment_ids=segment_ids,\n num_segments=num_segments,\n name=name)\n else:\n return gen_math_ops.sparse_segment_sqrt_n(\n data=data, indices=indices, segment_ids=segment_ids, name=name)\n\n\n@tf_export(\"sparse.segment_sqrt_n\", v1=[])\ndef sparse_segment_sqrt_n_v2(data,\n indices,\n segment_ids,\n num_segments=None,\n name=None):\n r\"\"\"Computes the sum along sparse segments of a tensor divided by the sqrt(N).\n\n Read [the section on\n segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)\n for an explanation of segments.\n\n Like `tf.sparse.segment_mean`, but instead of dividing by the size of the\n segment, `N`, divide by `sqrt(N)` instead.\n\n Args:\n data: A `Tensor` with data that will be assembled in the output.\n indices: A 1-D `Tensor` with indices into `data`. Has same rank as\n `segment_ids`.\n segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values\n should be sorted and can be repeated.\n num_segments: An optional int32 scalar. Indicates the size of the output\n `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as data, except for dimension 0 which\n has size `k`, the number of segments specified via `num_segments` or\n inferred for the last element in `segments_ids`.\n \"\"\"\n return sparse_segment_sqrt_n(\n data, indices, segment_ids, name=name, num_segments=num_segments)\n\n\n@tf_export(\"tensordot\", \"linalg.tensordot\")\ndef tensordot(a, b, axes, name=None):\n r\"\"\"Tensor contraction of a and b along specified axes and outer product.\n\n Tensordot (also known as tensor contraction) sums the product of elements\n from `a` and `b` over the indices specified by `a_axes` and `b_axes`.\n The lists `a_axes` and `b_axes` specify those pairs of axes along which to\n contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension\n as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists\n `a_axes` and `b_axes` must have identical length and consist of unique\n integers that specify valid axes for each of the tensors. Additionally\n outer product is supported by passing `axes=0`.\n\n This operation corresponds to `numpy.tensordot(a, b, axes)`.\n\n Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`\n is equivalent to matrix multiplication.\n\n Example 2: When `a` and `b` are matrices (order 2), the case\n `axes = [[1], [0]]` is equivalent to matrix multiplication.\n\n Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives\n the outer product, a tensor of order 4.\n\n Example 4: Suppose that \\\\(a_{ijk}\\\\) and \\\\(b_{lmn}\\\\) represent two\n tensors of order 3. 
Then, `contract(a, b, [[0], [2]])` is the order 4 tensor\n \\\\(c_{jklm}\\\\) whose entry\n corresponding to the indices \\\\((j,k,l,m)\\\\) is given by:\n\n \\\\( c_{jklm} = \\sum_i a_{ijk} b_{lmi} \\\\).\n\n In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.\n\n Args:\n a: `Tensor` of type `float32` or `float64`.\n b: `Tensor` with the same type as `a`.\n axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].\n If axes is a scalar, sum over the last N axes of a and the first N axes of\n b in order. If axes is a list or `Tensor` the first and second row contain\n the set of unique integers specifying axes along which the contraction is\n computed, for `a` and `b`, respectively. The number of axes for `a` and\n `b` must be equal. If `axes=0`, computes the outer product between `a` and\n `b`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `a`.\n\n Raises:\n ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.\n IndexError: If the values in axes exceed the rank of the corresponding\n tensor.\n \"\"\"\n\n def _tensordot_reshape(a, axes, flipped=False):\n \"\"\"Helper method to perform transpose and reshape for contraction op.\n\n This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`\n using `array_ops.transpose` and `array_ops.reshape`. The method takes a\n tensor and performs the correct transpose and reshape operation for a given\n set of indices. It returns the reshaped tensor as well as a list of indices\n necessary to reshape the tensor again after matrix multiplication.\n\n Args:\n a: `Tensor`.\n axes: List or `int32` `Tensor` of unique indices specifying valid axes of\n `a`.\n flipped: An optional `bool`. Defaults to `False`. If `True`, the method\n assumes that `a` is the second argument in the contraction operation.\n\n Returns:\n A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is\n the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is\n either a list of integers or an `int32` `Tensor`, depending on whether\n the shape of a is fully specified, and free_dims_static is either a list\n of integers and None values, or None, representing the inferred\n static shape of the free dimensions\n \"\"\"\n if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n free_dims = [shape_a[i] for i in free]\n prod_free = int(np.prod([shape_a[i] for i in free]))\n prod_axes = int(np.prod([shape_a[i] for i in axes]))\n perm = list(axes) + free if flipped else free + list(axes)\n new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]\n if (perm != np.arange(len(shape_a))).any():\n a_trans = array_ops.transpose(a, perm)\n else:\n a_trans = a\n if a_trans.get_shape().as_list() != new_shape:\n reshaped_a = array_ops.reshape(a_trans, new_shape)\n else:\n reshaped_a = a_trans\n return reshaped_a, free_dims, free_dims\n else:\n if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):\n shape_a = a.get_shape().as_list()\n axes = [i if i >= 0 else i + len(shape_a) for i in axes]\n free = [i for i in xrange(len(shape_a)) if i not in axes]\n axes_dims = [shape_a[i] for i in axes]\n free_dims = [shape_a[i] for i in free]\n free_dims_static = free_dims\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name=\"axes\")\n free = 
ops.convert_to_tensor(free, dtype=dtypes.int32, name=\"free\")\n shape_a = array_ops.shape(a)\n else:\n free_dims_static = None\n shape_a = array_ops.shape(a)\n rank_a = array_ops.rank(a)\n axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name=\"axes\")\n axes = array_ops.where(axes >= 0, axes, axes + rank_a)\n free, _ = array_ops.setdiff1d(range(rank_a), axes)\n free_dims = array_ops.gather(shape_a, free)\n axes_dims = array_ops.gather(shape_a, axes)\n prod_free_dims = reduce_prod(free_dims)\n prod_axes_dims = reduce_prod(axes_dims)\n if flipped:\n perm = array_ops.concat([axes, free], 0)\n new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])\n else:\n perm = array_ops.concat([free, axes], 0)\n new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])\n reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)\n return reshaped_a, free_dims, free_dims_static\n\n def _tensordot_axes(a, axes):\n \"\"\"Generates two sets of contraction axes for the two tensor arguments.\"\"\"\n a_shape = a.get_shape()\n if isinstance(axes, compat.integral_types):\n if axes < 0:\n raise ValueError(\"'axes' must be at least 0.\")\n if a_shape.ndims is not None:\n if axes > a_shape.ndims:\n raise ValueError(\"'axes' must not be larger than the number of \"\n \"dimensions of tensor %s.\" % a)\n return (list(xrange(a_shape.ndims - axes,\n a_shape.ndims)), list(xrange(axes)))\n else:\n rank = array_ops.rank(a)\n return (range(rank - axes, rank,\n dtype=dtypes.int32), range(axes, dtype=dtypes.int32))\n elif isinstance(axes, (list, tuple)):\n if len(axes) != 2:\n raise ValueError(\"'axes' must be an integer or have length 2.\")\n a_axes = axes[0]\n b_axes = axes[1]\n if isinstance(a_axes, compat.integral_types) and \\\n isinstance(b_axes, compat.integral_types):\n a_axes = [a_axes]\n b_axes = [b_axes]\n if len(a_axes) != len(b_axes):\n raise ValueError(\n \"Different number of contraction axes 'a' and 'b', %s != %s.\" %\n (len(a_axes), len(b_axes)))\n return a_axes, b_axes\n else:\n axes = ops.convert_to_tensor(axes, name=\"axes\", dtype=dtypes.int32)\n return axes[0], axes[1]\n\n with ops.name_scope(name, \"Tensordot\", [a, b, axes]) as name:\n a = ops.convert_to_tensor(a, name=\"a\")\n b = ops.convert_to_tensor(b, name=\"b\")\n a_axes, b_axes = _tensordot_axes(a, axes)\n a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)\n b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(\n b, b_axes, True)\n ab_matmul = matmul(a_reshape, b_reshape)\n if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):\n if (ab_matmul.get_shape().is_fully_defined() and\n ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):\n return ab_matmul\n else:\n return array_ops.reshape(\n ab_matmul, a_free_dims + b_free_dims, name=name)\n else:\n a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)\n b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)\n product = array_ops.reshape(\n ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)\n if a_free_dims_static is not None and b_free_dims_static is not None:\n product.set_shape(a_free_dims_static + b_free_dims_static)\n return product\n\n\n@tf_export(\"math.polyval\")\ndef polyval(coeffs, x, name=None):\n r\"\"\"Computes the elementwise value of a polynomial.\n\n If `x` is a tensor and `coeffs` is a list n + 1 tensors,\n this function returns the value of the n-th order polynomial\n\n p(x) = coeffs[n-1] + coeffs[n-2] * x + ... 
+ coeffs[0] * x**(n-1)\n\n evaluated using Horner's method, i.e.\n\n p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] +\n x * coeffs[0]))\n \n Usage Example:\n \n >>> coefficients = [1.0, 2.5, -4.2]\n >>> x = 5.0\n >>> y = tf.math.polyval(coefficients, x)\n >>> y\n <tf.Tensor: shape=(), dtype=float32, numpy=33.3>\n\n Usage Example:\n\n >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)\n <tf.Tensor: shape=(), dtype=int32, numpy=21>\n\n `tf.math.polyval` can also be used in polynomial regression. Taking\n advantage of this function can facilitate writing a polynomial equation\n as compared to explicitly writing it out, especially for higher degree\n polynomials.\n\n >>> x = tf.constant(3)\n >>> theta1 = tf.Variable(2)\n >>> theta2 = tf.Variable(1)\n >>> theta3 = tf.Variable(0)\n >>> tf.math.polyval([theta1, theta2, theta3], x)\n <tf.Tensor: shape=(), dtype=int32, numpy=21>\n\n Args:\n coeffs: A list of `Tensor` representing the coefficients of the polynomial.\n x: A `Tensor` representing the variable of the polynomial.\n name: A name for the operation (optional).\n\n Returns:\n A `tensor` of the shape as the expression p(x) with usual broadcasting\n rules for element-wise addition and multiplication applied.\n\n @compatibility(numpy)\n Equivalent to numpy.polyval.\n @end_compatibility\n \"\"\"\n if not isinstance(coeffs, list):\n raise ValueError(\"Argument coeffs must be list type \"\n \"found {}.\".format(type(coeffs)))\n\n with ops.name_scope(name, \"polyval\", nest.flatten(coeffs) + [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if len(coeffs) < 1:\n return array_ops.zeros_like(x, name=name)\n coeffs = [\n ops.convert_to_tensor(coeff, name=(\"coeff_%d\" % index))\n for index, coeff in enumerate(coeffs)\n ]\n p = coeffs[0]\n for c in coeffs[1:]:\n p = c + p * x\n return p\n\n\n@tf_export(\"math.reciprocal_no_nan\")\ndef reciprocal_no_nan(x, name=None):\n \"\"\"Performs a safe reciprocal operation, element wise.\n\n If a particular element is zero, the reciprocal for that element is\n also set to zero.\n\n For example:\n ```python\n x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)\n tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ]\n ```\n\n Args:\n x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or\n `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of same shape and type as `x`.\n\n Raises:\n TypeError: x must be of a valid dtype.\n\n \"\"\"\n\n with ops.name_scope(name, \"reciprocal_no_nan\", [x]) as scope:\n x = ops.convert_to_tensor(x, name=\"x\")\n one = constant_op.constant(1, dtype=x.dtype.base_dtype, name=\"one\")\n return gen_math_ops.div_no_nan(one, x, name=scope)\n\n\n@tf_export(\"math.xlog1py\")\[email protected]_dispatch_support\ndef xlog1py(x, y, name=None):\n r\"\"\"Compute x * log1p(y).\n\n Given `x` and `y`, compute `x * log1p(y)`. 
This function safely returns\n zero when `x = 0`, no matter what the value of `y` is.\n\n Example:\n\n >>> tf.math.xlog1py(0., 1.)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.>\n >>> tf.math.xlog1py(1., 1.)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>\n >>> tf.math.xlog1py(2., 2.)\n <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>\n >>> tf.math.xlog1py(0., -1.)\n <tf.Tensor: shape=(), dtype=float32, numpy=0.>\n\n Args:\n x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,\n `complex64`, `complex128`\n y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,\n `complex64`, `complex128`\n name: A name for the operation (optional).\n\n Returns:\n `x * log1p(y)`.\n\n @compatibility(scipy)\n Equivalent to scipy.special.xlog1py\n @end_compatibility\n \"\"\"\n with ops.name_scope(name, \"xlog1py\", [x]):\n return gen_math_ops.xlog1py(x, y)\n\n\n@tf_export(\"math.erfinv\")\[email protected]_dispatch_support\ndef erfinv(x, name=None):\n \"\"\"Compute inverse error function.\n\n Given `x`, compute the inverse error function of `x`. This function\n is the inverse of `tf.math.erf`.\n\n Args:\n x: `Tensor` with type `float` or `double`.\n name: A name for the operation (optional).\n Returns:\n Inverse error function of `x`.\n \"\"\"\n with ops.name_scope(name, \"erfinv\", [x]):\n return gen_math_ops.erfinv(x)\n\n\n@tf_export(\"math.ndtri\")\[email protected]_dispatch_support\ndef ndtri(x, name=None):\n \"\"\"Compute quantile of Standard Normal.\n\n Args:\n x: `Tensor` with type `float` or `double`.\n name: A name for the operation (optional).\n Returns:\n Inverse error function of `x`.\n \"\"\"\n with ops.name_scope(name, \"ndtri\", [x]):\n return gen_math_ops.ndtri(x)\n\n\n@tf_export(\"math.ceil\", v1=[\"math.ceil\", \"ceil\"])\[email protected]_endpoints(\"ceil\")\[email protected]_dispatch_support\ndef ceil(x, name=None):\n \"\"\"Return the ceiling of the input, element-wise.\n\n For example:\n\n >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n <tf.Tensor: shape=(7,), dtype=float32,\n numpy=array([-1., -1., -0., 1., 2., 2., 2.], dtype=float32)>\n\n Args:\n x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,\n `float32`, `float64`. `int32`\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor`. 
Has the same type as `x`.\n\n @compatibility(numpy)\n Equivalent to np.ceil\n @end_compatibility\n \"\"\"\n return gen_math_ops.ceil(x, name)\n\n\n@tf_export(\"math.sqrt\", \"sqrt\")\[email protected]_dispatch_support\ndef sqrt(x, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Computes element-wise square root of the input tensor.\n\n Note: This operation does not support integer types.\n\n >>> x = tf.constant([[4.0], [16.0]])\n >>> tf.sqrt(x)\n <tf.Tensor: shape=(2, 1), dtype=float32, numpy=\n array([[2.],\n [4.]], dtype=float32)>\n >>> y = tf.constant([[-4.0], [16.0]])\n >>> tf.sqrt(y)\n <tf.Tensor: shape=(2, 1), dtype=float32, numpy=\n array([[nan],\n [ 4.]], dtype=float32)>\n >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)\n >>> tf.sqrt(z)\n <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=\n array([[0.0+1.j],\n [4.0+0.j]])>\n\n Note: In order to support complex complex, please provide an input tensor\n of `complex64` or `complex128`.\n\n Args:\n x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,\n `complex64`, `complex128`\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor` of same size, type and sparsity as `x`.\n \"\"\"\n return gen_math_ops.sqrt(x, name)\n\n\n# pylint: disable=g-docstring-has-escape\n@tf_export(\"math.exp\", \"exp\")\[email protected]_dispatch_support\ndef exp(x, name=None):\n r\"\"\"Computes exponential of x element-wise. \\\\(y = e^x\\\\).\n\n This function computes the exponential of the input tensor element-wise.\n i.e. `math.exp(x)` or \\\\(e^x\\\\), where `x` is the input tensor.\n \\\\(e\\\\) denotes Euler's number and is approximately equal to 2.718281.\n Output is positive for any real input.\n\n >>> x = tf.constant(2.0)\n >>> tf.math.exp(x)\n <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>\n\n >>> x = tf.constant([2.0, 8.0])\n >>> tf.math.exp(x)\n <tf.Tensor: shape=(2,), dtype=float32,\n numpy=array([ 7.389056, 2980.958 ], dtype=float32)>\n\n For complex numbers, the exponential value is calculated as\n \\\\(e^{x+iy}={e^x}{e^{iy}}={e^x}{\\\\cos(y)+i\\\\sin(y)}\\\\)\n\n For `1+1j` the value would be computed as:\n \\\\(e^1{\\\\cos(1)+i\\\\sin(1)} = 2.7182817 \\\\times (0.5403023+0.84147096j)\\\\)\n\n >>> x = tf.constant(1 + 1j)\n >>> tf.math.exp(x)\n <tf.Tensor: shape=(), dtype=complex128,\n numpy=(1.4686939399158851+2.2873552871788423j)>\n\n Args:\n x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,\n `float32`, `float64`, `complex64`, `complex128`.\n name: A name for the operation (optional).\n\n Returns:\n A `tf.Tensor`. Has the same type as `x`.\n\n @compatibility(numpy)\n Equivalent to np.exp\n @end_compatibility\n \"\"\"\n return gen_math_ops.exp(x, name)\n\n\n# pylint: enable=g-docstring-has-escape\n\n\n@tf_export(\"math.sobol_sample\")\ndef sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):\n \"\"\"Generates points from the Sobol sequence.\n\n Creates a Sobol sequence with `num_results` samples. Each sample has dimension\n `dim`. Skips the first `skip` samples.\n\n Args:\n dim: Positive scalar `Tensor` representing each sample's dimension.\n num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol\n points to return in the output.\n skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of\n initial points of the Sobol sequence to skip. Default value is 0.\n dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or\n `tf.float64`. 
Defaults to `tf.float32`.\n name: (Optional) Python `str` name prefixed to ops created by this function.\n\n Returns:\n `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].\n \"\"\"\n with ops.name_scope(name, \"sobol\", [dim, num_results, skip]):\n return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)\n"
] |
[
[
"tensorflow.python.ops.gen_math_ops.real",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.ops.gen_math_ops.sign",
"tensorflow.python.ops.gen_math_ops.sparse_segment_mean",
"tensorflow.python.framework.graph_util.tensor_shape_from_node_def_name",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_math_ops.floor_div",
"tensorflow.python.ops.array_ops.matrix_diag_part",
"tensorflow.python.ops.gen_math_ops.equal",
"tensorflow.python.ops.gen_math_ops.not_equal",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.gen_math_ops.sparse_segment_mean_with_num_segments",
"tensorflow.python.ops.gen_math_ops.minimum",
"tensorflow.python.ops.gen_math_ops.bucketize",
"tensorflow.python.ops.gen_math_ops.ceil",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.ops.gen_math_ops.unsorted_segment_sum",
"tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_div",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.gen_math_ops.maximum",
"tensorflow.python.ops.gen_math_ops.arg_max.__doc__.replace",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.gen_nn_ops.softplus",
"tensorflow.python.ops.gen_math_ops.arg_min",
"tensorflow.python.ops.gen_math_ops.bincount",
"tensorflow.python.ops.gen_math_ops.cumulative_logsumexp",
"tensorflow.python.ops.gen_math_ops.cumsum",
"tensorflow.python.ops.gen_math_ops.ndtri",
"tensorflow.python.ops.gen_math_ops.square",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.ops.gen_math_ops._pow",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.gen_math_ops.round",
"tensorflow.python.ops.array_ops.shape_internal",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.gen_math_ops.conj",
"tensorflow.python.ops.gen_math_ops._sum",
"tensorflow.python.ops.gen_math_ops.add_n",
"tensorflow.python.ops.gen_math_ops._abs",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sum_with_num_segments",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.gen_math_ops.mat_mul",
"tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_mul",
"tensorflow.python.ops.gen_math_ops.real_div",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.gen_math_ops._complex",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gen_math_ops.add_v2",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n",
"tensorflow.python.ops.gen_math_ops._max",
"tensorflow.python.ops.gen_math_ops.mul_no_nan",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.gen_math_ops.sigmoid",
"tensorflow.python.ops.gen_math_ops.xlog1py",
"tensorflow.python.ops.gen_math_ops.complex_abs",
"tensorflow.python.ops.gen_math_ops.erfinv",
"tensorflow.python.ops.gen_math_ops.angle",
"tensorflow.python.ops.gen_math_ops.cast",
"tensorflow.python.framework.ops.RegisterStatistics",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_math_ops.is_finite",
"tensorflow.python.ops.gen_array_ops.shape",
"tensorflow.python.util.deprecation.deprecated_argument_lookup",
"tensorflow.python.ops.gen_math_ops.logical_or",
"tensorflow.python.ops.gen_math_ops.arg_min.__doc__.replace",
"tensorflow.python.ops.gen_math_ops.imag",
"tensorflow.python.ops.gen_math_ops.mul",
"tensorflow.python.ops.gen_math_ops.sobol_sample",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.gen_math_ops.sub",
"tensorflow.python.ops.gen_math_ops.exp",
"tensorflow.python.ops.gen_math_ops.batch_mat_mul_v2",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.framework.ops.OpStats",
"numpy.arange",
"tensorflow.python.ops.gen_math_ops.arg_max",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.gen_math_ops.logical_and",
"tensorflow.python.ops.gen_math_ops.div_no_nan",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.gen_array_ops.zeros_like",
"tensorflow.python.ops.gen_math_ops.sub.__doc__.replace",
"tensorflow.python.ops.gen_math_ops.cumprod",
"tensorflow.python.ops.gen_math_ops.add",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sum",
"numpy.prod",
"tensorflow.python.ops.gen_math_ops.sqrt",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.gen_math_ops.sparse_segment_sqrt_n_with_num_segments",
"tensorflow.python.framework.constant_op.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
}
] |
quoniammm/mine-pytorch
|
[
"09d2e6a4a11b472187012be035600a0c84f4cfa5",
"09d2e6a4a11b472187012be035600a0c84f4cfa5"
] |
[
"torch_basic/nmt(seq2seq&attention).py",
"awsome_implements/attention_is_all_you_need/m-tensorflow/train.py"
] |
[
"#%%\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch import optim\n\nimport re\nimport time\nimport jieba\nimport random\nimport math\nimport string\n\n#%%\n# # 1.数据处理部分\nUSE_CUDA = False\npath = 'data/cmn-eng/'\nSOS_token = 0\nEOS_token = 1\nMAX_LENGTH = 10\n\ndef isChinese(sen):\n zhPattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n return zhPattern.search(sen)\n# 简化句子 便于处理\ndef normalize_string(s):\n s = re.sub(r\"[!!?.()()\"\"?。“”,,']\", r\" \", s)\n return s\n\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {0: \"SOS\", 1: \"EOS\"}\n self.n_words = 2 # Count SOS and EOS\n \n def index_words(self, sentence):\n sen_list = []\n if isChinese(sentence):\n sen_list = jieba.cut(sentence)\n else:\n sen_list = sentence.split(' ')\n \n for word in sen_list:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef read_sen(path, lang1, lang2, reverse=False):\n with open(path + '{}-{}.txt'.format(lang1, lang2)) as f:\n lines = f.readlines()\n pairs = []\n for line in lines:\n line = line.strip()\n if reverse:\n line = line.split('\\t')\n line.reverse()\n line = \"\\t\".join(line)\n \n pair = [normalize_string(sen) for sen in line.split('\\t')]\n pairs.append(pair)\n \n if reverse:\n input_lang = Lang(lang2)\n output_lang = Lang(lang1) \n else:\n input_lang = Lang(lang1) \n output_lang = Lang(lang2) \n\n print(\"input_lang is {}\".format(input_lang.n_words))\n \n return input_lang, output_lang, pairs\n\ndef data_preprocess(path, lang1, lang2, reverse=False):\n print(\"Read lines......\")\n input_lang, output_lang, pairs = read_sen(path, lang1, lang2, reverse)\n print(\"Trimmed to {} sentence pairs\".format(len(pairs)))\n \n print(\"Indexing words......\")\n for pair in pairs:\n input_lang.index_words(pair[0])\n output_lang.index_words(pair[1])\n \n return input_lang, output_lang, pairs\n \ninput_lang, output_lang, pairs = data_preprocess(path, 'eng', 'cmn')\nfor i in range(5):\n print(random.choice(pairs))\n\n#%%\n# # 2.pytorch 搭建模型\n# ## 2.1.数据部分\ndef indexes_from_sentence(lang, sen):\n if isChinese(sen):\n sen = jieba.cut('')\n else:\n sen = sen.split(' ')\n \n return [lang.word2index[word] for word in sen]\n\ndef variable_from_sentence(lang, sen):\n ixs = indexes_from_sentence(lang, sen)\n ixs.append(EOS_token)\n var = Variable(torch.LongTensor(ixs).view(-1, 1))\n if USE_CUDA: \n var = var.cuda()\n \n return var\n \n\ndef variables_from_pair(pair):\n input_variable = variable_from_sentence(input_lang, pair[0])\n output_variable = variable_from_sentence(output_lang, pair[1])\n\n return (input_variable, output_variable)\n\n\n#%%\n# ## 2.2.模型搭建\n# 编码层\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, n_layers=1):\n super(EncoderRNN, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers, batch_first=True)\n\n def forward(self, word_inputs, hidden):\n seq_len = len(word_inputs)\n embedded = self.embedding(word_inputs).view(1, seq_len, -1)\n output, hidden = self.gru(embedded, hidden)\n return output, hidden\n\n def init_hidden(self):\n hidden = 
Variable(torch.zeros(self.n_layers, 1, self.hidden_size))\n if USE_CUDA: hidden = hidden.cuda()\n return hidden\n# Attn 层\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size):\n super(Attn, self).__init__()\n\n self.method = method\n self.hidden_size = hidden_size\n\n if self.method == 'general':\n self.attn = nn.Linear(self.hidden_size, self.hidden_size)\n\n # else self.method = 'concat':\n # self.attn = \n # self.other = \n\n def forward(self, hidden, encoder_outputs):\n seq_len = encoder_outputs.size()[1]\n\n attn_energies = Variable(torch.zeros(seq_len))\n if USE_CUDA:\n attn_energies.cuda()\n\n for i in range(seq_len):\n attn_energies[i] = self.score(hidden, encoder_outputs[0][i])\n\n return F.softmax(attn_energies).unsqueeze(0).unsqueeze(0)\n \n def score(self, hidden, encoder_output):\n if self.method == 'general':\n energy = self.attn(encoder_output)\n # 矩阵维度有些不理解\n energy = torch.dot(hidden.view(-1), energy.view(-1))\n return energy\n# 改进的解码层\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout_p=.1):\n super(AttnDecoderRNN, self).__init__()\n # 定义参数\n self.attn_model = attn_model\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n self.dropout_p = dropout_p\n # 定义层\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size * 2, hidden_size, n_layers, dropout=dropout_p, batch_first=True)\n # 为什么乘 2\n self.out = nn.Linear(hidden_size * 2, output_size)\n\n if attn_model != 'none':\n self.attn = Attn(attn_model, hidden_size)\n\n def forward(self, word_input, last_context, last_hidden, encoder_outputs):\n word_embedded = self.embedding(word_input).view(1, 1, -1)\n\n rnn_input = torch.cat((word_embedded, last_context.unsqueeze(0)), 2)\n rnn_output, hidden = self.gru(rnn_input, last_hidden)\n\n attn_weights = self.attn(rnn_output.squeeze(0), encoder_outputs)\n context = attn_weights.bmm(encoder_outputs)\n # print(\"context size is {}\".format(context.size()))\n rnn_output = rnn_output.squeeze(1)\n context = context.squeeze(0)\n # print(\"context size is {}\".format(context.size())) \n # 这块还有点不理解\n output = F.log_softmax(self.out(torch.cat((rnn_output, context), 1)))\n\n return output, context, hidden, attn_weights\n\n#%%\n# 对模型进行测试\nencoder_test = EncoderRNN(10, 10, 2)\ndecoder_test = AttnDecoderRNN('general', 10, 10, 2)\n\nprint(encoder_test)\nprint(decoder_test)\n\nencoder_hidden = encoder_test.init_hidden()\nword_input = Variable(torch.LongTensor([1, 9, 3, 4]))\n\nif USE_CUDA:\n encoder_test.cuda()\n word_input.cuda()\n\nencoder_outputs, encoder_hidden = encoder_test(word_input, encoder_hidden)\n\nword_inputs = Variable(torch.LongTensor([1, 2, 6, 6, 8]))\n# 不是很理解\ndecoder_attns = torch.zeros(1, 5, 4)\ndecoder_hidden = encoder_hidden \ndecoder_context = Variable(torch.zeros(1, decoder_test.hidden_size))\n\nif USE_CUDA:\n decoder_test.cuda()\n word_inputs = word_inputs.cuda()\n decoder_context = decoder_context.cuda()\n\nfor i in range(5):\n decoder_output, decoder_context, deocder_hidden, decoder_attn = decoder_test(word_inputs[i], decoder_context, decoder_hidden, encoder_outputs)\n decoder_attns[0, i] = decoder_attn.squeeze(0).cpu().data\n print(decoder_attns)\n\n \n#%%\n# 训练\nteacher_forcing_ratio = .5\nclip = .5\n\nattn_model = 'general'\nhidden_size = 500\nn_layers = 2\ndropout_p = .5\n\nencoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers)\ndecoder = AttnDecoderRNN(attn_model, hidden_size, output_lang.n_words, n_layers, 
dropout_p=dropout_p)\n\nif USE_CUDA:\n encdoer.cuda()\n decoder.cuda()\n\nlearning_rate = .0001\nencoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\ndecoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)\n\n#评判标准\ncriterion = nn.NLLLoss()\n\nn_epochs = 50000\nplot_every = 200\nprint_every = 1000\n\nstart = time.time()\nplot_losses = []\nprint_loss_total = 0\nplot_loss_total = 0\n\ndef as_minutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\ndef time_since(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (as_minutes(s), as_minutes(rs))\n\n\ndef train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion):\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n loss = 0\n\n input_length = input_variable.size()[0]\n target_length = target_variable.size()[0]\n\n encoder_hidden = encoder.init_hidden()\n encoder_outputs, encoder_hidden = encoder(input_variable, encoder_hidden)\n \n decoder_input = Variable(torch.LongTensor([[SOS_token]]))\n decoder_context = Variable(torch.zeros(1, decoder.hidden_size))\n decoder_hidden = encoder_hidden\n\n if USE_CUDA:\n decoder_input = decoder_input.cuda()\n decoder_context = decoder_input.cuda()\n\n use_teacher_forcing = random.random() < teacher_forcing_ratio\n\n if use_teacher_forcing:\n \n # Teacher forcing: Use the ground-truth target as the next input\n for di in range(target_length):\n decoder_output, decoder_context, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n loss += criterion(decoder_output, target_variable[di])\n decoder_input = target_variable[di] # Next target is next input\n\n else:\n # Without teacher forcing: use network's own prediction as the next input\n for di in range(target_length):\n decoder_output, decoder_context, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n loss += criterion(decoder_output, target_variable[di])\n \n # Get most likely word index (highest value) from output\n topv, topi = decoder_output.data.topk(1)\n ni = topi[0][0]\n \n decoder_input = Variable(torch.LongTensor([[ni]])) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n\n # Stop at end of sentence (not necessary when using known targets)\n if ni == EOS_token: break\n \n loss.backward()\n torch.nn.utils.clip_grad_norm(encoder.parameters(), clip)\n torch.nn.utils.clip_grad_norm(decoder.parameters(), clip) \n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.data[0]/target_length\n\n\n# Begin!\nfor epoch in range(1, n_epochs + 1):\n \n # Get training data for this cycle\n training_pair = variables_from_pair(random.choice(pairs))\n input_variable = training_pair[0]\n target_variable = training_pair[1]\n\n # Run the train function\n loss = train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)\n\n # Keep track of loss\n print_loss_total += loss\n plot_loss_total += loss\n\n if epoch == 0: continue\n\n if epoch % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print_summary = '%s (%d %d%%) %.4f' % (time_since(start, epoch / n_epochs), epoch, epoch / n_epochs * 100, print_loss_avg)\n print(print_summary)\n\n if epoch % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n 
plot_loss_total = 0",
"import tensorflow as tf\n\nfrom hyperparams import Hyperparams as hp\nfrom data_load import get_batch_data, load_de_vocab, load_en_vocab\nfrom modules import *\nimport os, codecs\nfrom tqdm import tqdm\n\nclass Graph():\n def __init__(self, is_training=True):\n pass\n\nif __name__ == '__main__': \n # Load vocabulary \n de2idx, idx2de = load_de_vocab()\n en2idx, idx2en = load_en_vocab()\n \n # Construct graph\n g = Graph(\"train\"); print(\"Graph loaded\")\n \n # Start session\n sv = tf.train.Supervisor(graph=g.graph, \n logdir=hp.logdir,\n save_model_secs=0)\n with sv.managed_session() as sess:\n for epoch in range(1, hp.num_epochs+1): \n if sv.should_stop(): break\n for step in tqdm(range(g.num_batch), total=g.num_batch, ncols=70, leave=False, unit='b'):\n sess.run(g.train_op)\n \n gs = sess.run(g.global_step) \n sv.saver.save(sess, hp.logdir + '/model_epoch_%02d_gs_%d' % (epoch, gs))\n \n print(\"Done\")\n "
] |
[
[
"torch.LongTensor",
"torch.nn.NLLLoss",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.cat",
"torch.nn.GRU",
"torch.nn.Embedding",
"torch.nn.Linear"
],
[
"tensorflow.train.Supervisor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
tschoi6712/pythonDataAnalysis2nd
|
[
"63e366d4dee52f7e4df6cf4d988a85d6de5b00e4",
"63e366d4dee52f7e4df6cf4d988a85d6de5b00e4",
"63e366d4dee52f7e4df6cf4d988a85d6de5b00e4",
"63e366d4dee52f7e4df6cf4d988a85d6de5b00e4"
] |
[
"Chapter01/ch1-3.gettingStarted.py",
"Chapter09/chh9-2.nltk.py",
"Chapter11/f2py/fort_demo.py",
"Chapter09/chh9-1.nltk.py"
] |
[
"\"\"\"\nMatplotlib plots\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.datasets import load_boston\nfrom matplotlib import pyplot as plt\n\n\niris = load_iris()\nprint(iris.DESCR)\ndata = iris.data\nplt.plot(data[:, 0], data[:, 1], \".\")\nplt.show()\n\n\nboston = load_boston()\nprint(boston.DESCR)\ndata = boston.data\nplt.plot(data[:, 2], data[:, 4], \"+\")\nplt.show()\n\n\n",
"\"\"\"\n문자 데이터와 소셜미디어 분석하기(2)\n\"\"\"\n\n\n## 5. Sentiment Analysis(감성분석) - 영화 리뷰 분류(긍정적과 부정적)\nimport random\nfrom nltk.corpus import movie_reviews\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk import NaiveBayesClassifier\nfrom nltk.classify import accuracy\nimport string\n\n\n# 영화 리뷰 문서 라벨링\nlabeled_docs = [(list(movie_reviews.words(fid)), cat)\n for cat in movie_reviews.categories()\n for fid in movie_reviews.fileids(cat)]\nrandom.seed(42)\nrandom.shuffle(labeled_docs)\n\nreview_words = movie_reviews.words()\nprint(\"# Review Words\", len(review_words))\n\nsw = set(stopwords.words('english'))\npunctuation = set(string.punctuation)\n\ndef isStopWord(word):\n return word in sw or word in punctuation\n\nfiltered = [w.lower() for w in review_words if not isStopWord(w.lower())]\nprint(\"# After filter\", len(filtered))\n\n# 가장 빈도수가 높은 상위 5%의 단어\nwords = FreqDist(filtered)\nN = int(.05 * len(words.keys()))\nword_features = list(words.keys())[:N]\n\n# 단어 갯수를 측정 기준으로 삼는 함수\ndef doc_features(doc):\n doc_words = FreqDist(w for w in doc if not isStopWord(w))\n features = {}\n for word in word_features:\n features['count (%s)' % word] = (doc_words.get(word, 0))\n return features\n\nfeaturesets = [(doc_features(d), c) for (d,c) in labeled_docs]\ntrain_set, test_set = featuresets[200:], featuresets[:200]\nclassifier = NaiveBayesClassifier.train(train_set)\nprint(\"Accuracy\", accuracy(classifier, test_set))\n\nprint(classifier.show_most_informative_features())\n\n\n\n## 6. Creating Word Clouds(워드 클라우드 만들기)\nfrom nltk.corpus import movie_reviews\nfrom nltk.corpus import stopwords\nfrom nltk.corpus import names\nfrom nltk import FreqDist\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport string\n\nsw = set(stopwords.words('english'))\npunctuation = set(string.punctuation)\nall_names = set([name.lower() for name in names.words()])\n\ndef isStopWord(word):\n return (word in sw or word in punctuation) or not word.isalpha() or word in all_names\n\nreview_words = movie_reviews.words()\nfiltered = [w.lower() for w in review_words if not isStopWord(w.lower())]\n\nwords = FreqDist(filtered)\n\ntexts = []\n\nfor fid in movie_reviews.fileids():\n texts.append(\" \".join([w.lower() for w in movie_reviews.words(fid)\n if not isStopWord(w.lower()) and words[w.lower()] > 1]))\n\nvectorizer = TfidfVectorizer(stop_words='english')\nmatrix = vectorizer.fit_transform(texts)\nsums = np.array(matrix.sum(axis=0)).ravel()\n\nranks = []\n\nfor word, val in zip(vectorizer.get_feature_names(), sums):\n ranks.append((word, val))\n\ndf = pd.DataFrame(ranks, columns=[\"term\", \"tfidf\"])\ndf = df.sort_values(['tfidf'])\nprint(df.head())\n\nN = int(.01 * len(df))\ndf = df.tail(N)\n\nfor term, tfidf in zip(df[\"term\"].values, df[\"tfidf\"].values):\n print(term, \":\", tfidf)\n\n\n\n## 7. Social Network Analysis(소셜미디어 분석)\n#pip install networkx\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nprint([s for s in dir(nx) if s.endswith('graph')])\n\nG = nx.davis_southern_women_graph()\nplt.hist(list(dict(nx.degree(G)).values()))\nplt.show()\n\nplt.figure(figsize=(8,8))\npos = nx.spring_layout(G)\nnx.draw(G, node_size=10)\nnx.draw_networkx_labels(G, pos)\nplt.show()\n\n\n\n",
"import fort_sum\nimport numpy as np\n\nrain = np.load('ch10.rain.npy')\nfort_sum.sumarray(rain, len(rain))\nrain = .1 * rain\nrain[rain < 0] = .025\nprint(\"Numpy\", rain.sum())\n",
"\"\"\"\n문자 데이터와 소셜미디어 분석하기(1)\n\"\"\"\n# pip install nltk scikit-learn\n# >python\n# >>>import nltk\n# >>>nltk.download()\n\n\n## 1. Filtering out stopwords, names, and numbers(불용어, 고유명사, 숫자 걸러내기)\nimport nltk\n\n# 영어 말뭉치를 불러오기\nsw = set(nltk.corpus.stopwords.words('english'))\nprint(\"Stop words:\", list(sw)[:7])\n\n# 구텐베르크 말뭉치 불러오기\ngb = nltk.corpus.gutenberg\nprint(\"Gutenberg files:\\n\", gb.fileids()[-5:])\n\n# 파일에서 몇 개의 문장을 추출\ntext_sent = gb.sents(\"milton-paradise.txt\")[:2]\nprint(\"Unfiltered:\", text_sent)\n\n# 추출된 문장에서 불용어를 걸러내기\nfor sent in text_sent:\n filtered = [w for w in sent if w.lower() not in sw]\n print(\"Filtered:\\n\", filtered)\n tagged = nltk.pos_tag(filtered)\n print(\"Tagged:\\n\", tagged)\n\n words = []\n for word in tagged:\n if word[1] != 'NNP' and word[1] != 'CD':\n words.append(word[0])\n\n print(\"Words:\\n\", words)\n\n\n\n## 2. Bag of words model(단어 주머니 모델)\n\nimport sklearn as sk\n\n# 말뭉치에서 두개의 문서를 불러오기\nhamlet = gb.raw(\"shakespeare-hamlet.txt\")\nmacbeth = gb.raw(\"shakespeare-macbeth.txt\")\n\n# 불용어를 제외하고 피처벡터를 생성\ncv = sk.feature_extraction.text.CountVectorizer(stop_words='english')\nprint(\"Feature vector:\\n\", cv.fit_transform([hamlet, macbeth]).toarray())\n\n# 두 문서 사이에서 피쳐(유일한 단어)를 출력\nprint(\"Features:\\n\", cv.get_feature_names()[:5])\n\n\n\n## 3. Analyzing word frequencies(단어 빈도수 분석)\nimport nltk\nimport string\n\ngb = nltk.corpus.gutenberg\nwords = gb.words(\"shakespeare-caesar.txt\")\n\nsw = set(nltk.corpus.stopwords.words('english'))\npunctuation = set(string.punctuation)\nfiltered = [w.lower() for w in words if w.lower() not in sw and w.lower() not in punctuation]\n\nfd = nltk.FreqDist(filtered)\nprint(\"Words\", list(fd.keys())[:5])\nprint(\"Counts\", list(fd.values())[:5])\nprint(\"Max\", fd.max())\nprint(\"Count\", fd['d'])\n\nfd = nltk.FreqDist(nltk.bigrams(filtered))\nprint(\"Bigrams\", list(fd.keys())[:5])\nprint(\"Counts\", list(fd.values())[:5])\nprint(\"Bigram Max\", fd.max())\nprint(\"Bigram count\", fd[('let', 'vs')])\n\n\n\n## 4. Naive Bayesian(나이브 베이즈 분류기)\nimport nltk\nimport string\nimport random\n\nsw = set(nltk.corpus.stopwords.words('english'))\npunctuation = set(string.punctuation)\n\n\ndef word_features(word):\n return {'len': len(word)}\n\n\ndef isStopword(word):\n return word in sw or word in punctuation\n\n\ngb = nltk.corpus.gutenberg\nwords = gb.words(\"shakespeare-caesar.txt\")\n\n# 단어에 라벨 붙이기\nlabeled_words = ([(word.lower(), isStopword(word.lower())) for word in words])\nrandom.seed(42)\nrandom.shuffle(labeled_words)\nprint(labeled_words[:5])\n\n# 각 단어별 길이를 측정\nfeaturesets = [(word_features(n), word) for (n, word) in labeled_words]\n\n# 데이터를 학습시키기\ncutoff = int(.9 * len(featuresets))\ntrain_set, test_set = featuresets[:cutoff], featuresets[cutoff:]\nclassifier = nltk.NaiveBayesClassifier.train(train_set)\n\n# 데이터의 단어가 분류되었는지 학습\nprint(\"'behold' class\", classifier.classify(word_features('behold')))\nprint(\"'the' class\", classifier.classify(word_features('the')))\n\n# 분류 정확도\nprint(\"Accuracy\", nltk.classify.accuracy(classifier, test_set))\nprint(classifier.show_most_informative_features(5))\n\n\n\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"sklearn.datasets.load_iris",
"sklearn.datasets.load_boston"
],
[
"matplotlib.pyplot.show",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.DataFrame",
"matplotlib.pyplot.figure"
],
[
"numpy.load"
],
[
"sklearn.feature_extraction.text.CountVectorizer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TonySoloProjects/network_log_visualization
|
[
"bcf1b648009abcd15a3d71a7057610a583ee0dde"
] |
[
"network_log_plotter_v02.py"
] |
[
"\"\"\"\nNetworkLogPlotter visualizes NetworkLogReader objects to find relationships between\nserver send and receive errors on a network.\n\nCreated by: Tony Held [email protected]\nCreated on: 2020/09/10\nCopyright © 2020 Tony Held. All rights reserved.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport networkx as nx\nimport ipywidgets as widgets\n\nimport pprint\npp = pprint.PrettyPrinter(indent=4) # pretty printer\n\nfrom network_log_reader_v02 import NetworkLogReader\n\n\nclass NetworkLogPlotter:\n \"\"\"Class to plot/visualize NetworkLogReader objects.\n\n This can be converted into a module rather than a class if all methods stay static.\"\"\"\n\n @staticmethod\n def plot_cumulative_errors(sender_fails, receiver_fails):\n \"\"\"Visualize the network errors as cumulative distribution plot\n to gain intuition on the number of servers participating in failures.\n\n Parameters\n ----------\n sender_fails : array-like\n # of sending fails per server\n receiver_fails : array-like\n # of receiving fails per server\n\n Notes\n -------\n 1. It is assumed that the list is sorted in descending order.\n \"\"\"\n # Create cumulative sum of fails\n sender_fails_total = np.sum(sender_fails)\n sender_fails_cumulative = np.cumsum(sender_fails)\n sender_fails_percent = sender_fails_cumulative / sender_fails_total * 100\n\n receiver_fails_total = np.sum(receiver_fails)\n receiver_fails_cumulative = np.cumsum(receiver_fails)\n receiver_fails_percent = receiver_fails_cumulative / receiver_fails_total * 100\n\n # Plot the cumulative fail data via matplotlib\n fig1, ax1 = plt.subplots(1)\n ax1.plot(sender_fails_percent, '-', label=f\"Sender Fails\")\n ax1.plot(receiver_fails_percent, '-', label=f\"Receiver Fails\")\n ax1.set_xlabel('Number of Servers')\n ax1.set_ylabel('Percent of Failures')\n ax1.legend()\n ax1.set_title(f\"# of Server's Involved in {receiver_fails_total} Total Fails\")\n plt.show()\n\n @staticmethod\n def plot_network1(nx_graph, plot_data):\n \"\"\"Visualize network with built in networkx and matplotlib routines.\n\n This can run very slowly! Use customized plot routines (plot_network3) instead.\n\n Parameters\n ----------\n nx_graph :\n networkx.graph object\n plot_data : dict\n plot_data object created by NetworkLogReader\n \"\"\"\n plot_types = [nx.draw, nx.draw_networkx, nx.draw_kamada_kawai, nx.draw_spring]\n\n # plot using the networkx built-in drawing routines\n fig1, ax1 = plt.subplots(1)\n nx.draw(nx_graph, with_labels=False)\n\n x = plot_data['Node Coordinates'][0]\n y = plot_data['Node Coordinates'][1]\n\n # plot using matplotlib using coordinates from layout data\n fig2, ax2 = plt.subplots(1)\n # x = self.node_coordinates[:, 0]\n # y = self.node_coordinates[:, 1]\n ax2.plot(x, y, 'bx')\n\n plt.show()\n\n @staticmethod\n def plot_network3(nlr, plot_node, edge_type):\n \"\"\"Visualize NetworkLogReader object with customized plotly routines.\n\n Parameters\n ----------\n nlr : NetworkLogReader\n Plot based on plot_data attribute.\n plot_node: str\n The node of interest (the one selected in the figure).\n edge_type : ['Send', 'Receive', 'Send+Receive']\n Type of connection to analyse.\n\n Returns\n -------\n fig : plotly.graph_objs._figurewidget.FigureWidget\n Figure widget capable of responding to click events\n\n Notes\n -------\n \"\"\"\n nlp = NetworkLogPlotter\n\n # Step 1. 
Gather plot input based on nlr, node, edge_type\n # -------------------------------------------------------------\n # Lines that represent the edges between nodes\n shapes = nlr.plot_data[plot_node][edge_type]['shape_data']\n\n # Node locations\n x_coord = nlr.plot_data['Node Coordinates'][0]\n y_coord = nlr.plot_data['Node Coordinates'][1]\n\n # Node color and hover text\n node_text = nlr.plot_data[plot_node][edge_type]['node_text']\n node_color = nlr.plot_data[plot_node][edge_type]['node_color']\n\n # Step 2. Create trace and figure with edge trace in the layout\n # -------------------------------------------------------------\n node_trace = nlp.create_scatter(edge_type, node_color, node_text, x_coord, y_coord)\n fig = nlp.create_figure(plot_node, node_trace, shapes);\n fig.write_html(\"graphics/network_errors_v02.html\")\n return fig\n\n @staticmethod\n def create_scatter(edge_type, node_color, node_text, x_coord, y_coord):\n \"\"\"Create plotly scatterplot.\n\n Parameters\n ----------\n edge_type : ['Send', 'Receive', 'Send+Receive']\n Type of connection to analyse.\n node_color : list of numeric\n A value for each node is created with the same numeric type as the weight in the original\n error log file (likely integer) with length of self.plot_data['Node Names'].\n node_text : list of str\n Text to include as hover text in plotly routines that includes a caption for each node\n with a length of self.plot_data['Node Names'].\n x_coord : [float]\n x coordinates of nodes\n y_coord : [float]\n y coordinates of nodes\n\n Returns\n -------\n scatter : plotly.graph_objs._scatter.Scatter\n plotly scatter plot\n \"\"\"\n scatter = go.Scatter(\n x=x_coord, y=y_coord,\n mode='markers',\n hoverinfo='text',\n text=node_text,\n marker=dict(\n showscale=True,\n # colorscale options\n # 'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |\n # 'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |\n # 'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |\n colorscale='Reds',\n # reversescale=True,\n color=node_color,\n size=10,\n colorbar=dict(\n thickness=15,\n title=f'Num of Failures<br>{edge_type}',\n xanchor='left',\n titleside='right'\n ),\n line_width=2))\n return scatter\n\n @staticmethod\n def create_figure(plot_node, node_trace, my_shapes):\n \"\"\"Create a plotly figure based on node trace and edge shape\n\n Parameters\n ----------\n plot_node: str\n The node of interest (the one selected in the figure).\n node_trace : plotly.graph_objs._scatter.Scatter\n Scatter plot of node location\n my_shapes : [dict]\n Shape objects suitable for plotly layout inclusion.\n\n Returns\n -------\n fig : plotly.graph_objs._figurewidget.FigureWidget\n Figure widget capable of responding to click events\n \"\"\"\n fig = go.FigureWidget(data=[node_trace],\n layout=go.Layout(\n title=f'Interactive Graph of Network Failures<br>Selected Node: {plot_node}',\n titlefont_size=16,\n showlegend=False,\n hovermode='closest',\n margin=dict(b=20, l=5, r=5, t=40),\n annotations=[dict(\n text=\"<a href='https://www.youtube.com/watch?v=dQw4w9WgXcQ'> Click me for more info</a>\",\n showarrow=False,\n xref=\"paper\", yref=\"paper\",\n x=0.005, y=-0.002)],\n xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n shapes=my_shapes)\n )\n return fig\n\n @staticmethod\n def update_figure(fig, nlr, plot_node, edge_type):\n \"\"\"Update network figure based on change in plot_node and/or edge_type.\n\n Parameters\n ----------\n fig : 
plotly.graph_objs._figurewidget.FigureWidget\n Original figure created by create_figure(...)\n nlr : NetworkLogReader\n Plot based on plot_data attribute.\n plot_node: str\n The node of interest (the one selected in the figure).\n edge_type : ['Send', 'Receive', 'Send+Receive']\n Type of connection to analyse.\n \"\"\"\n\n # Lines that represent the edges between nodes\n my_shapes = nlr.plot_data[plot_node][edge_type]['shape_data']\n\n # Node color and hover text\n node_text = nlr.plot_data[plot_node][edge_type]['node_text']\n node_color = nlr.plot_data[plot_node][edge_type]['node_color']\n\n # update scatter trace\n scatter = fig.data[0]\n scatter.text = node_text\n scatter.marker.color = node_color\n scatter.marker.colorbar.title = f'Num of Failures<br>{edge_type}'\n\n # update figure layout\n fig.layout.title = f'Interactive Graph of Network Failures<br>Selected Node: {plot_node}'\n fig.layout.shapes = my_shapes\n\n @staticmethod\n def make_widgets(nlr, fig):\n \"\"\"\n Create widgets for interactive figure created with plot_network3.\n\n Parameters\n ----------\n nlr : NetworkLogReader\n reader associated with error log file\n\n fig : plotly.graph_objs._figurewidget.FigureWidget\n Figure to receive interactive widgets\n\n Returns\n --------\n \"\"\"\n # Create interactive widgets/callback to create interactive network figure\n\n edge_types = ['Send', 'Receive', 'Send+Receive']\n node_names = list(nlr.unique_nodes)\n num_nodes = len(node_names)\n\n # Figure widgets, create them with a dummy state and then\n # change the value to invoke the event handler before first use\n slider = widgets.IntSlider(\n min=0,\n max=num_nodes - 1,\n value=1,\n description='Node #: ')\n\n slider_label = widgets.Label(value=\"Node Name: \")\n\n node_hb = widgets.HBox([slider, slider_label])\n\n drop = widgets.Dropdown(\n options=edge_types,\n value=edge_types[1],\n description='Errors: ',\n disabled=False, )\n\n drop_label = widgets.Label(value=\"Error Type: \")\n\n drop_hb = widgets.HBox([drop, drop_label])\n\n # widget handlers\n def on_slider_value_change(change):\n new_node_number = change['new']\n new_node_name = node_names[new_node_number]\n slider_label.value = f'Node Name: {new_node_name}'\n NetworkLogPlotter.update_figure(fig, nlr, plot_node=new_node_name, edge_type=drop.value)\n\n def on_drop_value_change(change):\n new_edge_type = change['new']\n drop_label.value = f'Error Type: {new_edge_type}'\n node_name = node_names[slider.value]\n NetworkLogPlotter.update_figure(fig, nlr, plot_node=node_name, edge_type=new_edge_type)\n\n slider.observe(on_slider_value_change, names='value')\n drop.observe(on_drop_value_change, names='value')\n\n # Initialize (changing from the instantiated value will invoke the handlers)\n slider.value = 0\n drop.value = edge_types[2]\n\n # Callback for clicking on the scatterplot\n # Changes the slider widget which will update the figure and synch with the slider\n def update_point(trace, points, selector):\n node_id = points.point_inds[0]\n slider.value = node_id\n\n scatter = fig.data[0]\n scatter.on_click(update_point)\n\n return node_hb, drop\n"
] |
[
[
"matplotlib.pyplot.subplots",
"numpy.cumsum",
"matplotlib.pyplot.show",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
conradbm/streamml2
|
[
"f2c5cec0c397b348b5faf5ea965ec8dc21518d5f"
] |
[
"streamml2/tests/baseline/test_model_selection_flow_classification.py"
] |
[
"\"\"\"\n#\n#\n#\n#\n# Model Selection Example (Classification)\n# test_model_selection_flow.py\n#\n#\n#\n#\n#\n#\n\nModel Selection Params:\n def flow(self, \n models_to_flow=[], \n params=None, \n test_size=0.2, \n nfolds=3, \n nrepeats=3,\n pos_split=1,\n n_jobs=1, \n metrics=[], \n verbose=False, \n regressors=True,\n modelSelection=False,\n cut=None):\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom streamml2_test.streams import ModelSelectionStream\nfrom streamml2_test.streams import TransformationStream\nfrom streamml2_test.streamml2.utils.helpers import *\nfrom sklearn.datasets import load_iris\n\niris=load_iris()\nX=pd.DataFrame(iris['data'], columns=iris['feature_names'])\ny=pd.DataFrame(iris['target'], columns=['target'])\nX2=TransformationStream(X).flow([\"scale\",\"normalize\"])\nmodels=get_model_selection_classifiers()\nparams=get_model_selection_classifiers_params()\nprint(models)\nresults_dict = ModelSelectionStream(X2,y).flow(models,\n params={},\n regressors=False,\n verbose=True)\n\nfor k in results_dict.keys():\n print(k)\n print(results_dict[k])\n"
] |
[
[
"sklearn.datasets.load_iris",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhengknight/tensorpack
|
[
"726747313fb2f189dd195d32087897b16a23be0a",
"726747313fb2f189dd195d32087897b16a23be0a",
"726747313fb2f189dd195d32087897b16a23be0a",
"726747313fb2f189dd195d32087897b16a23be0a",
"726747313fb2f189dd195d32087897b16a23be0a",
"726747313fb2f189dd195d32087897b16a23be0a"
] |
[
"tensorpack/tfutils/summary.py",
"tensorpack/callbacks/group.py",
"examples/ResNet/resnet_model.py",
"tensorpack/tfutils/optimizer.py",
"tensorpack/callbacks/inference_runner.py",
"examples/HED/hed.py"
] |
[
"# -*- coding: utf-8 -*-\n# File: summary.py\n\n\nimport six\nimport tensorflow as tf\nimport re\nfrom six.moves import range\nfrom contextlib import contextmanager\n\nfrom tensorflow.python.training import moving_averages\n\nfrom ..utils import logger\nfrom ..utils.argtools import graph_memoized\nfrom ..utils.naming import MOVING_SUMMARY_OPS_KEY\nfrom .tower import get_current_tower_context\nfrom .symbolic_functions import rms\nfrom .scope_utils import cached_name_scope\n\n__all__ = ['add_tensor_summary', 'add_param_summary',\n 'add_activation_summary', 'add_moving_summary',\n ]\n\n\n# some scope stuff to use internally...\n@graph_memoized\ndef _get_cached_vs(name):\n with tf.variable_scope(name) as scope:\n return scope\n\n\n@contextmanager\ndef _enter_vs_reuse_ns(name):\n vs = _get_cached_vs(name)\n # XXX Not good to enter the cached vs directly, because this will clean-up custom getter\n # with tf.variable_scope(name, reuse=tf.AUTO_REUSE): # available in 1.4 only\n with tf.variable_scope(vs):\n with tf.name_scope(vs.original_name_scope):\n yield vs\n\n\ndef create_scalar_summary(name, v):\n \"\"\"\n Args:\n name (str):\n v (float): scalar value\n Returns:\n tf.Summary: a tf.Summary object with name and simple scalar value v.\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n v = float(v)\n s = tf.Summary()\n s.value.add(tag=name, simple_value=v)\n return s\n\n\ndef create_image_summary(name, val):\n \"\"\"\n Args:\n name(str):\n val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.\n Can be either float or uint8. Range has to be [0,255].\n\n Returns:\n tf.Summary:\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n n, h, w, c = val.shape\n val = val.astype('uint8')\n s = tf.Summary()\n for k in range(n):\n arr = val[k]\n # CV2 will only write correctly in BGR chanel order\n if c == 3:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)\n elif c == 4:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)\n tag = name if n == 1 else '{}/{}'.format(name, k)\n\n retval, img_str = cv2.imencode('.png', arr)\n if not retval:\n # Encoding has failed.\n continue\n img_str = img_str.tostring()\n\n img = tf.Summary.Image()\n img.height = h\n img.width = w\n # 1 - grayscale 3 - RGB 4 - RGBA\n img.colorspace = c\n img.encoded_image_string = img_str\n s.value.add(tag=tag, image=img)\n return s\n\n\ndef add_tensor_summary(x, types, name=None, collections=None,\n main_tower_only=True):\n \"\"\"\n Summarize a tensor by different methods.\n\n Args:\n x (tf.Tensor): a tensor to summarize\n types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms\n name (str): summary name. Defaults to be the op name.\n collections (list[str]): collections of the summary ops.\n main_tower_only (bool): Only run under main training tower. If\n set to True, calling this function under other TowerContext\n has no effect.\n\n Example:\n\n .. 
code-block:: python\n\n with tf.name_scope('mysummaries'): # to not mess up tensorboard\n add_tensor_summary(\n tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')\n \"\"\"\n types = set(types)\n if name is None:\n name = x.op.name\n ctx = get_current_tower_context()\n if ctx is not None and not ctx.is_main_training_tower:\n return\n\n SUMMARY_TYPES_DIC = {\n 'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections),\n 'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections),\n 'sparsity': lambda: tf.summary.scalar(\n name + '-sparsity', tf.nn.zero_fraction(x),\n collections=collections),\n 'mean': lambda: tf.summary.scalar(\n name + '-mean', tf.reduce_mean(x),\n collections=collections),\n 'rms': lambda: tf.summary.scalar(\n name + '-rms', rms(x), collections=collections)\n }\n for typ in types:\n SUMMARY_TYPES_DIC[typ]()\n\n\ndef add_activation_summary(x, types=None, name=None, collections=None):\n \"\"\"\n Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.\n This function is a no-op if not calling from main training tower.\n\n Args:\n x (tf.Tensor): the tensor to summary.\n types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.\n name (str): if is None, use x.name.\n collections (list[str]): collections of the summary ops.\n \"\"\"\n ndim = x.get_shape().ndims\n if ndim < 2:\n logger.warn(\"Cannot summarize scalar activation {}\".format(x.name))\n return\n if types is None:\n types = ['sparsity', 'rms', 'histogram']\n with cached_name_scope('activation-summary'):\n add_tensor_summary(x, types, name=name, collections=collections)\n\n\ndef add_param_summary(*summary_lists, **kwargs):\n \"\"\"\n Add summary ops for all trainable variables matching the regex, under a\n reused 'param-summary' name scope.\n This function is a no-op if not calling from main training tower.\n\n Args:\n summary_lists (list): each is (regex, [list of summary type]).\n Summary type is defined in :func:`add_tensor_summary`.\n collections (list[str]): collections of the summary ops.\n\n Example:\n\n .. code-block:: python\n\n add_param_summary(\n ('.*/W', ['histogram', 'rms']),\n ('.*/gamma', ['scalar']),\n )\n \"\"\"\n collections = kwargs.pop('collections', None)\n assert len(kwargs) == 0, \"Unknown kwargs: \" + str(kwargs)\n ctx = get_current_tower_context()\n if ctx is not None and not ctx.is_main_training_tower:\n return\n\n params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n with cached_name_scope('param-summary'):\n for p in params:\n name = p.op.name\n for rgx, actions in summary_lists:\n if not rgx.endswith('$'):\n rgx = rgx + '$'\n if re.match(rgx, name):\n add_tensor_summary(p, actions, name=name, collections=collections)\n\n\ndef add_moving_summary(*args, **kwargs):\n \"\"\"\n Summarize the moving average for scalar tensors.\n This function is a no-op if not calling from main training tower.\n\n Args:\n args: scalar tensors to summarize\n decay (float): the decay rate. Defaults to 0.95.\n collection (str or None): the name of the collection to add EMA-maintaining ops.\n The default will work together with the default\n :class:`MovingAverageSummary` callback.\n summary_collections ([str]): the names of collections to add the\n summary op. 
Default is TF's default (`tf.GraphKeys.SUMMARIES`).\n\n Returns:\n [tf.Tensor]: list of tensors returned by assign_moving_average,\n which can be used to maintain the EMA.\n \"\"\"\n decay = kwargs.pop('decay', 0.95)\n coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)\n summ_coll = kwargs.pop('summary_collections', None)\n assert len(kwargs) == 0, \"Unknown arguments: \" + str(kwargs)\n\n ctx = get_current_tower_context()\n # allow ctx to be none\n if ctx is not None and not ctx.is_main_training_tower:\n return []\n if tf.get_variable_scope().reuse is True:\n logger.warn(\"add_moving_summary() called under reuse=True scope, ignored.\")\n return []\n\n if len(args) == 1 and isinstance(args[0], (list, tuple)):\n logger.warn(\"add_moving_summary() takes positional args instead of an iterable of tensors!\")\n args = args[0]\n\n for x in args:\n assert isinstance(x, (tf.Tensor, tf.Variable)), x\n assert x.get_shape().ndims == 0, \\\n \"add_moving_summary() only accepts scalar tensor! Got one with {}\".format(x.get_shape())\n # TODO variable not saved under distributed\n\n ema_ops = []\n for c in args:\n name = re.sub('tower[0-9]+/', '', c.op.name)\n with tf.name_scope(None):\n if not c.dtype.is_floating:\n c = tf.cast(c, tf.float32)\n # assign_moving_average creates variables with op names, therefore clear ns first.\n with _enter_vs_reuse_ns('EMA') as vs:\n ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype,\n initializer=tf.constant_initializer(), trainable=False)\n ns = vs.original_name_scope\n with tf.name_scope(ns): # reuse VS&NS so that EMA_1 won't appear\n ema_op = moving_averages.assign_moving_average(\n ema_var, c, decay,\n zero_debias=True, name=name + '_EMA_apply')\n ema_ops.append(ema_op)\n with tf.name_scope(None):\n tf.summary.scalar(\n name + '-summary', ema_op,\n collections=summ_coll) # write the EMA value as a summary\n if coll is not None:\n for op in ema_ops:\n tf.add_to_collection(coll, op)\n return ema_ops\n\n\ntry:\n import cv2\nexcept ImportError:\n from ..utils.develop import create_dummy_func\n create_image_summary = create_dummy_func('create_image_summary', 'cv2') # noqa\n",
"# -*- coding: utf-8 -*-\n# File: group.py\n\n\nimport tensorflow as tf\nfrom contextlib import contextmanager\nfrom time import time as timer\nimport traceback\nimport six\n\nfrom .base import Callback\nfrom .hooks import CallbackToHook\nfrom ..utils import logger\nfrom ..utils.utils import humanize_time_delta\n\nif six.PY3:\n from time import perf_counter as timer # noqa\n\n__all__ = ['Callbacks']\n\n\nclass CallbackTimeLogger(object):\n def __init__(self):\n self.times = []\n self.tot = 0\n\n def add(self, name, time):\n self.tot += time\n self.times.append((name, time))\n\n @contextmanager\n def timed_callback(self, name):\n s = timer()\n yield\n self.add(name, timer() - s)\n\n def log(self):\n\n \"\"\" log the time of some heavy callbacks \"\"\"\n if self.tot < 3:\n return\n msgs = []\n for name, t in self.times:\n if t / self.tot > 0.3 and t > 1:\n msgs.append(name + \": \" + humanize_time_delta(t))\n logger.info(\n \"Callbacks took {:.3f} sec in total. {}\".format(\n self.tot, '; '.join(msgs)))\n\n\nclass Callbacks(Callback):\n \"\"\"\n A container to hold all callbacks, and trigger them iteratively.\n Note that it does nothing to before_run/after_run.\n \"\"\"\n\n def __init__(self, cbs):\n \"\"\"\n Args:\n cbs(list): a list of :class:`Callback` instances.\n \"\"\"\n # check type\n for cb in cbs:\n assert isinstance(cb, Callback), cb.__class__\n self.cbs = cbs\n\n def _setup_graph(self):\n with tf.name_scope(None): # clear the name scope\n for cb in self.cbs:\n cb.setup_graph(self.trainer)\n\n def _before_train(self):\n for cb in self.cbs:\n cb.before_train()\n\n def _after_train(self):\n for cb in self.cbs:\n # make sure callbacks are properly finalized\n try:\n cb.after_train()\n except Exception:\n traceback.print_exc()\n\n def get_hooks(self):\n return [CallbackToHook(cb) for cb in self.cbs]\n\n def trigger_step(self):\n for cb in self.cbs:\n cb.trigger_step()\n\n def _trigger_epoch(self):\n tm = CallbackTimeLogger()\n\n for cb in self.cbs:\n display_name = str(cb)\n with tm.timed_callback(display_name):\n cb.trigger_epoch()\n tm.log()\n\n def _before_epoch(self):\n for cb in self.cbs:\n cb.before_epoch()\n\n def _after_epoch(self):\n for cb in self.cbs:\n cb.after_epoch()\n",
"# -*- coding: utf-8 -*-\n# File: resnet_model.py\n\nimport tensorflow as tf\n\nfrom tensorpack.tfutils.argscope import argscope, get_arg_scope\nfrom tensorpack.models import (\n Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm, BNReLU, FullyConnected)\n\n\ndef resnet_shortcut(l, n_out, stride, activation=tf.identity):\n data_format = get_arg_scope()['Conv2D']['data_format']\n n_in = l.get_shape().as_list()[1 if data_format in ['NCHW', 'channels_first'] else 3]\n if n_in != n_out: # change dimension when channel is not the same\n return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)\n else:\n return l\n\n\ndef apply_preactivation(l, preact):\n if preact == 'bnrelu':\n shortcut = l # preserve identity mapping\n l = BNReLU('preact', l)\n else:\n shortcut = l\n return l, shortcut\n\n\ndef get_bn(zero_init=False):\n \"\"\"\n Zero init gamma is good for resnet. See https://arxiv.org/abs/1706.02677.\n \"\"\"\n if zero_init:\n return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())\n else:\n return lambda x, name=None: BatchNorm('bn', x)\n\n\ndef preresnet_basicblock(l, ch_out, stride, preact):\n l, shortcut = apply_preactivation(l, preact)\n l = Conv2D('conv1', l, ch_out, 3, strides=stride, activation=BNReLU)\n l = Conv2D('conv2', l, ch_out, 3)\n return l + resnet_shortcut(shortcut, ch_out, stride)\n\n\ndef preresnet_bottleneck(l, ch_out, stride, preact):\n # stride is applied on the second conv, following fb.resnet.torch\n l, shortcut = apply_preactivation(l, preact)\n l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)\n l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)\n l = Conv2D('conv3', l, ch_out * 4, 1)\n return l + resnet_shortcut(shortcut, ch_out * 4, stride)\n\n\ndef preresnet_group(name, l, block_func, features, count, stride):\n with tf.variable_scope(name):\n for i in range(0, count):\n with tf.variable_scope('block{}'.format(i)):\n # first block doesn't need activation\n l = block_func(l, features,\n stride if i == 0 else 1,\n 'no_preact' if i == 0 else 'bnrelu')\n # end of each group need an extra activation\n l = BNReLU('bnlast', l)\n return l\n\n\ndef resnet_basicblock(l, ch_out, stride):\n shortcut = l\n l = Conv2D('conv1', l, ch_out, 3, strides=stride, activation=BNReLU)\n l = Conv2D('conv2', l, ch_out, 3, activation=get_bn(zero_init=True))\n out = l + resnet_shortcut(shortcut, ch_out, stride, activation=get_bn(zero_init=False))\n return tf.nn.relu(out)\n\n\ndef resnet_bottleneck(l, ch_out, stride, stride_first=False):\n \"\"\"\n stride_first: original resnet put stride on first conv. 
fb.resnet.torch put stride on second conv.\n \"\"\"\n shortcut = l\n l = Conv2D('conv1', l, ch_out, 1, strides=stride if stride_first else 1, activation=BNReLU)\n l = Conv2D('conv2', l, ch_out, 3, strides=1 if stride_first else stride, activation=BNReLU)\n l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))\n out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))\n return tf.nn.relu(out)\n\n\ndef se_resnet_bottleneck(l, ch_out, stride):\n shortcut = l\n l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)\n l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)\n l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))\n\n squeeze = GlobalAvgPooling('gap', l)\n squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)\n squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)\n data_format = get_arg_scope()['Conv2D']['data_format']\n ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3\n shape = [-1, 1, 1, 1]\n shape[ch_ax] = ch_out * 4\n l = l * tf.reshape(squeeze, shape)\n out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))\n return tf.nn.relu(out)\n\n\ndef resnet_group(name, l, block_func, features, count, stride):\n with tf.variable_scope(name):\n for i in range(0, count):\n with tf.variable_scope('block{}'.format(i)):\n l = block_func(l, features, stride if i == 0 else 1)\n return l\n\n\ndef resnet_backbone(image, num_blocks, group_func, block_func):\n with argscope(Conv2D, use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):\n # Note that this pads the image by [2, 3] instead of [3, 2].\n # Similar things happen in later stride=2 layers as well.\n l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)\n l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')\n l = group_func('group0', l, block_func, 64, num_blocks[0], 1)\n l = group_func('group1', l, block_func, 128, num_blocks[1], 2)\n l = group_func('group2', l, block_func, 256, num_blocks[2], 2)\n l = group_func('group3', l, block_func, 512, num_blocks[3], 2)\n l = GlobalAvgPooling('gap', l)\n logits = FullyConnected('linear', l, 1000,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))\n return logits\n",
"# -*- coding: utf-8 -*-\n# File: optimizer.py\n\n\nimport tensorflow as tf\nfrom contextlib import contextmanager\nfrom .gradproc import FilterNoneGrad, GradientProcessor\n\n__all__ = ['apply_grad_processors', 'ProxyOptimizer',\n 'PostProcessOptimizer', 'VariableAssignmentOptimizer',\n 'AccumGradOptimizer']\n\n\nclass ProxyOptimizer(tf.train.Optimizer):\n \"\"\"\n A transparent proxy which delegates all methods of :class:`tf.train.Optimizer`\n \"\"\"\n def __init__(self, opt, name='ProxyOptimizer'):\n assert isinstance(opt, tf.train.Optimizer), opt\n super(ProxyOptimizer, self).__init__(False, name)\n self._opt = opt\n\n def compute_gradients(self, *args, **kwargs):\n return self._opt.compute_gradients(*args, **kwargs)\n\n def get_slot(self, *args, **kwargs):\n return self._opt.get_slot(*args, **kwargs)\n\n def get_slot_names(self, *args, **kwargs):\n return self._opt.get_slot_names(*args, **kwargs)\n\n def apply_gradients(self, *args, **kwargs):\n return self._opt.apply_gradients(*args, **kwargs)\n\n\ndef apply_grad_processors(opt, gradprocs):\n \"\"\"\n Wrapper around optimizers to apply gradient processors.\n\n Args:\n opt (tf.train.Optimizer):\n gradprocs (list[GradientProcessor]): gradient processors to add to the\n optimizer.\n\n Returns:\n a :class:`tf.train.Optimizer` instance which runs the gradient\n processors before updating the variables.\n \"\"\"\n assert isinstance(gradprocs, (list, tuple)), gradprocs\n for gp in gradprocs:\n assert isinstance(gp, GradientProcessor), gp\n\n class _ApplyGradientProcessor(ProxyOptimizer):\n def __init__(self, opt, gradprocs):\n self._gradprocs = gradprocs[:]\n super(_ApplyGradientProcessor, self).__init__(opt)\n\n def apply_gradients(self, grads_and_vars,\n global_step=None, name=None):\n g = self._apply(grads_and_vars)\n return self._opt.apply_gradients(g, global_step, name)\n\n def _apply(self, g):\n for proc in self._gradprocs:\n g = proc.process(g)\n return g\n\n return _ApplyGradientProcessor(opt, gradprocs)\n\n\nclass PostProcessOptimizer(ProxyOptimizer):\n \"\"\"\n An optimizer which applies some \"post-processing operation\" per variable\n (e.g. clipping, quantization) after the gradient update.\n \"\"\"\n def __init__(self, opt, func, colocate=True):\n \"\"\"\n Args:\n opt (tf.train.Optimizer):\n func (tf.Variable -> tf.Operation or None): the operation needed\n to perform for this variable after the gradient update.\n colocate (boolean): colocate the function with the variable.\n \"\"\"\n super(PostProcessOptimizer, self).__init__(opt)\n self._func = func\n self._colocate = colocate\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n update_op = super(PostProcessOptimizer, self).apply_gradients(\n grads_and_vars, global_step)\n ops = []\n with tf.control_dependencies([update_op]):\n for _, var in grads_and_vars:\n with self._maybe_colocate(var):\n op = self._func(var)\n if op is not None:\n assert isinstance(op, tf.Operation), op\n ops.append(op)\n update_op = tf.group(update_op, *ops, name=name)\n return update_op\n\n @contextmanager\n def _maybe_colocate(self, var):\n G = tf.get_default_graph()\n if self._colocate:\n with G.colocate_with(var):\n yield\n else:\n yield\n\n\nclass VariableAssignmentOptimizer(PostProcessOptimizer):\n \"\"\"\n An optimizer which assigns each variable a new value (e.g. 
clipping,\n quantization) after the gradient update.\n \"\"\"\n def __init__(self, opt, func):\n \"\"\"\n Args:\n opt (tf.train.Optimizer):\n func (tf.Variable -> tf.Tensor or None): the new value to be\n assigned to this variable after the gradient update.\n \"\"\"\n def f(v):\n t = func(v)\n if t is None:\n return t\n return tf.assign(v, t, use_locking=False).op\n super(VariableAssignmentOptimizer, self).__init__(opt, f)\n\n\nclass AccumGradOptimizer(ProxyOptimizer):\n \"\"\"\n An optimizer which accumulates gradients across :math:`k` :meth:`minimize` calls,\n and apply them together in every :math:`k`th :meth:`minimize` call.\n This is roughly the same as using a :math:`k` times larger batch size plus a\n :math:`k` times larger learning rate, but uses much less memory.\n\n Note that this implementation may not support all models.\n E.g., it doesn't support sparse gradient update.\n \"\"\"\n\n def __init__(self, opt, niter):\n \"\"\"\n Args:\n opt (tf.train.Optimizer): the underlying sub-optimizer.\n niter (int): number of iterations to accumulate gradients.\n \"\"\"\n super(AccumGradOptimizer, self).__init__(opt, 'AccumGrad')\n self._niter = int(niter)\n\n def _create_accum_slots(self, var_list):\n slots = []\n for v in var_list:\n # TODO an option to not colocate the accumulators with variables (to save more memory)\n s = self._zeros_slot(v, \"accum\", self._name)\n slots.append(s)\n return slots\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n assert global_step is None, \\\n \"AccumGradOptimizer doesn't support the option global_step! \" \\\n \"Please maintain it yourself.\"\n grads_and_vars = FilterNoneGrad().process(grads_and_vars)\n vs = []\n for g, v in grads_and_vars:\n assert isinstance(g, tf.Tensor) and isinstance(v, tf.Variable), \\\n \"AccumGradOptimizer only works for dense update! \" \\\n \"Types of v and g are {} and {}\".format(type(v), type(g))\n vs.append(v)\n\n with tf.control_dependencies(None):\n slots = self._create_accum_slots(vs)\n slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)]\n\n # Create the counter on the same device as the first variable.\n with tf.variable_scope(self._name), \\\n vs[0].graph.colocate_with(vs[0]):\n counter = tf.Variable(\n 0, name=\"counter\", trainable=False, dtype=tf.int32)\n\n with tf.name_scope('AccumGradOptimizer'):\n ops = []\n for s, gv in zip(slots, grads_and_vars):\n g, v = gv\n ops.append(s.assign_add(g))\n update_counter = tf.assign_add(counter, 1, name='update_counter')\n update_slot_op = tf.group(update_counter, *ops, name='update_slot')\n\n def update_grad():\n update_op = self._opt.apply_gradients(slots_and_vars)\n with tf.control_dependencies([update_op]):\n clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]\n return tf.group(*clear_ops, name='update_grad')\n\n pred = tf.equal(tf.mod(counter, self._niter), 0)\n with tf.control_dependencies([update_slot_op]):\n if name is None:\n name = 'cond_update_grad'\n op = tf.cond(pred, update_grad, tf.no_op, name=name).op\n return op\n\n\nif __name__ == '__main__':\n # run it with \"python -m tensorpack.tfutils.optimizer\"\n\n x = tf.get_variable('x', shape=[6])\n cost = tf.reduce_sum(tf.abs(x), name='cost')\n opt = tf.train.GradientDescentOptimizer(0.01)\n opt = AccumGradOptimizer(opt, 5)\n min_op = opt.minimize(cost)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n with sess.as_default():\n for k in range(20):\n min_op.run()\n print(x.eval())\n",
"# -*- coding: utf-8 -*-\n# File: inference_runner.py\n\n\nimport sys\nimport tensorflow as tf\nfrom tensorflow.python.training.monitored_session \\\n import _HookedSession as HookedSession\n\nimport itertools\nfrom contextlib import contextmanager\nimport tqdm\nfrom six.moves import range\n\nfrom ..utils import logger\nfrom ..utils.utils import get_tqdm_kwargs\nfrom ..dataflow.base import DataFlow\nfrom ..tfutils.tower import PredictTowerContext\n\nfrom ..input_source import (\n InputSource, FeedInput, QueueInput, StagingInput)\n\nfrom .base import Callback\nfrom .group import Callbacks\nfrom .inference import Inferencer\n\n__all__ = ['InferenceRunnerBase', 'InferenceRunner',\n 'DataParallelInferenceRunner']\n\n\ndef _device_from_int(dev):\n return '/gpu:{}'.format(dev) if dev >= 0 else '/cpu:0'\n\n\nclass InferencerToHook(tf.train.SessionRunHook):\n def __init__(self, inf, fetches):\n self._inf = inf\n self._fetches = fetches\n\n def before_run(self, _):\n return tf.train.SessionRunArgs(fetches=self._fetches)\n\n def after_run(self, _, run_values):\n self._inf.on_fetches(run_values.results)\n\n\n@contextmanager\ndef _inference_context():\n msg = \"You might need to check your input implementation.\"\n try:\n yield\n except (StopIteration, tf.errors.CancelledError):\n logger.error(\n \"[InferenceRunner] input stopped before reaching its size()! \" + msg)\n raise\n except tf.errors.OutOfRangeError: # tf.data reaches an end\n pass\n\n\nclass InferenceRunnerBase(Callback):\n \"\"\" Base class for inference runner.\n\n Note:\n 1. InferenceRunner will use `input.size()` to determine\n how much iterations to run, so you're responsible to ensure that\n `size()` is reasonable.\n 2. Only works with instances of `TowerTrainer`.\n \"\"\"\n def __init__(self, input, infs):\n \"\"\"\n Args:\n input (InputSource): the input to use. Must have ``size()``.\n infs (list[Inferencer]): list of :class:`Inferencer` to run.\n \"\"\"\n self._input_source = input\n if not isinstance(infs, list):\n self.infs = [infs]\n else:\n self.infs = infs\n for v in self.infs:\n assert isinstance(v, Inferencer), v\n\n try:\n self._size = input.size()\n except NotImplementedError:\n self._size = 0\n\n self._hooks = []\n\n def register_hook(self, hook):\n \"\"\"\n Args:\n hook (tf.train.SessionRunHook):\n \"\"\"\n self._hooks.append(hook)\n\n def _before_train(self):\n self._hooked_sess = HookedSession(self.trainer.sess, self._hooks)\n self._input_callbacks.before_train()\n if self._size > 0:\n logger.info(\"[InferenceRunner] Will eval {} iterations\".format(self._size))\n else:\n logger.warn(\"[InferenceRunner] Got an InputSource with unknown size! Will iterate until OutOfRangeError!\")\n\n def _after_train(self):\n self._input_callbacks.after_train()\n\n\nclass InferenceRunner(InferenceRunnerBase):\n \"\"\"\n A callback that runs a list of :class:`Inferencer` on some :class:`InputSource`.\n \"\"\"\n\n def __init__(self, input, infs, tower_name='InferenceTower', device=0):\n \"\"\"\n Args:\n input (InputSource or DataFlow): The :class:`InputSource` to run\n inference on. If given a DataFlow, will use :class:`FeedInput`.\n infs (list): a list of :class:`Inferencer` instances.\n tower_name (str): the name scope of the tower to build. 
Need to set a\n different one if multiple InferenceRunner are used.\n device (int): the device to use\n \"\"\"\n if isinstance(input, DataFlow):\n input = FeedInput(input, infinite=True) # TODO a better way to handle inference size\n assert isinstance(input, InputSource), input\n assert not isinstance(input, StagingInput), input\n self._tower_name = tower_name\n self._device = _device_from_int(device)\n super(InferenceRunner, self).__init__(input, infs)\n\n def _build_hook(self, inf):\n out_names = inf.get_fetches()\n fetches = self._tower_handle.get_tensors(out_names)\n return InferencerToHook(inf, fetches)\n\n def _setup_graph(self):\n assert self.trainer.tower_func is not None, \"You must set tower_func of the trainer to use InferenceRunner!\"\n tower_func = self.trainer.tower_func\n input_callbacks = self._input_source.setup(tower_func.inputs_desc)\n\n logger.info(\"[InferenceRunner] Building tower '{}' on device {} ...\".format(self._tower_name, self._device))\n with tf.variable_scope(tf.get_variable_scope(), reuse=True), \\\n tf.device(self._device), \\\n PredictTowerContext(\n self._tower_name, vs_name=self.trainer._main_tower_vs_name):\n tower_func(*self._input_source.get_input_tensors())\n self._tower_handle = tower_func.towers[-1]\n\n for h in [self._build_hook(inf) for inf in self.infs]:\n self.register_hook(h)\n # trigger_{step,epoch}, {before,after}_epoch is ignored.\n # We assume that InputSource callbacks won't use these methods\n self._input_callbacks = Callbacks(input_callbacks)\n for h in self._input_callbacks.get_hooks():\n self.register_hook(h)\n\n for inf in self.infs:\n inf.setup_graph(self.trainer)\n self._input_callbacks.setup_graph(self.trainer)\n\n def _trigger(self):\n for inf in self.infs:\n inf.before_epoch()\n\n self._input_source.reset_state()\n # iterate over the data, and run the hooked session\n with _inference_context(), \\\n tqdm.tqdm(total=self._size, **get_tqdm_kwargs()) as pbar:\n num_itr = self._size if self._size > 0 else sys.maxsize\n for _ in range(num_itr):\n self._hooked_sess.run(fetches=[])\n pbar.update()\n for inf in self.infs:\n inf.trigger_epoch()\n\n\nclass DataParallelInferenceRunner(InferenceRunnerBase):\n \"\"\"\n Inference with data-parallel support on multiple GPUs.\n It will build one predict tower on each GPU, and run prediction\n with a large total batch in parallel on all GPUs.\n It will run the remainder (when the total size of input is not a multiple of #GPU)\n sequentially.\n \"\"\"\n def __init__(self, input, infs, gpus, tower_name='InferenceTower'):\n \"\"\"\n Args:\n input (DataFlow or QueueInput)\n gpus (int or list[int]): #gpus, or list of GPU id\n \"\"\"\n if isinstance(gpus, int):\n gpus = list(range(gpus))\n self._devices = [_device_from_int(k) for k in gpus]\n self._tower_names = ['{}{}'.format(tower_name, k) for k in range(len(gpus))]\n\n if isinstance(input, DataFlow):\n input = QueueInput(input)\n assert isinstance(input, QueueInput), input\n super(DataParallelInferenceRunner, self).__init__(input, infs)\n assert self._size > 0, \"Input for DataParallelInferenceRunner must have a size!\"\n\n self._hooks = []\n self._hooks_parallel = []\n\n def _setup_graph(self):\n self._handles = []\n\n assert self.trainer.tower_func is not None, \"You must set tower_func of the trainer to use InferenceRunner!\"\n tower_func = self.trainer.tower_func\n input_callbacks = self._input_source.setup(tower_func.inputs_desc)\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n for idx, dev in enumerate(self._devices):\n with 
tf.device(dev), PredictTowerContext(\n self._tower_names[idx], vs_name=self.trainer._main_tower_vs_name):\n tower_func(*self._input_source.get_input_tensors())\n self._handles.append(tower_func.towers[-1])\n\n # setup callbacks and hooks\n self._input_callbacks = Callbacks(input_callbacks)\n\n # TODO InputSource might have hooks which break us.\n # e.g. hooks from StagingInput will force the consumption\n # of nr_tower datapoints in every run.\n input_hooks = self._input_callbacks.get_hooks()\n self._hooks.extend([self._build_hook(inf) for inf in self.infs] + input_hooks)\n self._hooks_parallel.extend([self._build_hook_parallel(inf) for inf in self.infs] + input_hooks)\n\n for inf in self.infs:\n inf.setup_graph(self.trainer)\n self._input_callbacks.setup_graph(self.trainer)\n\n def register_hook(self, h):\n logger.info(\n \"[DataParallelInferenceRunner] Registering hook {} on both parallel and sequential inference.\")\n self._hooks.append(h)\n self._hooks_parallel.append(h)\n\n class InferencerToHookDataParallel(InferencerToHook):\n def __init__(self, inf, fetches, size):\n \"\"\"\n Args:\n size(int): number of tensors to fetch per tower\n \"\"\"\n super(DataParallelInferenceRunner.InferencerToHookDataParallel, self).__init__(inf, fetches)\n assert len(self._fetches) % size == 0\n self._sz = size\n\n def after_run(self, _, run_values):\n res = run_values.results\n for i in range(0, len(res), self._sz):\n vals = res[i:i + self._sz]\n self._inf.on_fetches(vals)\n\n def _build_hook_parallel(self, inf):\n out_names = inf.get_fetches()\n sz = len(out_names)\n fetches = list(itertools.chain(*[t.get_tensors(out_names) for t in self._handles]))\n return self.InferencerToHookDataParallel(inf, fetches, sz)\n\n def _build_hook(self, inf):\n out_names = inf.get_fetches()\n fetches = self._handles[0].get_tensors(out_names)\n return InferencerToHook(inf, fetches)\n\n def _before_train(self):\n super(DataParallelInferenceRunner, self)._before_train()\n self._parallel_hooked_sess = HookedSession(self.trainer.sess, self._hooks_parallel)\n\n def _trigger(self):\n for inf in self.infs:\n inf.before_epoch()\n\n total = self._size\n nr_tower = len(self._devices)\n self._input_source.reset_state()\n with _inference_context():\n with tqdm.tqdm(total=total, **get_tqdm_kwargs()) as pbar:\n while total >= nr_tower:\n self._parallel_hooked_sess.run(fetches=[])\n pbar.update(nr_tower)\n total -= nr_tower\n # take care of the rest\n for _ in range(total):\n self._hooked_sess.run(fetches=[])\n pbar.update(1)\n for inf in self.infs:\n inf.trigger_epoch()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: hed.py\n# Author: Yuxin Wu\n\nimport cv2\nimport tensorflow as tf\nimport argparse\nfrom six.moves import zip\nimport os\n\n\nfrom tensorpack import *\nfrom tensorpack.dataflow import dataset\nfrom tensorpack.utils.gpu import get_num_gpu\nfrom tensorpack.tfutils import optimizer, gradproc\nfrom tensorpack.tfutils.summary import add_moving_summary, add_param_summary\n\n\ndef class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'):\n \"\"\"\n The class-balanced cross entropy loss,\n as in `Holistically-Nested Edge Detection\n <http://arxiv.org/abs/1504.06375>`_.\n\n Args:\n logits: of shape (b, ...).\n label: of the same shape. the ground truth in {0,1}.\n Returns:\n class-balanced cross entropy loss.\n \"\"\"\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / (count_neg + count_pos)\n\n pos_weight = beta / (1 - beta)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)\n\n\nclass Model(ModelDesc):\n def inputs(self):\n return [tf.placeholder(tf.float32, [None, None, None, 3], 'image'),\n tf.placeholder(tf.int32, [None, None, None], 'edgemap')]\n\n def build_graph(self, image, edgemap):\n image = image - tf.constant([104, 116, 122], dtype='float32')\n edgemap = tf.expand_dims(edgemap, 3, name='edgemap4d')\n\n def branch(name, l, up):\n with tf.variable_scope(name):\n l = Conv2D('convfc', l, 1, kernel_size=1, activation=tf.identity,\n use_bias=True,\n kernel_initializer=tf.constant_initializer())\n while up != 1:\n l = BilinearUpSample('upsample{}'.format(up), l, 2)\n up = up / 2\n return l\n\n with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):\n l = Conv2D('conv1_1', image, 64)\n l = Conv2D('conv1_2', l, 64)\n b1 = branch('branch1', l, 1)\n l = MaxPooling('pool1', l, 2)\n\n l = Conv2D('conv2_1', l, 128)\n l = Conv2D('conv2_2', l, 128)\n b2 = branch('branch2', l, 2)\n l = MaxPooling('pool2', l, 2)\n\n l = Conv2D('conv3_1', l, 256)\n l = Conv2D('conv3_2', l, 256)\n l = Conv2D('conv3_3', l, 256)\n b3 = branch('branch3', l, 4)\n l = MaxPooling('pool3', l, 2)\n\n l = Conv2D('conv4_1', l, 512)\n l = Conv2D('conv4_2', l, 512)\n l = Conv2D('conv4_3', l, 512)\n b4 = branch('branch4', l, 8)\n l = MaxPooling('pool4', l, 2)\n\n l = Conv2D('conv5_1', l, 512)\n l = Conv2D('conv5_2', l, 512)\n l = Conv2D('conv5_3', l, 512)\n b5 = branch('branch5', l, 16)\n\n final_map = Conv2D('convfcweight',\n tf.concat([b1, b2, b3, b4, b5], 3), 1, kernel_size=1,\n kernel_initializer=tf.constant_initializer(0.2),\n use_bias=False, activation=tf.identity)\n costs = []\n for idx, b in enumerate([b1, b2, b3, b4, b5, final_map]):\n output = tf.nn.sigmoid(b, name='output{}'.format(idx + 1))\n xentropy = class_balanced_sigmoid_cross_entropy(\n b, edgemap,\n name='xentropy{}'.format(idx + 1))\n costs.append(xentropy)\n\n # some magic threshold\n pred = tf.cast(tf.greater(output, 0.5), tf.int32, name='prediction')\n wrong = tf.cast(tf.not_equal(pred, edgemap), tf.float32)\n wrong = tf.reduce_mean(wrong, name='train_error')\n\n if get_current_tower_context().is_training:\n wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),\n 80000, 0.7, True)\n wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')\n 
costs.append(wd_cost)\n\n add_param_summary(('.*/W', ['histogram'])) # monitor W\n total_cost = tf.add_n(costs, name='cost')\n add_moving_summary(wrong, total_cost, *costs)\n return total_cost\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=3e-5, trainable=False)\n opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)\n return optimizer.apply_grad_processors(\n opt, [gradproc.ScaleGradient(\n [('convfcweight.*', 0.1), ('conv5_.*', 5)])])\n\n\ndef get_data(name):\n isTrain = name == 'train'\n ds = dataset.BSDS500(name, shuffle=True)\n\n class CropMultiple16(imgaug.ImageAugmentor):\n def _get_augment_params(self, img):\n newh = img.shape[0] // 16 * 16\n neww = img.shape[1] // 16 * 16\n assert newh > 0 and neww > 0\n diffh = img.shape[0] - newh\n h0 = 0 if diffh == 0 else self.rng.randint(diffh)\n diffw = img.shape[1] - neww\n w0 = 0 if diffw == 0 else self.rng.randint(diffw)\n return (h0, w0, newh, neww)\n\n def _augment(self, img, param):\n h0, w0, newh, neww = param\n return img[h0:h0 + newh, w0:w0 + neww]\n\n if isTrain:\n shape_aug = [\n imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),\n aspect_ratio_thres=0.15),\n imgaug.RotationAndCropValid(90),\n CropMultiple16(),\n imgaug.Flip(horiz=True),\n imgaug.Flip(vert=True)\n ]\n else:\n # the original image shape (321x481) in BSDS is not a multiple of 16\n IMAGE_SHAPE = (320, 480)\n shape_aug = [imgaug.CenterCrop(IMAGE_SHAPE)]\n ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)\n\n def f(m): # thresholding\n m[m >= 0.50] = 1\n m[m < 0.50] = 0\n return m\n ds = MapDataComponent(ds, f, 1)\n\n if isTrain:\n augmentors = [\n imgaug.Brightness(63, clip=False),\n imgaug.Contrast((0.4, 1.5)),\n ]\n ds = AugmentImageComponent(ds, augmentors, copy=False)\n ds = BatchDataByShape(ds, 8, idx=0)\n ds = PrefetchDataZMQ(ds, 1)\n else:\n ds = BatchData(ds, 1)\n return ds\n\n\ndef view_data():\n ds = RepeatedData(get_data('train'), -1)\n ds.reset_state()\n for ims, edgemaps in ds.get_data():\n for im, edgemap in zip(ims, edgemaps):\n assert im.shape[0] % 16 == 0 and im.shape[1] % 16 == 0, im.shape\n cv2.imshow(\"im\", im / 255.0)\n cv2.waitKey(1000)\n cv2.imshow(\"edge\", edgemap)\n cv2.waitKey(1000)\n\n\ndef get_config():\n logger.auto_set_dir()\n dataset_train = get_data('train')\n steps_per_epoch = dataset_train.size() * 40\n dataset_val = get_data('val')\n\n return TrainConfig(\n dataflow=dataset_train,\n callbacks=[\n ModelSaver(),\n ScheduledHyperParamSetter('learning_rate', [(30, 6e-6), (45, 1e-6), (60, 8e-7)]),\n HumanHyperParamSetter('learning_rate'),\n InferenceRunner(dataset_val,\n BinaryClassificationStats('prediction', 'edgemap4d'))\n ],\n model=Model(),\n steps_per_epoch=steps_per_epoch,\n max_epoch=100,\n )\n\n\ndef run(model_path, image_path, output):\n pred_config = PredictConfig(\n model=Model(),\n session_init=get_model_loader(model_path),\n input_names=['image'],\n output_names=['output' + str(k) for k in range(1, 7)])\n predictor = OfflinePredictor(pred_config)\n im = cv2.imread(image_path)\n assert im is not None\n im = cv2.resize(\n im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)\n )[None, :, :, :].astype('float32')\n outputs = predictor(im)\n if output is None:\n for k in range(6):\n pred = outputs[k][0]\n cv2.imwrite(\"out{}.png\".format(\n '-fused' if k == 5 else str(k + 1)), pred * 255)\n else:\n pred = outputs[5][0]\n cv2.imwrite(output, pred * 255)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) 
to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--view', help='view dataset', action='store_true')\n parser.add_argument('--run', help='run model on images')\n parser.add_argument('--output', help='fused output filename. default to out-fused.png')\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n if args.view:\n view_data()\n elif args.run:\n run(args.load, args.run, args.output)\n else:\n config = get_config()\n if args.load:\n config.session_init = get_model_loader(args.load)\n launch_train_with_config(\n config,\n SyncMultiGPUTrainer(max(get_num_gpu(), 1)))\n"
] |
[
[
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.Summary.Image",
"tensorflow.reduce_mean",
"tensorflow.get_collection",
"tensorflow.cast",
"tensorflow.nn.zero_fraction",
"tensorflow.constant_initializer",
"tensorflow.add_to_collection",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.Summary",
"tensorflow.get_variable_scope",
"tensorflow.summary.scalar",
"tensorflow.summary.histogram"
],
[
"tensorflow.name_scope"
],
[
"tensorflow.nn.relu",
"tensorflow.zeros_initializer",
"tensorflow.reshape",
"tensorflow.variance_scaling_initializer",
"tensorflow.variable_scope",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.cond",
"tensorflow.get_variable",
"tensorflow.assign_add",
"tensorflow.control_dependencies",
"tensorflow.Variable",
"tensorflow.assign",
"tensorflow.mod",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.get_default_graph",
"tensorflow.group",
"tensorflow.abs"
],
[
"tensorflow.python.training.monitored_session._HookedSession",
"tensorflow.device",
"tensorflow.train.SessionRunArgs",
"tensorflow.get_variable_scope"
],
[
"tensorflow.not_equal",
"tensorflow.get_variable",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.greater",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"tensorflow.name_scope",
"tensorflow.where",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"tensorflow.add_n",
"tensorflow.nn.weighted_cross_entropy_with_logits"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
plin1112/cclib
|
[
"b048c9a60a33dd4d8e0ebe93671c5a35b6c018e3",
"b048c9a60a33dd4d8e0ebe93671c5a35b6c018e3"
] |
[
"cclib/parser/qchemparser.py",
"test/bridge/testpyquante.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\n\"\"\"Parser for Q-Chem output files\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport math\nimport re\n\nimport numpy\n\nfrom cclib.parser import logfileparser\nfrom cclib.parser import utils\n\n\nclass QChem(logfileparser.Logfile):\n \"\"\"A Q-Chem log file.\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Call the __init__ method of the superclass\n super(QChem, self).__init__(logname=\"QChem\", *args, **kwargs)\n\n def __str__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n return \"QChem log file %s\" % (self.filename)\n\n def __repr__(self):\n \"\"\"Return a representation of the object.\"\"\"\n return 'QChem(\"%s\")' % (self.filename)\n\n def normalisesym(self, label):\n \"\"\"Q-Chem does not require normalizing symmetry labels.\"\"\"\n return label\n\n def before_parsing(self):\n\n # Keep track of whether or not we're performing an\n # (un)restricted calculation.\n self.unrestricted = False\n self.is_rohf = False\n\n # Keep track of whether or not this is a fragment calculation,\n # so that only the supersystem is parsed.\n self.is_fragment_section = False\n # These headers identify when a fragment section is\n # entered/exited.\n self.fragment_section_headers = (\n 'Guess MOs from converged MOs on fragments',\n 'CP correction for fragment',\n )\n self.supersystem_section_headers = (\n 'Done with SCF on isolated fragments',\n 'Done with counterpoise correction on fragments',\n )\n\n # Compile the dashes-and-or-spaces-only regex.\n self.re_dashes_and_spaces = re.compile(r'^[\\s-]+$')\n\n # Compile the regex for extracting the atomic index from an\n # aoname.\n self.re_atomindex = re.compile(r'(\\d+)_')\n\n # A maximum of 6 columns per block when printing matrices. The\n # Fock matrix is 4.\n self.ncolsblock = 6\n\n # By default, when asked to print orbitals via\n # `scf_print`/`scf_final_print` and/or `print_orbitals`,\n # Q-Chem will print all occupieds and the first 5 virtuals.\n #\n # When the number is set for `print_orbitals`, that section of\n # the output will display (NOcc + that many virtual) MOs, but\n # any other sections present due to\n # `scf_print`/`scf_final_print` will still only display (NOcc\n # + 5) MOs. 
It is the `print_orbitals` section that `aonames`\n # is parsed from.\n #\n # Note that the (AO basis) density matrix is always (NBasis *\n # NBasis)!\n self.norbdisp_alpha = self.norbdisp_beta = 5\n self.norbdisp_alpha_aonames = self.norbdisp_beta_aonames = 5\n self.norbdisp_set = False\n\n self.alpha_mo_coefficient_headers = (\n 'RESTRICTED (RHF) MOLECULAR ORBITAL COEFFICIENTS',\n 'ALPHA MOLECULAR ORBITAL COEFFICIENTS'\n )\n\n self.gradient_headers = (\n 'Full Analytical Gradient',\n 'Gradient of SCF Energy',\n 'Gradient of MP2 Energy',\n )\n\n self.hessian_headers = (\n 'Hessian of the SCF Energy',\n 'Final Hessian.',\n )\n\n self.wfn_method = [\n 'HF',\n 'MP2', 'RI-MP2', 'LOCAL_MP2', 'MP4',\n 'CCD', 'CCSD', 'CCSD(T)',\n 'QCISD', 'QCISD(T)'\n ]\n\n def after_parsing(self):\n\n # If parsing a fragment job, each of the geometries appended to\n # `atomcoords` may be of different lengths, which will prevent\n # conversion from a list to NumPy array.\n # Take the length of the first geometry as correct, and remove\n # all others with different lengths.\n if len(self.atomcoords) > 1:\n correctlen = len(self.atomcoords[0])\n self.atomcoords[:] = [coords for coords in self.atomcoords\n if len(coords) == correctlen]\n # At the moment, there is no similar correction for other properties!\n\n # QChem does not print all MO coefficients by default, but rather\n # up to HOMO+5. So, fill up the missing values with NaNs. If there are\n # other cases where coefficient are missing, but different ones, this\n # general afterthought might not be appropriate and the fix will\n # need to be done while parsing.\n if hasattr(self, 'mocoeffs'):\n for im in range(len(self.mocoeffs)):\n _nmo, _nbasis = self.mocoeffs[im].shape\n if (_nmo, _nbasis) != (self.nmo, self.nbasis):\n coeffs = numpy.empty((self.nmo, self.nbasis))\n coeffs[:] = numpy.nan\n coeffs[0:_nmo, 0:_nbasis] = self.mocoeffs[im]\n self.mocoeffs[im] = coeffs\n\n # When parsing the 'MOLECULAR ORBITAL COEFFICIENTS' block for\n # `aonames`, Q-Chem doesn't print the principal quantum number\n # for each shell; this needs to be added.\n if hasattr(self, 'aonames') and hasattr(self, 'atombasis'):\n angmom = ('', 'S', 'P', 'D', 'F', 'G', 'H', 'I')\n for atom in self.atombasis:\n bfcounts = dict()\n for bfindex in atom:\n atomname, bfname = self.aonames[bfindex].split('_')\n # Keep track of how many times each shell type has\n # appeared.\n if bfname in bfcounts:\n bfcounts[bfname] += 1\n else:\n # Make sure the starting number for type of\n # angular momentum begins at the appropriate\n # principal quantum number (1S, 2P, 3D, 4F,\n # ...).\n bfcounts[bfname] = angmom.index(bfname[0])\n newbfname = '{}{}'.format(bfcounts[bfname], bfname)\n self.aonames[bfindex] = '_'.join([atomname, newbfname])\n\n # Assign the number of core electrons replaced by ECPs.\n if hasattr(self, 'user_input') and self.user_input.get('rem') is not None:\n if self.user_input['rem'].get('ecp') is not None:\n ecp_is_gen = (self.user_input['rem']['ecp'] == 'gen')\n if ecp_is_gen:\n assert 'ecp' in self.user_input\n has_iprint = hasattr(self, 'possible_ecps')\n\n if not ecp_is_gen and not has_iprint:\n msg = \"\"\"ECPs are present, but the number of core \\\nelectrons isn't printed at all. 
Rerun with \"iprint >= 100\" to get \\\ncoreelectrons.\"\"\"\n self.logger.warning(msg)\n self.incorrect_coreelectrons = True\n elif ecp_is_gen and not has_iprint:\n nmissing = sum(ncore == 0\n for (_, _, ncore) in self.user_input['ecp'])\n if nmissing > 1:\n msg = \"\"\"ECPs are present, but coreelectrons can only \\\nbe guessed for one element at most. Rerun with \"iprint >= 100\" to get \\\ncoreelectrons.\"\"\"\n self.logger.warning(msg)\n self.incorrect_coreelectrons = True\n elif self.user_input['molecule'].get('charge') is None:\n msg = \"\"\"ECPs are present, but the total charge \\\ncannot be determined. Rerun without `$molecule read`.\"\"\"\n self.logger.warning(msg)\n self.incorrect_coreelectrons = True\n else:\n user_charge = self.user_input['molecule']['charge']\n # First, assign the entries given\n # explicitly.\n for entry in self.user_input['ecp']:\n element, _, ncore = entry\n if ncore > 0:\n self._assign_coreelectrons_to_element(element, ncore)\n # Because of how the charge is calculated\n # during extract(), this is the number of\n # remaining core electrons that need to be\n # assigned ECP centers. Filter out the\n # remaining entries, of which there should\n # only be one.\n core_sum = self.coreelectrons.sum() if hasattr(self, 'coreelectrons') else 0\n remainder = self.charge - user_charge - core_sum\n entries = [entry\n for entry in self.user_input['ecp']\n if entry[2] == 0]\n if len(entries) != 0:\n assert len(entries) == 1\n element, _, ncore = entries[0]\n assert ncore == 0\n self._assign_coreelectrons_to_element(\n element, remainder, ncore_is_total_count=True)\n elif not ecp_is_gen and has_iprint:\n atomsymbols = [self.table.element[atomno] for atomno in self.atomnos]\n for i in range(self.natom):\n if atomsymbols[i] in self.possible_ecps:\n self.coreelectrons[i] = self.possible_ecps[atomsymbols[i]]\n else:\n assert ecp_is_gen and has_iprint\n for entry in self.user_input['ecp']:\n element, _, ncore = entry\n # If ncore is non-zero, then it must be\n # user-defined, and we take that\n # value. Otherwise, look it up.\n if ncore == 0:\n ncore = self.possible_ecps[element]\n self._assign_coreelectrons_to_element(element, ncore)\n\n # Check to see if the charge is consistent with the input\n # section. 
It may not be if using an ECP.\n if hasattr(self, 'user_input'):\n if self.user_input.get('molecule') is not None:\n user_charge = self.user_input['molecule'].get('charge')\n if user_charge is not None:\n self.set_attribute('charge', user_charge)\n\n def parse_charge_section(self, inputfile, chargetype):\n \"\"\"Parse the population analysis charge block.\"\"\"\n self.skip_line(inputfile, 'blank')\n line = next(inputfile)\n has_spins = False\n if 'Spin' in line:\n if not hasattr(self, 'atomspins'):\n self.atomspins = dict()\n has_spins = True\n spins = []\n self.skip_line(inputfile, 'dashes')\n if not hasattr(self, 'atomcharges'):\n self.atomcharges = dict()\n charges = []\n line = next(inputfile)\n\n while list(set(line.strip())) != ['-']:\n elements = line.split()\n charge = utils.float(elements[2])\n charges.append(charge)\n if has_spins:\n spin = utils.float(elements[3])\n spins.append(spin)\n line = next(inputfile)\n\n self.atomcharges[chargetype] = numpy.array(charges)\n if has_spins:\n self.atomspins[chargetype] = numpy.array(spins)\n\n @staticmethod\n def parse_matrix(inputfile, nrows, ncols, ncolsblock):\n \"\"\"Q-Chem prints most matrices in a standard format; parse the matrix\n into a NumPy array of the appropriate shape.\n \"\"\"\n nparray = numpy.empty(shape=(nrows, ncols))\n line = next(inputfile)\n assert len(line.split()) == min(ncolsblock, ncols)\n colcounter = 0\n while colcounter < ncols:\n # If the line is just the column header (indices)...\n if line[:5].strip() == '':\n line = next(inputfile)\n rowcounter = 0\n while rowcounter < nrows:\n row = list(map(float, line.split()[1:]))\n assert len(row) == min(ncolsblock, (ncols - colcounter))\n nparray[rowcounter][colcounter:colcounter + ncolsblock] = row\n line = next(inputfile)\n rowcounter += 1\n colcounter += ncolsblock\n return nparray\n\n def parse_matrix_aonames(self, inputfile, nrows, ncols):\n \"\"\"Q-Chem prints most matrices in a standard format; parse the matrix\n into a preallocated NumPy array of the appropriate shape.\n\n Rather than have one routine for parsing all general matrices\n and the 'MOLECULAR ORBITAL COEFFICIENTS' block, use a second\n which handles `aonames`.\n \"\"\"\n bigmom = ('d', 'f', 'g', 'h')\n nparray = numpy.empty(shape=(nrows, ncols))\n line = next(inputfile)\n assert len(line.split()) == min(self.ncolsblock, ncols)\n colcounter = 0\n split_fixed = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n while colcounter < ncols:\n # If the line is just the column header (indices)...\n if line[:5].strip() == '':\n line = next(inputfile)\n # Do nothing for now.\n if 'eigenvalues' in line:\n line = next(inputfile)\n rowcounter = 0\n while rowcounter < nrows:\n row = split_fixed.split(line)\n # Only take the AO names on the first time through.\n if colcounter == 0:\n if len(self.aonames) != self.nbasis:\n # Apply the offset for rows where there is\n # more than one atom of any element in the\n # molecule.\n offset = 1\n if row[2] != '':\n name = self.atommap.get(row[1] + str(row[2]))\n else:\n name = self.atommap.get(row[1] + '1')\n # For l > 1, there is a space between l and\n # m_l when using spherical functions.\n shell = row[2 + offset]\n if shell in bigmom:\n shell = ''.join([shell, row[3 + offset]])\n aoname = ''.join([name, '_', shell.upper()])\n self.aonames.append(aoname)\n row = list(map(float, row[-min(self.ncolsblock, (ncols - colcounter)):]))\n nparray[rowcounter][colcounter:colcounter + self.ncolsblock] = row\n line = next(inputfile)\n rowcounter += 1\n colcounter += 
self.ncolsblock\n return nparray\n\n def parse_orbital_energies_and_symmetries(self, inputfile):\n \"\"\"Parse the 'Orbital Energies (a.u.)' block appearing after SCF converges,\n which optionally includes MO symmetries. Based upon the\n Occupied/Virtual labeling, the HOMO is also parsed.\n \"\"\"\n energies = []\n symbols = []\n\n line = next(inputfile)\n # Sometimes Q-Chem gets a little confused...\n while \"MOs\" not in line:\n line = next(inputfile)\n line = next(inputfile)\n\n # The end of the block is either a blank line or only dashes.\n while not self.re_dashes_and_spaces.search(line) \\\n and not 'Warning : Irrep of orbital' in line:\n if 'Occupied' in line or 'Virtual' in line:\n # A nice trick to find where the HOMO is.\n if 'Virtual' in line:\n homo = len(energies) - 1\n line = next(inputfile)\n tokens = line.split()\n # If the line contains letters, it must be the MO\n # symmetries. Otherwise, it's the energies.\n if re.search(\"[a-zA-Z]\", line):\n symbols.extend(tokens[1::2])\n else:\n for e in tokens:\n try:\n energy = utils.convertor(utils.float(e), 'hartree', 'eV')\n except ValueError:\n energy = numpy.nan\n energies.append(energy)\n line = next(inputfile)\n\n # MO symmetries are either not present or there is one for each MO\n # (energy).\n assert len(symbols) in (0, len(energies))\n\n return energies, symbols, homo\n\n\n def generate_atom_map(self):\n \"\"\"Generate the map to go from Q-Chem atom numbering:\n 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H3', 'H4', 'C7', ...\n to cclib atom numbering:\n 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H7', 'H8', 'H9', 'H10', 'C11', ...\n for later use.\n \"\"\"\n\n # Generate the desired order.\n order_proper = [element + str(num)\n for element, num in zip(self.atomelements,\n itertools.count(start=1))]\n # We need separate counters for each element.\n element_counters = {element: itertools.count(start=1)\n for element in set(self.atomelements)}\n # Generate the Q-Chem printed order.\n order_qchem = [element + str(next(element_counters[element]))\n for element in self.atomelements]\n # Combine the orders into a mapping.\n atommap = {k: v for k, v, in zip(order_qchem, order_proper)}\n return atommap\n\n def generate_formula_histogram(self):\n \"\"\"From the atomnos, generate a histogram that represents the\n molecular formula.\n \"\"\"\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram\n\n def extract(self, inputfile, line):\n \"\"\"Extract information from the file object inputfile.\"\"\"\n\n # Extract the version number and optionally the version\n # control info.\n if any(version_trigger in line for version_trigger in (\"Q-Chem\", \"Unrecognized platform\", \"Version\")):\n # Part 1 matches\n # - `Q-Chem 4.3.0 for Intel X86 EM64T Linux`\n # Part 2 matches\n # - `Unrecognized platform!!! 
4.0.0.1`\n # Part 3 matches\n # - `Intel X86 EM64T Linux Version 4.1.0.1 `\n # but not\n # - `Additional authors for Version 3.1:`\n # - `Q-Chem, Version 4.1, Q-Chem, Inc., Pittsburgh, PA (2013).`\n match = re.search(\n r\"Q-Chem\\s([\\d\\.]*)\\sfor|\"\n r\"Unrecognized platform!!!\\s([\\d\\.]*)\\b|\"\n r\"Version\\s([\\d\\.]*)\\s*$\",\n line\n )\n if match:\n groups = [s for s in match.groups() if s is not None]\n assert len(groups) == 1\n package_version = groups[0]\n self.metadata[\"package_version\"] = package_version\n self.metadata[\"legacy_package_version\"] = package_version\n # Avoid \"Last SVN revision\" entry.\n if \"SVN revision\" in line and \"Last\" not in line:\n svn_revision = line.split()[3]\n line = next(inputfile)\n svn_branch = line.split()[3].replace(\"/\", \"_\")\n if \"package_version\" in self.metadata:\n self.metadata[\"package_version\"] = \"{}dev+{}-{}\".format(\n self.metadata[\"package_version\"], svn_branch, svn_revision\n )\n\n # Disable/enable parsing for fragment sections.\n if any(message in line for message in self.fragment_section_headers):\n self.is_fragment_section = True\n if any(message in line for message in self.supersystem_section_headers):\n self.is_fragment_section = False\n\n if not self.is_fragment_section:\n\n # If the input section is repeated back, parse the $rem and\n # $molecule sections.\n if line[0:11] == 'User input:':\n self.user_input = dict()\n self.skip_line(inputfile, 'd')\n while list(set(line.strip())) != ['-']:\n\n if line.strip().lower() == '$rem':\n\n self.user_input['rem'] = dict()\n\n while line.strip().lower() != '$end':\n\n line = next(inputfile).lower()\n if line.strip() == '$end':\n break\n # Apparently calculations can run without\n # a matching $end...this terminates the\n # user input section no matter what.\n if line.strip() == ('-' * 62):\n break\n\n tokens = line.split()\n # Allow blank lines.\n if len(tokens) == 0:\n continue\n # Entries may be separated by an equals\n # sign, and can have comments, for example:\n # ecp gen\n # ecp = gen\n # ecp gen ! only on first chlorine\n # ecp = gen only on first chlorine\n assert len(tokens) >= 2\n keyword = tokens[0]\n if tokens[1] == '=':\n option = tokens[2]\n else:\n option = tokens[1]\n self.user_input['rem'][keyword] = option\n\n if keyword == 'method':\n method = option.upper()\n if method in self.wfn_method:\n self.metadata[\"methods\"].append(method)\n else:\n self.metadata[\"methods\"].append('DFT')\n self.metadata[\"functional\"] = method\n\n if keyword == 'exchange':\n self.metadata[\"methods\"].append('DFT')\n self.metadata[\"functional\"] = option\n\n if keyword == 'print_orbitals':\n # Stay with the default value if a number isn't\n # specified.\n if option in ('true', 'false'):\n continue\n else:\n norbdisp_aonames = int(option)\n self.norbdisp_alpha_aonames = norbdisp_aonames\n self.norbdisp_beta_aonames = norbdisp_aonames\n self.norbdisp_set = True\n\n if line.strip().lower() == '$ecp':\n\n self.user_input['ecp'] = []\n line = next(inputfile)\n\n while line.strip().lower() != '$end':\n\n while list(set(line.strip())) != ['*']:\n\n # Parse the element for this ECP\n # entry. 
If only the element is on\n # this line, or the 2nd token is 0, it\n # applies to all atoms; if it's > 0,\n # then it indexes (1-based) that\n # specific atom in the whole molecule.\n tokens = line.split()\n assert len(tokens) > 0\n element = tokens[0][0].upper() + tokens[0][1:].lower()\n assert element in self.table.element\n if len(tokens) > 1:\n assert len(tokens) == 2\n index = int(tokens[1]) - 1\n else:\n index = -1\n line = next(inputfile)\n\n # Next comes the ECP definition. If\n # the line contains only a single\n # item, it's a built-in ECP, otherwise\n # it's a full definition.\n tokens = line.split()\n if len(tokens) == 1:\n ncore = 0\n line = next(inputfile)\n else:\n assert len(tokens) == 3\n ncore = int(tokens[2])\n # Don't parse the remainder of the\n # ECP definition.\n while list(set(line.strip())) != ['*']:\n line = next(inputfile)\n\n entry = (element, index, ncore)\n self.user_input['ecp'].append(entry)\n\n line = next(inputfile)\n\n if line.strip().lower() == '$end':\n break\n\n if line.strip().lower() == '$molecule':\n\n self.user_input['molecule'] = dict()\n line = next(inputfile)\n\n # Don't read the molecule, only the\n # supersystem charge and multiplicity.\n if line.split()[0].lower() == 'read':\n pass\n else:\n charge, mult = [int(x) for x in line.split()]\n self.user_input['molecule']['charge'] = charge\n self.user_input['molecule']['mult'] = mult\n\n line = next(inputfile).lower()\n\n # Parse the basis set name\n if 'Requested basis set' in line:\n self.metadata[\"basis_set\"] = line.split()[-1]\n\n # Parse the general basis for `gbasis`, in the style used by\n # Gaussian.\n if 'Basis set in general basis input format:' in line:\n self.skip_lines(inputfile, ['d', '$basis'])\n line = next(inputfile)\n if not hasattr(self, 'gbasis'):\n self.gbasis = []\n # The end of the general basis block.\n while '$end' not in line:\n atom = []\n # 1. Contains element symbol and atomic index of\n # basis functions; if 0, applies to all atoms of\n # same element.\n assert len(line.split()) == 2\n line = next(inputfile)\n # The end of each atomic block.\n while '****' not in line:\n # 2. Contains the type of basis function {S, SP,\n # P, D, F, G, H, ...}, the number of primitives,\n # and the weight of the final contracted function.\n bfsplitline = line.split()\n assert len(bfsplitline) == 3\n bftype = bfsplitline[0]\n nprim = int(bfsplitline[1])\n line = next(inputfile)\n # 3. The primitive basis functions that compose\n # the contracted basis function; there are `nprim`\n # of them. The first value is the exponent, and\n # the second value is the contraction\n # coefficient. 
If `bftype == 'SP'`, the primitives\n # are for both S- and P-type basis functions but\n # with separate contraction coefficients,\n # resulting in three columns.\n if bftype == 'SP':\n primitives_S = []\n primitives_P = []\n else:\n primitives = []\n # For each primitive in the contracted basis\n # function...\n for iprim in range(nprim):\n primsplitline = line.split()\n exponent = float(primsplitline[0])\n if bftype == 'SP':\n assert len(primsplitline) == 3\n coefficient_S = float(primsplitline[1])\n coefficient_P = float(primsplitline[2])\n primitives_S.append((exponent, coefficient_S))\n primitives_P.append((exponent, coefficient_P))\n else:\n assert len(primsplitline) == 2\n coefficient = float(primsplitline[1])\n primitives.append((exponent, coefficient))\n line = next(inputfile)\n if bftype == 'SP':\n bf_S = ('S', primitives_S)\n bf_P = ('P', primitives_P)\n atom.append(bf_S)\n atom.append(bf_P)\n else:\n bf = (bftype, primitives)\n atom.append(bf)\n # Move to the next contracted basis function\n # as long as we don't hit the '****' atom\n # delimiter.\n self.gbasis.append(atom)\n line = next(inputfile)\n\n if line.strip() == 'The following effective core potentials will be applied':\n\n # Keep track of all elements that may have an ECP on\n # them. *Which* centers have an ECP can't be\n # determined here, so just take the number of valence\n # electrons, then later later figure out the centers\n # and do core = Z - valence.\n self.possible_ecps = dict()\n # This will fail if an element has more than one kind\n # of ECP.\n\n split_fixed = utils.WidthSplitter((4, 13, 20, 2, 14, 14))\n\n self.skip_lines(inputfile, ['d', 'header', 'header', 'd'])\n line = next(inputfile)\n while list(set(line.strip())) != ['-']:\n tokens = split_fixed.split(line)\n if tokens[0] != '':\n element = tokens[0]\n valence = int(tokens[1])\n ncore = self.table.number[element] - valence\n self.possible_ecps[element] = ncore\n line = next(inputfile)\n\n if 'TIME STEP #' in line:\n tokens = line.split()\n self.append_attribute('time', float(tokens[8]))\n\n # Extract the atomic numbers and coordinates of the atoms.\n if 'Standard Nuclear Orientation' in line:\n if \"Angstroms\" in line:\n convertor = lambda x: x\n elif 'Bohr' in line:\n convertor = lambda x: utils.convertor(x, 'bohr', 'Angstrom')\n else:\n raise ValueError(\"Unknown units in coordinate header: {}\".format(line))\n self.skip_lines(inputfile, ['cols', 'dashes'])\n atomelements = []\n atomcoords = []\n line = next(inputfile)\n while list(set(line.strip())) != ['-']:\n entry = line.split()\n atomelements.append(entry[1])\n atomcoords.append([convertor(float(value)) for value in entry[2:]])\n line = next(inputfile)\n\n self.append_attribute('atomcoords', atomcoords)\n\n # We calculate and handle atomnos no matter what, since in\n # the case of fragment calculations the atoms may change,\n # along with the charge and spin multiplicity.\n self.atomnos = []\n self.atomelements = []\n for atomelement in atomelements:\n self.atomelements.append(atomelement)\n if atomelement == 'GH':\n self.atomnos.append(0)\n else:\n self.atomnos.append(self.table.number[atomelement])\n self.natom = len(self.atomnos)\n self.atommap = self.generate_atom_map()\n self.formula_histogram = self.generate_formula_histogram()\n\n # Number of electrons.\n # Useful for determining the number of occupied/virtual orbitals.\n if 'Nuclear Repulsion Energy' in line:\n line = next(inputfile)\n nelec_re_string = r'There are(\\s+[0-9]+) alpha and(\\s+[0-9]+) beta electrons'\n match = 
re.findall(nelec_re_string, line.strip())\n self.set_attribute('nalpha', int(match[0][0].strip()))\n self.set_attribute('nbeta', int(match[0][1].strip()))\n self.norbdisp_alpha += self.nalpha\n self.norbdisp_alpha_aonames += self.nalpha\n self.norbdisp_beta += self.nbeta\n self.norbdisp_beta_aonames += self.nbeta\n # Calculate the spin multiplicity (2S + 1), where S is the\n # total spin of the system.\n S = (self.nalpha - self.nbeta) / 2\n mult = int(2 * S + 1)\n self.set_attribute('mult', mult)\n # Calculate the molecular charge as the difference between\n # the atomic numbers and the number of electrons.\n if hasattr(self, 'atomnos'):\n charge = sum(self.atomnos) - (self.nalpha + self.nbeta)\n self.set_attribute('charge', charge)\n\n # Number of basis functions.\n if 'basis functions' in line:\n if not hasattr(self, 'nbasis'):\n self.set_attribute('nbasis', int(line.split()[-3]))\n # In the case that there are fewer basis functions\n # (and therefore MOs) than default number of MOs\n # displayed, reset the display values.\n self.norbdisp_alpha = min(self.norbdisp_alpha, self.nbasis)\n self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nbasis)\n self.norbdisp_beta = min(self.norbdisp_beta, self.nbasis)\n self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nbasis)\n\n # Check for whether or not we're peforming an\n # (un)restricted calculation.\n if 'calculation will be' in line:\n if ' restricted' in line:\n self.unrestricted = False\n if 'unrestricted' in line:\n self.unrestricted = True\n if hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):\n if self.nalpha != self.nbeta:\n self.unrestricted = True\n self.is_rohf = True\n\n # Section with SCF iterations goes like this:\n #\n # SCF converges when DIIS error is below 1.0E-05\n # ---------------------------------------\n # Cycle Energy DIIS Error\n # ---------------------------------------\n # 1 -381.9238072190 1.39E-01\n # 2 -382.2937212775 3.10E-03\n # 3 -382.2939780242 3.37E-03\n # ...\n #\n scf_success_messages = (\n 'Convergence criterion met',\n 'corrected energy'\n )\n scf_failure_messages = (\n 'SCF failed to converge',\n 'Convergence failure'\n )\n if 'SCF converges when ' in line:\n if not hasattr(self, 'scftargets'):\n self.scftargets = []\n target = float(line.split()[-1])\n self.scftargets.append([target])\n\n # We should have the header between dashes now,\n # but sometimes there are lines before the first dashes.\n while not 'Cycle Energy' in line:\n line = next(inputfile)\n self.skip_line(inputfile, 'd')\n\n values = []\n iter_counter = 1\n line = next(inputfile)\n while not any(message in line for message in scf_success_messages):\n\n # Some trickery to avoid a lot of printing that can occur\n # between each SCF iteration.\n entry = line.split()\n if len(entry) > 0:\n if entry[0] == str(iter_counter):\n # Q-Chem only outputs one error metric.\n error = float(entry[2])\n values.append([error])\n iter_counter += 1\n\n try:\n line = next(inputfile)\n # Is this the end of the file for some reason?\n except StopIteration:\n self.logger.warning('File terminated before end of last SCF! 
Last error: {}'.format(error))\n break\n\n # We've converged, but still need the last iteration.\n if any(message in line for message in scf_success_messages):\n entry = line.split()\n error = float(entry[2])\n values.append([error])\n iter_counter += 1\n\n # This is printed in regression QChem4.2/dvb_sp_unconverged.out\n # so use it to bail out when convergence fails.\n if any(message in line for message in scf_failure_messages):\n break\n\n if not hasattr(self, 'scfvalues'):\n self.scfvalues = []\n self.scfvalues.append(numpy.array(values))\n\n # Molecular orbital coefficients.\n\n # Try parsing them from this block (which comes from\n # `scf_final_print = 2``) rather than the combined\n # aonames/mocoeffs/moenergies block (which comes from\n # `print_orbitals = true`).\n if 'Final Alpha MO Coefficients' in line:\n if not hasattr(self, 'mocoeffs'):\n self.mocoeffs = []\n mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_alpha, self.ncolsblock)\n self.mocoeffs.append(mocoeffs.transpose())\n\n if 'Final Beta MO Coefficients' in line:\n mocoeffs = QChem.parse_matrix(inputfile, self.nbasis, self.norbdisp_beta, self.ncolsblock)\n self.mocoeffs.append(mocoeffs.transpose())\n\n if 'Total energy in the final basis set' in line:\n if not hasattr(self, 'scfenergies'):\n self.scfenergies = []\n scfenergy = float(line.split()[-1])\n self.scfenergies.append(utils.convertor(scfenergy, 'hartree', 'eV'))\n\n # Geometry optimization.\n\n if 'Maximum Tolerance Cnvgd?' in line:\n line_g = next(inputfile).split()[1:3]\n line_d = next(inputfile).split()[1:3]\n line_e = next(inputfile).split()[2:4]\n\n if not hasattr(self, 'geotargets'):\n self.geotargets = [line_g[1], line_d[1], utils.float(line_e[1])]\n if not hasattr(self, 'geovalues'):\n self.geovalues = []\n maxg = utils.float(line_g[0])\n maxd = utils.float(line_d[0])\n ediff = utils.float(line_e[0])\n geovalues = [maxg, maxd, ediff]\n self.geovalues.append(geovalues)\n\n if '** OPTIMIZATION CONVERGED **' in line:\n if not hasattr(self, 'optdone'):\n self.optdone = []\n self.optdone.append(len(self.atomcoords))\n\n if '** MAXIMUM OPTIMIZATION CYCLES REACHED **' in line:\n if not hasattr(self, 'optdone'):\n self.optdone = []\n\n # Moller-Plesset corrections.\n\n # There are multiple modules in Q-Chem for calculating MPn energies:\n # cdman, ccman, and ccman2, all with different output.\n #\n # MP2, RI-MP2, and local MP2 all default to cdman, which has a simple\n # block of output after the regular SCF iterations.\n #\n # MP3 is handled by ccman2.\n #\n # MP4 and variants are handled by ccman.\n\n # This is the MP2/cdman case.\n if 'MP2 total energy' in line:\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mp2energy = float(line.split()[4])\n mp2energy = utils.convertor(mp2energy, 'hartree', 'eV')\n self.mpenergies.append([mp2energy])\n\n # This is the MP3/ccman2 case.\n if line[1:11] == 'MP2 energy' and line[12:19] != 'read as':\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mpenergies = []\n mp2energy = float(line.split()[3])\n mpenergies.append(mp2energy)\n line = next(inputfile)\n line = next(inputfile)\n # Just a safe check.\n if 'MP3 energy' in line:\n mp3energy = float(line.split()[3])\n mpenergies.append(mp3energy)\n mpenergies = [utils.convertor(mpe, 'hartree', 'eV')\n for mpe in mpenergies]\n self.mpenergies.append(mpenergies)\n\n # This is the MP4/ccman case.\n if 'EHF' in line:\n if not hasattr(self, 'mpenergies'):\n self.mpenergies = []\n mpenergies = []\n\n while list(set(line.strip())) != 
['-']:\n\n if 'EMP2' in line:\n mp2energy = float(line.split()[2])\n mpenergies.append(mp2energy)\n if 'EMP3' in line:\n mp3energy = float(line.split()[2])\n mpenergies.append(mp3energy)\n if 'EMP4SDQ' in line:\n mp4sdqenergy = float(line.split()[2])\n mpenergies.append(mp4sdqenergy)\n # This is really MP4SD(T)Q.\n if 'EMP4 ' in line:\n mp4sdtqenergy = float(line.split()[2])\n mpenergies.append(mp4sdtqenergy)\n\n line = next(inputfile)\n\n mpenergies = [utils.convertor(mpe, 'hartree', 'eV')\n for mpe in mpenergies]\n self.mpenergies.append(mpenergies)\n\n # Coupled cluster corrections.\n # Hopefully we only have to deal with ccman2 here.\n\n if 'CCD total energy' in line:\n if not hasattr(self, 'ccenergies'):\n self.ccenergies = []\n ccdenergy = float(line.split()[-1])\n ccdenergy = utils.convertor(ccdenergy, 'hartree', 'eV')\n self.ccenergies.append(ccdenergy)\n if 'CCSD total energy' in line:\n has_triples = False\n if not hasattr(self, 'ccenergies'):\n self.ccenergies = []\n ccsdenergy = float(line.split()[-1])\n # Make sure we aren't actually doing CCSD(T).\n line = next(inputfile)\n line = next(inputfile)\n if 'CCSD(T) total energy' in line:\n has_triples = True\n ccsdtenergy = float(line.split()[-1])\n ccsdtenergy = utils.convertor(ccsdtenergy, 'hartree', 'eV')\n self.ccenergies.append(ccsdtenergy)\n if not has_triples:\n ccsdenergy = utils.convertor(ccsdenergy, 'hartree', 'eV')\n self.ccenergies.append(ccsdenergy)\n\n if line[:11] == \" CCSD T1^2\":\n t1_squared = float(line.split()[3])\n t1_norm = math.sqrt(t1_squared)\n self.metadata[\"t1_diagnostic\"] = t1_norm / math.sqrt(2 * (self.nalpha + self.nbeta))\n\n # Electronic transitions. Works for both CIS and TDDFT.\n if 'Excitation Energies' in line:\n\n # Restricted:\n # ---------------------------------------------------\n # TDDFT/TDA Excitation Energies\n # ---------------------------------------------------\n #\n # Excited state 1: excitation energy (eV) = 3.6052\n # Total energy for state 1: -382.167872200685\n # Multiplicity: Triplet\n # Trans. Mom.: 0.0000 X 0.0000 Y 0.0000 Z\n # Strength : 0.0000\n # D( 33) --> V( 3) amplitude = 0.2618\n # D( 34) --> V( 2) amplitude = 0.2125\n # D( 35) --> V( 1) amplitude = 0.9266\n #\n # Unrestricted:\n # Excited state 2: excitation energy (eV) = 2.3156\n # Total energy for state 2: -381.980177630969\n # <S**2> : 0.7674\n # Trans. 
Mom.: -2.7680 X -0.1089 Y 0.0000 Z\n # Strength : 0.4353\n # S( 1) --> V( 1) amplitude = -0.3105 alpha\n # D( 34) --> S( 1) amplitude = 0.9322 beta\n\n self.skip_lines(inputfile, ['dashes', 'blank'])\n line = next(inputfile)\n\n etenergies = []\n etsyms = []\n etoscs = []\n etsecs = []\n spinmap = {'alpha': 0, 'beta': 1}\n\n while list(set(line.strip())) != ['-']:\n\n # Take the total energy for the state and subtract from the\n # ground state energy, rather than just the EE;\n # this will be more accurate.\n if 'Total energy for state' in line:\n energy = utils.convertor(float(line.split()[5]), 'hartree', 'wavenumber')\n etenergy = energy - utils.convertor(self.scfenergies[-1], 'eV', 'wavenumber')\n etenergies.append(etenergy)\n # if 'excitation energy' in line:\n # etenergy = utils.convertor(float(line.split()[-1]), 'eV', 'wavenumber')\n # etenergies.append(etenergy)\n if 'Multiplicity' in line:\n etsym = line.split()[1]\n etsyms.append(etsym)\n if 'Strength' in line:\n strength = float(line.split()[-1])\n etoscs.append(strength)\n\n # This is the list of transitions.\n if 'amplitude' in line:\n sec = []\n while line.strip() != '':\n if self.unrestricted:\n spin = spinmap[line[42:47].strip()]\n else:\n spin = 0\n\n # There is a subtle difference between TDA and RPA calcs,\n # because in the latter case each transition line is\n # preceeded by the type of vector: X or Y, name excitation\n # or deexcitation (see #154 for details). For deexcitations,\n # we will need to reverse the MO indices. Note also that Q-Chem\n # starts reindexing virtual orbitals at 1.\n if line[5] == '(':\n ttype = 'X'\n startidx = int(line[6:9]) - 1\n endidx = int(line[17:20]) - 1 + self.nalpha\n contrib = float(line[34:41].strip())\n else:\n assert line[5] == \":\"\n ttype = line[4]\n startidx = int(line[9:12]) - 1\n endidx = int(line[20:23]) - 1 + self.nalpha\n contrib = float(line[37:44].strip())\n\n start = (startidx, spin)\n end = (endidx, spin)\n if ttype == 'X':\n sec.append([start, end, contrib])\n elif ttype == 'Y':\n sec.append([end, start, contrib])\n else:\n raise ValueError('Unknown transition type: %s' % ttype)\n line = next(inputfile)\n etsecs.append(sec)\n\n line = next(inputfile)\n\n self.set_attribute('etenergies', etenergies)\n self.set_attribute('etsyms', etsyms)\n self.set_attribute('etoscs', etoscs)\n self.set_attribute('etsecs', etsecs)\n\n # Static and dynamic polarizability from mopropman.\n if 'Polarizability (a.u.)' in line:\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n while 'Full Tensor' not in line:\n line = next(inputfile)\n self.skip_line(inputfile, 'blank')\n polarizability = [next(inputfile).split() for _ in range(3)]\n self.polarizabilities.append(numpy.array(polarizability))\n\n # Static polarizability from finite difference or\n # responseman.\n if line.strip() in ('Static polarizability tensor [a.u.]',\n 'Polarizability tensor [a.u.]'):\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n polarizability = [next(inputfile).split() for _ in range(3)]\n self.polarizabilities.append(numpy.array(polarizability))\n\n # Molecular orbital energies and symmetries.\n if line.strip() == 'Orbital Energies (a.u.) and Symmetries':\n\n # --------------------------------------------------------------\n # Orbital Energies (a.u.) 
and Symmetries\n # --------------------------------------------------------------\n #\n # Alpha MOs, Restricted\n # -- Occupied --\n # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005\n # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag\n # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585\n # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag\n # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397\n # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag\n # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263\n # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg\n # -0.216 -0.198 -0.160\n # 2 Au 2 Bg 3 Bg\n # -- Virtual --\n # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365\n # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu\n # 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539\n # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag\n # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806\n # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag\n # 0.816\n # 25 Bu\n #\n # Beta MOs, Restricted\n # -- Occupied --\n # -10.018 -10.018 -10.008 -10.008 -10.007 -10.007 -10.006 -10.005\n # 1 Bu 1 Ag 2 Bu 2 Ag 3 Bu 3 Ag 4 Bu 4 Ag\n # -9.992 -9.992 -0.818 -0.755 -0.721 -0.704 -0.670 -0.585\n # 5 Ag 5 Bu 6 Ag 6 Bu 7 Ag 7 Bu 8 Bu 8 Ag\n # -0.561 -0.532 -0.512 -0.462 -0.439 -0.410 -0.400 -0.397\n # 9 Ag 9 Bu 10 Ag 11 Ag 10 Bu 11 Bu 12 Bu 12 Ag\n # -0.376 -0.358 -0.349 -0.330 -0.305 -0.295 -0.281 -0.263\n # 13 Bu 14 Bu 13 Ag 1 Au 15 Bu 14 Ag 15 Ag 1 Bg\n # -0.216 -0.198 -0.160\n # 2 Au 2 Bg 3 Bg\n # -- Virtual --\n # 0.050 0.091 0.116 0.181 0.280 0.319 0.330 0.365\n # 3 Au 4 Au 4 Bg 5 Au 5 Bg 16 Ag 16 Bu 17 Bu\n # 0.370 0.413 0.416 0.422 0.446 0.469 0.496 0.539\n # 17 Ag 18 Bu 18 Ag 19 Bu 19 Ag 20 Bu 20 Ag 21 Ag\n # 0.571 0.587 0.610 0.627 0.646 0.693 0.743 0.806\n # 21 Bu 22 Ag 22 Bu 23 Bu 23 Ag 24 Ag 24 Bu 25 Ag\n # 0.816\n # 25 Bu\n # --------------------------------------------------------------\n\n self.skip_line(inputfile, 'dashes')\n line = next(inputfile)\n energies_alpha, symbols_alpha, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile)\n # Only look at the second block if doing an unrestricted calculation.\n # This might be a problem for ROHF/ROKS.\n if self.unrestricted:\n energies_beta, symbols_beta, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile)\n\n # For now, only keep the last set of MO energies, even though it is\n # printed at every step of geometry optimizations and fragment jobs.\n self.set_attribute('moenergies', [numpy.array(energies_alpha)])\n self.set_attribute('homos', [homo_alpha])\n self.set_attribute('mosyms', [symbols_alpha])\n if self.unrestricted:\n self.moenergies.append(numpy.array(energies_beta))\n self.homos.append(homo_beta)\n self.mosyms.append(symbols_beta)\n\n self.set_attribute('nmo', len(self.moenergies[0]))\n\n # Molecular orbital energies, no symmetries.\n if line.strip() == 'Orbital Energies (a.u.)':\n\n # In the case of no orbital symmetries, the beta spin block is not\n # present for restricted calculations.\n\n # --------------------------------------------------------------\n # Orbital Energies (a.u.)\n # --------------------------------------------------------------\n #\n # Alpha MOs\n # -- Occupied --\n # ******* -38.595 -34.580 -34.579 -34.578 -19.372 -19.372 -19.364\n # -19.363 -19.362 -19.362 -4.738 -3.252 -3.250 -3.250 -1.379\n # -1.371 -1.369 -1.365 -1.364 -1.362 -0.859 -0.855 -0.849\n # -0.846 -0.840 -0.836 -0.810 -0.759 -0.732 -0.729 -0.704\n # -0.701 -0.621 -0.610 -0.595 -0.587 -0.584 -0.578 -0.411\n # -0.403 -0.355 -0.354 -0.352\n # 
-- Virtual --\n # -0.201 -0.117 -0.099 -0.086 0.020 0.031 0.055 0.067\n # 0.075 0.082 0.086 0.092 0.096 0.105 0.114 0.148\n #\n # Beta MOs\n # -- Occupied --\n # ******* -38.561 -34.550 -34.549 -34.549 -19.375 -19.375 -19.367\n # -19.367 -19.365 -19.365 -4.605 -3.105 -3.103 -3.102 -1.385\n # -1.376 -1.376 -1.371 -1.370 -1.368 -0.863 -0.858 -0.853\n # -0.849 -0.843 -0.839 -0.818 -0.765 -0.738 -0.737 -0.706\n # -0.702 -0.624 -0.613 -0.600 -0.591 -0.588 -0.585 -0.291\n # -0.291 -0.288 -0.275\n # -- Virtual --\n # -0.139 -0.122 -0.103 0.003 0.014 0.049 0.049 0.059\n # 0.061 0.070 0.076 0.081 0.086 0.090 0.098 0.106\n # 0.138\n # --------------------------------------------------------------\n\n self.skip_line(inputfile, 'dashes')\n line = next(inputfile)\n energies_alpha, _, homo_alpha = self.parse_orbital_energies_and_symmetries(inputfile)\n # Only look at the second block if doing an unrestricted calculation.\n # This might be a problem for ROHF/ROKS.\n if self.unrestricted:\n energies_beta, _, homo_beta = self.parse_orbital_energies_and_symmetries(inputfile)\n\n # For now, only keep the last set of MO energies, even though it is\n # printed at every step of geometry optimizations and fragment jobs.\n self.set_attribute('moenergies', [numpy.array(energies_alpha)])\n self.set_attribute('homos', [homo_alpha])\n if self.unrestricted:\n self.moenergies.append(numpy.array(energies_beta))\n self.homos.append(homo_beta)\n\n self.set_attribute('nmo', len(self.moenergies[0]))\n\n # Molecular orbital coefficients.\n\n # This block comes from `print_orbitals = true/{int}`. Less\n # precision than `scf_final_print >= 2` for `mocoeffs`, but\n # important for `aonames` and `atombasis`.\n\n if any(header in line\n for header in self.alpha_mo_coefficient_headers):\n\n # If we've asked to display more virtual orbitals than\n # there are MOs present in the molecule, fix that now.\n if hasattr(self, 'nmo') and hasattr(self, 'nalpha') and hasattr(self, 'nbeta'):\n self.norbdisp_alpha_aonames = min(self.norbdisp_alpha_aonames, self.nmo)\n self.norbdisp_beta_aonames = min(self.norbdisp_beta_aonames, self.nmo)\n\n if not hasattr(self, 'mocoeffs'):\n self.mocoeffs = []\n if not hasattr(self, 'atombasis'):\n self.atombasis = []\n for n in range(self.natom):\n self.atombasis.append([])\n if not hasattr(self, 'aonames'):\n self.aonames = []\n # We could also attempt to parse `moenergies` here, but\n # nothing is gained by it.\n\n mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_alpha_aonames)\n # Only use these MO coefficients if we don't have them\n # from `scf_final_print`.\n if len(self.mocoeffs) == 0:\n self.mocoeffs.append(mocoeffs.transpose())\n\n # Go back through `aonames` to create `atombasis`.\n assert len(self.aonames) == self.nbasis\n for aoindex, aoname in enumerate(self.aonames):\n atomindex = int(self.re_atomindex.search(aoname).groups()[0]) - 1\n self.atombasis[atomindex].append(aoindex)\n assert len(self.atombasis) == len(self.atomnos)\n\n if 'BETA MOLECULAR ORBITAL COEFFICIENTS' in line:\n\n mocoeffs = self.parse_matrix_aonames(inputfile, self.nbasis, self.norbdisp_beta_aonames)\n if len(self.mocoeffs) == 1:\n self.mocoeffs.append(mocoeffs.transpose())\n\n # Population analysis.\n\n if 'Ground-State Mulliken Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'mulliken')\n if 'Hirshfeld Atomic Charges' in line:\n self.parse_charge_section(inputfile, 'hirshfeld')\n if 'Ground-State ChElPG Net Atomic Charges' in line:\n self.parse_charge_section(inputfile, 
'chelpg')\n\n # Multipole moments are not printed in lexicographical order,\n # so we need to parse and sort them. The units seem OK, but there\n # is some uncertainty about the reference point and whether it\n # can be changed.\n #\n # Notice how the letter/coordinate labels change to coordinate ranks\n # after hexadecapole moments, and need to be translated. Additionally,\n # after 9-th order moments the ranks are not necessarily single digits\n # and so there are spaces between them.\n #\n # -----------------------------------------------------------------\n # Cartesian Multipole Moments\n # LMN = < X^L Y^M Z^N >\n # -----------------------------------------------------------------\n # Charge (ESU x 10^10)\n # 0.0000\n # Dipole Moment (Debye)\n # X 0.0000 Y 0.0000 Z 0.0000\n # Tot 0.0000\n # Quadrupole Moments (Debye-Ang)\n # XX -50.9647 XY -0.1100 YY -50.1441\n # XZ 0.0000 YZ 0.0000 ZZ -58.5742\n # ...\n # 5th-Order Moments (Debye-Ang^4)\n # 500 0.0159 410 -0.0010 320 0.0005\n # 230 0.0000 140 0.0005 050 0.0012\n # ...\n # -----------------------------------------------------------------\n #\n if \"Cartesian Multipole Moments\" in line:\n\n # This line appears not by default, but only when\n # `multipole_order` > 4:\n line = inputfile.next()\n if 'LMN = < X^L Y^M Z^N >' in line:\n line = inputfile.next()\n\n # The reference point is always the origin, although normally the molecule\n # is moved so that the center of charge is at the origin.\n self.reference = [0.0, 0.0, 0.0]\n self.moments = [self.reference]\n\n # Watch out! This charge is in statcoulombs without the exponent!\n # We should expect very good agreement, however Q-Chem prints\n # the charge only with 5 digits, so expect 1e-4 accuracy.\n charge_header = inputfile.next()\n assert charge_header.split()[0] == \"Charge\"\n charge = float(inputfile.next().strip())\n charge = utils.convertor(charge, 'statcoulomb', 'e') * 1e-10\n # Allow this to change until fragment jobs are properly implemented.\n # assert abs(charge - self.charge) < 1e-4\n\n # This will make sure Debyes are used (not sure if it can be changed).\n line = inputfile.next()\n assert line.strip() == \"Dipole Moment (Debye)\"\n\n while \"-----\" not in line:\n\n # The current multipole element will be gathered here.\n multipole = []\n\n line = inputfile.next()\n while (\"-----\" not in line) and (\"Moment\" not in line):\n\n cols = line.split()\n\n # The total (norm) is printed for dipole but not other multipoles.\n if cols[0] == 'Tot':\n line = inputfile.next()\n continue\n\n # Find and replace any 'stars' with NaN before moving on.\n for i in range(len(cols)):\n if '***' in cols[i]:\n cols[i] = numpy.nan\n\n # The moments come in pairs (label followed by value) up to the 9-th order,\n # although above hexadecapoles the labels are digits representing the rank\n # in each coordinate. 
Above the 9-th order, ranks are not always single digits,\n # so there are spaces between them, which means moments come in quartets.\n if len(self.moments) < 5:\n for i in range(len(cols)//2):\n lbl = cols[2*i]\n m = cols[2*i + 1]\n multipole.append([lbl, m])\n elif len(self.moments) < 10:\n for i in range(len(cols)//2):\n lbl = cols[2*i]\n lbl = 'X'*int(lbl[0]) + 'Y'*int(lbl[1]) + 'Z'*int(lbl[2])\n m = cols[2*i + 1]\n multipole.append([lbl, m])\n else:\n for i in range(len(cols)//4):\n lbl = 'X'*int(cols[4*i]) + 'Y'*int(cols[4*i + 1]) + 'Z'*int(cols[4*i + 2])\n m = cols[4*i + 3]\n multipole.append([lbl, m])\n\n line = inputfile.next()\n\n # Sort should use the first element when sorting lists,\n # so this should simply work, and afterwards we just need\n # to extract the second element in each list (the actual moment).\n multipole.sort()\n multipole = [m[1] for m in multipole]\n self.moments.append(multipole)\n\n # For `method = force` or geometry optimizations,\n # the gradient is printed.\n if any(header in line for header in self.gradient_headers):\n if not hasattr(self, 'grads'):\n self.grads = []\n if 'SCF' in line:\n ncolsblock = self.ncolsblock\n else:\n ncolsblock = 5\n grad = QChem.parse_matrix(inputfile, 3, self.natom, ncolsblock)\n self.grads.append(grad.T)\n\n # (Static) polarizability from frequency calculations.\n if 'Polarizability Matrix (a.u.)' in line:\n if not hasattr(self, 'polarizabilities'):\n self.polarizabilities = []\n polarizability = []\n self.skip_line(inputfile, 'index header')\n for _ in range(3):\n line = next(inputfile)\n ss = line.strip()[1:]\n polarizability.append([ss[0:12], ss[13:24], ss[25:36]])\n # For some reason the sign is inverted.\n self.polarizabilities.append(-numpy.array(polarizability, dtype=float))\n\n # For IR-related jobs, the Hessian is printed (dim: 3*natom, 3*natom).\n # Note that this is *not* the mass-weighted Hessian.\n if any(header in line for header in self.hessian_headers):\n if not hasattr(self, 'hessian'):\n dim = 3*self.natom\n self.hessian = QChem.parse_matrix(inputfile, dim, dim, self.ncolsblock)\n\n # Start of the IR/Raman frequency section.\n if 'VIBRATIONAL ANALYSIS' in line:\n\n while 'STANDARD THERMODYNAMIC QUANTITIES' not in line:\n ## IR, optional Raman:\n #\n # **********************************************************************\n # ** **\n # ** VIBRATIONAL ANALYSIS **\n # ** -------------------- **\n # ** **\n # ** VIBRATIONAL FREQUENCIES (CM**-1) AND NORMAL MODES **\n # ** FORCE CONSTANTS (mDYN/ANGSTROM) AND REDUCED MASSES (AMU) **\n # ** INFRARED INTENSITIES (KM/MOL) **\n ##** RAMAN SCATTERING ACTIVITIES (A**4/AMU) AND DEPOLARIZATION RATIOS **\n # ** **\n # **********************************************************************\n #\n #\n # Mode: 1 2 3\n # Frequency: -106.88 -102.91 161.77\n # Force Cnst: 0.0185 0.0178 0.0380\n # Red. 
Mass: 2.7502 2.8542 2.4660\n # IR Active: NO YES YES\n # IR Intens: 0.000 0.000 0.419\n # Raman Active: YES NO NO\n ##Raman Intens: 2.048 0.000 0.000\n ##Depolar: 0.750 0.000 0.000\n # X Y Z X Y Z X Y Z\n # C 0.000 0.000 -0.100 -0.000 0.000 -0.070 -0.000 -0.000 -0.027\n # C 0.000 0.000 0.045 -0.000 0.000 -0.074 0.000 -0.000 -0.109\n # C 0.000 0.000 0.148 -0.000 -0.000 -0.074 0.000 0.000 -0.121\n # (...)\n # H -0.000 -0.000 0.422 -0.000 -0.000 0.499 0.000 0.000 -0.285\n # TransDip 0.000 -0.000 -0.000 0.000 -0.000 -0.000 -0.000 0.000 0.021\n #\n # Mode: 4 5 6\n # ...\n #\n # There isn't any symmetry information for normal modes present\n # in Q-Chem.\n # if not hasattr(self, 'vibsyms'):\n # self.vibsyms = []\n if 'Frequency:' in line:\n if not hasattr(self, 'vibfreqs'):\n self.vibfreqs = []\n vibfreqs = map(float, line.split()[1:])\n self.vibfreqs.extend(vibfreqs)\n\n if 'IR Intens:' in line:\n if not hasattr(self, 'vibirs'):\n self.vibirs = []\n vibirs = map(float, line.split()[2:])\n self.vibirs.extend(vibirs)\n\n if 'Raman Intens:' in line:\n if not hasattr(self, 'vibramans'):\n self.vibramans = []\n vibramans = map(float, line.split()[2:])\n self.vibramans.extend(vibramans)\n\n # This is the start of the displacement block.\n if line.split()[0:3] == ['X', 'Y', 'Z']:\n if not hasattr(self, 'vibdisps'):\n self.vibdisps = []\n disps = []\n for k in range(self.natom):\n line = next(inputfile)\n numbers = list(map(float, line.split()[1:]))\n N = len(numbers) // 3\n if not disps:\n for n in range(N):\n disps.append([])\n for n in range(N):\n disps[n].append(numbers[3*n:(3*n)+3])\n self.vibdisps.extend(disps)\n\n line = next(inputfile)\n\n # Anharmonic vibrational analysis.\n # Q-Chem includes 3 theories: VPT2, TOSH, and VCI.\n # For now, just take the VPT2 results.\n\n # if 'VIBRATIONAL ANHARMONIC ANALYSIS' in line:\n\n # while list(set(line.strip())) != ['=']:\n # if 'VPT2' in line:\n # if not hasattr(self, 'vibanharms'):\n # self.vibanharms = []\n # self.vibanharms.append(float(line.split()[-1]))\n # line = next(inputfile)\n\n if 'STANDARD THERMODYNAMIC QUANTITIES AT' in line:\n\n if not hasattr(self, 'temperature'):\n self.temperature = float(line.split()[4])\n # Not supported yet.\n if not hasattr(self, 'pressure'):\n self.pressure = float(line.split()[7])\n self.skip_line(inputfile, 'blank')\n\n line = next(inputfile)\n if self.natom == 1:\n assert 'Translational Enthalpy' in line\n else:\n assert 'Imaginary Frequencies' in line\n line = next(inputfile)\n # Not supported yet.\n assert 'Zero point vibrational energy' in line\n if not hasattr(self, 'zpe'):\n # Convert from kcal/mol to Hartree/particle.\n self.zpe = utils.convertor(float(line.split()[4]),\n 'kcal/mol', 'hartree')\n atommasses = []\n while 'Translational Enthalpy' not in line:\n if 'Has Mass' in line:\n atommass = float(line.split()[6])\n atommasses.append(atommass)\n line = next(inputfile)\n if not hasattr(self, 'atommasses'):\n self.atommasses = numpy.array(atommasses)\n\n while line.strip():\n line = next(inputfile)\n\n line = next(inputfile)\n assert 'Total Enthalpy' in line\n if not hasattr(self, 'enthalpy'):\n enthalpy = float(line.split()[2])\n self.enthalpy = utils.convertor(enthalpy,\n 'kcal/mol', 'hartree')\n line = next(inputfile)\n assert 'Total Entropy' in line\n if not hasattr(self, 'entropy'):\n entropy = float(line.split()[2]) * self.temperature / 1000\n # This is the *temperature dependent* entropy.\n self.entropy = utils.convertor(entropy,\n 'kcal/mol', 'hartree')\n if not hasattr(self, 'freeenergy'):\n 
self.freeenergy = self.enthalpy - self.entropy\n\n if line[:16] == ' Total job time:':\n self.metadata['success'] = True\n\n # TODO:\n # 'enthalpy' (incorrect)\n # 'entropy' (incorrect)\n # 'freeenergy' (incorrect)\n # 'nocoeffs'\n # 'nooccnos'\n # 'vibanharms'\n",
"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\nimport unittest\n\nimport numpy\n\nfrom cclib.bridge import cclib2pyquante\nfrom ..test_data import getdatafile\nfrom cclib.parser.utils import find_package\n\nfrom numpy.testing import assert_array_almost_equal\n\n\nclass PyquanteTest(unittest.TestCase):\n \"\"\"Tests for the cclib2pyquante bridge in cclib.\"\"\"\n\n def setUp(self):\n super(PyquanteTest, self).setUp()\n self._found_pyquante = find_package(\"PyQuante\")\n self.data, self.logfile = getdatafile(\"Gaussian\", \"basicGaussian16\", [\"water_ccsd.log\"])\n\n def test_makepyquante(self):\n # Test older PyQuante bridge\n from PyQuante.hartree_fock import hf\n from PyQuante.Molecule import Molecule\n\n reference = Molecule(\n \"h2o\",\n [(8, (0, 0, 0.119159)), (1, (0, 0.790649, -0.476637)), (1, (0, -0.790649, -0.476637)),],\n units=\"Angstroms\",\n )\n refen, reforbe, reforbs = hf(reference)\n\n pyqmol = cclib2pyquante.makepyquante(self.data)\n en, orbe, orbs = hf(pyqmol)\n\n self.assertAlmostEqual(en, refen, delta=1.0e-6)\n\n\nclass pyquante2Test(unittest.TestCase):\n \"\"\"Tests for the cclib2pyquante bridge in cclib.\"\"\"\n\n def setUp(self):\n super(pyquante2Test, self).setUp()\n self._found_pyquante2 = find_package(\"pyquante2\")\n self.data, self.logfile = getdatafile(\"Gaussian\", \"basicGaussian16\", [\"water_ccsd.log\"])\n\n def test_makepyquante(self):\n # Test pyquante2 bridge\n from pyquante2 import molecule, rhf, h2o, basisset\n\n bfs = basisset(h2o)\n # Copied from water_ccsd.log\n refmol = molecule(\n [(8, 0.0, 0.0, 0.119159), (1, 0, 0.790649, -0.476637), (1, 0, -0.790649, -0.476637)],\n units=\"Angstroms\",\n )\n refsolver = rhf(refmol, bfs)\n refsolver.converge()\n\n pyqmol = cclib2pyquante.makepyquante(self.data)\n pyqsolver = rhf(pyqmol, bfs)\n pyqsolver.converge()\n\n assert_array_almost_equal(refsolver.energies[-1], pyqsolver.energies[-1], decimal=6)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.empty"
],
[
"numpy.testing.assert_array_almost_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samialperen/adaboost_vs_svm
|
[
"9967378607dbde936f06214b0fcfd042c60a9759"
] |
[
"code/linear_svm.py"
] |
[
"# This script contains code for Linear SVMs\n# Author: Sami Alperen Akgun\n# Email: [email protected]\n\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt \nfrom sklearn import svm #svm library from sklearn Python module\n\ndef read_data(data_path, filename,feature_number):\n \"\"\"\n This function reads the data from data_path/filename\n WARNING: This function assumes that features of data\n is separated by commas in the file\n Input: data_path --> The full directory path of data\n filename --> name of the file (With extension)\n feature_number --> Total feature number in the data\n Output: X --> numpy array that contains feature values\n size(X) --> sample size x feature number\n Y --> numpy array that contains labels\n size(Y) --> sample size x 1\n \"\"\"\n\n with open(data_path + \"/\" + filename, 'r', encoding='utf-8-sig') as f: \n X = np.genfromtxt(f, delimiter=',')[:,0:feature_number]\n\n\n # Last column of datafile contains output labels\n Y = np.genfromtxt(data_path + \"/\" + filename,delimiter=\",\")[:,feature_number]\n Y = Y.reshape(X.shape[0])\n\n return X,Y\n\n\ndef plot_svc_decision_function(model, ax=None, plot_support=True):\n \"\"\" Obtained from https://jakevdp.github.io/PythonDataScienceHandbook/05.07-support-vector-machines.html\"\"\"\n \"\"\" It is obtained in order to plot margins \"\"\"\n \"\"\" Otherwise, one can see my own version below to plot decision boundaries without margins!\"\"\"\n \"\"\"Plot the decision function for a 2D SVC\"\"\"\n if ax is None:\n ax = plt.gca()\n xlim = ax.get_xlim() \n ylim = ax.get_ylim() \n \n # create grid to evaluate model\n x = np.linspace(xlim[0], xlim[1], 30)\n y = np.linspace(ylim[0], ylim[1], 30)\n Y, X = np.meshgrid(y, x)\n xy = np.vstack([X.ravel(), Y.ravel()]).T\n P = model.decision_function(xy).reshape(X.shape)\n \n # plot decision boundary and margins\n ax.contour(X, Y, P, colors='k',\n levels=[-1, 0, 1], alpha=0.5,\n linestyles=['--', '-', '--'])\n \n # plot support vectors\n if plot_support:\n ax.scatter(model.support_vectors_[:, 0],\n model.support_vectors_[:, 1],\n s=300, linewidth=1, facecolors='none');\n #ax.set_xlim(xlim)\n #ax.set_ylim(ylim)\n\ndef main():\n \"\"\"\n This is the main function of this script\n \"\"\"\n\n ##### 1-) Load data\n # If you run the code from pattern_recognition_assignment3 path, uncomment below\n data_dir = os.getcwd() + '/data' \n # If you run the code from code directory, uncomment below\n #data_path = os.getcwd() + \"..\" / \"data\"/\n X_dset1, Y_dset1 = read_data(data_dir,\"dataset1.csv\",2)\n X_dset2, Y_dset2 = read_data(data_dir,\"dataset2.csv\",2)\n \n ##### 1-) Visualize the dataset on a figure\n class0_X_dset1 = X_dset1[Y_dset1==0 ,:]\n class1_X_dset1 = X_dset1[Y_dset1==1 ,:]\n class0_X_dset2 = X_dset2[Y_dset2==0 ,:]\n class1_X_dset2 = X_dset2[Y_dset2==1 ,:]\n \n \n fig0 = plt.figure()\n plt.plot(class0_X_dset1[:,0],class0_X_dset1[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset1[:,0],class1_X_dset1[:,1],'gx',label=\"Class 1\")\n plt.title(\"Dataset1 Dataset Visualization\")\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n fig0_2 = plt.figure()\n plt.plot(class0_X_dset2[:,0],class0_X_dset2[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset2[:,0],class1_X_dset2[:,1],'gx',label=\"Class 1\")\n plt.title(\"Dataset2 Dataset Visualization\")\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n \n ##### 2-) Train a linear SVM on the datase\n # Try with different c values\n c_values = [0.001,0.01,0.1,1]\n\n ### c = 0.001 case\n 
clf_dset1_p001 = svm.SVC(kernel='linear', C = c_values[0])\n clf_dset1_p001.fit(X_dset1,Y_dset1)\n \n weights = clf_dset1_p001.coef_[0]\n m = -weights[0] / weights[1]\n x_axis = np.linspace(0,5,100)\n y_axis = m * x_axis - clf_dset1_p001.intercept_[0] / weights[1]\n\n fig1 = plt.figure()\n #plt.plot(x_axis, y_axis)\n plt.plot(class0_X_dset1[:,0],class0_X_dset1[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset1[:,0],class1_X_dset1[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset1_p001)\n plt.title(\"Linear SVM with c=%.3f for Dataset1\" %c_values[0])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n\n clf_dset2_p001 = svm.SVC(kernel='linear', C = c_values[0])\n clf_dset2_p001.fit(X_dset2,Y_dset2)\n\n weights_2 = clf_dset2_p001.coef_[0]\n m_2 = -weights_2[0] / weights_2[1]\n x_axis_2 = np.linspace(-0.6,0.3,100)\n y_axis_2 = m_2 * x_axis_2 - clf_dset2_p001.intercept_[0] / weights_2[1]\n\n fig1_2 = plt.figure()\n #plt.plot(x_axis_2, y_axis_2)\n plt.plot(class0_X_dset2[:,0],class0_X_dset2[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset2[:,0],class1_X_dset2[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset2_p001)\n plt.title(\"Linear SVM with c=%.3f for Dataset2\" %c_values[0])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n \n\n ### c = 0.01 case\n clf_dset1_p01 = svm.SVC(kernel='linear', C = c_values[1])\n clf_dset1_p01.fit(X_dset1,Y_dset1)\n\n weights2 = clf_dset1_p01.coef_[0]\n m2 = -weights2[0] / weights2[1]\n x_axis2 = np.linspace(0,5,100)\n y_axis2 = m2 * x_axis2 - clf_dset1_p01.intercept_[0] / weights2[1]\n\n fig2 = plt.figure()\n plt.plot(x_axis2, y_axis2)\n plt.plot(class0_X_dset1[:,0],class0_X_dset1[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset1[:,0],class1_X_dset1[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset1_p01)\n plt.title(\"Linear SVM with c=%.3f for Dataset1\" %c_values[1])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n\n clf_dset2_p01 = svm.SVC(kernel='linear', C = c_values[1])\n clf_dset2_p01.fit(X_dset2,Y_dset2)\n\n weights2_2 = clf_dset2_p01.coef_[0]\n m2_2 = -weights2_2[0] / weights2_2[1]\n x_axis2_2 = np.linspace(-0.6,0.3,100)\n y_axis2_2 = m2_2 * x_axis2_2 - clf_dset2_p01.intercept_[0] / weights2_2[1]\n\n fig2_2 = plt.figure()\n #plt.plot(x_axis2_2, y_axis2_2)\n plt.plot(class0_X_dset2[:,0],class0_X_dset2[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset2[:,0],class1_X_dset2[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset2_p01)\n plt.title(\"Linear SVM with c=%.3f for Dataset2\" %c_values[1])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n ### c = 0.1 case\n clf_dset1_p1 = svm.SVC(kernel='linear', C = c_values[2])\n clf_dset1_p1.fit(X_dset1,Y_dset1)\n\n weights3 = clf_dset1_p1.coef_[0]\n m3 = -weights3[0] / weights3[1]\n x_axis3 = np.linspace(0,5,100)\n y_axis3 = m3 * x_axis3 - clf_dset1_p1.intercept_[0] / weights3[1]\n\n fig3 = plt.figure()\n #plt.plot(x_axis3, y_axis3)\n plt.plot(class0_X_dset1[:,0],class0_X_dset1[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset1[:,0],class1_X_dset1[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset1_p1)\n plt.title(\"Linear SVM with c=%.3f for Dataset1\" %c_values[2])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n\n clf_dset2_p1 = svm.SVC(kernel='linear', C = c_values[2])\n clf_dset2_p1.fit(X_dset2,Y_dset2)\n\n weights3_2 = clf_dset2_p1.coef_[0]\n m3_2 = -weights3_2[0] / weights3_2[1]\n x_axis3_2 = np.linspace(-0.6,0.3,100)\n y_axis3_2 = m3_2 * x_axis3_2 - 
clf_dset2_p1.intercept_[0] / weights3_2[1]\n\n fig3_2 = plt.figure()\n #plt.plot(x_axis3_2, y_axis3_2)\n plt.plot(class0_X_dset2[:,0],class0_X_dset2[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset2[:,0],class1_X_dset2[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset2_p1)\n plt.title(\"Linear SVM with c=%.3f for Dataset2\" %c_values[2])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n ### c = 1 case\n clf_dset1_1 = svm.SVC(kernel='linear', C = c_values[3])\n clf_dset1_1.fit(X_dset1,Y_dset1)\n\n weights4 = clf_dset1_1.coef_[0]\n m4 = -weights4[0] / weights4[1]\n x_axis4 = np.linspace(0,5,100)\n y_axis4 = m4 * x_axis4 - clf_dset1_1.intercept_[0] / weights4[1]\n\n fig4 = plt.figure()\n #plt.plot(x_axis4, y_axis4)\n plt.plot(class0_X_dset1[:,0],class0_X_dset1[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset1[:,0],class1_X_dset1[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset1_1)\n plt.title(\"Linear SVM with c=%.3f for Dataset1\" %c_values[3])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n\n clf_dset2_1 = svm.SVC(kernel='linear', C = c_values[3])\n clf_dset2_1.fit(X_dset2,Y_dset2)\n\n weights4_2 = clf_dset2_1.coef_[0]\n m4_2 = -weights4_2[0] / weights4_2[1]\n x_axis4_2 = np.linspace(-0.6,0.3,100)\n y_axis4_2 = m4_2 * x_axis4_2 - clf_dset2_1.intercept_[0] / weights4_2[1]\n\n fig4_2 = plt.figure()\n #plt.plot(x_axis4_2, y_axis4_2)\n plt.plot(class0_X_dset2[:,0],class0_X_dset2[:,1],'ro',label=\"Class 0\")\n plt.plot(class1_X_dset2[:,0],class1_X_dset2[:,1],'gx',label=\"Class 1\")\n plot_svc_decision_function(clf_dset2_1)\n plt.title(\"Linear SVM with c=%.3f for Dataset2\" %c_values[3])\n plt.xlabel(\"x1\")\n plt.ylabel(\"x2\")\n plt.legend()\n\n\n\n\n\n\n\n\n\n\n\n plt.show(block=False) # show all the figures at once\n plt.waitforbuttonpress(1)\n input(\"Please press any key to close all figures.\")\n plt.close(\"all\")\n\n \n\n\nif __name__ == \"__main__\": main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"numpy.linspace",
"numpy.genfromtxt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"sklearn.svm.SVC",
"matplotlib.pyplot.close",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stjordanis/Hyperactive
|
[
"5acf247d8023ff6761593b9d0954bdd912d20aed"
] |
[
"examples/optimization_techniques/ensemble_optimizer.py"
] |
[
"from sklearn.datasets import load_iris\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neural_network import MLPRegressor\n\nfrom hyperactive import Hyperactive, EnsembleOptimizer\n\ndata = load_iris()\nX, y = data.data, data.target\n\n\ndef model(opt):\n knr = KNeighborsClassifier(n_neighbors=opt[\"n_neighbors\"])\n scores = cross_val_score(knr, X, y, cv=5)\n score = scores.mean()\n\n return score\n\n\nsearch_space = {\n \"n_neighbors\": list(range(1, 100)),\n}\n\nhyper = Hyperactive()\nhyper.add_search(model, search_space, n_iter=100)\nhyper.run()\n\nsearch_data = hyper.results(model)\n\noptimizer = EnsembleOptimizer(\n estimators=[SVR(), DecisionTreeRegressor(), MLPRegressor()],\n xi=0.02,\n warm_start_smbo=search_data,\n rand_rest_p=0.05,\n)\n\nhyper = Hyperactive()\nhyper.add_search(model, search_space, optimizer=optimizer, n_iter=100)\nhyper.run()\n"
] |
[
[
"sklearn.model_selection.cross_val_score",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.datasets.load_iris",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.svm.SVR",
"sklearn.neural_network.MLPRegressor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
orclassiq/amazon-braket-sdk-python
|
[
"69acaf54237ecbee14b5b5f0549fa28e32eba83b"
] |
[
"src/braket/circuits/gates.py"
] |
[
"# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nfrom typing import Iterable\n\nimport numpy as np\n\nimport braket.ir.jaqcd as ir\nfrom braket.circuits import circuit\nfrom braket.circuits.angled_gate import AngledGate\nfrom braket.circuits.gate import Gate\nfrom braket.circuits.instruction import Instruction\nfrom braket.circuits.quantum_operator_helpers import (\n is_unitary,\n verify_quantum_operator_matrix_dimensions,\n)\nfrom braket.circuits.qubit import QubitInput\nfrom braket.circuits.qubit_set import QubitSet, QubitSetInput\n\n\"\"\"\nTo add a new gate:\n 1. Implement the class and extend `Gate`\n 2. Add a method with the `@circuit.subroutine(register=True)` decorator. Method name\n will be added into the `Circuit` class. This method is the default way\n clients add this gate to a circuit.\n 3. Register the class with the `Gate` class via `Gate.register_gate()`.\n\"\"\"\n\n# Single qubit gates #\n\n\nclass H(Gate):\n \"\"\"Hadamard gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"H\"])\n\n def to_ir(self, target: QubitSet):\n return ir.H.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return 1.0 / np.sqrt(2.0) * np.array([[1.0, 1.0], [1.0, -1.0]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def h(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of H instructions.\n\n Examples:\n >>> circ = Circuit().h(0)\n >>> circ = Circuit().h([0, 1, 2])\n \"\"\"\n return [Instruction(H(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(H)\n\n\nclass I(Gate): # noqa: E742, E261\n \"\"\"Identity gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"I\"])\n\n def to_ir(self, target: QubitSet):\n return ir.I.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.eye(2, dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def i(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of I instructions.\n\n Examples:\n >>> circ = Circuit().i(0)\n >>> circ = Circuit().i([0, 1, 2])\n \"\"\"\n return [Instruction(I(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(I)\n\n\nclass X(Gate):\n \"\"\"Pauli-X gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"X\"])\n\n def to_ir(self, target: QubitSet):\n return ir.X.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)\n\n @staticmethod\n def 
fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def x(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of X instructions.\n\n Examples:\n >>> circ = Circuit().x(0)\n >>> circ = Circuit().x([0, 1, 2])\n \"\"\"\n return [Instruction(X(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(X)\n\n\nclass Y(Gate):\n \"\"\"Pauli-Y gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"Y\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Y.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[0.0, -1.0j], [1.0j, 0.0]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def y(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of Y instructions.\n\n Examples:\n >>> circ = Circuit().y(0)\n >>> circ = Circuit().y([0, 1, 2])\n \"\"\"\n return [Instruction(Y(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Y)\n\n\nclass Z(Gate):\n \"\"\"Pauli-Z gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"Z\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Z.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[1.0, 0.0], [0.0, -1.0]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def z(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of Z instructions.\n\n Examples:\n >>> circ = Circuit().z(0)\n >>> circ = Circuit().z([0, 1, 2])\n \"\"\"\n return [Instruction(Z(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Z)\n\n\nclass S(Gate):\n \"\"\"S gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"S\"])\n\n def to_ir(self, target: QubitSet):\n return ir.S.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n\n return np.array([[1.0, 0.0], [0.0, 1.0j]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def s(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of S instructions.\n\n Examples:\n >>> circ = Circuit().s(0)\n >>> circ = Circuit().s([0, 1, 2])\n \"\"\"\n return [Instruction(S(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(S)\n\n\nclass Si(Gate):\n \"\"\"Conjugate transpose of S gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"Si\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Si.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[1, 0], [0, -1j]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n 
@staticmethod\n @circuit.subroutine(register=True)\n def si(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: Iterable of Si instructions.\n\n Examples:\n >>> circ = Circuit().si(0)\n >>> circ = Circuit().si([0, 1, 2])\n \"\"\"\n return [Instruction(Si(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Si)\n\n\nclass T(Gate):\n \"\"\"T gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"T\"])\n\n def to_ir(self, target: QubitSet):\n return ir.T.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[1.0, 0.0], [0.0, np.exp(1j * np.pi / 4)]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def t(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of T instructions.\n\n Examples:\n >>> circ = Circuit().t(0)\n >>> circ = Circuit().t([0, 1, 2])\n \"\"\"\n return [Instruction(T(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(T)\n\n\nclass Ti(Gate):\n \"\"\"Conjugate transpose of T gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"Ti\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Ti.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[1.0, 0.0], [0.0, np.exp(-1j * np.pi / 4)]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def ti(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of Ti instructions.\n\n Examples:\n >>> circ = Circuit().ti(0)\n >>> circ = Circuit().ti([0, 1, 2])\n \"\"\"\n return [Instruction(Ti(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Ti)\n\n\nclass V(Gate):\n \"\"\"Square root of not gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"V\"])\n\n def to_ir(self, target: QubitSet):\n return ir.V.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[0.5 + 0.5j, 0.5 - 0.5j], [0.5 - 0.5j, 0.5 + 0.5j]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def v(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of V instructions.\n\n Examples:\n >>> circ = Circuit().v(0)\n >>> circ = Circuit().v([0, 1, 2])\n \"\"\"\n return [Instruction(V(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(V)\n\n\nclass Vi(Gate):\n \"\"\"Conjugate transpose of square root of not gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"Vi\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Vi.construct(target=target[0])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(([[0.5 - 0.5j, 0.5 + 0.5j], 
[0.5 + 0.5j, 0.5 - 0.5j]]), dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def vi(target: QubitSetInput) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit, int, or iterable of Qubit / int): Target qubit(s)\n\n Returns:\n Iterable[Instruction]: `Iterable` of Vi instructions.\n\n Examples:\n >>> circ = Circuit().vi(0)\n >>> circ = Circuit().vi([0, 1, 2])\n \"\"\"\n return [Instruction(Vi(), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Vi)\n\n\n# Single qubit gates with rotation #\n\n\nclass Rx(AngledGate):\n \"\"\"X-axis rotation gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(angle=angle, qubit_count=None, ascii_symbols=[\"Rx({:.3g})\".format(angle)])\n\n def to_ir(self, target: QubitSet):\n return ir.Rx.construct(target=target[0], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n cos = np.cos(self.angle / 2)\n sin = np.sin(self.angle / 2)\n return np.array([[cos, -1j * sin], [-1j * sin, cos]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def rx(target: QubitInput, angle: float) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Iterable[Instruction]: Rx instruction.\n\n Examples:\n >>> circ = Circuit().rx(0, 0.15)\n \"\"\"\n return [Instruction(Rx(angle), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Rx)\n\n\nclass Ry(AngledGate):\n \"\"\"Y-axis rotation gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(angle=angle, qubit_count=None, ascii_symbols=[\"Ry({:.3g})\".format(angle)])\n\n def to_ir(self, target: QubitSet):\n return ir.Ry.construct(target=target[0], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n cos = np.cos(self.angle / 2)\n sin = np.sin(self.angle / 2)\n return np.array([[cos, -sin], [+sin, cos]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def ry(target: QubitInput, angle: float) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Iterable[Instruction]: Ry instruction.\n\n Examples:\n >>> circ = Circuit().ry(0, 0.15)\n \"\"\"\n return [Instruction(Ry(angle), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Ry)\n\n\nclass Rz(AngledGate):\n \"\"\"Z-axis rotation gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(angle=angle, qubit_count=None, ascii_symbols=[\"Rz({:.3g})\".format(angle)])\n\n def to_ir(self, target: QubitSet):\n return ir.Rz.construct(target=target[0], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [[np.exp(-1j * self.angle / 2), 0], [0, np.exp(1j * self.angle / 2)]], dtype=complex\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def rz(target: QubitInput, angle: float) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit or int): 
Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Iterable[Instruction]: Rz instruction.\n\n Examples:\n >>> circ = Circuit().rz(0, 0.15)\n \"\"\"\n return [Instruction(Rz(angle), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(Rz)\n\n\nclass PhaseShift(AngledGate):\n \"\"\"Phase shift gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle, qubit_count=None, ascii_symbols=[\"PHASE({:.3g})\".format(angle)]\n )\n\n def to_ir(self, target: QubitSet):\n return ir.PhaseShift.construct(target=target[0], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.array([[1.0, 0.0], [0.0, np.exp(1j * self.angle)]], dtype=complex)\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 1\n\n @staticmethod\n @circuit.subroutine(register=True)\n def phaseshift(target: QubitInput, angle: float) -> Iterable[Instruction]:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Iterable[Instruction]: PhaseShift instruction.\n\n Examples:\n >>> circ = Circuit().phaseshift(0, 0.15)\n \"\"\"\n return [Instruction(PhaseShift(angle), target=qubit) for qubit in QubitSet(target)]\n\n\nGate.register_gate(PhaseShift)\n\n\n# Two qubit gates #\n\n\nclass CNot(Gate):\n \"\"\"Controlled NOT gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"X\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CNot.construct(control=target[0], target=target[1])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cnot(control: QubitInput, target: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n\n Returns:\n Instruction: CNot instruction.\n\n Examples:\n >>> circ = Circuit().cnot(0, 1)\n \"\"\"\n return Instruction(CNot(), target=[control, target])\n\n\nGate.register_gate(CNot)\n\n\nclass Swap(Gate):\n \"\"\"Swap gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"SWAP\", \"SWAP\"])\n\n def to_ir(self, target: QubitSet):\n return ir.Swap.construct(targets=[target[0], target[1]])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def swap(target1: QubitInput, target2: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n\n Returns:\n Instruction: Swap instruction.\n\n Examples:\n >>> circ = Circuit().swap(0, 1)\n \"\"\"\n return Instruction(Swap(), target=[target1, target2])\n\n\nGate.register_gate(Swap)\n\n\nclass ISwap(Gate):\n \"\"\"ISwap gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"ISWAP\", \"ISWAP\"])\n\n def to_ir(self, target: QubitSet):\n return ir.ISwap.construct(targets=[target[0], 
target[1]])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0j, 0.0],\n [0.0, 1.0j, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def iswap(target1: QubitInput, target2: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n\n Returns:\n Instruction: ISwap instruction.\n\n Examples:\n >>> circ = Circuit().iswap(0, 1)\n \"\"\"\n return Instruction(ISwap(), target=[target1, target2])\n\n\nGate.register_gate(ISwap)\n\n\nclass PSwap(AngledGate):\n \"\"\"PSwap gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle,\n qubit_count=None,\n ascii_symbols=[\"PSWAP({:.3g})\".format(angle), \"PSWAP({:.3g})\".format(angle)],\n )\n\n def to_ir(self, target: QubitSet):\n return ir.PSwap.construct(targets=[target[0], target[1]], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, np.exp(1j * self.angle), 0.0],\n [0.0, np.exp(1j * self.angle), 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def pswap(target1: QubitInput, target2: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n\n Returns:\n Instruction: PSwap instruction.\n\n Examples:\n >>> circ = Circuit().pswap(0, 1, 0.15)\n \"\"\"\n return Instruction(PSwap(angle), target=[target1, target2])\n\n\nGate.register_gate(PSwap)\n\n\nclass XY(AngledGate):\n \"\"\"XY gate.\n\n Reference: https://arxiv.org/abs/1912.04424v1\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle,\n qubit_count=None,\n ascii_symbols=[\"XY({:.3g})\".format(angle), \"XY({:.3g})\".format(angle)],\n )\n\n def to_ir(self, target: QubitSet):\n return ir.XY.construct(targets=[target[0], target[1]], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n cos = np.cos(self.angle / 2)\n sin = np.sin(self.angle / 2)\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, cos, 1.0j * sin, 0.0],\n [0.0, 1.0j * sin, cos, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def xy(target1: QubitInput, target2: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n\n Returns:\n Instruction: XY instruction.\n\n Examples:\n >>> circ = Circuit().xy(0, 1, 0.15)\n \"\"\"\n return Instruction(XY(angle), target=[target1, target2])\n\n\nGate.register_gate(XY)\n\n\nclass CPhaseShift(AngledGate):\n \"\"\"Controlled phase shift gate.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle, qubit_count=None, ascii_symbols=[\"C\", \"PHASE({:.3g})\".format(angle)]\n )\n\n def to_ir(self, target: QubitSet):\n return ir.CPhaseShift.construct(control=target[0], 
target=target[1], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.diag([1.0, 1.0, 1.0, np.exp(1j * self.angle)])\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cphaseshift(control: QubitInput, target: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: CPhaseShift instruction.\n\n Examples:\n >>> circ = Circuit().cphaseshift(0, 1, 0.15)\n \"\"\"\n return Instruction(CPhaseShift(angle), target=[control, target])\n\n\nGate.register_gate(CPhaseShift)\n\n\nclass CPhaseShift00(AngledGate):\n \"\"\"Controlled phase shift gate for phasing the \\\\|00> state.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle, qubit_count=None, ascii_symbols=[\"C\", \"PHASE00({:.3g})\".format(angle)]\n )\n\n def to_ir(self, target: QubitSet):\n return ir.CPhaseShift00.construct(control=target[0], target=target[1], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.diag([np.exp(1j * self.angle), 1.0, 1.0, 1.0])\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cphaseshift00(control: QubitInput, target: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: CPhaseShift00 instruction.\n\n Examples:\n >>> circ = Circuit().cphaseshift00(0, 1, 0.15)\n \"\"\"\n return Instruction(CPhaseShift00(angle), target=[control, target])\n\n\nGate.register_gate(CPhaseShift00)\n\n\nclass CPhaseShift01(AngledGate):\n \"\"\"Controlled phase shift gate for phasing the \\\\|01> state.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle, qubit_count=None, ascii_symbols=[\"C\", \"PHASE01({:.3g})\".format(angle)]\n )\n\n def to_ir(self, target: QubitSet):\n return ir.CPhaseShift01.construct(control=target[0], target=target[1], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.diag([1.0, np.exp(1j * self.angle), 1.0, 1.0])\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cphaseshift01(control: QubitInput, target: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: CPhaseShift01 instruction.\n\n Examples:\n >>> circ = Circuit().cphaseshift01(0, 1, 0.15)\n \"\"\"\n return Instruction(CPhaseShift01(angle), target=[control, target])\n\n\nGate.register_gate(CPhaseShift01)\n\n\nclass CPhaseShift10(AngledGate):\n \"\"\"Controlled phase shift gate for phasing the \\\\|10> state.\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle, qubit_count=None, ascii_symbols=[\"C\", \"PHASE10({:.3g})\".format(angle)]\n )\n\n def to_ir(self, target: QubitSet):\n return ir.CPhaseShift10.construct(control=target[0], target=target[1], 
angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.diag([1.0, 1.0, np.exp(1j * self.angle), 1.0])\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cphaseshift10(control: QubitInput, target: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: CPhaseShift10 instruction.\n\n Examples:\n >>> circ = Circuit().cphaseshift10(0, 1, 0.15)\n \"\"\"\n return Instruction(CPhaseShift10(angle), target=[control, target])\n\n\nGate.register_gate(CPhaseShift10)\n\n\nclass CV(Gate):\n \"\"\"Controlled Sqrt of NOT gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"V\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CV.construct(control=target[0], target=target[1])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.5 + 0.5j, 0.5 - 0.5j], # if the control bit, then apply the V gate\n [0.0, 0.0, 0.5 - 0.5j, 0.5 + 0.5j], # which is the sqrt(NOT) gate.\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cv(control: QubitInput, target: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n\n Returns:\n Instruction: CV instruction.\n\n Examples:\n >>> circ = Circuit().cv(0, 1)\n \"\"\"\n return Instruction(CV(), target=[control, target])\n\n\nGate.register_gate(CV)\n\n\nclass CY(Gate):\n \"\"\"Controlled Pauli-Y gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"Y\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CY.construct(control=target[0], target=target[1])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, -1.0j],\n [0.0, 0.0, +1.0j, 0.0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cy(control: QubitInput, target: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n\n Returns:\n Instruction: CY instruction.\n\n Examples:\n >>> circ = Circuit().cy(0, 1)\n \"\"\"\n return Instruction(CY(), target=[control, target])\n\n\nGate.register_gate(CY)\n\n\nclass CZ(Gate):\n \"\"\"Controlled Pauli-Z gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"Z\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CZ.construct(control=target[0], target=target[1])\n\n def to_matrix(self) -> np.ndarray:\n return np.diag([complex(1.0), 1.0, 1.0, -1.0])\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cz(control: QubitInput, target: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index.\n target (Qubit or int): Target qubit index.\n\n Returns:\n Instruction: CZ instruction.\n\n Examples:\n >>> circ = Circuit().cz(0, 1)\n 
\"\"\"\n return Instruction(CZ(), target=[control, target])\n\n\nGate.register_gate(CZ)\n\n\nclass XX(AngledGate):\n \"\"\"Ising XX coupling gate.\n\n Reference: https://arxiv.org/abs/1707.06356\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle,\n qubit_count=None,\n ascii_symbols=[\"XX({:.3g})\".format(angle), \"XX({:.3g})\".format(angle)],\n )\n\n def to_ir(self, target: QubitSet):\n return ir.XX.construct(targets=[target[0], target[1]], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n cos = np.cos(self.angle / 2)\n isin = 1.0j * np.sin(self.angle / 2)\n return np.array(\n [\n [cos, 0.0, 0.0, -isin],\n [0.0, cos, -isin, 0.0],\n [0.0, -isin, cos, 0.0],\n [-isin, 0.0, 0.0, cos],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def xx(target1: QubitInput, target2: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: XX instruction.\n\n Examples:\n >>> circ = Circuit().xx(0, 1, 0.15)\n \"\"\"\n return Instruction(XX(angle), target=[target1, target2])\n\n\nGate.register_gate(XX)\n\n\nclass YY(AngledGate):\n \"\"\"Ising YY coupling gate.\n\n Reference: https://arxiv.org/abs/1707.06356\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle,\n qubit_count=None,\n ascii_symbols=[\"YY({:.3g})\".format(angle), \"YY({:.3g})\".format(angle)],\n )\n\n def to_ir(self, target: QubitSet):\n return ir.YY.construct(targets=[target[0], target[1]], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n cos = np.cos(self.angle / 2)\n isin = 1.0j * np.sin(self.angle / 2)\n return np.array(\n [\n [cos, 0.0, 0.0, isin],\n [0.0, cos, -isin, 0.0],\n [0.0, -isin, cos, 0.0],\n [isin, 0.0, 0.0, cos],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def yy(target1: QubitInput, target2: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: YY instruction.\n\n Examples:\n >>> circ = Circuit().yy(0, 1, 0.15)\n \"\"\"\n return Instruction(YY(angle), target=[target1, target2])\n\n\nGate.register_gate(YY)\n\n\nclass ZZ(AngledGate):\n \"\"\"Ising ZZ coupling gate.\n\n Reference: https://arxiv.org/abs/1707.06356\n\n Args:\n angle (float): angle in radians.\n \"\"\"\n\n def __init__(self, angle: float):\n super().__init__(\n angle=angle,\n qubit_count=None,\n ascii_symbols=[\"ZZ({:.3g})\".format(angle), \"ZZ({:.3g})\".format(angle)],\n )\n\n def to_ir(self, target: QubitSet):\n return ir.ZZ.construct(targets=[target[0], target[1]], angle=self.angle)\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [np.exp(-1j * (self.angle / 2)), 0.0, 0.0, 0.0],\n [0.0, np.exp(1j * (self.angle / 2)), 0.0, 0.0],\n [0.0, 0.0, np.exp(1j * (self.angle / 2)), 0.0],\n [0.0, 0.0, 0.0, np.exp(-1j * (self.angle / 2))],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 2\n\n @staticmethod\n @circuit.subroutine(register=True)\n def 
zz(target1: QubitInput, target2: QubitInput, angle: float) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n angle (float): Angle in radians.\n\n Returns:\n Instruction: ZZ instruction.\n\n Examples:\n >>> circ = Circuit().zz(0, 1, 0.15)\n \"\"\"\n return Instruction(ZZ(angle), target=[target1, target2])\n\n\nGate.register_gate(ZZ)\n\n\n# Three qubit gates #\n\n\nclass CCNot(Gate):\n \"\"\"CCNOT gate or Toffoli gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"C\", \"X\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CCNot.construct(controls=[target[0], target[1]], target=target[2])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 3\n\n @staticmethod\n @circuit.subroutine(register=True)\n def ccnot(control1: QubitInput, control2: QubitInput, target: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control1 (Qubit or int): Control qubit 1 index.\n control2 (Qubit or int): Control qubit 2 index.\n target (Qubit or int): Target qubit index.\n\n Returns:\n Instruction: CCNot instruction.\n\n Examples:\n >>> circ = Circuit().ccnot(0, 1, 2)\n \"\"\"\n return Instruction(CCNot(), target=[control1, control2, target])\n\n\nGate.register_gate(CCNot)\n\n\nclass CSwap(Gate):\n \"\"\"Controlled Swap gate.\"\"\"\n\n def __init__(self):\n super().__init__(qubit_count=None, ascii_symbols=[\"C\", \"SWAP\", \"SWAP\"])\n\n def to_ir(self, target: QubitSet):\n return ir.CSwap.construct(control=target[0], targets=[target[1], target[2]])\n\n def to_matrix(self) -> np.ndarray:\n return np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n ],\n dtype=complex,\n )\n\n @staticmethod\n def fixed_qubit_count() -> int:\n return 3\n\n @staticmethod\n @circuit.subroutine(register=True)\n def cswap(control: QubitInput, target1: QubitInput, target2: QubitInput) -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n control (Qubit or int): Control qubit index\n target1 (Qubit or int): Target qubit 1 index.\n target2 (Qubit or int): Target qubit 2 index.\n\n Returns:\n Instruction: CSwap instruction.\n\n Examples:\n >>> circ = Circuit().cswap(0, 1, 2)\n \"\"\"\n return Instruction(CSwap(), target=[control, target1, target2])\n\n\nGate.register_gate(CSwap)\n\n\nclass Unitary(Gate):\n \"\"\"Arbitrary unitary gate\n\n Args:\n matrix (numpy.ndarray): Unitary matrix which defines the gate.\n display_name (str): Name to be used for an instance of this unitary gate\n for circuit diagrams. 
Defaults to `U`.\n\n Raises:\n ValueError: If `matrix` is not a two-dimensional square matrix,\n or has a dimension length that is not a positive power of 2,\n or is not unitary.\n \"\"\"\n\n def __init__(self, matrix: np.ndarray, display_name: str = \"U\"):\n verify_quantum_operator_matrix_dimensions(matrix)\n self._matrix = np.array(matrix, dtype=complex)\n qubit_count = int(np.log2(self._matrix.shape[0]))\n\n if not is_unitary(self._matrix):\n raise ValueError(f\"{self._matrix} is not unitary\")\n\n super().__init__(qubit_count=qubit_count, ascii_symbols=[display_name] * qubit_count)\n\n def to_matrix(self):\n return np.array(self._matrix)\n\n def to_ir(self, target: QubitSet):\n return ir.Unitary.construct(\n targets=[qubit for qubit in target],\n matrix=Unitary._transform_matrix_to_ir(self._matrix),\n )\n\n def __eq__(self, other):\n if isinstance(other, Unitary):\n return self.matrix_equivalence(other)\n return NotImplemented\n\n @staticmethod\n def _transform_matrix_to_ir(matrix: np.ndarray):\n return [[[element.real, element.imag] for element in row] for row in matrix.tolist()]\n\n @staticmethod\n @circuit.subroutine(register=True)\n def unitary(targets: QubitSet, matrix: np.ndarray, display_name: str = \"U\") -> Instruction:\n \"\"\"Registers this function into the circuit class.\n\n Args:\n targets (QubitSet): Target qubits.\n matrix (numpy.ndarray): Unitary matrix which defines the gate. Matrix should be\n compatible with the supplied targets, with `2 ** len(targets) == matrix.shape[0]`.\n display_name (str): Name to be used for an instance of this unitary gate\n for circuit diagrams. Defaults to `U`.\n\n Returns:\n Instruction: Unitary instruction.\n\n Raises:\n ValueError: If `matrix` is not a two-dimensional square matrix,\n or has a dimension length that is not compatible with the `targets`,\n or is not unitary,\n\n Examples:\n >>> circ = Circuit().unitary(matrix=np.array([[0, 1],[1, 0]]), targets=[0])\n \"\"\"\n if 2 ** len(targets) != matrix.shape[0]:\n raise ValueError(\"Dimensions of the supplied unitary are incompatible with the targets\")\n\n return Instruction(Unitary(matrix, display_name), target=targets)\n\n\nGate.register_gate(Unitary)\n"
] |
[
[
"numpy.log2",
"numpy.sqrt",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.exp",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iwoithe/Gimel-Studio
|
[
"e2750576e72edee6f2f4c268045b81459df82d89"
] |
[
"src/GimelStudio/corenodes/convert/to_normal_map_node.py"
] |
[
"# THIS FILE IS A PART OF GIMEL STUDIO AND IS LICENSED UNDER THE SAME TERMS:\n# ----------------------------------------------------------------------------\n# Gimel Studio Copyright 2019-2021 by Noah Rahm and contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nimport numpy as np\nimport scipy.ndimage\nimport scipy.misc\nfrom PIL import ImageFilter\n\nfrom GimelStudio import api\n\n# FIXME: hack!\nfrom GimelStudio.utils.image import ArrayFromImage, ArrayToImage\n\n\nclass ToNormalMapNode(api.NodeBase):\n def __init__(self, _id):\n api.NodeBase.__init__(self, _id)\n\n def SmoothGaussian(self, im, sigma):\n \"\"\" Blurs the normals. \"\"\"\n if sigma == 0:\n return im\n\n im_smooth = im.astype(float)\n kernel_x = np.arange(-3 * sigma, 3 * sigma + 1).astype(float)\n kernel_x = np.exp((-(kernel_x**2)) / (2 * (sigma**2)))\n\n im_smooth = scipy.ndimage.convolve(im_smooth, kernel_x[np.newaxis])\n\n im_smooth = scipy.ndimage.convolve(im_smooth, kernel_x[np.newaxis].T)\n\n return im_smooth\n\n def Gradient(self, im_smooth):\n \"\"\" Calculates the gradient for the normal map. \"\"\"\n gradient_x = im_smooth.astype(float)\n gradient_y = im_smooth.astype(float)\n\n kernel = np.arange(-1, 2).astype(float)\n kernel = - kernel / 2\n\n gradient_x = scipy.ndimage.convolve(gradient_x, kernel[np.newaxis])\n gradient_y = scipy.ndimage.convolve(gradient_y, kernel[np.newaxis].T)\n\n return gradient_x, gradient_y\n\n def Sobel(self, im_smooth):\n \"\"\" Calculates another type of gradient for the normal map. \"\"\"\n gradient_x = im_smooth.astype(float)\n gradient_y = im_smooth.astype(float)\n\n kernel = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n\n gradient_x = scipy.ndimage.convolve(gradient_x, kernel)\n gradient_y = scipy.ndimage.convolve(gradient_y, kernel.T)\n\n return gradient_x, gradient_y\n\n def ComputeNormalMap(self, gradient_x, gradient_y, intensity=1):\n \"\"\" Calculates the normals of an image and returns a normal map. 
\"\"\"\n width = gradient_x.shape[1]\n height = gradient_x.shape[0]\n max_x = np.max(gradient_x)\n max_y = np.max(gradient_y)\n\n max_value = max_x\n\n if max_y > max_x:\n max_value = max_y\n\n normal_map = np.zeros((height, width, 3), dtype=np.float32)\n\n intensity = 1 / intensity\n\n strength = max_value / (max_value * intensity)\n\n normal_map[..., 0] = gradient_x / max_value\n normal_map[..., 1] = gradient_y / max_value\n normal_map[..., 2] = 1 / strength\n\n norm = np.sqrt(np.power(normal_map[..., 0], 2) +\n np.power(normal_map[..., 1], 2) + np.power(normal_map[..., 2], 2))\n\n normal_map[..., 0] /= norm\n normal_map[..., 1] /= norm\n normal_map[..., 2] /= norm\n\n normal_map *= 0.5\n normal_map += 0.5\n\n return normal_map\n\n @property\n def NodeMeta(self):\n meta_info = {\n \"label\": \"To Normal Map\",\n \"author\": \"Correct Syntax\",\n \"version\": (2, 2, 0),\n \"supported_app_version\": (0, 5, 0),\n \"category\": \"CONVERT\",\n \"description\": \"Converts the image into a normal map texture for use in 3D.\",\n }\n return meta_info\n\n def NodeInitProps(self):\n p1 = api.PositiveIntegerProp(\n idname=\"Sigma\",\n default=1,\n min_val=1,\n max_val=25,\n widget=api.SLIDER_WIDGET,\n label=\"Sigma:\",\n )\n p2 = api.PositiveIntegerProp(\n idname=\"Intensity\",\n default=1,\n min_val=1,\n max_val=25,\n widget=api.SLIDER_WIDGET,\n label=\"Intensity:\",\n )\n\n self.NodeAddProp(p1)\n self.NodeAddProp(p2)\n\n def NodeInitParams(self):\n p = api.RenderImageParam('Image')\n\n self.NodeAddParam(p)\n\n def NodeEvaluation(self, eval_info):\n image1 = eval_info.EvaluateParameter('Image')\n sigma_val = eval_info.EvaluateProperty('Sigma')\n intensity_val = eval_info.EvaluateProperty('Intensity')\n\n # Convert the current image data to an array that scipy can use\n im = ArrayFromImage(image1.GetImage())\n\n # Create the image\n if im.ndim == 3:\n im_grey = np.zeros((im.shape[0], im.shape[1])).astype(float)\n im_grey = (im[..., 0] * 0.3 + im[..., 1] * 0.6 + im[..., 2] * 0.1)\n im = im_grey\n\n im_smooth = self.SmoothGaussian(im, sigma_val)\n sobel_x, sobel_y = self.Sobel(im_smooth)\n\n # Calculate the normal map\n generated_normal_map = self.ComputeNormalMap(\n sobel_x,\n sobel_y,\n intensity_val\n )\n\n image = api.RenderImage()\n image.SetAsImage(\n ArrayToImage(generated_normal_map).convert('RGBA')\n )\n self.NodeSetThumb(image.GetImage())\n return image\n\n\napi.RegisterNode(ToNormalMapNode, \"corenode_tonormalmap\")\n"
] |
[
[
"numpy.power",
"numpy.arange",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zacharyCormack/Assortment
|
[
"52a60f1ec207dfb9a9f154204c0b24cb4b129043"
] |
[
"plots/anim.py"
] |
[
"#!/usr/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfig = plt.figure(figsize=(8, 8), facecolor=\"lightgray\")\n\nax = plt.subplot(111, frameon=False)\n\ndata = np.random.normal(.5, .5, (128, 150))\nX = np.linspace(-1.2, 1.2, data.shape[-1])\nG = 1.5 * np.exp(-4 * X ** 2)\n\n# Generate line plots\nget_y = lambda pdist, value, pos: 400*(1 - pdist) + (1/(1+1/pdist)-pdist)*(\n\t10*value*(np.sin(pos*17)+3)*(np.sin(pos*13)+3) - pos**2*6) - 225\n\nlines = []\nfor i in range(len(data)):\n\t# one point perspective\n\t# add 5 to i to make start not immediate\n\tpdist = 1 / (i+3)\n\txscale = 2 * pdist\n\ty = get_y(pdist, G*data[i], X)\n\tline, = ax.plot(xscale*X, y, color=\"dimgray\", lw=xscale*12)\n\tlines.append(line)\n\nax.set_ylim(0, 200)\n\nax.set_xticks([])\nax.set_yticks([])\n\n\ndef update(*args):\n\tdata[:, 1:] = data[:, :-1]\n\tdata[:, 0] = np.random.normal(.5, .5, len(data))\n\n\tfor i in range(len(data)):\n\t\tlines[i].set_ydata(get_y(1 / (i+3), G*data[i], X))\n\n\t# Return modified artists\n\treturn lines\n\n# Construct the animation, using the update function as the animation director.\nanim = animation.FuncAnimation(fig, update, interval=7)\nplt.show()\n"
] |
[
[
"numpy.linspace",
"numpy.sin",
"numpy.random.normal",
"matplotlib.pyplot.subplot",
"matplotlib.animation.FuncAnimation",
"numpy.exp",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andrewkwolek/numpy
|
[
"cbccbe9dee293ff2bf0167e37443ce4975781562"
] |
[
"numpy/f2py/tests/test_array_from_pyobj.py"
] |
[
"import os\nimport sys\nimport copy\nimport platform\nimport pytest\n\nimport numpy as np\n\nfrom numpy.core.multiarray import typeinfo\nfrom . import util\n\nwrap = None\n\n\ndef setup_module():\n \"\"\"\n Build the required testing extension module\n\n \"\"\"\n global wrap\n\n # Check compiler availability first\n if not util.has_c_compiler():\n pytest.skip(\"No C compiler available\")\n\n if wrap is None:\n config_code = \"\"\"\n config.add_extension('test_array_from_pyobj_ext',\n sources=['wrapmodule.c', 'fortranobject.c'],\n define_macros=[])\n \"\"\"\n d = os.path.dirname(__file__)\n src = [\n util.getpath(\"tests\", \"src\", \"array_from_pyobj\", \"wrapmodule.c\"),\n util.getpath(\"src\", \"fortranobject.c\"),\n util.getpath(\"src\", \"fortranobject.h\"),\n ]\n wrap = util.build_module_distutils(src, config_code,\n \"test_array_from_pyobj_ext\")\n\n\ndef flags_info(arr):\n flags = wrap.array_attrs(arr)[6]\n return flags2names(flags)\n\n\ndef flags2names(flags):\n info = []\n for flagname in [\n \"CONTIGUOUS\",\n \"FORTRAN\",\n \"OWNDATA\",\n \"ENSURECOPY\",\n \"ENSUREARRAY\",\n \"ALIGNED\",\n \"NOTSWAPPED\",\n \"WRITEABLE\",\n \"WRITEBACKIFCOPY\",\n \"UPDATEIFCOPY\",\n \"BEHAVED\",\n \"BEHAVED_RO\",\n \"CARRAY\",\n \"FARRAY\",\n ]:\n if abs(flags) & getattr(wrap, flagname, 0):\n info.append(flagname)\n return info\n\n\nclass Intent:\n def __init__(self, intent_list=[]):\n self.intent_list = intent_list[:]\n flags = 0\n for i in intent_list:\n if i == \"optional\":\n flags |= wrap.F2PY_OPTIONAL\n else:\n flags |= getattr(wrap, \"F2PY_INTENT_\" + i.upper())\n self.flags = flags\n\n def __getattr__(self, name):\n name = name.lower()\n if name == \"in_\":\n name = \"in\"\n return self.__class__(self.intent_list + [name])\n\n def __str__(self):\n return \"intent(%s)\" % (\",\".join(self.intent_list))\n\n def __repr__(self):\n return \"Intent(%r)\" % (self.intent_list)\n\n def is_intent(self, *names):\n for name in names:\n if name not in self.intent_list:\n return False\n return True\n\n def is_intent_exact(self, *names):\n return len(self.intent_list) == len(names) and self.is_intent(*names)\n\n\nintent = Intent()\n\n_type_names = [\n \"BOOL\",\n \"BYTE\",\n \"UBYTE\",\n \"SHORT\",\n \"USHORT\",\n \"INT\",\n \"UINT\",\n \"LONG\",\n \"ULONG\",\n \"LONGLONG\",\n \"ULONGLONG\",\n \"FLOAT\",\n \"DOUBLE\",\n \"CFLOAT\",\n]\n\n_cast_dict = {\"BOOL\": [\"BOOL\"]}\n_cast_dict[\"BYTE\"] = _cast_dict[\"BOOL\"] + [\"BYTE\"]\n_cast_dict[\"UBYTE\"] = _cast_dict[\"BOOL\"] + [\"UBYTE\"]\n_cast_dict[\"BYTE\"] = [\"BYTE\"]\n_cast_dict[\"UBYTE\"] = [\"UBYTE\"]\n_cast_dict[\"SHORT\"] = _cast_dict[\"BYTE\"] + [\"UBYTE\", \"SHORT\"]\n_cast_dict[\"USHORT\"] = _cast_dict[\"UBYTE\"] + [\"BYTE\", \"USHORT\"]\n_cast_dict[\"INT\"] = _cast_dict[\"SHORT\"] + [\"USHORT\", \"INT\"]\n_cast_dict[\"UINT\"] = _cast_dict[\"USHORT\"] + [\"SHORT\", \"UINT\"]\n\n_cast_dict[\"LONG\"] = _cast_dict[\"INT\"] + [\"LONG\"]\n_cast_dict[\"ULONG\"] = _cast_dict[\"UINT\"] + [\"ULONG\"]\n\n_cast_dict[\"LONGLONG\"] = _cast_dict[\"LONG\"] + [\"LONGLONG\"]\n_cast_dict[\"ULONGLONG\"] = _cast_dict[\"ULONG\"] + [\"ULONGLONG\"]\n\n_cast_dict[\"FLOAT\"] = _cast_dict[\"SHORT\"] + [\"USHORT\", \"FLOAT\"]\n_cast_dict[\"DOUBLE\"] = _cast_dict[\"INT\"] + [\"UINT\", \"FLOAT\", \"DOUBLE\"]\n\n_cast_dict[\"CFLOAT\"] = _cast_dict[\"FLOAT\"] + [\"CFLOAT\"]\n\n# 32 bit system malloc typically does not provide the alignment required by\n# 16 byte long double types this means the inout intent cannot be satisfied\n# and several tests fail as the alignment 
flag can be randomly true or fals\n# when numpy gains an aligned allocator the tests could be enabled again\n#\n# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.\nif ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8)\n and sys.platform != \"win32\"\n and (platform.system(), platform.processor()) != (\"Darwin\", \"arm\")):\n _type_names.extend([\"LONGDOUBLE\", \"CDOUBLE\", \"CLONGDOUBLE\"])\n _cast_dict[\"LONGDOUBLE\"] = _cast_dict[\"LONG\"] + [\n \"ULONG\",\n \"FLOAT\",\n \"DOUBLE\",\n \"LONGDOUBLE\",\n ]\n _cast_dict[\"CLONGDOUBLE\"] = _cast_dict[\"LONGDOUBLE\"] + [\n \"CFLOAT\",\n \"CDOUBLE\",\n \"CLONGDOUBLE\",\n ]\n _cast_dict[\"CDOUBLE\"] = _cast_dict[\"DOUBLE\"] + [\"CFLOAT\", \"CDOUBLE\"]\n\n\nclass Type:\n _type_cache = {}\n\n def __new__(cls, name):\n if isinstance(name, np.dtype):\n dtype0 = name\n name = None\n for n, i in typeinfo.items():\n if not isinstance(i, type) and dtype0.type is i.type:\n name = n\n break\n obj = cls._type_cache.get(name.upper(), None)\n if obj is not None:\n return obj\n obj = object.__new__(cls)\n obj._init(name)\n cls._type_cache[name.upper()] = obj\n return obj\n\n def _init(self, name):\n self.NAME = name.upper()\n info = typeinfo[self.NAME]\n self.type_num = getattr(wrap, \"NPY_\" + self.NAME)\n assert self.type_num == info.num\n self.dtype = np.dtype(info.type)\n self.type = info.type\n self.elsize = info.bits / 8\n self.dtypechar = info.char\n\n def cast_types(self):\n return [self.__class__(_m) for _m in _cast_dict[self.NAME]]\n\n def all_types(self):\n return [self.__class__(_m) for _m in _type_names]\n\n def smaller_types(self):\n bits = typeinfo[self.NAME].alignment\n types = []\n for name in _type_names:\n if typeinfo[name].alignment < bits:\n types.append(Type(name))\n return types\n\n def equal_types(self):\n bits = typeinfo[self.NAME].alignment\n types = []\n for name in _type_names:\n if name == self.NAME:\n continue\n if typeinfo[name].alignment == bits:\n types.append(Type(name))\n return types\n\n def larger_types(self):\n bits = typeinfo[self.NAME].alignment\n types = []\n for name in _type_names:\n if typeinfo[name].alignment > bits:\n types.append(Type(name))\n return types\n\n\nclass Array:\n def __init__(self, typ, dims, intent, obj):\n self.type = typ\n self.dims = dims\n self.intent = intent\n self.obj_copy = copy.deepcopy(obj)\n self.obj = obj\n\n # arr.dtypechar may be different from typ.dtypechar\n self.arr = wrap.call(typ.type_num, dims, intent.flags, obj)\n\n assert isinstance(self.arr, np.ndarray)\n\n self.arr_attr = wrap.array_attrs(self.arr)\n\n if len(dims) > 1:\n if self.intent.is_intent(\"c\"):\n assert (intent.flags & wrap.F2PY_INTENT_C)\n assert not self.arr.flags[\"FORTRAN\"]\n assert self.arr.flags[\"CONTIGUOUS\"]\n assert (not self.arr_attr[6] & wrap.FORTRAN)\n else:\n assert (not intent.flags & wrap.F2PY_INTENT_C)\n assert self.arr.flags[\"FORTRAN\"]\n assert not self.arr.flags[\"CONTIGUOUS\"]\n assert (self.arr_attr[6] & wrap.FORTRAN)\n\n if obj is None:\n self.pyarr = None\n self.pyarr_attr = None\n return\n\n if intent.is_intent(\"cache\"):\n assert isinstance(obj, np.ndarray), repr(type(obj))\n self.pyarr = np.array(obj).reshape(*dims).copy()\n else:\n self.pyarr = np.array(\n np.array(obj, dtype=typ.dtypechar).reshape(*dims),\n order=self.intent.is_intent(\"c\") and \"C\" or \"F\",\n )\n assert self.pyarr.dtype == typ\n self.pyarr.setflags(write=self.arr.flags[\"WRITEABLE\"])\n assert self.pyarr.flags[\"OWNDATA\"], (obj, intent)\n self.pyarr_attr = 
wrap.array_attrs(self.pyarr)\n\n if len(dims) > 1:\n if self.intent.is_intent(\"c\"):\n assert not self.pyarr.flags[\"FORTRAN\"]\n assert self.pyarr.flags[\"CONTIGUOUS\"]\n assert (not self.pyarr_attr[6] & wrap.FORTRAN)\n else:\n assert self.pyarr.flags[\"FORTRAN\"]\n assert not self.pyarr.flags[\"CONTIGUOUS\"]\n assert (self.pyarr_attr[6] & wrap.FORTRAN)\n\n assert self.arr_attr[1] == self.pyarr_attr[1] # nd\n assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions\n if self.arr_attr[1] <= 1:\n assert self.arr_attr[3] == self.pyarr_attr[3], repr((\n self.arr_attr[3],\n self.pyarr_attr[3],\n self.arr.tobytes(),\n self.pyarr.tobytes(),\n )) # strides\n assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:] # descr\n assert self.arr_attr[6] == self.pyarr_attr[6], repr((\n self.arr_attr[6],\n self.pyarr_attr[6],\n flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),\n flags2names(self.arr_attr[6]),\n intent,\n )) # flags\n\n if intent.is_intent(\"cache\"):\n assert self.arr_attr[5][3] >= self.type.elsize\n else:\n assert self.arr_attr[5][3] == self.type.elsize\n assert (self.arr_equal(self.pyarr, self.arr))\n\n if isinstance(self.obj, np.ndarray):\n if typ.elsize == Type(obj.dtype).elsize:\n if not intent.is_intent(\"copy\") and self.arr_attr[1] <= 1:\n assert self.has_shared_memory()\n\n def arr_equal(self, arr1, arr2):\n if arr1.shape != arr2.shape:\n return False\n return (arr1 == arr2).all()\n\n def __str__(self):\n return str(self.arr)\n\n def has_shared_memory(self):\n \"\"\"Check that created array shares data with input array.\"\"\"\n if self.obj is self.arr:\n return True\n if not isinstance(self.obj, np.ndarray):\n return False\n obj_attr = wrap.array_attrs(self.obj)\n return obj_attr[0] == self.arr_attr[0]\n\n\nclass TestIntent:\n def test_in_out(self):\n assert str(intent.in_.out) == \"intent(in,out)\"\n assert intent.in_.c.is_intent(\"c\")\n assert not intent.in_.c.is_intent_exact(\"c\")\n assert intent.in_.c.is_intent_exact(\"c\", \"in\")\n assert intent.in_.c.is_intent_exact(\"in\", \"c\")\n assert not intent.in_.is_intent(\"c\")\n\n\nclass TestSharedMemory:\n num2seq = [1, 2]\n num23seq = [[1, 2, 3], [4, 5, 6]]\n\n @pytest.fixture(autouse=True, scope=\"class\", params=_type_names)\n def setup_type(self, request):\n request.cls.type = Type(request.param)\n request.cls.array = lambda self, dims, intent, obj: Array(\n Type(request.param), dims, intent, obj)\n\n def test_in_from_2seq(self):\n a = self.array([2], intent.in_, self.num2seq)\n assert not a.has_shared_memory()\n\n def test_in_from_2casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num2seq, dtype=t.dtype)\n a = self.array([len(self.num2seq)], intent.in_, obj)\n if t.elsize == self.type.elsize:\n assert a.has_shared_memory(), repr((self.type.dtype, t.dtype))\n else:\n assert not a.has_shared_memory()\n\n @pytest.mark.parametrize(\"write\", [\"w\", \"ro\"])\n @pytest.mark.parametrize(\"order\", [\"C\", \"F\"])\n @pytest.mark.parametrize(\"inp\", [\"2seq\", \"23seq\"])\n def test_in_nocopy(self, write, order, inp):\n \"\"\"Test if intent(in) array can be passed without copies\"\"\"\n seq = getattr(self, \"num\" + inp)\n obj = np.array(seq, dtype=self.type.dtype, order=order)\n obj.setflags(write=(write == \"w\"))\n a = self.array(obj.shape,\n ((order == \"C\" and intent.in_.c) or intent.in_), obj)\n assert a.has_shared_memory()\n\n def test_inout_2seq(self):\n obj = np.array(self.num2seq, dtype=self.type.dtype)\n a = self.array([len(self.num2seq)], intent.inout, obj)\n assert 
a.has_shared_memory()\n\n try:\n a = self.array([2], intent.in_.inout, self.num2seq)\n except TypeError as msg:\n if not str(msg).startswith(\n \"failed to initialize intent(inout|inplace|cache) array\"):\n raise\n else:\n raise SystemError(\"intent(inout) should have failed on sequence\")\n\n def test_f_inout_23seq(self):\n obj = np.array(self.num23seq, dtype=self.type.dtype, order=\"F\")\n shape = (len(self.num23seq), len(self.num23seq[0]))\n a = self.array(shape, intent.in_.inout, obj)\n assert a.has_shared_memory()\n\n obj = np.array(self.num23seq, dtype=self.type.dtype, order=\"C\")\n shape = (len(self.num23seq), len(self.num23seq[0]))\n try:\n a = self.array(shape, intent.in_.inout, obj)\n except ValueError as msg:\n if not str(msg).startswith(\n \"failed to initialize intent(inout) array\"):\n raise\n else:\n raise SystemError(\n \"intent(inout) should have failed on improper array\")\n\n def test_c_inout_23seq(self):\n obj = np.array(self.num23seq, dtype=self.type.dtype)\n shape = (len(self.num23seq), len(self.num23seq[0]))\n a = self.array(shape, intent.in_.c.inout, obj)\n assert a.has_shared_memory()\n\n def test_in_copy_from_2casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num2seq, dtype=t.dtype)\n a = self.array([len(self.num2seq)], intent.in_.copy, obj)\n assert not a.has_shared_memory()\n\n def test_c_in_from_23seq(self):\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_,\n self.num23seq)\n assert not a.has_shared_memory()\n\n def test_in_from_23casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num23seq, dtype=t.dtype)\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)\n assert not a.has_shared_memory()\n\n def test_f_in_from_23casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num23seq, dtype=t.dtype, order=\"F\")\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)\n if t.elsize == self.type.elsize:\n assert a.has_shared_memory()\n else:\n assert not a.has_shared_memory()\n\n def test_c_in_from_23casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num23seq, dtype=t.dtype)\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj)\n if t.elsize == self.type.elsize:\n assert a.has_shared_memory()\n else:\n assert not a.has_shared_memory()\n\n def test_f_copy_in_from_23casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num23seq, dtype=t.dtype, order=\"F\")\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy,\n obj)\n assert not a.has_shared_memory()\n\n def test_c_copy_in_from_23casttype(self):\n for t in self.type.cast_types():\n obj = np.array(self.num23seq, dtype=t.dtype)\n a = self.array(\n [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy,\n obj)\n assert not a.has_shared_memory()\n\n def test_in_cache_from_2casttype(self):\n for t in self.type.all_types():\n if t.elsize != self.type.elsize:\n continue\n obj = np.array(self.num2seq, dtype=t.dtype)\n shape = (len(self.num2seq), )\n a = self.array(shape, intent.in_.c.cache, obj)\n assert a.has_shared_memory()\n\n a = self.array(shape, intent.in_.cache, obj)\n assert a.has_shared_memory()\n\n obj = np.array(self.num2seq, dtype=t.dtype, order=\"F\")\n a = self.array(shape, intent.in_.c.cache, obj)\n assert a.has_shared_memory()\n\n a = self.array(shape, intent.in_.cache, obj)\n assert a.has_shared_memory(), repr(t.dtype)\n\n try:\n a = self.array(shape, 
intent.in_.cache, obj[::-1])\n except ValueError as msg:\n if not str(msg).startswith(\n \"failed to initialize intent(cache) array\"):\n raise\n else:\n raise SystemError(\n \"intent(cache) should have failed on multisegmented array\")\n\n def test_in_cache_from_2casttype_failure(self):\n for t in self.type.all_types():\n if t.elsize >= self.type.elsize:\n continue\n obj = np.array(self.num2seq, dtype=t.dtype)\n shape = (len(self.num2seq), )\n try:\n self.array(shape, intent.in_.cache, obj) # Should succeed\n except ValueError as msg:\n if not str(msg).startswith(\n \"failed to initialize intent(cache) array\"):\n raise\n else:\n raise SystemError(\n \"intent(cache) should have failed on smaller array\")\n\n def test_cache_hidden(self):\n shape = (2, )\n a = self.array(shape, intent.cache.hide, None)\n assert a.arr.shape == shape\n\n shape = (2, 3)\n a = self.array(shape, intent.cache.hide, None)\n assert a.arr.shape == shape\n\n shape = (-1, 3)\n try:\n a = self.array(shape, intent.cache.hide, None)\n except ValueError as msg:\n if not str(msg).startswith(\n \"failed to create intent(cache|hide)|optional array\"):\n raise\n else:\n raise SystemError(\n \"intent(cache) should have failed on undefined dimensions\")\n\n def test_hidden(self):\n shape = (2, )\n a = self.array(shape, intent.hide, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n\n shape = (2, 3)\n a = self.array(shape, intent.hide, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n assert a.arr.flags[\"FORTRAN\"] and not a.arr.flags[\"CONTIGUOUS\"]\n\n shape = (2, 3)\n a = self.array(shape, intent.c.hide, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n assert not a.arr.flags[\"FORTRAN\"] and a.arr.flags[\"CONTIGUOUS\"]\n\n shape = (-1, 3)\n try:\n a = self.array(shape, intent.hide, None)\n except ValueError as msg:\n if not str(msg).startswith(\n \"failed to create intent(cache|hide)|optional array\"):\n raise\n else:\n raise SystemError(\n \"intent(hide) should have failed on undefined dimensions\")\n\n def test_optional_none(self):\n shape = (2, )\n a = self.array(shape, intent.optional, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n\n shape = (2, 3)\n a = self.array(shape, intent.optional, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n assert a.arr.flags[\"FORTRAN\"] and not a.arr.flags[\"CONTIGUOUS\"]\n\n shape = (2, 3)\n a = self.array(shape, intent.c.optional, None)\n assert a.arr.shape == shape\n assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))\n assert not a.arr.flags[\"FORTRAN\"] and a.arr.flags[\"CONTIGUOUS\"]\n\n def test_optional_from_2seq(self):\n obj = self.num2seq\n shape = (len(obj), )\n a = self.array(shape, intent.optional, obj)\n assert a.arr.shape == shape\n assert not a.has_shared_memory()\n\n def test_optional_from_23seq(self):\n obj = self.num23seq\n shape = (len(obj), len(obj[0]))\n a = self.array(shape, intent.optional, obj)\n assert a.arr.shape == shape\n assert not a.has_shared_memory()\n\n a = self.array(shape, intent.optional.c, obj)\n assert a.arr.shape == shape\n assert not a.has_shared_memory()\n\n def test_inplace(self):\n obj = np.array(self.num23seq, dtype=self.type.dtype)\n assert not obj.flags[\"FORTRAN\"] and obj.flags[\"CONTIGUOUS\"]\n shape = obj.shape\n a = self.array(shape, 
intent.inplace, obj)\n assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))\n a.arr[1][2] = 54\n assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype)\n assert a.arr is obj\n assert obj.flags[\"FORTRAN\"] # obj attributes are changed inplace!\n assert not obj.flags[\"CONTIGUOUS\"]\n\n def test_inplace_from_casttype(self):\n for t in self.type.cast_types():\n if t is self.type:\n continue\n obj = np.array(self.num23seq, dtype=t.dtype)\n assert obj.dtype.type == t.type\n assert obj.dtype.type is not self.type.type\n assert not obj.flags[\"FORTRAN\"] and obj.flags[\"CONTIGUOUS\"]\n shape = obj.shape\n a = self.array(shape, intent.inplace, obj)\n assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))\n a.arr[1][2] = 54\n assert obj[1][2] == a.arr[1][2] == np.array(54,\n dtype=self.type.dtype)\n assert a.arr is obj\n assert obj.flags[\"FORTRAN\"] # obj attributes changed inplace!\n assert not obj.flags[\"CONTIGUOUS\"]\n assert obj.dtype.type is self.type.type # obj changed inplace!\n"
] |
[
[
"numpy.core.multiarray.typeinfo.items",
"numpy.intp",
"numpy.dtype",
"numpy.clongdouble",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
niazangels/context_attentive_ir
|
[
"989fbfee5a0ac6b7ac7429bdee36fe6ed93ee234",
"989fbfee5a0ac6b7ac7429bdee36fe6ed93ee234"
] |
[
"neuroir/inputters/ranker/data.py",
"neuroir/models/ranker.py"
] |
[
"# src: https://github.com/facebookresearch/DrQA/blob/master/drqa/reader/data.py\r\nimport numpy as np\r\n\r\nfrom torch.utils.data import Dataset\r\nfrom torch.utils.data.sampler import Sampler\r\nfrom .vector import vectorize\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# PyTorch dataset class for MSMARCO data.\r\n# ------------------------------------------------------------------------------\r\n\r\n\r\nclass RankerDataset(Dataset):\r\n def __init__(self, examples, model, shuffle=False):\r\n self.model = model\r\n self.examples = examples\r\n self.shuffle = shuffle\r\n\r\n def __len__(self):\r\n return len(self.examples)\r\n\r\n def __getitem__(self, index):\r\n return vectorize(self.examples[index],\r\n self.model,\r\n shuffle=self.shuffle)\r\n\r\n def lengths(self):\r\n return [(max([len(doc.tokens) for doc in ex.documents]),\r\n len(ex.tokens)) for ex in self.examples]\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# PyTorch sampler returning batched of sorted lengths (by doc and query).\r\n# ------------------------------------------------------------------------------\r\n\r\nclass SortedBatchSampler(Sampler):\r\n def __init__(self, lengths, batch_size, shuffle=True):\r\n self.lengths = lengths\r\n self.batch_size = batch_size\r\n self.shuffle = shuffle\r\n\r\n def __iter__(self):\r\n lengths = np.array(\r\n [(-l[0], -l[1], np.random.random()) for l in self.lengths],\r\n dtype=[('l1', np.int_), ('l2', np.int_), ('rand', np.float_)]\r\n )\r\n indices = np.argsort(lengths, order=('l1', 'l2', 'rand'))\r\n batches = [indices[i:i + self.batch_size]\r\n for i in range(0, len(indices), self.batch_size)]\r\n if self.shuffle:\r\n np.random.shuffle(batches)\r\n return iter([i for batch in batches for i in batch])\r\n\r\n def __len__(self):\r\n return len(self.lengths)\r\n",
"import copy\nimport logging\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as f\n\nfrom tqdm import tqdm\nfrom prettytable import PrettyTable\n\nfrom neuroir.config import override_model_args\nfrom neuroir.rankers.dssm import DSSM\nfrom neuroir.rankers.cdssm import CDSSM\nfrom neuroir.rankers.duet import DUET\nfrom neuroir.rankers.esm import ESM\nfrom neuroir.rankers.arci import ARCI\nfrom neuroir.rankers.arcii import ARCII\nfrom neuroir.rankers.drmm import DRMM\nfrom neuroir.rankers.mtensor import MatchTensor\nfrom neuroir.utils.misc import count_file_lines\n\nlogger = logging.getLogger(__name__)\n\n\nclass Ranker(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n\n # --------------------------------------------------------------------------\n # Initialization\n # --------------------------------------------------------------------------\n\n def __init__(self, args, src_dict, state_dict=None):\n # Book-keeping.\n self.args = args\n self.src_dict = src_dict\n self.args.src_vocab_size = len(src_dict)\n self.updates = 0\n self.use_cuda = False\n self.parallel = False\n\n if args.model_type.upper() == 'DSSM':\n self.network = DSSM(self.args)\n self.criterion = self.compute_loss\n elif args.model_type.upper() == 'CDSSM':\n self.network = CDSSM(self.args)\n self.criterion = self.compute_loss\n elif args.model_type.upper() == 'ESM':\n self.network = ESM(self.args)\n elif args.model_type.upper() == 'DUET':\n self.network = DUET(self.args)\n self.criterion = torch.nn.BCEWithLogitsLoss()\n elif args.model_type.upper() == 'ARCI':\n self.network = ARCI(self.args)\n self.criterion = torch.nn.BCEWithLogitsLoss()\n elif args.model_type.upper() == 'ARCII':\n self.network = ARCII(self.args)\n self.criterion = torch.nn.BCEWithLogitsLoss()\n elif args.model_type.upper() == 'DRMM':\n self.network = DRMM(self.args)\n self.criterion = torch.nn.BCEWithLogitsLoss()\n elif args.model_type.upper() == 'MATCH_TENSOR':\n self.network = MatchTensor(self.args)\n self.criterion = torch.nn.BCEWithLogitsLoss()\n else:\n raise RuntimeError('Unsupported model: %s' % args.model_type)\n\n # Load saved state\n if state_dict:\n # Load buffer separately\n if 'fixed_embedding' in state_dict:\n fixed_embedding = state_dict.pop('fixed_embedding')\n self.network.load_state_dict(state_dict)\n self.network.register_buffer('fixed_embedding', fixed_embedding)\n else:\n self.network.load_state_dict(state_dict)\n\n @staticmethod\n def compute_loss(predictions, target):\n \"\"\"\n Compute negative log-likelihood loss for a batch of predictions.\n :param predictions: 2d tensor [batch_size x num_rel_docs_per_query]\n :param target: 2d tensor [batch_size x num_rel_docs_per_query]\n :return: average negative log-likelihood loss over the input mini-batch [autograd Variable]\n \"\"\"\n predictions = f.log_softmax(predictions, dim=-1)\n loss = -(predictions * target).sum(1)\n return loss.mean()\n\n def count_parameters(self):\n return sum(p.numel() for p in self.network.parameters() if p.requires_grad)\n\n def layer_wise_parameters(self):\n table = PrettyTable()\n table.field_names = [\"Layer Name\", \"Output Shape\", \"Param #\"]\n table.align[\"Layer Name\"] = \"l\"\n table.align[\"Output Shape\"] = \"r\"\n table.align[\"Param #\"] = \"r\"\n for name, parameters in self.network.named_parameters():\n if parameters.requires_grad:\n table.add_row([name, str(list(parameters.shape)), parameters.numel()])\n return 
table\n\n def load_embeddings(self, words, embedding_file):\n \"\"\"Load pretrained embeddings for a given list of words, if they exist.\n Args:\n words: iterable of tokens. Only those that are indexed in the\n dictionary are kept.\n embedding_file: path to text file of embeddings, space separated.\n \"\"\"\n emb_layer = self.network.word_embeddings\n words = {w for w in words if w in self.src_dict}\n logger.info('Loading pre-trained embeddings for %d words from %s' %\n (len(words), embedding_file))\n\n # When normalized, some words are duplicated. (Average the embeddings).\n vec_counts, embedding = {}, {}\n with open(embedding_file) as f:\n # Skip first line if of form count/dim.\n line = f.readline().rstrip().split(' ')\n if len(line) != 2:\n f.seek(0)\n\n duplicates = set()\n for line in tqdm(f, total=count_file_lines(embedding_file)):\n parsed = line.rstrip().split(' ')\n assert (len(parsed) == emb_layer.word_vec_size + 1)\n w = self.src_dict.normalize(parsed[0])\n if w in words:\n vec = torch.Tensor([float(i) for i in parsed[1:]])\n if w not in vec_counts:\n vec_counts[w] = 1\n embedding[w] = vec\n else:\n duplicates.add(w)\n vec_counts[w] = vec_counts[w] + 1\n embedding[w].add_(vec)\n\n if len(duplicates) > 0:\n logging.warning(\n 'WARN: Duplicate embedding found for %s' % ', '.join(duplicates)\n )\n\n for w, c in vec_counts.items():\n embedding[w].div_(c)\n\n emb_layer.init_word_vectors(self.src_dict, embedding, self.args.fix_embeddings)\n logger.info('Loaded %d embeddings (%.2f%%)' %\n (len(vec_counts), 100 * len(vec_counts) / len(words)))\n\n def init_optimizer(self, state_dict=None, use_gpu=True):\n \"\"\"Initialize an optimizer for the free parameters of the network.\n Args:\n state_dict: optimizer's state dict\n use_gpu: required to move state_dict to GPU\n \"\"\"\n if self.args.fix_embeddings:\n for p in self.network.word_embeddings.parameters():\n p.requires_grad = False\n\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if self.args.optimizer == 'sgd':\n self.optimizer = optim.SGD(parameters, self.args.learning_rate,\n momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adam':\n self.optimizer = optim.Adam(parameters, self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adamax':\n self.optimizer = optim.Adamax(parameters, self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adadelta':\n self.optimizer = optim.Adadelta(parameters, self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n else:\n raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)\n\n if state_dict is not None:\n self.optimizer.load_state_dict(state_dict)\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\n if use_gpu:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n\n # --------------------------------------------------------------------------\n # Learning\n # --------------------------------------------------------------------------\n\n def update(self, ex):\n \"\"\"Forward a batch of examples; step the optimizer to update weights.\"\"\"\n if not self.optimizer:\n raise RuntimeError('No optimizer set.')\n\n # Train mode\n self.network.train()\n\n documents = ex['doc_rep']\n queries = ex['que_rep']\n que_len = ex['que_len']\n doc_len = ex['doc_len']\n labels = ex['label'].float()\n if self.use_cuda:\n documents = 
documents.cuda(non_blocking=True)\n queries = queries.cuda(non_blocking=True)\n labels = labels.cuda(non_blocking=True)\n que_len = que_len.cuda(non_blocking=True)\n doc_len = doc_len.cuda(non_blocking=True)\n\n # Run forward\n scores = self.network(queries, que_len, documents, doc_len)\n loss = self.criterion(scores, labels)\n if self.parallel:\n loss = loss.mean()\n\n # Clear gradients and run backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm(self.network.parameters(),\n self.args.grad_clipping)\n\n # Update parameters\n self.optimizer.step()\n self.updates += 1\n\n return loss\n\n # --------------------------------------------------------------------------\n # Prediction\n # --------------------------------------------------------------------------\n\n def predict(self, ex):\n \"\"\"Forward a batch of examples only to get predictions.\n Args:\n ex: the batch examples\n Output:\n predictions: #batch predicted sequences\n \"\"\"\n # Eval mode\n self.network.eval()\n\n documents = ex['doc_rep']\n queries = ex['que_rep']\n que_len = ex['que_len']\n doc_len = ex['doc_len']\n if self.use_cuda:\n documents = documents.cuda(non_blocking=True)\n queries = queries.cuda(non_blocking=True)\n que_len = que_len.cuda(non_blocking=True)\n doc_len = doc_len.cuda(non_blocking=True)\n\n # Run forward\n scores = self.network(queries, que_len, documents, doc_len)\n scores = f.softmax(scores, dim=-1)\n\n return scores\n\n # --------------------------------------------------------------------------\n # Saving and loading\n # --------------------------------------------------------------------------\n\n def save(self, filename):\n if self.parallel:\n network = self.network.module\n else:\n network = self.network\n state_dict = copy.copy(network.state_dict())\n if 'fixed_embedding' in state_dict:\n state_dict.pop('fixed_embedding')\n params = {\n 'state_dict': state_dict,\n 'src_dict': self.src_dict,\n 'args': self.args,\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warning('WARN: Saving failed... continuing anyway.')\n\n def checkpoint(self, filename, epoch):\n if self.parallel:\n network = self.network.module\n else:\n network = self.network\n params = {\n 'state_dict': network.state_dict(),\n 'src_dict': self.src_dict,\n 'args': self.args,\n 'epoch': epoch,\n 'optimizer': self.optimizer.state_dict(),\n }\n try:\n torch.save(params, filename)\n except BaseException:\n logger.warning('WARN: Saving failed... 
continuing anyway.')\n\n @staticmethod\n def load(filename, new_args=None):\n logger.info('Loading model %s' % filename)\n saved_params = torch.load(\n filename, map_location=lambda storage, loc: storage\n )\n src_dict = saved_params['src_dict']\n state_dict = saved_params['state_dict']\n args = saved_params['args']\n if new_args:\n args = override_model_args(args, new_args)\n return Ranker(args, src_dict, state_dict)\n\n @staticmethod\n def load_checkpoint(filename, use_gpu=True):\n logger.info('Loading model %s' % filename)\n saved_params = torch.load(\n filename, map_location=lambda storage, loc: storage\n )\n src_dict = saved_params['src_dict']\n state_dict = saved_params['state_dict']\n epoch = saved_params['epoch']\n optimizer = saved_params['optimizer']\n args = saved_params['args']\n model = Ranker(args, src_dict, state_dict)\n model.init_optimizer(optimizer, use_gpu)\n return model, epoch\n\n # --------------------------------------------------------------------------\n # Runtime\n # --------------------------------------------------------------------------\n\n def cuda(self):\n self.use_cuda = True\n self.network = self.network.cuda()\n\n def cpu(self):\n self.use_cuda = False\n self.network = self.network.cpu()\n\n def parallelize(self):\n \"\"\"Use data parallel to copy the model across several gpus.\n This will take all gpus visible with CUDA_VISIBLE_DEVICES.\n \"\"\"\n self.parallel = True\n self.network = torch.nn.DataParallel(self.network)\n"
] |
[
[
"numpy.argsort",
"numpy.random.random",
"numpy.random.shuffle"
],
[
"torch.optim.Adam",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.load",
"torch.optim.Adamax",
"torch.nn.functional.readline",
"torch.nn.BCEWithLogitsLoss",
"torch.optim.SGD",
"torch.nn.DataParallel",
"torch.optim.Adadelta",
"torch.nn.functional.seek",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yoshanuikabundi/openff-qcsubmit
|
[
"c39233d1ad845e7cdcb264a1be8e21b0bd5fcd2b"
] |
[
"openff/qcsubmit/datasets/entries.py"
] |
[
"\"\"\"\nAll of the individual dataset entry types are defined here.\n\"\"\"\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport openff.toolkit.topology as off\nimport qcelemental as qcel\nfrom pydantic import Field, validator\nfrom simtk import unit\n\nfrom openff.qcsubmit.common_structures import (\n DatasetConfig,\n MoleculeAttributes,\n TDSettings,\n)\nfrom openff.qcsubmit.constraints import Constraints\nfrom openff.qcsubmit.exceptions import ConstraintError, DihedralConnectionError\nfrom openff.qcsubmit.validators import (\n check_constraints,\n check_improper_connection,\n check_linear_torsions,\n check_torsion_connection,\n check_valence_connectivity,\n)\n\n\nclass DatasetEntry(DatasetConfig):\n \"\"\"\n A basic data class to construct the datasets which holds any information about the molecule and options used in\n the qcarchive calculation.\n\n Note:\n * ``extras`` are passed into the qcelemental.models.Molecule on creation.\n * any extras that should passed to the calculation like extra constrains should be passed to ``keywords``.\n \"\"\"\n\n index: str = Field(\n ...,\n description=\"The index name the molecule will be stored under in QCArchive. Note that if multipule geometries are provided the index will be augmented with a value indecating the conformer number so -0, -1.\",\n )\n initial_molecules: List[qcel.models.Molecule] = Field(\n ...,\n description=\"A list of QCElemental Molecule objects which contain the geometries to be used as inputs for the calculation.\",\n )\n attributes: MoleculeAttributes = Field(\n ...,\n description=\"The complete set of required cmiles attributes for the molecule.\",\n )\n extras: Optional[Dict[str, Any]] = Field(\n {},\n description=\"Any extra information that should be injected into the QCElemental models before being submited like the cmiles information.\",\n )\n keywords: Optional[Dict[str, Any]] = Field(\n {},\n description=\"Any extra keywords that should be used in the QCArchive calculation should be passed here.\",\n )\n\n _qcel_molecule_validator = validator(\n \"initial_molecules\", allow_reuse=True, each_item=True\n )(check_valence_connectivity)\n\n def __init__(self, off_molecule: Optional[off.Molecule] = None, **kwargs):\n \"\"\"\n Init the dataclass handling conversions of the molecule first.\n This is needed to make sure the extras are passed into the qcschema molecule.\n \"\"\"\n\n extras = kwargs[\"extras\"]\n # if we get an off_molecule we need to convert it\n if off_molecule is not None:\n if off_molecule.n_conformers == 0:\n off_molecule.generate_conformers(n_conformers=1)\n schema_mols = [\n off_molecule.to_qcschema(conformer=conformer, extras=extras)\n for conformer in range(off_molecule.n_conformers)\n ]\n kwargs[\"initial_molecules\"] = schema_mols\n\n super().__init__(**kwargs)\n\n # now we need to process all of the initial molecules to make sure the cmiles is present\n # and force c1 symmetry\n initial_molecules = []\n for mol in self.initial_molecules:\n extras = mol.extras or {}\n extras[\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ] = self.attributes.canonical_isomeric_explicit_hydrogen_mapped_smiles\n mol_data = mol.dict()\n mol_data[\"extras\"] = extras\n # put into strict c1 symmetry\n mol_data[\"fix_symmetry\"] = \"c1\"\n initial_molecules.append(qcel.models.Molecule.parse_obj(mol_data))\n # now assign the new molecules\n self.initial_molecules = initial_molecules\n\n def get_off_molecule(self, include_conformers: bool = True) -> off.Molecule:\n \"\"\"Build 
and openforcefield.topology.Molecule representation of the input molecule.\n\n Parameters:\n include_conformers: If `True` all of the input conformers are included else they are dropped.\n \"\"\"\n\n molecule = self.attributes.to_openff_molecule()\n molecule.name = self.index\n if include_conformers:\n for conformer in self.initial_molecules:\n geometry = unit.Quantity(np.array(conformer.geometry), unit=unit.bohr)\n molecule.add_conformer(geometry.in_units_of(unit.angstrom))\n return molecule\n\n\nclass OptimizationEntry(DatasetEntry):\n \"\"\"\n An optimization dataset specific entry class which can handle constraints.\n \"\"\"\n\n constraints: Constraints = Field(\n Constraints(),\n description=\"Any constraints which should be used during an optimization.\",\n )\n\n def __init__(self, off_molecule: Optional[off.Molecule] = None, **kwargs):\n \"\"\"\n Here we handle the constraints before calling the super.\n \"\"\"\n # if the constraints are in the keywords move them out for validation\n if \"constraints\" in kwargs[\"keywords\"]:\n constraint_dict = kwargs[\"keywords\"].pop(\"constraints\")\n constraints = Constraints(**constraint_dict)\n kwargs[\"constraints\"] = constraints\n\n super().__init__(off_molecule, **kwargs)\n # validate any constraints being added\n check_constraints(\n constraints=self.constraints,\n molecule=self.get_off_molecule(include_conformers=False),\n )\n\n def add_constraint(\n self,\n constraint: str,\n constraint_type: str,\n indices: List[int],\n bonded: bool = True,\n **kwargs,\n ) -> None:\n \"\"\"\n Add new constraint of the given type.\n\n Parameters:\n constraint: The major type of constraint, freeze or set\n constraint_type: the constraint sub type, angle, distance etc\n indices: The atom indices the constraint should be placed on\n bonded: If the constraint is intended to be put a bonded set of atoms\n kwargs: Any extra information needed by the constraint, for the set class they need a value `value=float`\n \"\"\"\n if constraint.lower() == \"freeze\":\n self.constraints.add_freeze_constraint(\n constraint_type=constraint_type, indices=indices, bonded=bonded\n )\n elif constraint.lower() == \"set\":\n self.constraints.add_set_constraint(\n constraint_type=constraint_type,\n indices=indices,\n bonded=bonded,\n **kwargs,\n )\n else:\n raise ConstraintError(\n f\"The constraint {constraint} is not available please chose from freeze or set.\"\n )\n # run the constraint check\n check_constraints(\n constraints=self.constraints,\n molecule=self.get_off_molecule(include_conformers=False),\n )\n\n @property\n def formatted_keywords(self) -> Dict[str, Any]:\n \"\"\"\n Format the keywords with the constraints values.\n \"\"\"\n import copy\n\n if self.constraints.has_constraints:\n constraints = self.constraints.dict()\n keywords = copy.deepcopy(self.keywords)\n keywords[\"constraints\"] = constraints\n return keywords\n else:\n return self.keywords\n\n\nclass TorsionDriveEntry(DatasetEntry):\n \"\"\"\n A Torsiondrive dataset specific class which can check dihedral indices and store torsiondrive specific settings with built in validation.\n \"\"\"\n\n dihedrals: List[Tuple[int, int, int, int]] = Field(\n ...,\n description=\"The list of dihedrals that should be driven, currently only 1D or 2D torsions are supported.\",\n )\n keywords: Optional[TDSettings] = Field(\n TDSettings(),\n description=\"The torsiondrive keyword settings which can be used to overwrite the general global settings used in the dataset allowing for finner control.\",\n )\n\n def 
__init__(self, off_molecule: Optional[off.Molecule] = None, **kwargs):\n\n super().__init__(off_molecule, **kwargs)\n # now validate the torsions check proper first\n off_molecule = self.get_off_molecule(include_conformers=False)\n\n # now validate the dihedrals\n for torsion in self.dihedrals:\n # check for linear torsions\n check_linear_torsions(torsion, off_molecule)\n try:\n check_torsion_connection(torsion=torsion, molecule=off_molecule)\n except DihedralConnectionError:\n # if this fails as well raise\n try:\n check_improper_connection(improper=torsion, molecule=off_molecule)\n except DihedralConnectionError:\n raise DihedralConnectionError(\n f\"The dihedral {torsion} for molecule {off_molecule} is not a valid\"\n f\" proper/improper torsion.\"\n )\n\n\nclass FilterEntry(DatasetConfig):\n \"\"\"\n A basic data class that contains information on components run in a workflow and the associated molecules which were\n removed by it.\n \"\"\"\n\n component: str = Field(\n ...,\n description=\"The name of the component ran, this should be one of the components registered with qcsubmit.\",\n )\n component_settings: Dict[str, Any] = Field(\n ...,\n description=\"The run time settings of the component used to filter the molecules.\",\n )\n component_provenance: Dict[str, str] = Field(\n ...,\n description=\"A dictionary of the version information of all dependencies of the component.\",\n )\n molecules: List[str]\n\n def __init__(self, off_molecules: List[off.Molecule] = None, **kwargs):\n \"\"\"\n Init the dataclass handling conversions of the molecule first.\n \"\"\"\n if off_molecules is not None:\n molecules = [\n molecule.to_smiles(isomeric=True, explicit_hydrogens=True)\n for molecule in off_molecules\n ]\n kwargs[\"molecules\"] = molecules\n\n super().__init__(**kwargs)\n\n def add_molecule(self, molecule: off.Molecule) -> None:\n \"\"\"\n Add a molecule to this filter.\n \"\"\"\n self.molecules.append(\n molecule.to_smiles(isomeric=True, explicit_hydrogens=True)\n )\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OMG-ICFP-FTW/icfp2021
|
[
"91b87968ce3fc6a8b6d6c6e362433f6de2463345",
"91b87968ce3fc6a8b6d6c6e362433f6de2463345"
] |
[
"aray/scripts/plot_boxlet.py",
"aray/scripts/plot_intersect.py"
] |
[
"#!/usr/bin/env python\n# boxlet.py - Boxlet class and utilities\n\n# %%\nimport matplotlib.pyplot as plt\nimport random\nfrom collections import namedtuple, defaultdict\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\nfrom itertools import product\n\nfrom aray.types import Point, Quad\nfrom aray.boxlet import Boxlet\nfrom aray.problem import Problem\nfrom aray.util import ceil, floor\n\n\n\ndef ccw(A, B, C):\n return (C.y-A.y) * (B.x-A.x) > (B.y-A.y) * (C.x-A.x)\n\n\ndef check_intersection(A: Point, B: Point, C: Point, D: Point) -> bool:\n \"\"\" Check if there is an intersection between line segments AB and CD \"\"\"\n if A == C or A == D or B == C or B == D:\n return False\n return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)\n\n\nfig, axes = plt.subplots(2, 2)\nfor ax in axes.flat:\n # generate random 2d points within [0, 10]\n while True:\n polygon = [Point(random.uniform(0, 10), random.uniform(0, 10))\n for _ in range(4)]\n cycle = polygon + [polygon[0]]\n edges = [(cycle[i], cycle[i + 1]) for i in range(len(cycle) - 1)]\n simple = not any(check_intersection(e1[0], e1[1], e2[0], e2[1])\n for e1, e2 in product(edges, edges))\n if simple:\n break\n\n print('polygon=', polygon)\n\n ax.set_xlim(-1, 11)\n ax.set_ylim(-1, 11)\n ax.set_xticks(range(11))\n ax.set_yticks(range(11))\n ax.grid(which='major', axis='x', linewidth=0.75, color='k', alpha=0.1)\n ax.grid(which='major', axis='y', linewidth=0.75, color='k', alpha=0.1)\n ax.set_aspect('equal')\n\n ax.plot([c.x for c in cycle], [c.y for c in cycle], 'k-', linewidth=1)\n\n if simple:\n boxlets = Boxlet.from_polygon(polygon)\n print('boxlets', boxlets)\n for boxlet in boxlets:\n print('plotting boxlet', boxlet)\n points = list(boxlet.iter_points())\n xs = [p.x for p in points]\n ys = [p.y for p in points]\n assert xs, f'no points {boxlet}'\n assert ys, f'no points {boxlet}'\n # print('xs', xs, 'ys', ys)\n ax.scatter(xs, ys, s=8)\nplt.show()\n\n# %%\n\n# generate random 2d points within [0, 10]\n# number = random.randint(1,79)\nnumber = 31\npolygon = Problem.get(number).hole\ncycle = polygon + [polygon[0]]\n\nM = max(int(max(p.x for p in polygon) + 1), 10)\nN = max(int(max(p.y for p in polygon) + 1), 10)\n\nfig, ax = plt.subplots(figsize=(10, 10))\nax.set_xlim(-1, M)\nax.set_ylim(-1, N)\nax.set_xticks(range(M))\nax.set_yticks(range(N))\nax.grid(which='major', axis='x', linewidth=0.75, color='k', alpha=0.1)\nax.grid(which='major', axis='y', linewidth=0.75, color='k', alpha=0.1)\nax.set_aspect('equal')\n\n\nax.plot([c.x for c in cycle], [c.y for c in cycle], 'k-', linewidth=1)\n\nif simple:\n boxlets = Boxlet.from_polygon(polygon)\n # print('boxlets', boxlets)\n for boxlet in boxlets:\n # print('plotting boxlet', boxlet)\n points = list(boxlet.iter_points())\n xs = [p.x for p in points]\n ys = [p.y for p in points]\n assert xs, f'no points {boxlet}'\n assert ys, f'no points {boxlet}'\n # print('xs', xs, 'ys', ys)\n ax.scatter(xs, ys, s=8)\n\n\nproblematic_edge = [Point(x=32, y=0), Point(x=57, y=20)]\n\n# plot this edge in blue\nax.plot([problematic_edge[0].x, problematic_edge[1].x],\n [problematic_edge[0].y, problematic_edge[1].y],\n 'b-', linewidth=2)\n\n\nplt.show()\n\n\n# %%\n57 - 32",
"#!/usr/bin/env python\n\n# %%\nimport matplotlib.pyplot as plt\nimport random\nfrom collections import namedtuple, defaultdict\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nfrom aray.types import Point\nfrom aray.intersect import intersection, intersections, maybe_intersection\nfrom aray.util import floor, ceil\n\nimport random\nN = 4\nA = Point(random.uniform(0, N), random.uniform(0, N))\nB = Point(random.uniform(0, N), random.uniform(0, N))\nx = random.uniform(0, N)\n\nfig, ax = plt.subplots()\nax.set_xlim(-1, N + 1)\nax.set_ylim(-1, N + 1)\nax.set_xticks(range(N + 1))\nax.set_yticks(range(N + 1))\nax.grid(which='major', axis='x', linewidth=0.75, color='k', alpha=0.1)\nax.grid(which='major', axis='y', linewidth=0.75, color='k', alpha=0.1)\nax.set_aspect('equal')\n\n# Plot AB with points on the ends\nax.plot([A.x, B.x], [A.y, B.y], 'ko:')\n# plot x as a vertical line\nax.plot([x, x], [0, N], 'r', linewidth=1)\n# Plot intersections\npoints = intersections(A, B)\nprint('points', points)\nax.scatter([p.x for p in points], [p.y for p in points], color='g')\n# Plot intersection\np = intersection(A, B, x)\nax.scatter([p.x], [p.y], color='b')\n# Plot intersection\np = maybe_intersection(A, B, x)\nif p:\n ax.scatter([p.x], [p.y], color='c')\n\n\n# %%\nA = Point(random.uniform(0, N), random.uniform(0, N))\nB = Point(random.uniform(0, N), random.uniform(0, N))\nassert intersection(A, B, A.x) == A\nassert intersection(A, B, B.x) == B"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.subplots"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dingobye/TensorComprehensions
|
[
"0579904b4c812708cc6157c12dddebd4735bdf28"
] |
[
"test_python/layers/test_scale.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nimport tensor_comprehensions as tc\n\nimport torch\nimport torch.cuda\nimport unittest\n\n\nclass TestScale(unittest.TestCase):\n\n # NOTE: take note of use of {{ }}\n def test_scale(self):\n LANG = \"\"\"\n def scale(float(M, N) I) -> (O) {{\n O(m, n) = I(m, n) * {s}\n }}\n \"\"\"\n scale = tc.define(LANG, name=\"scale\", constants={\"s\": 10})\n inp = torch.randn(100, 128).cuda()\n out = scale(inp)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lornatang/tensorflow-tutorial
|
[
"789012d02e800351efd92b3fa4d051ccb5b06ee7"
] |
[
"caltech/val.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport cv2\n\nimport model\nimport input_data\n\n\nBATCH_SIZE = 1\nN_CLASSES = 4\nIMG_SIZE = 224\n\nX = tf.placeholder(tf.float32, shape=[IMG_SIZE, IMG_SIZE, 3])\n\n\ndef get_one_image(filepath):\n \"\"\"Read image to train.\n Args:\n filepath: raw_data dir.\n\n Returns:\n image: random read images from raw_data.\n\n \"\"\"\n # n = len(filepath)\n # ind = np.random.randint(0, n)\n # Randomly select the test images\n # file = filepath[ind]\n\n data = cv2.imread(filepath)\n cv2.imshow('img', data)\n # cv2.waitKey(0)\n data = cv2.resize(data, (224, 224))\n image = np.array(data)\n return image\n\n\ndef evaluate_one_image(data):\n \"\"\"\n Args:\n data: image raw_data for array\n\n \"\"\"\n image = tf.cast(data, tf.float32)\n image = tf.image.per_image_standardization(image)\n image = tf.reshape(image, [1, 224, 224, 3])\n\n logit = model.inference(image, N_CLASSES, BATCH_SIZE)\n\n logit = tf.nn.softmax(logit)\n\n # you need to change the directories to yours.\n logs_train_dir = 'logs'\n\n with tf.Session() as sess:\n saver = tf.train.Saver()\n print(\"Reading checkpoints...\")\n ckpt = tf.train.get_checkpoint_state(logs_train_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = ckpt.model_checkpoint_path.split(\n '/')[-1].split('-')[-1]\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('Loading success, global_step is %s' % global_step)\n else:\n print('No checkpoint file found')\n\n prediction = sess.run(logit, feed_dict={X: data})\n index = np.argmax(prediction)\n if index == 0:\n print(\"This is a airplane\")\n elif index == 1:\n print(\"This is a car\")\n elif index == 2:\n print(\"This is a face\")\n else:\n print(\"This is a motorbike\")\n\n\nif __name__ == '__main__':\n train_dir = 'data'\n val, val_label = input_data.get_files(train_dir)\n img = get_one_image('/Users/mac/Desktop/airplane.jpg')\n evaluate_one_image(img)\n"
] |
[
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.placeholder",
"numpy.argmax",
"tensorflow.image.per_image_standardization",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ArqiSoft/ml-services
|
[
"0c9beacc4a98c3f55ed56969a8b7eb84c4209c21",
"0c9beacc4a98c3f55ed56969a8b7eb84c4209c21",
"0c9beacc4a98c3f55ed56969a8b7eb84c4209c21"
] |
[
"Source/sds_tools/learner/fingerprints.py",
"Source/sds_tools/predict.py",
"Source/sds_tools/general_helper.py"
] |
[
"\"\"\"\nModule which contain all data needed to calculate fingerprints/descriptors\nparameters. Contain internal codes, names, molstring and header getters.\n\"\"\"\nimport os\nimport pickle\n\nimport keras\nimport numpy as np\nimport tensorflow\nfrom keras.models import load_model\nfrom rdkit import Chem\nfrom rdkit.Avalon.pyAvalonTools import GetAvalonCountFP\nfrom rdkit.Chem import (\n AllChem, MACCSkeys, PatternFingerprint, RDKFingerprint, LayeredFingerprint,\n rdReducedGraphs, Descriptors, MolToSmiles, DataStructs\n)\nfrom rdkit.Chem.Pharm2D import Generate, Gobbi_Pharm2D\nfrom rdkit.DataStructs.cDataStructs import ConvertToExplicit\n\nfrom learner.seq2seq.preprocessor import Preprocessor\n\n\n# all using in OSDR fingerprints\nFCFP = 'Feature-Connectivity FingerPrint'\nECFP = 'Extended-Connectivity Fingerprint'\nFCFC = 'Feature Connectivity Fingerprint Count vector'\nFCFC_CHIRALITY = 'Feature Connectivity Fingerprint Count vector with Chirality'\nECFC = 'Extented Connectivity Fingerprint Count vector'\nECFC_CHIRALITY = 'Extented Connectivity Fingerprint Count vector with Chirality'\nATOM_PAIRS = 'RDKit Hashed Atom Pairs count vector'\nMACCS = '166 public MACCS keys + 1 zero bit'\nPATTERN = 'Experimental SMARTS patterns-based fingerprint'\nAVALON = 'Avalon count FPs from Avalon cheminformatcis toolkit'\nRDK = 'RDKit in-house subgraph-based count vector'\nLAYERED = 'Layered path-based-FP'\nERG = 'Extended reduced graph approach uses pharmacophore-type'\nPHARMA = 'PHARMA'\nDESC = 'ALL of the RDKit supported descriptors'\nCAN2CAN = 'CAN2CAN'\nENUM2CAN = 'ENUM2CAN'\nCAN2ENUM = 'CAN2ENUM'\nENUM2ENUM = 'ENUM2ENUM'\n\nPROJECT_PATH = os.path.abspath(os.path.dirname(__file__))\nCAN2CAN_PATH = os.path.join(PROJECT_PATH, 'seq2seq', 'CAN2CAN')\nENUM2CAN_PATH = os.path.join(PROJECT_PATH, 'seq2seq', 'ENUM2CAN')\nCAN2ENUM_PATH = os.path.join(PROJECT_PATH, 'seq2seq', 'CAN2ENUM')\nENUM2ENUM_PATH = os.path.join(PROJECT_PATH, 'seq2seq', 'ENUM2ENUM')\n\nCHAR_TO_INT_ENUM2CAN = pickle.load(\n open(os.path.join(ENUM2CAN_PATH, 'char_to_int.obj'), 'rb'))\nINT_TO_CHAR_ENUM2CAN = pickle.load(\n open(os.path.join(ENUM2CAN_PATH, 'int_to_char.obj'), 'rb'))\nSMILES_LEN_ENUM2CAN = pickle.load(\n open(os.path.join(ENUM2CAN_PATH, 'smiles_len.obj'), 'rb'))\nCHARSET_ENUM2CAN = pickle.load(open(os.path.join(ENUM2CAN_PATH, 'charset.obj'), 'rb'))\n\nCHAR_TO_INT_CAN2CAN = pickle.load(\n open(os.path.join(CAN2CAN_PATH, 'char_to_int.obj'), 'rb'))\nINT_TO_CHAR_CAN2CAN = pickle.load(\n open(os.path.join(CAN2CAN_PATH, 'int_to_char.obj'), 'rb'))\nSMILES_LEN_CAN2CAN = pickle.load(\n open(os.path.join(CAN2CAN_PATH, 'smiles_len.obj'), 'rb'))\nCHARSET_CAN2CAN = pickle.load(open(os.path.join(CAN2CAN_PATH, 'charset.obj'), 'rb'))\n\nCHAR_TO_INT_CAN2ENUM = pickle.load(\n open(os.path.join(CAN2ENUM_PATH, 'char_to_int.obj'), 'rb'))\nINT_TO_CHAR_CAN2ENUM = pickle.load(\n open(os.path.join(CAN2ENUM_PATH, 'int_to_char.obj'), 'rb'))\nSMILES_LEN_CAN2ENUM = pickle.load(\n open(os.path.join(CAN2ENUM_PATH, 'smiles_len.obj'), 'rb'))\nCHARSET_CAN2ENUM = pickle.load(open(os.path.join(CAN2ENUM_PATH, 'charset.obj'), 'rb'))\n\nCHAR_TO_INT_ENUM2ENUM = pickle.load(\n open(os.path.join(ENUM2ENUM_PATH, 'char_to_int.obj'), 'rb'))\nINT_TO_CHAR_ENUM2ENUM = pickle.load(\n open(os.path.join(ENUM2ENUM_PATH, 'int_to_char.obj'), 'rb'))\nSMILES_LEN_ENUM2ENUM = pickle.load(\n open(os.path.join(ENUM2ENUM_PATH, 'smiles_len.obj'), 'rb'))\nCHARSET_ENUM2ENUM = pickle.load(open(os.path.join(ENUM2ENUM_PATH, 'charset.obj'), 
'rb'))\n\nkeras.backend.clear_session()\nCAN2CAN_SMI_TO_LAT_MODEL = load_model(os.path.join(CAN2CAN_PATH, 'smi2lat_cpu'))\nENUM2CAN_SMI_TO_LAT_MODEL = load_model(os.path.join(ENUM2CAN_PATH, 'smi2lat_cpu'))\nCAN2ENUM_SMI_TO_LAT_MODEL = load_model(os.path.join(CAN2ENUM_PATH, 'smi2lat_cpu'))\nENUM2ENUM_SMI_TO_LAT_MODEL = load_model(os.path.join(ENUM2ENUM_PATH, 'smi2lat_cpu'))\nSMI_TO_LAT_GRAPH = tensorflow.get_default_graph()\nSMI_TO_LAT_SESSION = keras.backend.get_session()\n\n\ndef fcfp_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for fcfp fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for fcfp fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=True\n ), arr\n )\n\n return arr\n\n\ndef ecfp_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for ecfp fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for ecfp fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=False\n ), arr\n )\n\n return arr\n\n\ndef fcfc_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for fcfc without chirality fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for fcfc without chirality fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetHashedMorganFingerprint(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=True\n ), arr\n )\n\n return arr\n\n\ndef fcfc_chirality_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for fcfc with chirality fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for fcfc with chirality fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetHashedMorganFingerprint(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=True,\n useChirality=True\n ), arr\n )\n\n return arr\n\n\ndef ecfc_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for ecfc without chirality fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for ecfc without chirality fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetHashedMorganFingerprint(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=False\n ), arr\n )\n\n return arr\n\n\ndef ecfc_chirality_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for ecfc with chirality fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for ecfc with chirality fingerprint\n \"\"\"\n\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetHashedMorganFingerprint(\n molecule, fptype['Radius'], fptype['Size'], useFeatures=False,\n useChirality=True\n ), arr\n )\n\n return arr\n\n\ndef atom_pairs_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for atom pairs 
fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for atom pairs fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n AllChem.GetHashedAtomPairFingerprint(\n molecule, nBits=fptype['Size'], includeChirality=True\n ), arr\n )\n\n return arr\n\n\ndef maccs_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for maccs fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for maccs fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n MACCSkeys.GenMACCSKeys(molecule), arr)\n\n return arr\n\n\ndef pattern_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for pattern fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for pattern fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n PatternFingerprint(molecule, fptype['Size']), arr)\n\n return arr\n\n\ndef avalon_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for avalon fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for avalon fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n GetAvalonCountFP(molecule, nBits=fptype['Size']), arr)\n\n return arr\n\n\ndef rdk_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for rdk fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for rdk fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n RDKFingerprint(molecule, fpSize=fptype['Size']), arr)\n\n return arr\n\n\ndef layered_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for layered fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for layered fingerprint\n \"\"\"\n\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n LayeredFingerprint(molecule, fpSize=fptype['Size']), arr)\n\n return arr\n\n\ndef erg_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for erg fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for erg fingerprint\n \"\"\"\n\n return rdReducedGraphs.GetErGFingerprint(molecule)\n\n\ndef pharma_molstring(molecule, fptype):\n \"\"\"\n Method for make molstring for pharma fingerprint\n\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: molstring for pharma fingerprint\n \"\"\"\n arr = np.zeros((1,), dtype=int)\n DataStructs.ConvertToNumpyArray(\n ConvertToExplicit(\n Generate.Gen2DFingerprint(molecule, Gobbi_Pharm2D.factory)\n ), arr\n )\n\n return arr\n\n\ndef can2can(molecule, fptype):\n \"\"\"\n Function returns a latent vector - based feature vector for a molecule\n which is extracted from canonical-to-canonical seq2seq SMILES autoencoder\n :param molecule: rdkit molecule object\n :param fptype: useless here\n :return: latent vector - based feature vector\n \"\"\"\n\n smile = Chem.MolToSmiles(molecule, canonical=True)\n 
smile = Preprocessor.prepare_smiles(smile, SMILES_LEN_CAN2CAN)\n if smile is None:\n return [None for x in range(256)]\n smile_vec = Preprocessor.vectorize(smile, CHARSET_CAN2CAN, CHAR_TO_INT_CAN2CAN, SMILES_LEN_CAN2CAN)[0]\n\n keras.backend.set_session(SMI_TO_LAT_SESSION)\n\n with SMI_TO_LAT_GRAPH.as_default():\n latent_vec = CAN2CAN_SMI_TO_LAT_MODEL.predict(smile_vec)[0]\n\n return latent_vec\n\n\ndef enum2enum(molecule, fptype):\n \"\"\"\n Function returns a latent vector - based feature vector for a molecule\n which is extracted from enumerated-to-enumerated seq2seq SMILES autoencoder\n :param molecule: rdkit molecule object\n :param fptype: useless here\n :return: latent vector - based feature vector\n \"\"\"\n\n smile = Chem.MolToSmiles(molecule, canonical=True)\n smile = Preprocessor.prepare_smiles(smile, SMILES_LEN_ENUM2ENUM)\n if smile is None:\n return [None for x in range(256)]\n smile_vec = Preprocessor.vectorize(smile, CHARSET_ENUM2ENUM, CHAR_TO_INT_ENUM2ENUM, SMILES_LEN_ENUM2ENUM)[0]\n\n keras.backend.set_session(SMI_TO_LAT_SESSION)\n\n with SMI_TO_LAT_GRAPH.as_default():\n latent_vec = ENUM2ENUM_SMI_TO_LAT_MODEL.predict(smile_vec)[0]\n\n return latent_vec\n\n\ndef enum2can(molecule, fptype):\n \"\"\"\n Function returns a latent vector - based feature vector for a molecule\n which is extracted from enumerated-to-canonical seq2seq SMILES autoencoder\n :param molecule: rdkit molecule object\n :param fptype: useless here\n :return: latent vector - based feature vector\n \"\"\"\n\n smile = Chem.MolToSmiles(molecule, canonical=True)\n smile = Preprocessor.prepare_smiles(smile, SMILES_LEN_ENUM2CAN)\n if smile is None:\n return [None for x in range(256)]\n smile_vec = Preprocessor.vectorize(smile, CHARSET_ENUM2CAN, CHAR_TO_INT_ENUM2CAN, SMILES_LEN_ENUM2CAN)[0]\n\n keras.backend.set_session(SMI_TO_LAT_SESSION)\n\n with SMI_TO_LAT_GRAPH.as_default():\n latent_vec = ENUM2CAN_SMI_TO_LAT_MODEL.predict(smile_vec)[0]\n\n return latent_vec\n\ndef can2enum(molecule, fptype):\n \"\"\"\n Function returns a latent vector - based feature vector for a molecule\n which is extracted from canonical-to-enumerated seq2seq SMILES autoencoder\n :param molecule: rdkit molecule object\n :param fptype: useless here\n :return: latent vector - based feature vector\n \"\"\"\n\n smile = Chem.MolToSmiles(molecule, canonical=True)\n smile = Preprocessor.prepare_smiles(smile, SMILES_LEN_CAN2ENUM)\n if smile is None:\n return [None for x in range(256)]\n smile_vec = Preprocessor.vectorize(smile, CHARSET_CAN2ENUM, CHAR_TO_INT_CAN2ENUM, SMILES_LEN_CAN2ENUM)[0]\n\n keras.backend.set_session(SMI_TO_LAT_SESSION)\n\n with SMI_TO_LAT_GRAPH.as_default():\n latent_vec = CAN2ENUM_SMI_TO_LAT_MODEL.predict(smile_vec)[0]\n\n return latent_vec\n\ndef get_headers(fingerprint, size, diameter):\n name = get_fingerprint_name(fingerprint)\n if diameter:\n header = ['{}{}_{}'.format(name, diameter, i) for i in range(size)]\n else:\n header = ['{}_{}'.format(name, i) for i in range(size)]\n\n return header\n\n\ndef get_desc_data(molecule):\n \"\"\"\n Custom function that calculates and returns every available molecular\n descriptor in RDKit chemoinfo toolkt with corresponding header (name) for each\n :param molecule: rdkit's molecule object\n :return: values of descriptors and their headers\n \"\"\"\n desc_dict = dict(Descriptors.descList)\n descs = list(desc_dict.keys())\n descs.remove('Ipc')\n ans = {}\n for descname in descs:\n try:\n desc = desc_dict[descname]\n bin_value = desc(molecule)\n except (ValueError, TypeError, 
ZeroDivisionError) as exception:\n print(\n 'Descriptor {} wasn\\'t calculated for a molecule {} due to {}'.format(\n str(descname), str(MolToSmiles(molecule)), str(exception))\n )\n bin_value = 'NaN'\n\n bin_name = 'DESC_{}'.format(descname)\n ans[bin_name] = bin_value\n\n molstring = np.fromiter(ans.values(), dtype=float)\n headers = np.fromiter(ans.keys(), dtype='S32')\n\n return molstring, headers\n\n# fingerprints 'object'\n# contain name, code, molstring (function), headers (function)\n# using to apply fingerprints to some chemical dataset\nFINGERPRINTS = {\n # name of each fingerprint\n 'name': {\n FCFP: 'FCFP',\n ECFP: 'ECFP',\n FCFC: 'FCFC',\n ECFC: 'ECFC',\n ATOM_PAIRS: 'ATOM_PAIRS',\n MACCS: 'MACCS',\n PATTERN: 'PATTERN',\n AVALON: 'AVALON',\n RDK: 'RDK',\n LAYERED: 'LAYERED',\n ERG: 'EGR',\n PHARMA: 'PHARMA',\n DESC: 'DESC',\n FCFC_CHIRALITY: 'FCFC_CHIRALITY',\n ECFC_CHIRALITY: 'ECFC_CHIRALITY',\n CAN2CAN: 'CAN2CAN',\n ENUM2CAN: 'ENUM2CAN',\n CAN2ENUM: 'CAN2ENUM',\n ENUM2ENUM: 'ENUM2ENUM'\n },\n # code of each fingerprint\n 'code': {\n 1: FCFP,\n 2: ECFP,\n 3: FCFC,\n 4: ECFC,\n 5: ATOM_PAIRS,\n 6: MACCS,\n 7: PATTERN,\n 8: AVALON,\n 9: RDK,\n 10: LAYERED,\n 11: ERG,\n 12: PHARMA,\n 13: DESC,\n 14: FCFC_CHIRALITY,\n 15: ECFC_CHIRALITY,\n 16: CAN2CAN,\n 17: ENUM2CAN,\n 18: CAN2ENUM,\n 19: ENUM2ENUM\n },\n # methods for get molstring for each fingerprint\n 'molstring': {\n FCFP: fcfp_molstring,\n ECFP: ecfp_molstring,\n FCFC: fcfc_molstring,\n ECFC: ecfc_molstring,\n ATOM_PAIRS: atom_pairs_molstring,\n MACCS: maccs_molstring,\n PATTERN: pattern_molstring,\n AVALON: avalon_molstring,\n RDK: rdk_molstring,\n LAYERED: layered_molstring,\n ERG: erg_molstring,\n PHARMA: pharma_molstring,\n FCFC_CHIRALITY: fcfc_chirality_molstring,\n ECFC_CHIRALITY: ecfc_chirality_molstring,\n CAN2CAN: can2can,\n ENUM2CAN: enum2can,\n CAN2ENUM: can2enum,\n ENUM2ENUM: enum2enum\n },\n # methods for get header for each fingerprint\n 'headers': {\n FCFP: get_headers,\n ECFP: get_headers,\n FCFC: get_headers,\n ECFC: get_headers,\n ATOM_PAIRS: get_headers,\n MACCS: get_headers,\n PATTERN: get_headers,\n AVALON: get_headers,\n RDK: get_headers,\n LAYERED: get_headers,\n ERG: get_headers,\n PHARMA: get_headers,\n FCFC_CHIRALITY: get_headers,\n ECFC_CHIRALITY: get_headers,\n CAN2CAN: get_headers,\n ENUM2CAN: get_headers,\n CAN2ENUM: get_headers,\n ENUM2ENUM: get_headers\n }\n}\n\n\ndef fingerprint_name_by_code(fingerprint_code):\n \"\"\"\n Method which return fingerprint name by fingerprint number.\n fingerprint_number may contain any 'intable' type\n\n :param fingerprint_code: number of fingerprint\n :type fingerprint_code: str\n :return: algorithm name or 'Unknown fingerprint'\n if fingerprint number not exist in fingerprint dict\n :rtype: str\n \"\"\"\n\n fingerprint_number_as_int = int(fingerprint_code)\n fingerprint_name = 'Unknown fingerprint'\n if fingerprint_number_as_int in FINGERPRINTS['code']:\n fingerprint = FINGERPRINTS['code'][fingerprint_number_as_int]\n fingerprint_name = FINGERPRINTS['name'][fingerprint]\n\n return fingerprint_name\n\n\ndef fingerprint_type_by_name(name):\n \"\"\"\n Method which return fingerprint type by its name,\n or None if type does not exist\n\n :param name: name which want to correspond to type\n :type name: str\n :return: fingerprint type or None\n if fingerprint with given name does not exist\n \"\"\"\n\n for fingerprint_type, fingerprint_name in FINGERPRINTS['name'].items():\n if name == fingerprint_name:\n return fingerprint_type\n\n return None\n\n\ndef 
get_fingerprint_name(fingerprint):\n return FINGERPRINTS['name'][fingerprint]\n\n\ndef validate_fingerprints(fingerprints):\n \"\"\"\n Method for validate fingerprints, raise exception id invalid\n Fingerpints should be type of list, with less than 5 entries.\n Each entry is dict, which can have only 'Type', 'Radius' and 'Size' keys.\n Size value can be only int type, and have 0, 256, 512, 1024, 2048 values\n Radius value can be only int type, and have 0, 2, 3, 4 values\n Type value can be only str type, and have one of the FINGERPRINTS['name']\n\n :param fingerprints: user's input fingerprints\n \"\"\"\n\n # check fingerprints type, should be list\n if not isinstance(fingerprints, list):\n raise Exception('User input Fingerprints should be list')\n # check fingerprints count, should be 4 or less\n if not len(fingerprints) <= 4:\n raise Exception(\n 'User input Fingerprints should contain 4 or less fingerprints')\n # define possible keys for each fingerprint\n possible_fingerprint_keys = ('Type', 'Radius', 'Size')\n for fingerprint in fingerprints:\n fingerprint_keys = set(fingerprint.keys())\n # check current fingerprint keys\n if not fingerprint_keys.issubset(possible_fingerprint_keys):\n raise Exception(\n 'Fingerprint {} have wrong key value'.format(fingerprint))\n # check current fingerprint size\n if 'Size' in fingerprint_keys:\n validate_fingerprint_size(fingerprint)\n # check current fingerprint radius\n if 'Radius' in fingerprint_keys:\n validate_fingerprint_radius(fingerprint)\n # 'Type' MUST be in fingerprint keys\n if 'Type' not in fingerprint_keys:\n raise Exception('Fingerprint {} have not required key Type'.format(\n fingerprint))\n # check current fingerprint type\n validate_fingerprint_type(fingerprint)\n\n\ndef validate_fingerprint_size(fingerprint):\n \"\"\"\n Method for validate fingerprint size, raise exception id invalid\n Size value can be only int type, and have 0, 256, 512, 1024, 2048 values\n\n :param fingerprint: fingerprint for validation\n \"\"\"\n\n # get fingerprint size value\n size = fingerprint['Size']\n # check fingerprint size type, should be int\n if not isinstance(size, int):\n raise Exception(\n 'Fingerprint {} Size should be int'.format(fingerprint)\n )\n # check fingerprint size value, should be 0, 256, 512, 1024 or 2048\n if size not in [0, 128, 256, 512, 1024, 2048]:\n raise Exception(\n 'Fingerprint {} Size should be 128, 256, 512, 1024 or 2048'.format(\n fingerprint\n )\n )\n\n\ndef validate_fingerprint_radius(fingerprint):\n \"\"\"\n Method for validate fingerprint radius, raise exception id invalid\n Radius value can be only int type, and have 0, 2, 3, 4 values\n\n :param fingerprint: fingerprint for validation\n \"\"\"\n\n # get fingerprint radius value\n radius = fingerprint['Radius']\n # check fingerprint radius type, should be int\n if not isinstance(radius, int):\n raise Exception(\n 'Fingerprint {} Radius should be int'.format(fingerprint)\n )\n # check fingerprint radius value, should be 0, 2, 3, 4\n if not 2 <= radius <= 4 and radius != 0:\n raise Exception(\n 'Fingerprint {} Radius should be in interval [2, 4]'.format(\n fingerprint))\n\n\ndef validate_fingerprint_type(fingerprint):\n \"\"\"\n Method for validate fingerprint type, raise exception id invalid\n Type value can be only str type, and have one of the FINGERPRINTS['name']\n Change any fingerprint type case to upper case\n\n :param fingerprint: fingerprint for validation\n \"\"\"\n # get fingerprint type value\n fingerprint_type = fingerprint['Type']\n # check fingerprint 
'Type' type, should be str\n if not isinstance(fingerprint_type, str):\n raise Exception(\n 'Fingerprint {} Type should be str'.format(fingerprint)\n )\n # get list of possible fingerprint types\n possible_fingerprints = FINGERPRINTS['name'].values()\n fingerprint_type = fingerprint_type.upper()\n fingerprint['Type'] = fingerprint_type\n # check fingerprint 'Type' value\n if fingerprint_type not in possible_fingerprints:\n raise Exception('Fingerprint {} Type should be: {}'.format(\n fingerprint, possible_fingerprints))\n\n\ndef get_molstring_and_headers(molecule, fptype):\n \"\"\"\n Function that generates a certain type of fingerprint\\descriptors vector for\n a molecule and returns corresponding feature vector and headers\n :param molecule: molecule object\n :param fptype: type, radius and size of fingerprint\n :type fptype: dict\n :return: feature vector and headers\n \"\"\"\n fingerprint = fingerprint_type_by_name(fptype['Type'])\n if not fingerprint:\n raise ValueError('Unsupported FPtype: {}'.format(fptype['Type']))\n\n if fingerprint == DESC:\n molstring, headers = get_desc_data(molecule)\n else:\n molstring = FINGERPRINTS['molstring'][fingerprint](molecule, fptype)\n diameter = None\n if 'Radius' in fptype.keys():\n diameter = fptype['Radius'] * 2\n headers = FINGERPRINTS['headers'][fingerprint](\n fingerprint, len(molstring), diameter)\n\n return molstring, headers\n",
"import os\nfrom operator import itemgetter\nfrom os import listdir\n\nimport numpy as np\nimport pandas as pd\nfrom keras.models import load_model\n\nfrom general_helper import coeff_determination\nfrom processor import sdf_to_csv\nfrom rdkit import Chem\nfrom sklearn.externals import joblib\n\nimport sklearn\nprint(sklearn.__version__)\n\n\n\nsuppl = Chem.SDMolSupplier(\n 'C:\\PycharmProjects\\ml-data-qsar\\TEST\\LC50\\LC50_training.sdf')\nmolecules = [x for x in suppl if x is not None]\nmolecules = molecules\n\nfptype = [{'Type': 'DESC'},\n {'Type': 'MACCS'},\n {'Type': 'FCFC','Size': 512,'Radius':3},\n {'Type': 'AVALON','Size': 512}]\ndataframe = sdf_to_csv('LC50_prediction', fptype=fptype, molecules=molecules)\n\n\nfolder_path = 'C:\\PycharmProjects\\ml-models\\\\UBC\\Half_LIfe_U_2018_03_18__14_24_16_DESC_MACCS_FCFC_512_3_AVALON_512_scaled___'\nmodels_paths = [os.path.join(folder_path, x) for x in listdir(folder_path) if x.split('.')[-1] == 'h5']\ntransformers = [os.path.join(folder_path, x) for x in listdir(folder_path) if x.split('.')[-1] == 'sav']\n\n\npredicted_test_y_vectors = []\ndf_predict_clf = pd.DataFrame()\nfor transformer in transformers:\n trans = joblib.load(transformer)\n\nfor path_to_model in models_paths:\n model_base = load_model(\n path_to_model,\n custom_objects={'coeff_determination': coeff_determination}\n )\n test_predict_tmp = model_base.predict(dataframe)\n print(test_predict_tmp)\n predicted_test_y_vectors.append(test_predict_tmp)\n print('Loading of model complete')\n\n\nmean_predicted = np.mean(predicted_test_y_vectors, axis=0)\npredicted_mols = itemgetter(*vectorized)(molecules)\n\ndf_predict_clf['Compound_SMILES'] = [\n Chem.MolToSmiles(mol, isomericSmiles=True) for mol in predicted_mols\n ]\n# df_predict_clf['ID'] = [\n# mol.GetProp('index') for mol in predicted_mols\n# ]\ndf_predict_clf['value'] = mean_predicted\ndf_predict_clf.to_csv('predicted_LC50_DNN_SSP_test.csv')\n",
"\"\"\"\nModule which contain base methods for ml services\n\"\"\"\nimport glob\nimport io\nimport json\nimport os\nimport shutil\nimport sys\nimport traceback\nimport uuid\nimport zipfile\nfrom time import time\n\nimport keras\nimport numpy\nimport requests\nimport tensorflow\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom rdkit import Chem\nfrom requests_oauthlib import OAuth2Session\nfrom requests_toolbelt import MultipartEncoder\nfrom scipy import sparse\nfrom sklearn.externals import joblib\n\nfrom MLLogger import BaseMLLogger\n\nos.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'\nLOGGER = BaseMLLogger(log_name='logger', log_file_name='sds-ml-logger')\n# define modules default global variables\nCLIENT_ID = None\nCLIENT_SECRET = None\nSCOPE = None\nTOKEN_URL = None\nBLOB_URL = None\nTEMP_FOLDER = os.getcwd()\nOSDR_API_URL = 'https://api.dev.dataledger.io/osdr/v1/api'\nSCALER_FILENAME = 'scaler.sav'\nDENSITY_MODEL_FILENAME = 'density_model.sav'\nDISTANCE_MATRIX_FILENAME = 'distance_matrix.npz'\nTRAIN_MEAN_FILENAME = 'train_mean.npy'\nK_MEANS_FILENAME = 'k_means.ksav'\nMODEL_ADDITIONAL_FILES = [\n SCALER_FILENAME, DENSITY_MODEL_FILENAME, TRAIN_MEAN_FILENAME,\n DISTANCE_MATRIX_FILENAME, K_MEANS_FILENAME\n]\nMODELS_IN_MEMORY_CACHE = dict()\nNUMPY_PROCESSOR_DTYPES = [\n ('name', 'U15'), ('molecule_number', 'i8'), ('value', 'f8')\n]\n\ntry:\n TEMP_FOLDER = os.environ['OSDR_TEMP_FILES_FOLDER']\nexcept KeyError as undefined_key:\n LOGGER.error(\n 'Temporary folder path not defined. Use default value: {}'.format(\n TEMP_FOLDER\n )\n )\n\ntry:\n CLIENT_ID = os.environ['OSDR_ML_MODELER_CLIENT_ID']\n CLIENT_SECRET = os.environ['OSDR_ML_MODELER_CLIENT_SECRET']\n SCOPE = ['api', 'osdr-api']\n TOKEN_URL = os.environ['OSDR_BLOB_SERVICE_TOKEN_URL']\n BLOB_URL = '{}/blobs'.format(os.environ['OSDR_BLOB_SERVICE_URL'])\n OSDR_API_URL = os.environ['OSDR_API_URL']\nexcept KeyError as undefined_key:\n LOGGER.error('Environment variables not defined. 
Use default values')\n LOGGER.error('Undefined key: {}'.format(undefined_key))\n\nif not os.path.exists(TEMP_FOLDER):\n os.makedirs(TEMP_FOLDER)\n\n\ndef post_data_to_blob(\n oauth, multipart_object, blob_url=BLOB_URL, bucket_id=CLIENT_ID\n):\n \"\"\"\n Method for send POST http request\n to oauth session with multipart_object in body\n\n :param oauth: using in ml service OAuth2Session object\n :param multipart_object: using in ml service MultipartEncoder object\n :param blob_url: blob storage url\n :param bucket_id: bucket id value to POST in\n :return: response of oauth http POST operation\n \"\"\"\n\n post_response = oauth.post(\n '{}/{}'.format(blob_url, bucket_id),\n headers={'Content-Type': multipart_object.content_type},\n data=multipart_object, verify=False\n )\n\n return post_response\n\n\ndef get_file_info_from_blob(\n oauth, blob_id, blob_url=BLOB_URL, bucket_id=CLIENT_ID\n):\n \"\"\"\n Method to get any file info from blob storage, by blob bucket and id,\n if file and info exist\n\n :param oauth: using in ml service OAuth2Session object\n :param blob_id: blob storage id of needed file\n :param blob_url: blob storage api URL\n :param bucket_id: blob bucket with needed file\n :return: response of GET request with file info, status code etc\n \"\"\"\n\n url = '{}/{}/{}/info'.format(blob_url, bucket_id, blob_id)\n response = oauth.get(url)\n\n return response\n\n\ndef get_user_info_from_osdr(oauth, user_id, osdr_api_url=OSDR_API_URL):\n # TODO make docstring there\n url = '{}/users/{}/public-info'.format(osdr_api_url, user_id)\n response = oauth.get(url, verify=False)\n\n return response\n\n\ndef get_file_from_blob(\n file_blob_id, oauth, blob_url=BLOB_URL, bucket_id=CLIENT_ID\n):\n file_url = '{}/{}/{}'.format(blob_url, bucket_id, file_blob_id)\n response = oauth.get(file_url, verify=False)\n\n return response\n\n\ndef delete_data_from_blob(oauth, blob_bucket_id, blob_id=CLIENT_ID):\n \"\"\"\n Method for delete entry from blob storage\n\n :param oauth: using in ml service OAuth2Session object\n :param blob_bucket_id: id of blob bucket from which we want delete entry\n :param blob_id: blob storage id of entry which we want to delete\n :return: deletion operation status code\n \"\"\"\n\n delete_response = oauth.delete(\n '{}/{}/{}'.format(BLOB_URL, blob_bucket_id, blob_id))\n\n return delete_response\n\n\ndef fetch_token(oauth):\n \"\"\"\n Method for fetching oauth token and logging that operation result\n\n :param oauth: using in ml service OAuth2Session object\n :return: oauth token\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n data = [\n ('grant_type', 'client_credentials'),\n ('client_id', CLIENT_ID),\n ('client_secret', CLIENT_SECRET),\n ]\n LOGGER.info('Token URL: {}'.format(TOKEN_URL))\n LOGGER.info('Token headers: {}'.format(headers))\n LOGGER.info('Token data: {}'.format(data))\n response = requests.post(TOKEN_URL, headers=headers, data=data)\n\n try:\n token = response.json()\n oauth.token = token\n LOGGER.info('Status code: {}'.format(response.status_code))\n LOGGER.info('Token: {}'.format(token))\n except json.JSONDecodeError:\n raise Exception(\n 'Fetch token exception. 
URL: {} , ID: {} , secret: {}, status code: {}'.format(\n TOKEN_URL, CLIENT_ID, CLIENT_SECRET, response.status_code\n )\n )\n\n return token\n\n\ndef get_multipart_object(body, file_path, file_type, additional_fields=None):\n \"\"\"\n Method which create and return MultipartEncoder object,\n which would be posting later in ml service\n\n :param body: rabbitmq message as dict, for take ParentId, UserId\n :param file_path: path to file which would be encoded\n :param file_type: type of file which would be encoded\n :param additional_fields: additional fields, which you want to add\n :return: encoded MultipartEncoder object\n \"\"\"\n\n # base ID fields of multipart object as dict\n if additional_fields and 'ParentId' in additional_fields.keys():\n parent_id = additional_fields['ParentId']\n else:\n parent_id = body['ParentId']\n dict_with_fields = {\n 'parentId': parent_id,\n 'userId': body['UserId']\n }\n # base file field of multipart object\n file_dict = {\n 'file': (os.path.basename(file_path), open(file_path, 'rb'), file_type)\n }\n # add additional fields to created multipart object\n if additional_fields:\n dict_with_fields.update(additional_fields)\n\n # file(s) must loaded after metadata. metadata after file will be erased\n dict_with_fields.update(file_dict)\n\n multipart_object = MultipartEncoder(fields=dict_with_fields)\n\n return multipart_object\n\n\ndef get_oauth():\n \"\"\"\n Method which create and return OAuth2Session object for ml service\n\n :return: OAuth2Session object\n \"\"\"\n\n client = BackendApplicationClient(client_id=CLIENT_ID)\n oauth = OAuth2Session(client=client, scope=SCOPE)\n oauth.verify = False\n\n return oauth\n\n\ndef make_directory(directory_path):\n \"\"\"\n Method for create directory if it not exist\n\n :param directory_path: path to directory which we want to create\n :type directory_path: str\n \"\"\"\n\n if not os.path.isdir(directory_path):\n os.makedirs(directory_path)\n\n\ndef validate_kfold(k_fold):\n \"\"\"\n Method for validate k-fold value, raise exception if invalid value\n Value should be int, and in interval [2, 10]\n\n :param k_fold: k-fold value\n :type k_fold: float\n \"\"\"\n\n # check k-fold type, should be int\n if not isinstance(k_fold, int):\n raise Exception('User input KFold should be int')\n # check k-fold in interval [2, 10]\n if not 2 <= k_fold <= 10:\n raise Exception('User input KFold should be in interval [2, 10]')\n\n\ndef validate_subsample_size(subsample_size):\n \"\"\"\n Method for validate subsample size value, raise exception if invalid value\n Value should be float, and in interval (0.1, 1]\n\n :param subsample_size: subsample_size value\n :type subsample_size: float\n \"\"\"\n\n # check subsample size type, should be float\n if not isinstance(subsample_size, float):\n raise Exception('User input SubSampleSize should be float')\n # check subsample size in interval (0.1, 1]\n if not 0.1 < subsample_size <= 1.0:\n raise Exception(\n 'User input SubSampleSize should be in interval (0.1, 1]')\n\n\ndef validate_test_datatset_size(test_dataset_size):\n \"\"\"\n Method for validate test dataset size value, raise exception if invalid\n Value should be float, and in interval [0, 0.5]\n\n :param test_dataset_size: test_dataset_size value\n :type test_dataset_size: float\n \"\"\"\n\n # check test dataset size size in interval [0, 0.5]\n if not 0.0 <= test_dataset_size <= 0.5:\n raise Exception(\n 'User input TestDatasetSize should be in interval [0.0, 0.5]')\n\n\ndef make_stream_from_sdf(body, oauth):\n sdf_url = 
'{}/{}/{}'.format(\n BLOB_URL, body['SourceBucket'], body['SourceBlobId'])\n fetch_token(oauth)\n LOGGER.info('Loading Dataset from: {}'.format(sdf_url))\n sdf = oauth.get(sdf_url, verify=False)\n stream = io.BytesIO(sdf.content)\n\n LOGGER.info('SDF: {}'.format(sdf))\n LOGGER.info('SDF URL: {}'.format(sdf_url))\n LOGGER.info('SDF HEADERS: {}'.format(sdf.headers))\n\n dataset_file_name = dict(sdf.headers)['Content-Disposition']\n dataset_file_name = dataset_file_name.split('filename=')[-1]\n body['SourceFileName'] = dataset_file_name\n\n return stream\n\n\ndef logging_exception_message(logger):\n \"\"\"\n Method for catch exception message and write it to log\n\n :param logger: used logger\n \"\"\"\n\n exception_type, exception, traceback_text = sys.exc_info()\n # traceback message object as string\n traceback_message = '\\n'.join(\n traceback.format_tb(traceback_text))\n # make error traceback message\n exception_message = 'Internal server error.\\n'\n exception_message += '{}{}: {}\\n'.format(\n traceback_message, exception_type.__name__, exception\n )\n logger.error(exception_message)\n\n\ndef get_file_as_bytes_by_url(oauth, get_url):\n if not get_url:\n return None\n\n # get scaler from blobstorage\n get_response = oauth.get(get_url, verify=False)\n\n # get file as binary object from GET response body\n return get_response.content\n\n\ndef make_blob_url(parameters, bucket_key, blob_id_key, blob_url=BLOB_URL):\n if not parameters[blob_id_key]:\n return None\n\n return '{}/{}/{}'.format(\n blob_url, parameters[bucket_key], parameters[blob_id_key])\n\n\ndef write_file_to_temporary_folder(\n file_as_bytes, file_name, temporary_folder=TEMP_FOLDER\n):\n make_directory(temporary_folder)\n\n temporary_file_path = '{}/{}_{}'.format(\n temporary_folder, uuid.uuid1(), file_name)\n\n file_to_write = open(temporary_file_path, 'wb')\n file_to_write.write(file_as_bytes)\n file_to_write.close()\n\n return temporary_file_path\n\n\ndef write_model_to_temporary_folder(oauth, model_url, write_path_folder):\n \"\"\"\n Method which get and write model file to temporary folder.\n Extract model files to temporary folder if model have few files.\n Files takes from linked blob storage (BLOB_URL variable)\n\n :return: temporary folder name\n \"\"\"\n\n # get model file from blob storage\n # make GET request to blob storage\n get_response = oauth.get(model_url, verify=False)\n # get file as binary object from GET response body\n model_file = get_response.content\n # get filename\n model_file_name = dict(get_response.headers)['Content-Disposition']\n model_file_name = model_file_name.split('filename=')[-1]\n if '\"' in model_file_name:\n model_file_name = model_file_name.replace('\"', '')\n\n # make temporary folder with model file\n model_file_path = write_file_to_temporary_folder(\n model_file, model_file_name, temporary_folder=write_path_folder)\n\n # get model file extension\n file_extension = model_file_name.split('.')[-1]\n\n # if model contain only one file\n if file_extension == 'h5' or file_extension == 'sav':\n pass\n # if model contain few files in archive\n elif file_extension == 'zip':\n # extract model file from archive to temporary directory\n archive_name = model_file_path\n models_archive = zipfile.ZipFile(archive_name)\n models_archive.extractall(write_path_folder)\n models_archive.close()\n\n # remove archive\n os.remove(model_file_path)\n else:\n # throw exception if file extension unknown\n raise TypeError(\n 'Unknown model extension: {}'.format(file_extension))\n\n return 
write_path_folder\n\n\ndef get_dataset(oauth, body):\n \"\"\"\n Method which get dataset from blobstorage or local storage,\n if run_mode set to 'local', as binary object\n\n :return: dataset as binary object, and dataset filename\n \"\"\"\n\n dataset_url = make_blob_url(body, 'DatasetBucket', 'DatasetBlobId')\n\n # get dataset from blobstorage\n get_response = oauth.get(dataset_url, verify=False)\n # get file as binary object from GET response body\n dataset = get_response.content\n # get filename\n dataset_file_name = dict(get_response.headers)['Content-Disposition']\n dataset_file_name = dataset_file_name.split('filename=')[-1]\n\n return dataset, dataset_file_name\n\n\ndef get_model_info(oauth, model_id, model_bucket):\n \"\"\"\n Method to get model information from blob storage by using model blob id\n and model blob bucket\n\n :param oauth: OAuth2 object\n :param model_id: model entry blob id uuid\n :param model_bucket: model bucket\n :return: model information from blob storage\n :type model_id: str\n :type model_bucket: str\n :rtype: dict\n \"\"\"\n\n blob_model_info_as_string = get_file_info_from_blob(\n oauth, model_id, bucket_id=model_bucket\n ).json()['metadata']\n\n if 'ModelInfo' in blob_model_info_as_string.keys():\n info_key = 'ModelInfo'\n elif 'modelInfo' in blob_model_info_as_string.keys():\n info_key = 'modelInfo'\n else:\n raise KeyError('No model info')\n\n return json.loads(blob_model_info_as_string[info_key])\n\n\ndef prepare_prediction_parameters(oauth, prediction_parameters, model_info):\n \"\"\"\n Prepare all needed files and parameters (key/value pair) to make\n successfully predictions. Does not matter SSP or classic prediction.\n Upload model files to cache to use it later\n\n :param oauth: OAuth2 object\n :param prediction_parameters: prediction parameters\n (dict using for making predictions)\n :param model_info: information about model,\n in general loaded from blobstorage\n :type prediction_parameters: dict\n :type model_info: dict\n \"\"\"\n\n prepare_prediction_files(oauth, prediction_parameters, model_info)\n prepare_model_info(prediction_parameters, model_info)\n\n if model_info['ModelBlobId'] not in MODELS_IN_MEMORY_CACHE.keys():\n MODELS_IN_MEMORY_CACHE[model_info['ModelBlobId']] = cache_model(\n prediction_parameters['ModelsFolder'])\n\n prediction_parameters['Models'] = MODELS_IN_MEMORY_CACHE[\n model_info['ModelBlobId']]\n\n\ndef prepare_prediction_files(oauth, prediction_parameters, model_info):\n \"\"\"\n Method to upload trained model from blobstorage to HDD, using model info\n Save loaded model to temporary folder with using model blob id, so\n all models would be 'unique'\n\n :param oauth: OAuth2 object\n :param prediction_parameters: prediction parameters\n (dict using for making predictions)\n :param model_info: information about model,\n in general loaded from blobstorage\n :type prediction_parameters: dict\n :type model_info: dict\n \"\"\"\n\n models_folder = '{}/SSP_temp_models/{}'.format(\n TEMP_FOLDER, model_info['ModelBlobId'])\n\n if not os.path.exists(models_folder):\n model_url = make_blob_url(\n model_info, 'ModelBucket', 'ModelBlobId')\n models_folder = write_model_to_temporary_folder(\n oauth, model_url, models_folder)\n\n prediction_parameters['ModelsFolder'] = models_folder\n\n\ndef prepare_model_info(prediction_parameters, model_info):\n \"\"\"\n Method to update prediction parameters by using model info\n\n :param prediction_parameters: prediction parameters\n (dict using for making predictions)\n :param model_info: 
information about model,\n in general loaded from blobstorage\n :type prediction_parameters: dict\n :type model_info: dict\n \"\"\"\n\n prediction_parameters['Fingerprints'] = model_info['Fingerprints']\n prediction_parameters['ClassName'] = model_info['ClassName']\n prediction_parameters['DensityMean'] = model_info['DensityMean']\n prediction_parameters['DensityStd'] = model_info['DensityStd']\n prediction_parameters['DistanceMean'] = model_info['DistanceMean']\n prediction_parameters['DistanceStd'] = model_info['DistanceStd']\n prediction_parameters['TrainShape'] = model_info['TrainShape']\n prediction_parameters['Modi'] = model_info['Modi']\n prediction_parameters['ModelType'] = model_info['ModelType']\n prediction_parameters['ModelCode'] = model_info['ModelCode']\n\n\ndef cache_model(models_folder_path):\n \"\"\"\n Method to load model files (with needed objects sich as graph or session),\n additional models files (such as scaler, density model etc) from HDD to\n in-memory storage.\n Using to fast access in predictions or something else\n\n :param models_folder_path: path to folder with models and additional files\n :return: dict with linked models data ('in-memory storage' object), which\n can be used later for fast predictions or something else\n :type models_folder_path: str\n :rtype: dict\n \"\"\"\n\n cached_files = {\n 'models': list(),\n 'models_names': list(),\n 'graphs': list(),\n 'sessions': list(),\n 'models_number': 0,\n 'distance_matrix': None,\n 'scaler': None,\n 'train_mean': None,\n 'density_model': None,\n 'k_means': None\n }\n\n cache_model_files(cached_files, models_folder_path)\n cache_additional_files(cached_files, models_folder_path)\n\n return cached_files\n\n\ndef cache_model_files(cached_files, models_folder_path):\n \"\"\"\n Method to store model files (*.sav or *h5) in memory\n Also load graphs and sessions to storage, it 'must have' to using model\n Process data from HDD to memory storage ONLY if model NOT in storage yet\n\n :param cached_files: in-memory storage\n :param models_folder_path: path to folder with models files\n :type cached_files: dict\n :type models_folder_path: str\n \"\"\"\n\n start_timer = time()\n for model_file_path in glob.glob('{}/*'.format(models_folder_path)):\n model_file_name = model_file_path.split('/')[-1]\n if model_file_name in MODEL_ADDITIONAL_FILES:\n continue\n\n model, graph, session = prepare_model_by_path(model_file_path)\n cached_files['models_names'].append(model_file_name)\n cached_files['models'].append(model)\n cached_files['graphs'].append(graph)\n cached_files['sessions'].append(session)\n cached_files['models_number'] += 1\n LOGGER.info('MODELS LOAD: {} sec'.format(time() - start_timer))\n\n\ndef cache_additional_files(cached_files, models_folder_path):\n \"\"\"\n Method to store additional model files (such as scaler, density model etc)\n in memory. 
Process data from HDD to memory storage\n\n :param cached_files: in-memory storage\n :param models_folder_path: path to folder with additional models files\n :type cached_files: dict\n :type models_folder_path: str\n \"\"\"\n\n start_timer = time()\n cached_files['train_mean'] = numpy.loadtxt(\n '{}/{}'.format(models_folder_path, TRAIN_MEAN_FILENAME))\n LOGGER.info('TRAIN MEAN LOAD: {} sec'.format(time() - start_timer))\n\n start_timer = time()\n distance_matrix_path = '{}/{}'.format(\n models_folder_path, DISTANCE_MATRIX_FILENAME)\n cached_files['distance_matrix'] = sparse.load_npz(\n distance_matrix_path\n ).todense()\n LOGGER.info('DISTANCE MATRIX LOAD: {} sec'.format(time() - start_timer))\n\n start_timer = time()\n cached_files['density_model'] = joblib.load(\n '{}/{}'.format(models_folder_path, DENSITY_MODEL_FILENAME))\n LOGGER.info('DENSITY MODEL LOAD: {} sec'.format(time() - start_timer))\n\n start_timer = time()\n cached_files['scaler'] = None\n scaler_path = '{}/{}'.format(models_folder_path, SCALER_FILENAME)\n if scaler_path:\n cached_files['scaler'] = joblib.load(scaler_path)\n LOGGER.info('SCALER LOAD: {} sec'.format(time() - start_timer))\n\n start_timer = time()\n cached_files['k_means'] = joblib.load(\n '{}/{}'.format(models_folder_path, K_MEANS_FILENAME))\n LOGGER.info('K MEANS LOAD: {} sec'.format(time() - start_timer))\n\n\ndef prepare_model_by_path(model_path):\n \"\"\"\n Method to load model in memory (*.sav or *h5 file) with needed graph and\n session values. Clear graph and session before load each model.\n Return all needed objects to make predictions using loaded model, such as\n loaded model, graph and session objects\n\n :param model_path:\n :return: loaded model, graph for model and session for model\n :type model_path: str\n \"\"\"\n\n # dnn file extension\n if '.h5' in model_path:\n keras.backend.clear_session()\n model = keras.models.load_model(\n model_path,\n custom_objects={'coeff_determination': coeff_determination}\n )\n\n graph = tensorflow.get_default_graph()\n session = keras.backend.get_session()\n\n # classic file extension\n elif '.sav' in model_path:\n model = joblib.load(model_path)\n graph = tensorflow.get_default_graph()\n session = keras.backend.get_session()\n\n # unknown extension\n else:\n raise ValueError('Unknown model file name: {}'.format(model_path))\n\n return model, graph, session\n\n\ndef get_molecules_from_sdf_bytes(dataset):\n \"\"\"\n Method which make RDKit molecules from dataset bytes-object\n\n :param dataset: bytearray with molecules\n :return: list of RDKit molecules\n :type dataset: bytearray\n :rtype: list\n \"\"\"\n\n stream = io.BytesIO(dataset)\n supplier = Chem.ForwardSDMolSupplier(stream)\n molecules = [x for x in supplier if x]\n\n return molecules\n\n\ndef molecules_from_mol_strings(strings_list):\n \"\"\"\n Method to make RDKit molecules from mol strings\n Return RDKit molecules list\n\n :param strings_list: list of mol strings\n :return: RDKit molecules list\n :type strings_list: list\n :rtype: list\n \"\"\"\n\n molecules = []\n for string in strings_list:\n molecules.append(Chem.MolFromMolBlock(string))\n\n return molecules\n\n\ndef molecules_from_smiles(smiles_list):\n \"\"\"\n Method to make RDKit molecules from smiles\n Return RDKit molecules list\n\n :param smiles_list: list of SMILES strings\n :return: RDKit molecules list\n :type smiles_list: list\n :rtype: list\n \"\"\"\n\n molecules = []\n for smiles in smiles_list:\n molecules.append(Chem.MolFromSmiles(smiles))\n\n return molecules\n\n\ndef 
clear_models_folder(temporary_folder=TEMP_FOLDER):\n \"\"\"\n Method to clear temporary models folder before upload models\n Remove all in folder!!\n\n :param temporary_folder: path to temporary folder\n :type temporary_folder: str\n \"\"\"\n\n # make ssp folder path\n models_folder = '{}/SSP_temp_models'.format(temporary_folder)\n # remove temporary directory\n shutil.rmtree(models_folder, ignore_errors=True)\n\n\ndef TMP_from_numpy_by_field_name(ndarray, field_name, equal_values):\n \"\"\"\n Temporary method to get columns from numpy array using column name and\n filter values\n\n :param ndarray: numpy ndarray with 'value' and 'name' fields,\n which should be saved to csv file\n :param field_name: name of field to search in\n :param equal_values: values to search in ndarray by field name\n :type field_name: str\n :type equal_values: list\n :return: filtered numpy ndarray\n \"\"\"\n\n filtered_list = list()\n for value in equal_values:\n filtered_values = ndarray[numpy.where(ndarray[field_name] == value)]\n if len(filtered_values) > 0:\n filtered_list.append(filtered_values)\n\n return numpy.array(filtered_list)\n\n\ndef numpy_to_csv(numpy_array, csv_path):\n \"\"\"\n Temporary method to save numpy ndarray 'value' to cvs file with headers\n Using csv_path to save file\n\n :param numpy_array: numpy ndarray with 'value' and 'name' fields,\n which should be saved to csv file\n :param csv_path: csv file path to save numpy array\n :type csv_path: str\n \"\"\"\n\n with open(csv_path, 'w') as csv_file:\n csv_file.write(','.join(numpy_array[0]['name']) + '\\n')\n numpy.savetxt(csv_file, numpy_array['value'], fmt='%s', delimiter=',')\n\n\ndef get_inchi_key(molecule):\n \"\"\"\n Method to get InChi from rdkit 'molecule' entity\n\n :param molecule: rdkit 'molecule' entity\n :return: inchi_key text string\n :rtype: str\n \"\"\"\n\n inchi = Chem.MolToInchi(molecule)\n inchi_key = Chem.InchiToInchiKey(inchi)\n\n return inchi_key\n\n\ndef coeff_determination(y_true, y_predicted):\n \"\"\"\n Method for calculate determination coefficient\n\n :param y_true: true y value(s)\n :param y_predicted: predicted y value(s)\n :return: determination coefficient\n \"\"\"\n\n ss_res = keras.backend.sum(keras.backend.square(y_true - y_predicted))\n ss_tot = keras.backend.sum(\n keras.backend.square(y_true - keras.backend.mean(y_true)))\n\n return 1 - ss_res/(ss_tot + keras.backend.epsilon())\n\n\ndef single_fold_selector(x, y):\n \"\"\"\n Method to get all indexes from x array.\n Like stratified split but for single fold only\n\n :param x: array of x values\n :param y: array of y values\n :type x: numpy.ndarray\n :type y: numpy.ndarray\n :return: indexes for train and valid sets\n :rtype: list\n \"\"\"\n\n return [[numpy.arange(x.shape[0]), numpy.arange(x.shape[0])]]\n\n\ndef get_distance(x_predict, centroid, train_mean, train_shape):\n \"\"\"\n\n :param x_predict: feature vector for prediction\n :param centroid: centroid of train set\n :param train_mean: mean value of train set\n :param train_shape: shape of train set\n :return: Mahalanobis distance between feature vector and x_train\n \"\"\"\n # TODO description there\n variance_covariance_matrix = numpy.matmul(\n numpy.transpose(centroid), centroid\n ) / train_shape\n transposed_covariance_matrix = numpy.transpose(variance_covariance_matrix)\n Mahalanobis_distance = numpy.sqrt(\n numpy.matmul(\n numpy.matmul(\n x_predict - train_mean, transposed_covariance_matrix\n ),\n numpy.transpose(x_predict - train_mean)\n )\n )\n\n return Mahalanobis_distance\n"
] |
[
[
"tensorflow.get_default_graph",
"numpy.zeros"
],
[
"sklearn.externals.joblib.load",
"numpy.mean",
"pandas.DataFrame"
],
[
"sklearn.externals.joblib.load",
"numpy.arange",
"scipy.sparse.load_npz",
"numpy.matmul",
"numpy.transpose",
"numpy.savetxt",
"tensorflow.get_default_graph",
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
MattCJones/flightcondition
|
[
"264df161f1ca9152bc77c9d1a1b8bc29652f8ef4"
] |
[
"src/tests/test_flightcondition.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nTest flight condition functionality.\n\nAuthor: Matthew C. Jones\nEmail: [email protected]\n\n:copyright: 2021 Matthew C. Jones\n:license: MIT License, see LICENSE for more details.\n\"\"\"\n\n# flake8: noqa E203\n\nimport pytest\nimport re\n\nfrom shlex import split\nfrom subprocess import run\n\nfrom numpy import array\n\nfrom flightcondition import Atmosphere, FlightCondition, unit, dimless\nfrom common import assert_field, myapprox\n\nh_geom_arr = [0, 30e3] * unit('ft')\n\n\ndef test_TAS():\n \"\"\"Test true airspeed calculations. \"\"\"\n\n fc = FlightCondition(h_geom_arr, TAS=300*unit('knots'))\n\n TAS_truth = array([300, 300]) * unit('knots')\n assert_field(fc.TAS, TAS_truth)\n\n CAS_truth = array([300, 187.7518]) * unit('knots')\n assert_field(fc.CAS, CAS_truth)\n\n EAS_truth = array([300, 183.6448]) * unit('knots')\n assert_field(fc.EAS, EAS_truth)\n\n mach_truth = array([0.4535, 0.5090]) * dimless\n assert_field(fc.M_inf, mach_truth)\n\n\ndef test_CAS():\n \"\"\"Test calibrated airspeed calculations. \"\"\"\n\n fc = FlightCondition(h_geom_arr, CAS=300*unit('knots'))\n\n TAS_truth = array([300, 465.6309]) * unit('knots')\n assert_field(fc.TAS, TAS_truth)\n\n CAS_truth = array([300, 300]) * unit('knots')\n assert_field(fc.CAS, CAS_truth)\n\n EAS_truth = array([300, 285.0357]) * unit('knots')\n assert_field(fc.EAS, EAS_truth)\n\n mach_truth = array([0.4535, 0.7900]) * dimless\n assert_field(fc.M_inf, mach_truth)\n\n\ndef test_EAS():\n \"\"\"Test equivalent airspeed calculations. \"\"\"\n\n fc = FlightCondition(h_geom_arr, EAS=300*unit('knots'))\n\n TAS_truth = array([300, 490.0764]) * unit('knots')\n assert_field(fc.TAS, TAS_truth)\n\n CAS_truth = array([300, 317.3602]) * unit('knots')\n assert_field(fc.CAS, CAS_truth)\n\n EAS_truth = array([300, 300]) * unit('knots')\n assert_field(fc.EAS, EAS_truth)\n\n mach_truth = array([0.4535, 0.8314]) * dimless\n assert_field(fc.M_inf, mach_truth)\n\n\ndef test_mach():\n \"\"\"Test Mach number calculations. \"\"\"\n\n fc = FlightCondition(h_geom_arr, M_inf=0.88*dimless)\n\n TAS_truth = array([582.1012, 518.7004]) * unit('knots')\n assert_field(fc.TAS, TAS_truth)\n\n CAS_truth = array([582.1012, 337.977]) * unit('knots')\n assert_field(fc.CAS, CAS_truth)\n\n EAS_truth = array([582.1012, 317.5222]) * unit('knots')\n assert_field(fc.EAS, EAS_truth)\n\n mach_truth = array([0.88, 0.88]) * dimless\n assert_field(fc.M_inf, mach_truth)\n\n\ndef test_reynolds_number():\n \"\"\"Test Reynolds number calculations. \"\"\"\n\n ell = 5.34 * unit('ft')\n h_geom = 44.5 * unit('km')\n M_inf_ = 0.93 * dimless\n fc = FlightCondition(h_geom, M_inf=M_inf_)\n\n Re_test = fc.reynolds_number(ell).magnitude\n Re_truth = 62278\n\n assert Re_test == myapprox(Re_truth)\n\n\ndef test_input_altitude_bounds():\n \"\"\"Test that input altitude is properly bounded. Both FlightCondition\n and Atmosphere are covered in test since embedded Atmosphere object\n raises error.\n \"\"\"\n\n M_inf_ = 0.44 * dimless\n atm = Atmosphere(0*unit('km'))\n\n h_below_min = atm._h_min*1.01\n with pytest.raises(ValueError) as e_info:\n FlightCondition(h_below_min, M_inf=M_inf_)\n\n h_above_max = atm._h_max*1.01\n with pytest.raises(ValueError) as e_info:\n FlightCondition(h_above_max, M_inf=M_inf_)\n\n\ndef test_mach_bounds():\n \"\"\"Test that input is properly bounded. 
\"\"\"\n\n h_geom = 13.37 * unit('km')\n fc = FlightCondition(h_geom, M_inf=0.42*dimless)\n\n M_below_min = fc._mach_min - (0.00001*dimless)\n with pytest.raises(ValueError) as e_info:\n FlightCondition(h_geom, M_inf=M_below_min)\n\n M_above_max = fc._mach_max*1.01\n with pytest.raises(ValueError) as e_info:\n FlightCondition(h_geom, M_inf=M_above_max)\n\n\ndef test_command_line_interface():\n \"\"\"Test that command line interface is running properly. \"\"\"\n cmd_str = \"flightcondition --alt 23 kft --EAS 233 kt\"\n out = run(split(cmd_str), capture_output=True)\n out_str = out.stdout.decode()\n print(\"DEBUG\")\n print(type(out_str))\n print(out_str)\n out_regex = r\"\"\"[=]+\n\\s+Flight Condition.*\n[=]+\n[-]+\\s+Speed Quantities\\s+[-]+\n.*EAS\\s+= 233 kt\n.*\n[-]+\\s+Altitude Quantities\\s+[-]+\n.*h\\s+= 23 kft\n.*\"\"\"\n re_out = re.search(out_regex, out_str, re.DOTALL)\n assert re_out\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
changhoonhahn/GQP_mock_challenge
|
[
"831d5423edd9955ee1bda8d41e44d30cd3c6bd4b",
"831d5423edd9955ee1bda8d41e44d30cd3c6bd4b",
"831d5423edd9955ee1bda8d41e44d30cd3c6bd4b",
"831d5423edd9955ee1bda8d41e44d30cd3c6bd4b"
] |
[
"run/mocha_P2_provabgs.py",
"run/mocha_SP1_tau.py",
"run/fm_lgal.py",
"gqp_mc/fm.py"
] |
[
"'''\n\nP2 test (LGalaxies mocks) using provabgs model \n\n'''\nimport os, sys\nimport pickle \nimport numpy as np\nfrom functools import partial\nfrom multiprocessing.pool import Pool \n# --- gqp_mc ---\nfrom gqp_mc import util as UT \n# --- provabgs ---\nfrom provabgs import infer as Infer\nfrom provabgs import models as Models\n\n#####################################################################\n# input \n#####################################################################\ni0 = int(sys.argv[1]) \ni1 = int(sys.argv[2])\nniter = int(sys.argv[3])\nn_cpu = int(sys.argv[4])\n#####################################################################\n\n# read mock wavelength, flux, inverse variance, and theta \ndat_dir = os.path.join(UT.dat_dir(), 'mini_mocha')\nflux_obs = np.load(os.path.join(dat_dir, 'mocha_p2.flux.npy'))[:,:3]\nivar_obs = np.load(os.path.join(dat_dir, 'mocha_p2.ivar.npy'))[:,:3]\ntheta = pickle.load(open(os.path.join(dat_dir, 'l2.theta.p'), 'rb') )\n\nz_obs = theta['redshift']\n\n# declare model \nm_nmf = Models.NMF(burst=True, emulator=True)\n\n# set prior \nprior = Infer.load_priors([\n Infer.UniformPrior(7., 12.5, label='sed'),\n Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors\n Infer.UniformPrior(0., 1., label='sed'), # burst fraction\n Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst\n Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff\n Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'), # log uniform priors on ZH coeff\n Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1\n Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2\n Infer.UniformPrior(-2., 1., label='sed') # uniform priors on dust_index\n])\n\n\ndef run_mcmc(i_obs): \n fchain_npy = os.path.join(dat_dir, 'L2', 'P2.provabgs.%i.chain.npy' % i_obs)\n fchain_p = os.path.join(dat_dir, 'L2', 'P2.provabgs.%i.chain.p' % i_obs)\n\n if os.path.isfile(fchain_npy) and os.path.isfile(fchain_p): \n return None \n \n # desi MCMC object\n desi_mcmc = Infer.desiMCMC(model=m_nmf, prior=prior)\n\n # run MCMC\n zeus_chain = desi_mcmc.run(\n bands='desi', # g, r, z\n photo_obs=flux_obs[i_obs], \n photo_ivar_obs=ivar_obs[i_obs], \n zred=z_obs[i_obs],\n vdisp=0.,\n sampler='zeus',\n nwalkers=30,\n burnin=0,\n opt_maxiter=2000,\n niter=niter,\n progress=False,\n debug=True)\n chain = zeus_chain['mcmc_chain']\n\n # save chain \n np.save(fchain_npy, chain)\n pickle.dump(zeus_chain, open(fchain_p, 'wb'))\n return None \n\npool = Pool(processes=n_cpu) \npool.map(partial(run_mcmc), np.arange(i0, i1+1))\npool.close()\npool.terminate()\npool.join()\n",
"'''\n\nSP1 test (PROVABGS mocks) using tau model \n\nThe PROVABGS mocks are generated using provabgs. All at redshift z=0.2. They have a\nsomewhat toned down BGS-like noise \n\n'''\nimport os, sys\nimport pickle \nimport numpy as np\nfrom functools import partial\nfrom multiprocessing.pool import Pool \n# --- gqp_mc ---\nfrom gqp_mc import util as UT \n# --- provabgs ---\nfrom provabgs import infer as Infer\nfrom provabgs import models as Models\n\n#####################################################################\n# input \n#####################################################################\ni0 = int(sys.argv[1]) \ni1 = int(sys.argv[2])\nniter = int(sys.argv[3])\nn_cpu = int(sys.argv[4])\n#####################################################################\n\n# read mock wavelength, flux, inverse variance, and theta \ndat_dir = os.path.join(UT.dat_dir(), 'mini_mocha')\ntheta_obs = np.load(os.path.join(dat_dir, 'provabgs_mocks', 'provabgs_mock.theta.npy')) \nwave_obs = np.load(os.path.join(dat_dir, 'mocha_s1.wave.npy')) \nflux_obs = np.load(os.path.join(dat_dir, 'mocha_s1.flux.npy'))\nivar_obs = np.load(os.path.join(dat_dir, 'mocha_s1.ivar.npy')) \nphoto_obs = np.load(os.path.join(dat_dir, 'mocha_p1.flux.npy'))[:,:3]\nivar_photo_obs = np.load(os.path.join(dat_dir, 'mocha_p1.ivar.npy'))[:,:3]\n\n# all flux at z = 0.2 \nz_obs = 0.2\n\n# declare model \nm_tau = Models.Tau(burst=True, emulator=False)\ntage = m_tau.cosmo.age(z_obs).value\n\n# set prior \nprior = Infer.load_priors([\n Infer.UniformPrior(9., 12., label='sed'), \n Infer.UniformPrior(0.3, 1e1, label='sed'), # tau SFH\n Infer.UniformPrior(0., 0.2, label='sed'), # constant SFH\n Infer.UniformPrior(0., tage-2., label='sed'), # start time\n Infer.UniformPrior(0., 0.5, label='sed'), # fburst\n Infer.UniformPrior(0., tage, label='sed'), # tburst\n Infer.UniformPrior(1e-6, 1e-3, label='sed'), # metallicity\n Infer.UniformPrior(0., 4., label='sed')])\n\n\ndef run_mcmc(i_obs): \n fchain_npy = os.path.join(dat_dir, 'provabgs_mocks', 'SP1.tau_model.%i.chain.npy' % i_obs)\n fchain_p = os.path.join(dat_dir, 'provabgs_mocks', 'SP1.tau_model.%i.chain.p' % i_obs)\n\n if os.path.isfile(fchain_npy) and os.path.isfile(fchain_p): \n return None \n \n # desi MCMC object\n desi_mcmc = Infer.desiMCMC(model=m_tau, prior=prior)\n\n # run MCMC\n zeus_chain = desi_mcmc.run(\n wave_obs=wave_obs,\n flux_obs=flux_obs[i_obs],\n flux_ivar_obs=ivar_obs,\n bands='desi', # g, r, z\n photo_obs=photo_obs[i_obs], \n photo_ivar_obs=ivar_photo_obs[i_obs], \n zred=z_obs,\n vdisp=0.,\n sampler='zeus',\n nwalkers=30,\n burnin=0,\n opt_maxiter=2000,\n niter=niter,\n progress=False,\n debug=True)\n chain = zeus_chain['mcmc_chain']\n\n # save chain \n np.save(fchain_npy, chain)\n pickle.dump(zeus_chain, open(fchain_p, 'wb'))\n return None \n\npool = Pool(processes=n_cpu) \npool.map(partial(run_mcmc), np.arange(i0, i1+1))\npool.close()\npool.terminate()\npool.join()\n",
"'''\n\nforward model BGS photometry and spectroscopy for TNG spectra\n\n'''\nimport os \nimport glob\nimport h5py \nimport pickle \nimport numpy as np \nimport scipy as sp \n# --- astropy --- \nimport astropy.units as u\nimport astropy.constants as const\nfrom astropy.io import fits\nfrom astropy.table import Table\n# --- gqp_mc ---\nimport gqp_mc.fm as FM \nimport gqp_mc.util as UT \n\n\n#version = '1.1' # 12/10/2020 \nversion = '1.2' # 05/24/2021 \n\n\ndef fm_Lgal_fsps_mini(): \n ''' take just the first 100 galaxies \n '''\n n_mini = 100 \n\n # read in meta-data\n fmeta = os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mocha.fsps.v%s.meta.p' % (version))\n meta = pickle.load(open(fmeta, 'rb'))\n\n meta_mini = {}\n for k in meta.keys(): \n if isinstance(meta[k], list): \n meta_mini[k] = meta[k][:n_mini]\n elif isinstance(meta[k], np.ndarray): \n meta_mini[k] = meta[k][:n_mini]\n elif isinstance(meta[k], np.float): \n meta_mini[k] = meta[k]\n else: \n print(type(meta[k]))\n raise ValueError\n\n fmeta = os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mini_mocha.fsps.v%s.meta.p' % (version))\n pickle.dump(meta_mini, open(fmeta, 'wb')) # meta-data\n\n # the rest \n fmocha = h5py.File(os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mocha.fsps.v%s.hdf5' % (version)), 'r')\n mini_mocha = {} \n for k in fmocha.keys(): \n if 'wave' in k: \n mini_mocha[k] = fmocha[k][...]\n else: \n mini_mocha[k] = fmocha[k][...][:n_mini]\n fmocha.close() \n\n fmini = h5py.File(os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mini_mocha.fsps.v%s.hdf5' % (version)), 'w')\n for k in mini_mocha.keys(): \n fmini.create_dataset(k, data=mini_mocha[k])\n fmini.close() \n return None \n\n\ndef fm_Lgal_fsps(): \n ''' construct mock spectra and photometry using L-Galaxies SED constructed\n using FSPS MILES stellar library and MIST isochrones\n '''\n # read in LGal SED\n flgal = os.path.join('/global/cfs/cdirs/desi/mocks/LGal_spectra',\n 'Lgal_fsps_mocha.p')\n lgal_dict = pickle.load(open(flgal, 'rb'))\n ################################################################################\n # 0. 
compile meta-data\n meta = {} \n meta['t_lookback'] = lgal_dict['t_sfh']\n meta['dt'] = lgal_dict['dt']\n meta['sfh_disk'] = lgal_dict['sfh_disk']\n meta['sfh_bulge'] = lgal_dict['sfh_bulge']\n meta['Z_disk'] = lgal_dict['Z_disk']\n meta['Z_bulge'] = lgal_dict['Z_bulge']\n meta['logM_disk'] = [np.log10(np.sum(sfh)) for sfh in lgal_dict['sfh_disk']]\n meta['logM_bulge'] = [np.log10(np.sum(sfh)) for sfh in lgal_dict['sfh_bulge']]\n meta['logM_total'] = [np.log10(np.sum(sfh0) + np.sum(sfh1)) for sfh0, sfh1\n in zip(lgal_dict['sfh_disk'], lgal_dict['sfh_bulge'])]\n\n # mass weighted age and metallicity \n t_age_mw, z_mw = [], [] \n for i in range(len(lgal_dict['dt'])): \n t_age_mw.append(\n np.sum(lgal_dict['t_sfh'][i] * (lgal_dict['sfh_disk'][i] + lgal_dict['sfh_bulge'][i])) /\n np.sum(lgal_dict['sfh_disk'][i] + lgal_dict['sfh_bulge'][i])\n )\n z_mw.append(\n np.sum(lgal_dict['Z_disk'][i] * lgal_dict['sfh_disk'][i] +\n lgal_dict['Z_bulge'][i] * lgal_dict['sfh_bulge'][i]) / \n np.sum(lgal_dict['sfh_disk'][i] + lgal_dict['sfh_bulge'][i])\n )\n meta['t_age_MW'] = t_age_mw \n meta['Z_MW'] = z_mw\n meta['redshift'] = lgal_dict['redshift'] \n meta['cosi'] = lgal_dict['cosi']\n meta['tau_ism'] = lgal_dict['tauISM']\n meta['tau_bc'] = lgal_dict['tauBC']\n meta['vd_disk'] = lgal_dict['vd_disk']\n meta['vd_bulge'] = lgal_dict['vd_bulge']\n print('%.2f < z < %.2f' % (np.min(meta['redshift']), np.max(meta['redshift'])))\n\n ################################################################################\n # 1. generate 'true' photometry from noiseless spectra \n wave = np.array(lgal_dict['wave_obs'])\n # convert from Lsun/A/m2 --> 1e-17 erg/s/A/cm2\n flux_dust = np.array(lgal_dict['flux_dust']) * 3.846e33 * 1e-4 * 1e17\n # interpoalte to save wavelength grid\n wave_lin = np.arange(1e3, 3e5, 0.2)\n flux_dust_interp = np.zeros((flux_dust.shape[0], len(wave_lin)))\n for i in range(flux_dust.shape[0]): \n interp_flux_dust = sp.interpolate.interp1d(wave[i], flux_dust[i], fill_value='extrapolate') \n flux_dust_interp[i,:] = interp_flux_dust(wave_lin) \n\n bands = ['g', 'r', 'z', 'w1', 'w2', 'w3', 'w4']\n photo_true, mag_true = FM.Photo_DESI(wave, flux_dust, bands=bands) \n ################################################################################\n # 2. assign uncertainties to the photometry and fiberfrac using BGS targets from the Legacy survey \n bgs_targets = h5py.File(os.path.join(UT.dat_dir(), 'bgs.1400deg2.rlim21.0.hdf5'), 'r')\n n_targets = len(bgs_targets['ra'][...]) \n\n bgs_photo = np.zeros((n_targets, len(bands))) \n bgs_photo_ivar = np.zeros((n_targets, len(bands)))\n bgs_fiberflux = np.zeros(n_targets) # r-band fiber flux\n for ib, band in enumerate(bands): \n bgs_photo[:,ib] = bgs_targets['flux_%s' % band][...] \n bgs_photo_ivar[:,ib] = bgs_targets['flux_ivar_%s' % band][...] 
\n bgs_fiberflux = bgs_targets['fiberflux_r'][...]\n \n from scipy.spatial import cKDTree as KDTree\n # construct KD tree from BGS targets (currently downsampled) \n #bgs_features = np.array([bgs_photo[:,0], bgs_photo[:,1], bgs_photo[:,2], \n # bgs_photo[:,0] - bgs_photo[:,1], bgs_photo[:,1] - bgs_photo[:,2]]).T\n bgs_features = np.array([bgs_photo[:,1], bgs_photo[:,0] - bgs_photo[:,1], bgs_photo[:,1] - bgs_photo[:,2]]).T\n tree = KDTree(bgs_features) \n # match ivars and fiberflux \n lgal_features = np.array([photo_true[:,1], photo_true[:,0] - photo_true[:,1], photo_true[:,1] - photo_true[:,2]]).T\n dist, indx = tree.query(lgal_features)\n\n photo_ivars = bgs_photo_ivar[indx,:] \n photo_fiber_true = bgs_fiberflux[indx] \n ################################################################################\n # 3. apply noise model to photometry\n # 3.a. apply the uncertainty to the photometry to get \"measured\" photometry. \n photo_meas = photo_true + photo_ivars**-0.5 * np.random.randn(photo_true.shape[0], photo_true.shape[1]) \n\n f_fiber = photo_fiber_true/photo_true[:,1] # (r fiber flux) / (r total flux) \n assert f_fiber.max() <= 1.\n meta['logM_fiber'] = np.log10(f_fiber) + meta['logM_total']\n\n # apply uncertainty to fiber flux as well \n photo_fiber_meas = photo_fiber_true + f_fiber * photo_ivars[:,1]**-0.5 * np.random.randn(photo_true.shape[0]) \n photo_ivar_fiber = f_fiber**-2 * photo_ivars[:,1] \n\n # 3.b. get fiber spectra by scaling down noiseless Lgal source spectra\n spectra_fiber = flux_dust_interp * f_fiber[:,None] # 10e-17 erg/s/cm2/A\n\n ################################################################################\n # 4. generate BGS like spectra\n from feasibgs import spectral_sims as BGS_spec_sim\n from feasibgs import forwardmodel as BGS_fm\n\n Idark = BGS_spec_sim.nominal_dark_sky()\n fdesi = BGS_fm.fakeDESIspec()\n\n spectra_bgs = {} \n\n fbgs = os.path.join(UT.dat_dir(), 'mini_mocha', 'fsps.bgs_spec.fsps.v%s.fits' % version)\n print(fbgs)\n bgs_spec = fdesi.simExposure(\n wave_lin, # wavelength \n spectra_fiber, # fiber spectra flux \n exptime=180.,\n airmass=1.1,\n Isky=[Idark[0].value, Idark[1].value],\n filename=fbgs\n )\n\n spectra_bgs['wave_b'] = bgs_spec.wave['b']\n spectra_bgs['wave_r'] = bgs_spec.wave['r']\n spectra_bgs['wave_z'] = bgs_spec.wave['z']\n spectra_bgs['flux_b'] = bgs_spec.flux['b']\n spectra_bgs['flux_r'] = bgs_spec.flux['r']\n spectra_bgs['flux_z'] = bgs_spec.flux['z']\n \n spectra_bgs['ivar_b'] = bgs_spec.ivar['b']\n spectra_bgs['ivar_r'] = bgs_spec.ivar['r']\n spectra_bgs['ivar_z'] = bgs_spec.ivar['z']\n\n spectra_bgs['res_b'] = bgs_spec.resolution_data['b']\n spectra_bgs['res_r'] = bgs_spec.resolution_data['r']\n spectra_bgs['res_z'] = bgs_spec.resolution_data['z']\n ################################################################################\n # 5. 
write out everything \n # meta-data to pickle file\n fmeta = os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mocha.fsps.v%s.meta.p' % (version))\n pickle.dump(meta, open(fmeta, 'wb')) # meta-data\n \n # the rest \n fout = h5py.File(os.path.join(UT.dat_dir(), 'mini_mocha', \n 'lgal.mocha.fsps.v%s.hdf5' % (version)), 'w')\n # photometry \n for i, b in enumerate(bands): \n # 'true' \n fout.create_dataset('photo_flux_%s_true' % b, data=photo_true[:,i]) \n fout.create_dataset('photo_ivar_%s_true' % b, data=photo_ivars[:,i]) \n # 'measured'\n fout.create_dataset('photo_flux_%s_meas' % b, data=photo_meas[:,i]) \n\n # fiber flux \n fout.create_dataset('photo_fiberflux_r_true', data=photo_fiber_true) \n fout.create_dataset('photo_fiberflux_r_meas', data=photo_fiber_meas) \n fout.create_dataset('photo_fiberflux_r_ivar', data=photo_ivar_fiber) \n fout.create_dataset('frac_fiber', data=f_fiber) # fraction of flux in fiber\n \n # spectroscopy \n # noiseless source spectra \n wlim = (wave_lin < 2e5) & (wave_lin > 1e3) # truncating the spectra \n fout.create_dataset('spec_wave_source', data=wave_lin[wlim]) \n fout.create_dataset('spec_flux_source', data=flux_dust_interp[:,wlim]) \n # noiseless source spectra in fiber \n fout.create_dataset('spec_fiber_flux_source', data=spectra_fiber[:,wlim])\n \n # BGS source spectra \n for k in spectra_bgs.keys(): \n fout.create_dataset('spec_%s_bgs' % k, data=spectra_bgs[k]) \n fout.close() \n return None \n\n\ndef fm_Lgal_mini_mocha(lib='bc03'): \n ''' generate spectroscopy and photometry for the mini Mock Challenge (MoCha)\n \n * input: galaxy properties (SFH, ZH, etc), noiseless spectra \n * \"true\" photometry directly from noiseless spectra\n * assign photometric uncertainty and fiber flux using legacy imaging \n * \"measured\" photometry and fiber flux + fiber source spectra (scaled down noiseless spectra), \n * \"BGS\" spectra\n '''\n from scipy.spatial import cKDTree as KDTree\n # read in mini mocha galids \n fids = os.path.join(UT.dat_dir(), 'mini_mocha', 'lgal.galids.%s.txt' % lib)\n galids = np.loadtxt(fids, skiprows=1) \n\n # get Lgal meta data \n _meta = _lgal_metadata(galids)\n \n # get noiseless source spectra \n _meta_spec, spectra_s = _lgal_noiseless_spectra(galids, lib=lib) \n\n # compile meta-data \n meta = {} \n for k in _meta.keys(): meta[k] = _meta[k]\n for k in _meta_spec.keys(): meta[k] = _meta_spec[k] \n print('%.2f < z < %.2f' % (meta['redshift'].min(), meta['redshift'].max()))\n\n # 1. generate 'true' photometry from noiseless spectra \n bands = ['g', 'r', 'z', 'w1', 'w2', 'w3', 'w4']\n photo_true, _ = FM.Photo_DESI(spectra_s['wave'], spectra_s['flux_dust'],\n bands=bands) \n \n # 2. assign uncertainties to the photometry using BGS targets from the Legacy survey \n bgs_targets = h5py.File(os.path.join(UT.dat_dir(), 'bgs.1400deg2.rlim21.0.hdf5'), 'r')\n n_targets = len(bgs_targets['ra'][...]) \n\n bgs_photo = np.zeros((n_targets, len(bands))) \n bgs_photo_ivar = np.zeros((n_targets, len(bands)))\n bgs_fiberflux = np.zeros(n_targets) # r-band fiber flux\n for ib, band in enumerate(bands): \n bgs_photo[:,ib] = bgs_targets['flux_%s' % band][...] \n bgs_photo_ivar[:,ib] = bgs_targets['flux_ivar_%s' % band][...] 
\n bgs_fiberflux = bgs_targets['fiberflux_r'][...]\n \n # construct KD tree from BGS targets (currently downsampled) \n bgs_features = np.array([bgs_photo[:,0], bgs_photo[:,1], bgs_photo[:,2], \n bgs_photo[:,0] - bgs_photo[:,1], bgs_photo[:,1] - bgs_photo[:,2]]).T\n tree = KDTree(bgs_features) \n # match ivars and fiberflux \n match_features = np.array([photo_true[:,0], photo_true[:,1], photo_true[:,2], \n photo_true[:,0] - photo_true[:,1], photo_true[:,1] - photo_true[:,2]]).T\n dist, indx = tree.query(match_features)\n photo_ivars = bgs_photo_ivar[indx,:] \n photo_fiber_true = bgs_fiberflux[indx] \n\n # 3.a. apply the uncertainty to the photometry to get \"measured\" photometry. \n photo_meas = photo_true + photo_ivars**-0.5 * np.random.randn(photo_true.shape[0], photo_true.shape[1]) \n\n f_fiber = photo_fiber_true/photo_true[:,1] # (r fiber flux) / (r total flux) \n assert f_fiber.max() <= 1.\n meta['logM_fiber'] = np.log10(f_fiber) + meta['logM_total']\n\n # apply uncertainty to fiber flux as well \n photo_fiber_meas = photo_fiber_true + f_fiber * photo_ivars[:,1]**-0.5 * np.random.randn(photo_true.shape[0]) \n photo_ivar_fiber = f_fiber**-2 * photo_ivars[:,1] \n\n # 3.b. get fiber spectra by scaling down noiseless Lgal source spectra\n spectra_fiber = spectra_s['flux_dust'] * f_fiber[:,None] # 10e-17 erg/s/cm2/A\n\n # 4. generate BGS like spectra\n from feasibgs import spectral_sims as BGS_spec_sim\n from feasibgs import forwardmodel as BGS_fm\n\n Idark = BGS_spec_sim.nominal_dark_sky()\n fdesi = BGS_fm.fakeDESIspec()\n\n spectra_bgs = {} \n\n fbgs = os.path.join(UT.dat_dir(), 'mini_mocha', 'fsps.bgs_spec.%s.v%s.fits' % (lib, version))\n bgs_spec = fdesi.simExposure(\n spectra_s['wave'], # wavelength \n spectra_fiber, # fiber spectra flux \n exptime=180.,\n airmass=1.1,\n Isky=[Idark[0].value, Idark[1].value],\n filename=fbgs\n )\n\n spectra_bgs['wave_b'] = bgs_spec.wave['b']\n spectra_bgs['wave_r'] = bgs_spec.wave['r']\n spectra_bgs['wave_z'] = bgs_spec.wave['z']\n spectra_bgs['flux_b'] = bgs_spec.flux['b']\n spectra_bgs['flux_r'] = bgs_spec.flux['r']\n spectra_bgs['flux_z'] = bgs_spec.flux['z']\n \n spectra_bgs['ivar_b'] = bgs_spec.ivar['b']\n spectra_bgs['ivar_r'] = bgs_spec.ivar['r']\n spectra_bgs['ivar_z'] = bgs_spec.ivar['z']\n\n spectra_bgs['res_b'] = bgs_spec.resolution_data['b']\n spectra_bgs['res_r'] = bgs_spec.resolution_data['r']\n spectra_bgs['res_z'] = bgs_spec.resolution_data['z']\n\n # write out everything \n fmeta = os.path.join(UT.dat_dir(), 'mini_mocha',\n 'lgal.mini_mocha.%s.v%s.meta.p' % (lib, version))\n fout = h5py.File(os.path.join(UT.dat_dir(), 'mini_mocha', \n 'lgal.mini_mocha.%s.v%s.hdf5' % (lib, version)), 'w')\n\n pickle.dump(meta, open(fmeta, 'wb')) # meta-data\n\n # photometry \n for i, b in enumerate(bands): \n # 'true' \n fout.create_dataset('photo_flux_%s_true' % b, data=photo_true[:,i]) \n fout.create_dataset('photo_ivar_%s_true' % b, data=photo_ivars[:,i]) \n # 'measured'\n fout.create_dataset('photo_flux_%s_meas' % b, data=photo_meas[:,i]) \n\n # fiber flux \n fout.create_dataset('photo_fiberflux_r_true', data=photo_fiber_true) \n fout.create_dataset('photo_fiberflux_r_meas', data=photo_fiber_meas) \n fout.create_dataset('photo_fiberflux_r_ivar', data=photo_ivar_fiber) \n fout.create_dataset('frac_fiber', data=f_fiber) # fraction of flux in fiber\n \n # spectroscopy \n # noiseless source spectra \n wlim = (spectra_s['wave'] < 2e5) & (spectra_s['wave'] > 1e3) # truncating the spectra \n fout.create_dataset('spec_wave_source', 
data=spectra_s['wave'][wlim]) \n fout.create_dataset('spec_flux_source', data=spectra_s['flux_dust'][:,wlim]) \n # noiseless source spectra in fiber \n fout.create_dataset('spec_fiber_flux_source', data=spectra_fiber[:,wlim])\n \n # BGS source spectra \n for k in spectra_bgs.keys(): \n fout.create_dataset('spec_%s_bgs' % k, data=spectra_bgs[k]) \n fout.close() \n return None \n\n\ndef _mini_mocha_galid(lib='bc03'): \n ''' pick 100 unique Lgal galids that roughly fall under the BGS target selection \n for the mini mock challenge: r < 20. \n '''\n # gather all galids \n galids = [] \n dir_inputs = os.path.join(UT.lgal_dir(), 'gal_inputs')\n for finput in glob.glob(dir_inputs+'/*'): \n galids.append(int(os.path.basename(finput).split('_')[2]))\n galids = np.array(galids) \n n_id = len(galids) \n\n # get noiseless source spectra \n _, spectra_s = _lgal_noiseless_spectra(galids, lib=lib)\n # get DECAM photometry \n photo, _ = FM.Photo_DESI(spectra_s['wave'], spectra_s['flux_dust']) \n\n target_selection = (photo[:,1] <= 20.) \n print('%i Lgal galaxies within target_selection' % np.sum(target_selection)) \n\n # now randomly choose 100 galids \n mini_galids = np.random.choice(galids[target_selection], size=100, replace=False) \n fids = os.path.join(UT.dat_dir(), 'mini_mocha', 'lgal.galids.%s.txt' % lib)\n np.savetxt(fids, mini_galids, fmt='%i', header='%i Lgal galids for mini mock challenge' % len(mini_galids)) \n return None \n\n\ndef _lgal_noiseless_spectra(galids, lib='bc03'): \n ''' return noiseless source spectra of Lgal galaxies given the galids and \n the library. The spectra is interpolated to a standard wavelength grid. \n '''\n n_id = len(galids) \n\n if lib == 'bc03': str_lib = 'BC03_Stelib'\n elif lib == 'fsps': str_lib = 'FSPS_uvmiles'\n else: raise ValueError\n\n # noiseless source spectra\n _Fsource = lambda galid: os.path.join(UT.lgal_dir(), 'templates', \n 'gal_spectrum_%i_BGS_template_%s.fits' % (galid, str_lib)) \n \n wavemin, wavemax = 3000.0, 3e5\n wave = np.arange(wavemin, wavemax, 0.2)\n flux_dust = np.zeros((n_id, len(wave)))\n flux_nodust = np.zeros((n_id, len(wave)))\n \n redshift, cosi, tau_ism, tau_bc, vd_disk, vd_bulge = [], [], [], [], [], [] \n for i, galid in enumerate(galids): \n f_source = fits.open(_Fsource(galid)) \n # grab extra meta data from header\n hdr = f_source[0].header\n redshift.append( hdr['REDSHIFT'])\n cosi.append( hdr['COSI'])\n tau_ism.append( hdr['TAUISM'])\n tau_bc.append( hdr['TAUBC'])\n vd_disk.append( hdr['VD_DISK'])\n vd_bulge.append( hdr['VD_BULGE'])\n\n specin = f_source[1].data\n \n _flux_dust = specin['flux_dust_nonoise'] * 1e-4 * 1e7 *1e17 #from W/A/m2 to 10e-17 erg/s/cm2/A\n _flux_nodust = specin['flux_nodust_nonoise'] * 1e-4 * 1e7 *1e17 #from W/A/m2 to 10e-17 erg/s/cm2/A\n\n interp_flux_dust = sp.interpolate.interp1d(specin['wave'], _flux_dust, fill_value='extrapolate') \n interp_flux_nodust = sp.interpolate.interp1d(specin['wave'], _flux_nodust, fill_value='extrapolate') \n\n flux_dust[i,:] = interp_flux_dust(wave) \n flux_nodust[i,:] = interp_flux_nodust(wave) \n\n meta = {\n 'redshift': np.array(redshift), \n 'cosi': np.array(cosi), \n 'tau_ism': np.array(tau_ism), \n 'tau_bc': np.array(tau_bc), \n 'vd_disk': np.array(vd_disk), \n 'vd_bulge': np.array(vd_bulge) \n } \n spectra = {\n 'wave': wave, \n 'flux_dust': flux_dust, \n 'flux_nodust': flux_nodust\n } \n return meta, spectra\n\n\ndef _lgal_metadata(galids): \n ''' return galaxy properties (meta data) of Lgal galaxies \n given the galids \n '''\n tlookback, dt = [], [] \n 
sfh_disk, sfh_bulge, Z_disk, Z_bulge, logM_disk, logM_bulge, logM_total = [], [], [], [], [], [], []\n t_age_MW, Z_MW = [], [] \n for i, galid in enumerate(galids): \n f_input = os.path.join(UT.lgal_dir(), 'gal_inputs', \n 'gal_input_%i_BGS_template_FSPS_uvmiles.csv' % galid) \n gal_input = Table.read(f_input, delimiter=' ')\n\n tlookback.append(gal_input['sfh_t']) # lookback time (age) \n dt.append(gal_input['dt'])\n # SF history \n sfh_disk.append(gal_input['sfh_disk'])\n sfh_bulge.append(gal_input['sfh_bulge'])\n # metalicity history \n Z_disk.append(gal_input['Z_disk'])\n Z_bulge.append(gal_input['Z_bulge'])\n # formed mass \n logM_disk.append(np.log10(np.sum(gal_input['sfh_disk'])))\n logM_bulge.append(np.log10(np.sum(gal_input['sfh_bulge'])))\n logM_total.append(np.log10(np.sum(gal_input['sfh_disk']) + np.sum(gal_input['sfh_bulge'])))\n # mass weighted\n t_age_MW.append(np.sum(gal_input['sfh_t'] * (gal_input['sfh_disk'] + gal_input['sfh_bulge'])) / np.sum(gal_input['sfh_disk'] + gal_input['sfh_bulge']))\n Z_MW.append(np.sum(gal_input['Z_disk'] * gal_input['sfh_disk'] + gal_input['Z_bulge'] * gal_input['sfh_bulge']) / np.sum(gal_input['sfh_disk'] + gal_input['sfh_bulge']))\n \n meta = {} \n meta['galid'] = galids\n meta['t_lookback'] = tlookback\n meta['dt'] = dt \n meta['sfh_disk'] = sfh_disk\n meta['sfh_bulge'] = sfh_bulge\n meta['Z_disk'] = Z_disk\n meta['Z_bulge'] = Z_bulge\n meta['logM_disk'] = logM_disk\n meta['logM_bulge'] = logM_bulge\n meta['logM_total'] = logM_total\n meta['t_age_MW'] = t_age_MW\n meta['Z_MW'] = Z_MW\n return meta\n\n\ndef QA_fm_Lgal_mini_mocha(lib='bc03'): \n ''' quality assurance/sanity plots \n '''\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n mpl.rcParams['text.usetex'] = True\n mpl.rcParams['font.family'] = 'serif'\n mpl.rcParams['axes.linewidth'] = 1.5\n mpl.rcParams['axes.xmargin'] = 1\n mpl.rcParams['xtick.labelsize'] = 'x-large'\n mpl.rcParams['xtick.major.size'] = 5\n mpl.rcParams['xtick.major.width'] = 1.5\n mpl.rcParams['ytick.labelsize'] = 'x-large'\n mpl.rcParams['ytick.major.size'] = 5\n mpl.rcParams['ytick.major.width'] = 1.5\n mpl.rcParams['legend.frameon'] = False\n\n # read mini mocha data \n fmm = h5py.File(os.path.join(UT.dat_dir(), 'mini_mocha', \n 'lgal.mini_mocha.%s.v%s.hdf5' % (lib, version)), 'r')\n\n ngal = fmm['spec_flux_source'][...].shape[0]\n \n # plot BGS spectra and source spectra for sanity checks \n fig = plt.figure(figsize=(15,15))\n for ii, i in enumerate(np.random.choice(np.arange(ngal), size=3, replace=False)): \n sub = fig.add_subplot(3,1,ii+1)\n for band in ['b', 'r', 'z']: \n sub.plot(fmm['spec_wave_%s_bgs' % band][...], fmm['spec_flux_%s_bgs' % band][...][0,i,:], c='C0') \n sub.plot(fmm['spec_wave_source'][...],\n fmm['spec_fiber_flux_source'][...][i,:],\n c='k', ls='--') \n sub.set_xlim(3.6e3, 9.8e3)\n sub.set_ylim(-2, 10)\n sub.set_xlabel('wavelength', fontsize=25) \n fig.savefig(os.path.join(UT.dat_dir(), 'mini_mocha', \n 'lgal.mini_mocha.%s.v%s.png' % (lib, version)), bbox_inches='tight') \n return None \n\n\nif __name__==\"__main__\": \n fm_Lgal_fsps_mini()\n #fm_Lgal_fsps()\n #_mini_mocha_galid(lib='fsps')\n #fm_Lgal_mini_mocha(lib='fsps')\n #QA_fm_Lgal_mini_mocha()\n",
"'''\n\nsubmodule for forward modeling spectrophotometry \n\n\n'''\nimport os \nimport numpy as np \nfrom speclite import filters as specFilter\n\n\ndef Photo_DESI(wave, spectra, bands=['g', 'r', 'z']): \n ''' generate photometry by convolving the input spectrum with DECAM and WISE \n bandpasses: g, r, z, W1, W2, W3, W4 filters. \n\n :param wave: \n wavelength of input spectra in Angstroms. 2D array Nspec x Nwave.\n\n :param fluxes: \n fluxes of input spectra. This should be noiseless source spectra. \n 2D array Nspec x Nwave. In units of 10e-17 erg/s/cm2/A \n '''\n wave = np.atleast_2d(wave)\n assert wave.shape[1] == spectra.shape[1] \n\n n_spec = spectra.shape[0] # number of spectra \n if wave.shape[0] == 1: wave = np.tile(wave, (n_spec, 1))\n\n from astropy import units as U\n\n filter_dict = {'g': 'decam2014-g', 'r': 'decam2014-r', 'z': 'decam2014-z',\n 'w1': 'wise2010-W1', 'w2': 'wise2010-W2', 'w3': 'wise2010-W3',\n 'w4': 'wise2010-W4'}\n \n # load DECAM g, r, z and WISE W1-4\n filter_response = specFilter.load_filters(\n *tuple([filter_dict[b] for b in bands]))\n \n # apply filters\n fluxes = np.zeros((n_spec, len(bands))) # photometric flux in nanomaggies \n for i in range(n_spec): \n spectrum = spectra[i] \n\n # apply filters\n flux = np.array(filter_response.get_ab_maggies(\n np.atleast_2d(spectrum) * 1e-17 * U.erg/U.s/U.cm**2/U.Angstrom, \n wave[i,:]*U.Angstrom))\n # convert to nanomaggies \n fluxes[i,:] = 1e9 * np.array([flux[0][i] for i in range(len(bands))])\n \n # calculate magnitudes (not advised due to NaNs) \n mags = 22.5 - 2.5 * np.log10(fluxes) \n return fluxes, mags \n\n\ndef Spec_BGS(wave, flux, exptime, airmass, Isky, filename=None):\n ''' Given noiseless spectra, simulate noisy BGS spectra with Isky \n sky brightness, exptime sec exposure time, and airmass. Wrapper for \n FM.fakeDESIspec().simExposure \n \n :param wave: \n wavelength of spectra. Nwave\n :param flux: \n noiseless spectra in units of 1e-17 erg/s/cm2/A. Nspec x Nwave\n :param exptime: \n exposure time \n :param airmass: \n airmass \n :param Isky: \n [wave_sky, sky_brightness]. sky brightness is in units of \n 1e-17 erg / Ang / arcsec^2 / cm^2 / sec\n :param filename: \n If specified, the output fits file. (default: None) \n\n :return bgs_spec: \n data structure with all BGS data from the DESI spectrographs: \n bgs.wave['b'], bgs.wave['r'], bgs.wave['z'] \n bgs.flux['b'], bgs.flux['r'], bgs.flux['z'] \n bgs.ivar['b'], bgs.ivar['r'], bgs.ivar['z'] \n '''\n # requires desiutil, desimodel, desisim, desispec, desitarget,\n # also requires numba, fitsio, healpy, pandas, astroplan... shoot me in the face!\n from feasibgs import forwardmodel as FM \n\n fdesi = FM.fakeDESIspec()\n bgs_spec = fdesi.simExposure(wave, flux, exptime=exptime, airmass=airmass, Isky=Isky, filename=filename) \n return bgs_spec \n"
] |
[
[
"numpy.arange",
"numpy.save"
],
[
"numpy.arange",
"numpy.save"
],
[
"numpy.sum",
"numpy.random.choice",
"numpy.min",
"numpy.arange",
"numpy.max",
"scipy.interpolate.interp1d",
"numpy.log10",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"scipy.spatial.cKDTree",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
],
[
"numpy.atleast_2d",
"numpy.log10",
"numpy.tile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangyendt/deeplearning_models
|
[
"47883b6c65b8d05a0d1c5737f1552df6476ded34"
] |
[
"sklearn/sklearn learning/demonstration/auto_examples_python/applications/plot_stock_market.py"
] |
[
"\"\"\"\n=======================================\nVisualizing the stock market structure\n=======================================\n\nThis example employs several unsupervised learning techniques to extract\nthe stock market structure from variations in historical quotes.\n\nThe quantity that we use is the daily variation in quote price: quotes\nthat are linked tend to cofluctuate during a day.\n\n.. _stock_market:\n\nLearning a graph structure\n--------------------------\n\nWe use sparse inverse covariance estimation to find which quotes are\ncorrelated conditionally on the others. Specifically, sparse inverse\ncovariance gives us a graph, that is a list of connection. For each\nsymbol, the symbols that it is connected too are those useful to explain\nits fluctuations.\n\nClustering\n----------\n\nWe use clustering to group together quotes that behave similarly. Here,\namongst the :ref:`various clustering techniques <clustering>` available\nin the scikit-learn, we use :ref:`affinity_propagation` as it does\nnot enforce equal-size clusters, and it can choose automatically the\nnumber of clusters from the data.\n\nNote that this gives us a different indication than the graph, as the\ngraph reflects conditional relations between variables, while the\nclustering reflects marginal properties: variables clustered together can\nbe considered as having a similar impact at the level of the full stock\nmarket.\n\nEmbedding in 2D space\n---------------------\n\nFor visualization purposes, we need to lay out the different symbols on a\n2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D\nembedding.\n\n\nVisualization\n-------------\n\nThe output of the 3 models are combined in a 2D graph where nodes\nrepresents the stocks and edges the:\n\n- cluster labels are used to define the color of the nodes\n- the sparse covariance model is used to display the strength of the edges\n- the 2D embedding is used to position the nodes in the plan\n\nThis example has a fair amount of visualization-related code, as\nvisualization is crucial here to display the graph. One of the challenge\nis to position the labels minimizing overlap. For this we use an\nheuristic based on the direction of the nearest neighbor along each\naxis.\n\"\"\"\n\n# Author: Gael Varoquaux [email protected]\n# License: BSD 3 clause\n\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\nimport pandas as pd\n\nfrom sklearn import cluster, covariance, manifold\n\nprint(__doc__)\n\n\n# #############################################################################\n# Retrieve the data from Internet\n\n# The data is from 2003 - 2008. This is reasonably calm: (not too long ago so\n# that we get high-tech firms, and before the 2008 crash). 
This kind of\n# historical data can be obtained for from APIs like the quandl.com and\n# alphavantage.co ones.\n\nsymbol_dict = {\n 'TOT': 'Total',\n 'XOM': 'Exxon',\n 'CVX': 'Chevron',\n 'COP': 'ConocoPhillips',\n 'VLO': 'Valero Energy',\n 'MSFT': 'Microsoft',\n 'IBM': 'IBM',\n 'TWX': 'Time Warner',\n 'CMCSA': 'Comcast',\n 'CVC': 'Cablevision',\n 'YHOO': 'Yahoo',\n 'DELL': 'Dell',\n 'HPQ': 'HP',\n 'AMZN': 'Amazon',\n 'TM': 'Toyota',\n 'CAJ': 'Canon',\n 'SNE': 'Sony',\n 'F': 'Ford',\n 'HMC': 'Honda',\n 'NAV': 'Navistar',\n 'NOC': 'Northrop Grumman',\n 'BA': 'Boeing',\n 'KO': 'Coca Cola',\n 'MMM': '3M',\n 'MCD': 'McDonald\\'s',\n 'PEP': 'Pepsi',\n 'K': 'Kellogg',\n 'UN': 'Unilever',\n 'MAR': 'Marriott',\n 'PG': 'Procter Gamble',\n 'CL': 'Colgate-Palmolive',\n 'GE': 'General Electrics',\n 'WFC': 'Wells Fargo',\n 'JPM': 'JPMorgan Chase',\n 'AIG': 'AIG',\n 'AXP': 'American express',\n 'BAC': 'Bank of America',\n 'GS': 'Goldman Sachs',\n 'AAPL': 'Apple',\n 'SAP': 'SAP',\n 'CSCO': 'Cisco',\n 'TXN': 'Texas Instruments',\n 'XRX': 'Xerox',\n 'WMT': 'Wal-Mart',\n 'HD': 'Home Depot',\n 'GSK': 'GlaxoSmithKline',\n 'PFE': 'Pfizer',\n 'SNY': 'Sanofi-Aventis',\n 'NVS': 'Novartis',\n 'KMB': 'Kimberly-Clark',\n 'R': 'Ryder',\n 'GD': 'General Dynamics',\n 'RTN': 'Raytheon',\n 'CVS': 'CVS',\n 'CAT': 'Caterpillar',\n 'DD': 'DuPont de Nemours'}\n\n\nsymbols, names = np.array(sorted(symbol_dict.items())).T\n\nquotes = []\n\nfor symbol in symbols:\n print('Fetching quote history for %r' % symbol, file=sys.stderr)\n url = ('https://raw.githubusercontent.com/scikit-learn/examples-data/'\n 'master/financial-data/{}.csv')\n quotes.append(pd.read_csv(url.format(symbol)))\n\nclose_prices = np.vstack([q['close'] for q in quotes])\nopen_prices = np.vstack([q['open'] for q in quotes])\n\n# The daily variations of the quotes are what carry most information\nvariation = close_prices - open_prices\n\n\n# #############################################################################\n# Learn a graphical structure from the correlations\nedge_model = covariance.GraphicalLassoCV()\n\n# standardize the time series: using correlations rather than covariance\n# is more efficient for structure recovery\nX = variation.copy().T\nX /= X.std(axis=0)\nedge_model.fit(X)\n\n# #############################################################################\n# Cluster using affinity propagation\n\n_, labels = cluster.affinity_propagation(edge_model.covariance_)\nn_labels = labels.max()\n\nfor i in range(n_labels + 1):\n print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))\n\n# #############################################################################\n# Find a low-dimension embedding for visualization: find the best position of\n# the nodes (the stocks) on a 2D plane\n\n# We use a dense eigen_solver to achieve reproducibility (arpack is\n# initiated with random vectors that we don't control). 
In addition, we\n# use a large number of neighbors to capture the large-scale structure.\nnode_position_model = manifold.LocallyLinearEmbedding(\n n_components=2, eigen_solver='dense', n_neighbors=6)\n\nembedding = node_position_model.fit_transform(X.T).T\n\n# #############################################################################\n# Visualization\nplt.figure(1, facecolor='w', figsize=(10, 8))\nplt.clf()\nax = plt.axes([0., 0., 1., 1.])\nplt.axis('off')\n\n# Display a graph of the partial correlations\npartial_correlations = edge_model.precision_.copy()\nd = 1 / np.sqrt(np.diag(partial_correlations))\npartial_correlations *= d\npartial_correlations *= d[:, np.newaxis]\nnon_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n# Plot the nodes using the coordinates of our embedding\nplt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,\n cmap=plt.cm.nipy_spectral)\n\n# Plot the edges\nstart_idx, end_idx = np.where(non_zero)\n# a sequence of (*line0*, *line1*, *line2*), where::\n# linen = (x0, y0), (x1, y1), ... (xm, ym)\nsegments = [[embedding[:, start], embedding[:, stop]]\n for start, stop in zip(start_idx, end_idx)]\nvalues = np.abs(partial_correlations[non_zero])\nlc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, .7 * values.max()))\nlc.set_array(values)\nlc.set_linewidths(15 * values)\nax.add_collection(lc)\n\n# Add a label to each node. The challenge here is that we want to\n# position the labels to avoid overlap with other labels\nfor index, (name, label, (x, y)) in enumerate(\n zip(names, labels, embedding.T)):\n\n dx = x - embedding[0]\n dx[index] = 1\n dy = y - embedding[1]\n dy[index] = 1\n this_dx = dx[np.argmin(np.abs(dy))]\n this_dy = dy[np.argmin(np.abs(dx))]\n if this_dx > 0:\n horizontalalignment = 'left'\n x = x + .002\n else:\n horizontalalignment = 'right'\n x = x - .002\n if this_dy > 0:\n verticalalignment = 'bottom'\n y = y + .002\n else:\n verticalalignment = 'top'\n y = y - .002\n plt.text(x, y, name, size=10,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n bbox=dict(facecolor='w',\n edgecolor=plt.cm.nipy_spectral(label / float(n_labels)),\n alpha=.6))\n\nplt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n embedding[0].max() + .10 * embedding[0].ptp(),)\nplt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n embedding[1].max() + .03 * embedding[1].ptp())\n\nplt.show()\n"
] |
[
[
"numpy.diag",
"numpy.abs",
"matplotlib.pyplot.scatter",
"sklearn.manifold.LocallyLinearEmbedding",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.clf",
"sklearn.cluster.affinity_propagation",
"matplotlib.pyplot.axis",
"numpy.triu",
"matplotlib.pyplot.show",
"numpy.where",
"sklearn.covariance.GraphicalLassoCV",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lkuligin/cs231n
|
[
"b0749792a845e4ff11bda9204fb8bf7a7dcc99ea",
"b0749792a845e4ff11bda9204fb8bf7a7dcc99ea"
] |
[
"assignment3/classifiers/squeezenet.py",
"assignment1/classifiers/tests/test_knn.py"
] |
[
"import tensorflow as tf\n\nNUM_CLASSES = 1000\n\ndef fire_module(x,inp,sp,e11p,e33p):\n with tf.variable_scope(\"fire\"):\n with tf.variable_scope(\"squeeze\"):\n W = tf.get_variable(\"weights\",shape=[1,1,inp,sp])\n b = tf.get_variable(\"bias\",shape=[sp])\n s = tf.nn.conv2d(x,W,[1,1,1,1],\"VALID\")+b\n s = tf.nn.relu(s)\n with tf.variable_scope(\"e11\"):\n W = tf.get_variable(\"weights\",shape=[1,1,sp,e11p])\n b = tf.get_variable(\"bias\",shape=[e11p])\n e11 = tf.nn.conv2d(s,W,[1,1,1,1],\"VALID\")+b\n e11 = tf.nn.relu(e11)\n with tf.variable_scope(\"e33\"):\n W = tf.get_variable(\"weights\",shape=[3,3,sp,e33p])\n b = tf.get_variable(\"bias\",shape=[e33p])\n e33 = tf.nn.conv2d(s,W,[1,1,1,1],\"SAME\")+b\n e33 = tf.nn.relu(e33)\n return tf.concat([e11,e33],3)\n\n\nclass SqueezeNet(object):\n def extract_features(self, input=None, reuse=True):\n if input is None:\n input = self.image\n x = input\n layers = []\n with tf.variable_scope('features', reuse=reuse):\n with tf.variable_scope('layer0'):\n W = tf.get_variable(\"weights\",shape=[3,3,3,64])\n b = tf.get_variable(\"bias\",shape=[64])\n x = tf.nn.conv2d(x,W,[1,2,2,1],\"VALID\")\n x = tf.nn.bias_add(x,b)\n layers.append(x)\n with tf.variable_scope('layer1'):\n x = tf.nn.relu(x)\n layers.append(x)\n with tf.variable_scope('layer2'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer3'):\n x = fire_module(x,64,16,64,64)\n layers.append(x)\n with tf.variable_scope('layer4'):\n x = fire_module(x,128,16,64,64)\n layers.append(x)\n with tf.variable_scope('layer5'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer6'):\n x = fire_module(x,128,32,128,128)\n layers.append(x)\n with tf.variable_scope('layer7'):\n x = fire_module(x,256,32,128,128)\n layers.append(x)\n with tf.variable_scope('layer8'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer9'):\n x = fire_module(x,256,48,192,192)\n layers.append(x)\n with tf.variable_scope('layer10'):\n x = fire_module(x,384,48,192,192)\n layers.append(x)\n with tf.variable_scope('layer11'):\n x = fire_module(x,384,64,256,256)\n layers.append(x)\n with tf.variable_scope('layer12'):\n x = fire_module(x,512,64,256,256)\n layers.append(x)\n return layers\n\n def __init__(self, save_path=None, sess=None):\n \"\"\"Create a SqueezeNet model.\n Inputs:\n - save_path: path to TensorFlow checkpoint\n - sess: TensorFlow session\n - input: optional input to the model. 
If None, will use placeholder for input.\n \"\"\"\n self.image = tf.placeholder('float',shape=[None,None,None,3],name='input_image')\n self.labels = tf.placeholder('int32', shape=[None], name='labels')\n self.layers = []\n x = self.image\n self.layers = self.extract_features(x, reuse=False)\n self.features = self.layers[-1]\n with tf.variable_scope('classifier'):\n with tf.variable_scope('layer0'):\n x = self.features\n self.layers.append(x)\n with tf.variable_scope('layer1'):\n W = tf.get_variable(\"weights\",shape=[1,1,512,1000])\n b = tf.get_variable(\"bias\",shape=[1000])\n x = tf.nn.conv2d(x,W,[1,1,1,1],\"VALID\")\n x = tf.nn.bias_add(x,b)\n self.layers.append(x)\n with tf.variable_scope('layer2'):\n x = tf.nn.relu(x)\n self.layers.append(x)\n with tf.variable_scope('layer3'):\n x = tf.nn.avg_pool(x,[1,13,13,1],strides=[1,13,13,1],padding='VALID')\n self.layers.append(x)\n self.classifier = tf.reshape(x,[-1, NUM_CLASSES])\n\n if save_path is not None:\n saver = tf.train.Saver()\n saver = tf.train.import_meta_graph('.'.join((save_path, 'meta')))\n saver.restore(sess, save_path)\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(self.labels, NUM_CLASSES), logits=self.classifier))\n",
"import numpy as np\nimport os\nimport sys\nimport unittest\n\nsys.path.insert(0,'..')\nsys.path.insert(0,'../../../utils')\nimport knn\nimport test_utils\n\n\nclass KNearestNeighborTest(test_utils.TestCaseWithParams):\n\n def setUp(self):\n self.classifier = knn.KNearestNeighbor()\n self.classifier.train(np.array([[1,2,3,4],[2,3,4,5],[3,4,5,6]]), np.array([0,1,1]))\n\n def test_compute_distances_two_loops(self):\n dists = self.classifier.compute_distances_two_loops(self.kwargs['input'])\n np.testing.assert_allclose(dists, self.kwargs['dists'])\n\n def test_compute_distances_one_loop(self):\n dists = self.classifier.compute_distances_one_loop(self.kwargs['input'])\n np.testing.assert_allclose(dists, self.kwargs['dists'])\n\n def test_compute_distances_no_loops(self):\n dists = self.classifier.compute_distances_no_loops(self.kwargs['input'])\n np.testing.assert_allclose(dists, self.kwargs['dists'])\n\n def test_predict_labels(self):\n pred = self.classifier.predict_labels(self.kwargs['dists'], 1)\n np.testing.assert_allclose(pred, self.kwargs['pred_k1'])\n pred = self.classifier.predict_labels(self.kwargs['dists'], 2)\n np.testing.assert_allclose(pred, self.kwargs['pred_k2'])\n\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n test_case1 = {'dists': np.array([[2.,0,2], [6,4,2]]), 'input': np.array([[2,3,4,5], [4,5,6,7]]), 'pred_k1': np.array([1., 1]), 'pred_k2': np.array([0., 1])}\n suite.addTest(test_utils.TestCaseWithParams.get_suite(KNearestNeighborTest, kwargs=test_case1))\n unittest.TextTestRunner(verbosity=2).run(suite)\n"
] |
[
[
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.nn.avg_pool",
"tensorflow.one_hot",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"tensorflow.nn.conv2d"
],
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jasonZhang892/ncnn
|
[
"c2fb93b6ff99045dd76aae2d41218a15df189247"
] |
[
"tools/pnnx/tests/ncnn/test_squeezenet1_1.py"
] |
[
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torchvision.models as models\n\ndef test():\n net = models.squeezenet1_1().half().float()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 3, 224, 224)\n\n a = net(x)\n\n # export torchscript\n mod = torch.jit.trace(net, x)\n mod.save(\"test_squeezenet1_1.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../../src/pnnx test_squeezenet1_1.pt inputshape=[1,3,224,224]\")\n\n # ncnn inference\n import test_squeezenet1_1_ncnn\n b = test_squeezenet1_1_ncnn.test_inference()\n\n return torch.allclose(a, b, 1e-2, 1e-2)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n"
] |
[
[
"torch.manual_seed",
"torch.allclose",
"torch.jit.trace",
"torch.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
varshiths/scg-augmented
|
[
"f5188237feb4bc2c4f61351a862bc1b6ce1ce85f"
] |
[
"misc/corpus_freq.py"
] |
[
"'''\nTo be run from root folder as misc/corpus_freq.py\nGenerates the biases from given corpus and places in data folder\n\nuses spacy backend.\n\nto setup:\npip install -U spacy[cuda92]\npython -m spacy download en\n'''\n\nimport json\nimport numpy as np\nimport sng_parser\nimport codecs\n\nfrom tqdm import tqdm\n\nVG_SGG_DICT_FN = \"data/stanford_filtered/VG-SGG-dicts.json\"\nCAPTIONS_FILE = \"data/visgenome/captions.txt\"\nOUTPUT_FILE = \"data/captions_freq.npy\"\n\ndef load_info(info_file):\n info = json.load(open(info_file, 'r'))\n info['label_to_idx']['__background__'] = 0\n info['predicate_to_idx']['__background__'] = 0\n class_to_ind = info['label_to_idx']\n predicate_to_ind = info['predicate_to_idx']\n return class_to_ind, predicate_to_ind\n\nclass_to_ind, predicate_to_ind = load_info(VG_SGG_DICT_FN)\n\nobjs = class_to_ind.keys()\npreds = predicate_to_ind.keys()\n\ndef get_tuples(graph):\n entities = graph[\"entities\"]\n relations = graph[\"relations\"]\n tups = []\n for relation in relations:\n tups.append((\n entities[relation[\"subject\"]][\"lemma_head\"],\n relation[\"lemma_relation\"], \n entities[relation[\"object\"]][\"lemma_head\"],\n ))\n del graph\n del entities\n del relations\n return tups\n\nnum_classes = len(objs)\nnum_predicates = len(preds)\n\n# corpus is one sentence in each line\nwith codecs.open(CAPTIONS_FILE) as f:\n print(\"Reading file...\")\n caps = [ x.strip() for x in f.readlines() if x.strip() != \"\" ]\n\n# from joblib import Parallel, delayed\nimport multiprocessing\nimport math\n\ndef batch_iterate(caps, nthreads):\n nitems = len(caps)\n batch_size = math.ceil(nitems / nthreads)\n for i in range(nthreads):\n yield caps[ i*batch_size : (i+1)*batch_size ]\n\ndef myfunc(batch_caps):\n grels = np.zeros((\n num_classes,\n num_classes,\n num_predicates,\n ), dtype=np.int64)\n for i, cap in enumerate(tqdm(batch_caps)):\n # print(\"{}: {}\".format(i, cap))\n tups = get_tuples(sng_parser.parse(cap))\n for s, r, o in tups:\n if r in preds and s in objs and o in objs:\n grels[ class_to_ind[s], class_to_ind[o], predicate_to_ind[r] ] += 1\n return grels\n\ndef mygraphfunc(batch_graphs):\n grels = np.zeros((\n num_classes,\n num_classes,\n num_predicates,\n ), dtype=np.int64)\n for i, graph in enumerate(tqdm(batch_graphs)):\n # print(\"{}: {}\".format(i, cap))\n tups = get_tuples(graph)\n for s, r, o in tups:\n if r in preds and s in objs and o in objs:\n grels[ class_to_ind[s], class_to_ind[o], predicate_to_ind[r] ] += 1\n return grels\n\n\n# num_cores = multiprocessing.cpu_count()\n# # num_cores = 2\n# pool = multiprocessing.Pool(processes=num_cores)\n# results = sum(pool.map( myfunc, batch_iterate(caps, nthreads=num_cores) ))\n# results = myfunc(caps)\n\ngraphs = sng_parser.batch_parse(caps, batch_size=100000, n_threads=4)\nresults = mygraphfunc(graphs)\n\nnp.save(OUTPUT_FILE, results)\n"
] |
[
[
"numpy.zeros",
"numpy.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sillsdev/machine.py
|
[
"61c0b29e706636a2353a1afc0b3ee372db7e632f"
] |
[
"machine/translation/tensorflow/saved_model_nmt_engine.py"
] |
[
"from dataclasses import dataclass\nfrom typing import Any, Iterable, Sequence\n\nimport tensorflow as tf\n\nfrom ...annotations import Range\nfrom ...utils.typeshed import StrPath\nfrom ..translation_engine import TranslationEngine\nfrom ..translation_result import TranslationResult\nfrom ..translation_result_builder import TranslationResultBuilder\nfrom ..translation_sources import TranslationSources\nfrom ..word_alignment_matrix import WordAlignmentMatrix\n\n\n@dataclass\nclass SavedModelTranslateSignature:\n signature_key: str = \"serving_default\"\n input_tokens_key: str = \"tokens\"\n input_length_key: str = \"length\"\n input_ref_key: str = \"ref\"\n input_ref_length_key: str = \"ref_length\"\n output_tokens_key: str = \"tokens\"\n output_length_key: str = \"length\"\n output_alignment_key: str = \"alignment\"\n\n\nclass SavedModelNmtEngine(TranslationEngine):\n def __init__(\n self, model_filename: StrPath, signature: SavedModelTranslateSignature = SavedModelTranslateSignature()\n ) -> None:\n self._signature = signature\n self._model: Any = tf.saved_model.load(str(model_filename))\n self._translate_fn = self._model.signatures[signature.signature_key]\n\n def translate(self, segment: Sequence[str]) -> TranslationResult:\n return next(iter(self.translate_n(1, segment)))\n\n def translate_n(self, n: int, segment: Sequence[str]) -> Iterable[TranslationResult]:\n inputs = {\n self._signature.input_tokens_key: tf.constant([segment], dtype=tf.string),\n self._signature.input_length_key: tf.constant([len(segment)], dtype=tf.int32),\n self._signature.input_ref_key: tf.constant([[\"\"]], dtype=tf.string),\n self._signature.input_ref_length_key: tf.constant([1], dtype=tf.int32),\n }\n outputs = self._translate_fn(**inputs)\n output_tokens = outputs[self._signature.output_tokens_key]\n output_lengths = outputs[self._signature.output_length_key]\n output_alignments = outputs[self._signature.output_alignment_key]\n output_count = output_lengths.shape[0]\n i = 0\n while i < n or i < output_count:\n output_length_i = int(output_lengths[0][i].numpy())\n output_tokens_i = output_tokens[0][i][:output_length_i]\n builder = TranslationResultBuilder()\n for word in output_tokens_i.numpy():\n builder.append_word(word.decode(\"utf-8\"), TranslationSources.NMT)\n\n alignment = output_alignments[0][i]\n src_indices = tf.argmax(alignment[:output_length_i], axis=-1).numpy()\n wa_matrix = WordAlignmentMatrix.from_word_pairs(\n len(segment), output_length_i, set(zip(src_indices, range(output_length_i)))\n )\n builder.mark_phrase(Range.create(0, len(segment)), wa_matrix)\n\n yield builder.to_result(segment)\n i += 1\n"
] |
[
[
"tensorflow.argmax",
"tensorflow.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
punkdit/bruhat
|
[
"3231eacc49fd3464542f7eb72684751371d9876c"
] |
[
"bruhat/serre_example.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"\nExercise 13.4 from Serre \"Linear Representations of Finite Groups \", 1977.\n\"\"\"\n\nimport sys, os\n\nimport numpy\n\nfrom element import Linear, Z, Q\nfrom action import mulclose\n\nM = Linear(4, Q)\n\n\ndef quaternion(a, b, c, d):\n # build matrix representation of quaternion\n A = M.get([\n [a, -b, -c, -d],\n [b, a, -d, c],\n [c, d, a, -b],\n [d, -c, b, a]])\n return A\n\n\ne = quaternion(1, 0, 0, 0)\ni = quaternion(0, 1, 0, 0)\nj = quaternion(0, 0, 1, 0)\nk = quaternion(0, 0, 0, 1)\nbasis = [e, i, j, k]\n\n\ndef dot(a, b):\n a = numpy.array(a.value)\n b = numpy.array(b.value)\n c = a*b\n return c.sum()\n\n\ndef get_rep(A, left=True):\n Arep = []\n for v in basis:\n row = []\n for u in basis:\n if left:\n B = A*u # left action\n else:\n B = u*A # right action\n r = dot(B, v)\n row.append(r/4)\n Arep.append(row)\n Arep = M.get(Arep)\n return Arep\n\n\ndef test():\n\n assert i*i == -e\n assert j*j == -e\n assert k*k == -e\n for a in [i, j, k]:\n for b in [i, j, k]:\n if a!=b:\n assert a*b == -b*a\n\n assert i*j*k == -e\n\n one = Q.one\n A = (one/2)*(i+j+k-e)\n assert A!=e\n assert A**2!=e\n assert A**3==e\n\n Q_8 = mulclose([i, j, k])\n assert len(Q_8)==8\n\n # Q_8 acts by right multiplication, C_3 by left multiplication\n\n Arep = get_rep(A)\n\n Qrep = [get_rep(V, False) for V in [i, j, k]]\n for V in Qrep:\n #print(V)\n assert V*Arep == Arep*V\n\n G = mulclose(Qrep + [Arep])\n assert len(G) == 24\n\n chi = []\n G = list(G)\n G.sort(key = get_order)\n for g in G:\n print(str(get_order(g)).rjust(3), end=\" \")\n chi.append(g.trace())\n print()\n for x in chi:\n print(str(x).rjust(3), end=\" \")\n print()\n \n\ndef get_order(g):\n n = 1\n a = g\n while a*a != a: # identity\n a = a*g\n n += 1\n\n return n\n\n\nif __name__ == \"__main__\":\n\n test()\n \n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
newerZGQ/newerZGQ.github.io
|
[
"79c9e510c7011b6284022d7880687247c5745e65"
] |
[
"2017/12/26/convolution-introduction/figure.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.axis([0,4,0,2])\nplt.xlabel(\"τ\")\n\nX = np.linspace(np.pi/6, np.pi, 256, endpoint = True)\nA = 0.5*np.cos(3 * X) + 1\nplt.plot(X,A)\nplt.plot([1.5,3.5],[0.2,1])\n\nplt.text(2.2,1.5,\"f(τ)\",fontsize = 15)\nplt.text(3.5,1.1,\"h(t-τ)\",fontsize = 15)\n\nplt.show()\n"
] |
[
[
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Axeldnahcram/panel
|
[
"ac04ed1c08a993cd678fb162deb5fa567184ec40"
] |
[
"panel/widgets/slider.py"
] |
[
"\"\"\"\nDefines the Widget base class which provides bi-directional\ncommunication between the rendered dashboard and the Widget\nparameters.\n\"\"\"\nfrom six import string_types\n\nimport param\nimport numpy as np\n\nfrom bokeh.models import CustomJS\nfrom bokeh.models.formatters import TickFormatter\nfrom bokeh.models.widgets import (\n DateSlider as _BkDateSlider, DateRangeSlider as _BkDateRangeSlider,\n RangeSlider as _BkRangeSlider, Slider as _BkSlider)\n\nfrom ..config import config\nfrom ..io import state\nfrom ..util import (\n edit_readonly, param_reprs, unicode_repr, value_as_datetime, value_as_date\n)\nfrom ..viewable import Layoutable\nfrom ..layout import Column, Row\nfrom .base import Widget, CompositeWidget\nfrom .input import IntInput, FloatInput, StaticText\n\n\nclass _SliderBase(Widget):\n\n bar_color = param.Color(default=\"#e6e6e6\", doc=\"\"\"\n Color of the slider bar as a hexidecimal RGB value.\"\"\")\n\n direction = param.ObjectSelector(default='ltr', objects=['ltr', 'rtl'],\n doc=\"\"\"\n Whether the slider should go from left-to-right ('ltr') or\n right-to-left ('rtl')\"\"\")\n\n orientation = param.ObjectSelector(default='horizontal',\n objects=['horizontal', 'vertical'], doc=\"\"\"\n Whether the slider should be oriented horizontally or\n vertically.\"\"\")\n\n show_value = param.Boolean(default=True, doc=\"\"\"\n Whether to show the widget value.\"\"\")\n\n tooltips = param.Boolean(default=True, doc=\"\"\"\n Whether the slider handle should display tooltips.\"\"\")\n\n _widget_type = _BkSlider\n\n __abstract = True\n\n def __init__(self, **params):\n if 'value' in params and 'value_throttled' in self.param:\n params['value_throttled'] = params['value']\n super().__init__(**params)\n\n def __repr__(self, depth=0):\n return '{cls}({params})'.format(cls=type(self).__name__,\n params=', '.join(param_reprs(self, ['value_throttled'])))\n\n def _process_property_change(self, msg):\n if config.throttled:\n if \"value\" in msg:\n del msg[\"value\"]\n if \"value_throttled\" in msg:\n msg[\"value\"] = msg[\"value_throttled\"]\n return super()._process_property_change(msg)\n\n def _update_model(self, events, msg, root, model, doc, comm):\n if 'value_throttled' in msg:\n del msg['value_throttled']\n\n return super()._update_model(events, msg, root, model, doc, comm)\n\n\nclass ContinuousSlider(_SliderBase):\n\n format = param.ClassSelector(class_=string_types+(TickFormatter,), doc=\"\"\"\n Allows defining a custom format string or bokeh TickFormatter.\"\"\")\n\n _supports_embed = True\n\n __abstract = True\n\n def __init__(self, **params):\n if 'value' not in params:\n params['value'] = params.get('start', self.start)\n super().__init__(**params)\n\n def _get_embed_state(self, root, values=None, max_opts=3):\n ref = root.ref['id']\n w_model, parent = self._models[ref]\n _, _, doc, comm = state._views[ref]\n\n # Compute sampling\n start, end, step = w_model.start, w_model.end, w_model.step\n if values is None:\n span = end-start\n dtype = int if isinstance(step, int) else float\n if (span/step) > (max_opts-1):\n step = dtype(span/(max_opts-1))\n values = [dtype(v) for v in np.arange(start, end+step, step)]\n elif any(v < start or v > end for v in values):\n raise ValueError('Supplied embed states for %s widget outside '\n 'of valid range.' 
% type(self).__name__)\n\n # Replace model\n layout_opts = {k: v for k, v in self.param.get_param_values()\n if k in Layoutable.param and k != 'name'}\n dw = DiscreteSlider(options=values, name=self.name, **layout_opts)\n dw.link(self, value='value')\n self._models.pop(ref)\n index = parent.children.index(w_model)\n with config.set(embed=True):\n w_model = dw._get_model(doc, root, parent, comm)\n link = CustomJS(code=dw._jslink.code['value'], args={\n 'source': w_model.children[1], 'target': w_model.children[0]})\n parent.children[index] = w_model\n w_model = w_model.children[1]\n w_model.js_on_change('value', link)\n\n return (dw, w_model, values, lambda x: x.value, 'value', 'cb_obj.value')\n\n\nclass FloatSlider(ContinuousSlider):\n\n start = param.Number(default=0.0)\n\n end = param.Number(default=1.0)\n\n value = param.Number(default=0.0)\n\n value_throttled = param.Number(default=None, constant=True)\n\n step = param.Number(default=0.1)\n\n _rename = {'name': 'title'}\n\n\nclass IntSlider(ContinuousSlider):\n\n value = param.Integer(default=0)\n\n value_throttled = param.Integer(default=None, constant=True)\n\n start = param.Integer(default=0)\n\n end = param.Integer(default=1)\n\n step = param.Integer(default=1)\n\n _rename = {'name': 'title'}\n\n def _process_property_change(self, msg):\n msg = super()._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = msg['value'] if msg['value'] is None else int(msg['value'])\n if 'value_throttled' in msg:\n throttled = msg['value_throttled']\n msg['value_throttled'] = throttled if throttled is None else int(throttled)\n return msg\n\n\nclass DateSlider(_SliderBase):\n\n value = param.Date(default=None)\n\n value_throttled = param.Date(default=None, constant=True)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n _rename = {'name': 'title'}\n\n _source_transforms = {'value': None, 'value_throttled': None, 'start': None, 'end': None}\n\n _widget_type = _BkDateSlider\n\n def __init__(self, **params):\n if 'value' not in params:\n params['value'] = params.get('start', self.start)\n super().__init__(**params)\n\n def _process_property_change(self, msg):\n msg = super()._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = value_as_date(msg['value'])\n if 'value_throttled' in msg:\n msg['value_throttled'] = value_as_date(msg['value_throttled'])\n return msg\n\n\nclass DiscreteSlider(CompositeWidget, _SliderBase):\n\n options = param.ClassSelector(default=[], class_=(dict, list))\n\n value = param.Parameter()\n\n value_throttled = param.Parameter(constant=True)\n\n formatter = param.String(default='%.3g')\n\n _source_transforms = {'value': None, 'value_throttled': None, 'options': None}\n\n _rename = {'formatter': None}\n\n _supports_embed = True\n\n _text_link = \"\"\"\n var labels = {labels}\n target.text = labels[source.value]\n \"\"\"\n\n _style_params = [p for p in list(Layoutable.param) if p != 'name'] + ['orientation']\n\n def __init__(self, **params):\n self._syncing = False\n super().__init__(**params)\n if 'formatter' not in params and all(isinstance(v, (int, np.int_)) for v in self.values):\n self.formatter = '%d'\n if self.value is None and None not in self.values and self.options:\n self.value = self.values[0]\n elif self.value not in self.values:\n raise ValueError('Value %s not a valid option, '\n 'ensure that the supplied value '\n 'is one of the declared options.'\n % self.value)\n\n self._text = StaticText(margin=(5, 0, 0, 5), style={'white-space': 'nowrap'})\n self._slider = 
None\n self._composite = Column(self._text, self._slider)\n self._update_options()\n self.param.watch(self._update_options, ['options', 'formatter'])\n self.param.watch(self._update_value, 'value')\n self.param.watch(self._update_value, 'value_throttled')\n self.param.watch(self._update_style, self._style_params)\n\n def _update_options(self, *events):\n values, labels = self.values, self.labels\n if self.value not in values:\n value = 0\n self.value = values[0]\n else:\n value = values.index(self.value)\n\n self._slider = IntSlider(\n start=0, end=len(self.options)-1, value=value, tooltips=False,\n show_value=False, margin=(0, 5, 5, 5),\n orientation=self.orientation,\n _supports_embed=False\n )\n self._update_style()\n js_code = self._text_link.format(\n labels='['+', '.join([unicode_repr(l) for l in labels])+']'\n )\n self._jslink = self._slider.jslink(self._text, code={'value': js_code})\n self._slider.param.watch(self._sync_value, 'value')\n self._slider.param.watch(self._sync_value, 'value_throttled')\n self._text.value = labels[value]\n self._composite[1] = self._slider\n\n def _update_value(self, event):\n \"\"\"\n This will update the IntSlider (behind the scene)\n based on changes to the DiscreteSlider (front).\n\n _syncing options is to avoid infinite loop.\n\n event.name is either value or value_throttled.\n \"\"\"\n\n values = self.values\n if getattr(self, event.name) not in values:\n with param.edit_constant(self):\n setattr(self, event.name, values[0])\n return\n index = self.values.index(getattr(self, event.name))\n if self._syncing:\n return\n try:\n self._syncing = True\n with param.edit_constant(self._slider):\n setattr(self._slider, event.name, index)\n if event.name == 'value':\n with param.discard_events(self._text):\n self._text.value = self.labels[index]\n finally:\n self._syncing = False\n\n def _update_style(self, *events):\n style = {p: getattr(self, p) for p in self._style_params}\n margin = style.pop('margin')\n if isinstance(margin, tuple):\n if len(margin) == 2:\n t = b = margin[0]\n r = l = margin[1]\n else:\n t, r, b, l = margin\n else:\n t = r = b = l = margin\n text_margin = (t, 0, 0, l)\n slider_margin = (0, r, b, l)\n text_style = {k: v for k, v in style.items()\n if k not in ('style', 'orientation')}\n self._text.param.set_param(margin=text_margin, **text_style)\n self._slider.param.set_param(margin=slider_margin, **style)\n if self.width:\n style['width'] = self.width + l + r\n col_style = {k: v for k, v in style.items()\n if k != 'orientation'}\n self._composite.param.set_param(**col_style)\n\n def _sync_value(self, event):\n \"\"\"\n This will update the DiscreteSlider (front)\n based on changes to the IntSlider (behind the scene).\n\n _syncing options is to avoid infinite loop.\n\n event.name is either value or value_throttled.\n \"\"\"\n\n if self._syncing:\n return\n try:\n self._syncing = True\n with param.edit_constant(self):\n setattr(self, event.name, self.values[event.new])\n finally:\n self._syncing = False\n\n def _get_embed_state(self, root, values=None, max_opts=3):\n model = self._composite[1]._models[root.ref['id']][0]\n if values is None:\n values = self.values\n elif any(v not in self.values for v in values):\n raise ValueError(\"Supplieed embed states were not found \"\n \"in the %s widgets' values list.\" % type(self).__name__)\n return self, model, values, lambda x: x.value, 'value', 'cb_obj.value'\n\n @property\n def labels(self):\n title = (self.name + ': ' if self.name else '')\n if isinstance(self.options, dict):\n return 
[title + ('<b>%s</b>' % o) for o in self.options]\n else:\n return [title + ('<b>%s</b>' % (o if isinstance(o, string_types) else (self.formatter % o)))\n for o in self.options]\n @property\n def values(self):\n return list(self.options.values()) if isinstance(self.options, dict) else self.options\n\n\n\nclass _RangeSliderBase(_SliderBase):\n\n value = param.Tuple(length=2)\n\n value_start = param.Parameter(readonly=True)\n\n value_end = param.Parameter(readonly=True)\n\n __abstract = True\n\n def __init__(self, **params):\n if 'value' not in params:\n params['value'] = (params.get('start', self.start),\n params.get('end', self.end))\n params['value_start'], params['value_end'] = params['value']\n with edit_readonly(self):\n super().__init__(**params)\n\n @param.depends('value', watch=True)\n def _sync_values(self):\n vs, ve = self.value\n with edit_readonly(self):\n self.param.set_param(value_start=vs, value_end=ve)\n\n def _process_property_change(self, msg):\n msg = super()._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = tuple(msg['value'])\n if 'value_throttled' in msg:\n msg['value_throttled'] = tuple(msg['value_throttled'])\n return msg\n\n\nclass RangeSlider(_RangeSliderBase):\n\n format = param.ClassSelector(class_=string_types+(TickFormatter,), doc=\"\"\"\n Allows defining a custom format string or bokeh TickFormatter.\"\"\")\n\n value = param.Range(default=(0, 1))\n\n value_start = param.Number(default=0, readonly=True)\n\n value_end = param.Number(default=1, readonly=True)\n\n value_throttled = param.Range(default=None, constant=True)\n\n start = param.Number(default=0)\n\n end = param.Number(default=1)\n\n step = param.Number(default=0.1)\n\n _rename = {'name': 'title', 'value_start': None, 'value_end': None}\n\n _widget_type = _BkRangeSlider\n\n def __init__(self, **params):\n super().__init__(**params)\n values = [self.value[0], self.value[1], self.start, self.end]\n if (all(v is None or isinstance(v, int) for v in values) and\n 'step' not in params):\n self.step = 1\n\n\nclass IntRangeSlider(RangeSlider):\n\n start = param.Integer(default=0)\n\n end = param.Integer(default=1)\n\n step = param.Integer(default=1)\n\n def _process_property_change(self, msg):\n msg = super()._process_property_change(msg)\n if 'value' in msg:\n msg['value'] = tuple([v if v is None else int(v)\n for v in msg['value']])\n if 'value_throttled' in msg:\n msg['value_throttled'] = tuple([v if v is None else int(v)\n for v in msg['value_throttled']])\n return msg\n\n\nclass DateRangeSlider(_RangeSliderBase):\n\n value = param.Tuple(default=(None, None), length=2)\n\n value_start = param.Date(default=None, readonly=True)\n\n value_end = param.Date(default=None, readonly=True)\n\n value_throttled = param.Tuple(default=None, length=2, constant=True)\n\n start = param.Date(default=None)\n\n end = param.Date(default=None)\n\n step = param.Number(default=1)\n\n _source_transforms = {'value': None, 'value_throttled': None,\n 'start': None, 'end': None, 'step': None}\n\n _rename = {'name': 'title', 'value_start': None, 'value_end': None}\n\n _widget_type = _BkDateRangeSlider\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n if msg.get('value') == (None, None):\n del msg['value']\n if msg.get('value_throttled') == (None, None):\n del msg['value_throttled']\n return msg\n\n def _process_property_change(self, msg):\n msg = super()._process_property_change(msg)\n if 'value' in msg:\n v1, v2 = msg['value']\n msg['value'] = (value_as_datetime(v1), 
value_as_datetime(v2))\n if 'value_throttled' in msg:\n v1, v2 = msg['value_throttled']\n msg['value_throttled'] = (value_as_datetime(v1), value_as_datetime(v2))\n return msg\n\n\nclass _EditableContinuousSlider(CompositeWidget):\n \"\"\"\n The EditableFloatSlider extends the FloatSlider by adding a text\n input field to manually edit the value and potentially override\n the bounds.\n \"\"\"\n\n editable = param.Boolean(default=True, doc=\"\"\"\n Whether the value is editable via the text input.\"\"\")\n\n show_value = param.Boolean(default=False, readonly=True, precedence=-1, doc=\"\"\"\n Whether to show the widget value.\"\"\")\n\n _composite_type = Column\n _slider_widget = None\n _input_widget = None\n __abstract = True\n\n def __init__(self, **params):\n if not 'width' in params and not 'sizing_mode' in params:\n params['width'] = 300\n super().__init__(**params)\n self._label = StaticText(margin=0, align='end')\n self._slider = self._slider_widget(\n value=self.value, margin=(0, 0, 5, 0), sizing_mode='stretch_width'\n )\n self._slider.param.watch(self._sync_value, 'value')\n self._slider.param.watch(self._sync_value, 'value_throttled')\n\n self._value_edit = self._input_widget(\n margin=0, align='end', css_classes=['slider-edit']\n )\n self._value_edit.param.watch(self._sync_value, 'value')\n self._value_edit.param.watch(self._sync_value, 'value_throttled')\n self._value_edit.jscallback(args={'slider': self._slider}, value=\"\"\"\n if (cb_obj.value < slider.start)\n slider.start = cb_obj.value\n else if (cb_obj.value > slider.end)\n slider.end = cb_obj.value\n \"\"\")\n\n label = Row(self._label, self._value_edit)\n self._composite.extend([label, self._slider])\n self._update_editable()\n self._update_layout()\n self._update_name()\n self._update_slider()\n self._update_value()\n\n @param.depends('width', 'height', 'sizing_mode', watch=True)\n def _update_layout(self):\n self._value_edit.sizing_mode = self.sizing_mode\n if self.sizing_mode not in ('stretch_width', 'stretch_both'):\n w = (self.width or 300)//4\n self._value_edit.width = w\n\n @param.depends('editable', watch=True)\n def _update_editable(self):\n self._value_edit.disabled = not self.editable\n\n @param.depends('name', watch=True)\n def _update_name(self):\n if self.name:\n label = f'{self.name}:'\n margin = (0, 10, 0, 0)\n else:\n label = ''\n margin = (0, 0, 0, 0)\n self._label.param.set_param(**{'margin': margin, 'value': label})\n\n @param.depends('start', 'end', 'step', 'bar_color', 'direction',\n 'show_value', 'tooltips', 'format', watch=True)\n def _update_slider(self):\n self._slider.param.set_param(**{\n 'format': self.format,\n 'start': self.start,\n 'end': self.end,\n 'step': self.step,\n 'bar_color': self.bar_color,\n 'direction': self.direction,\n 'show_value': self.show_value,\n 'tooltips': self.tooltips\n })\n self._value_edit.step = self.step\n\n @param.depends('value', watch=True)\n def _update_value(self):\n self._slider.value = self.value\n self._value_edit.value = self.value\n\n def _sync_value(self, event):\n with param.edit_constant(self):\n self.param.set_param(**{event.name: event.new})\n\n\nclass EditableFloatSlider(_EditableContinuousSlider, FloatSlider):\n\n _slider_widget = FloatSlider\n _input_widget = FloatInput\n\n\nclass EditableIntSlider(_EditableContinuousSlider, IntSlider):\n\n _slider_widget = IntSlider\n _input_widget = IntInput\n\n\nclass EditableRangeSlider(CompositeWidget, _SliderBase):\n \"\"\"\n The EditableRangeSlider extends the RangeSlider by adding text\n input fields to 
manually edit the range and potentially override\n the bounds.\n \"\"\"\n\n editable = param.Tuple(default=(True, True), doc=\"\"\"\n Whether the lower and upper values are editable.\"\"\")\n\n end = param.Number(default=1., doc=\"Upper bound of the range.\")\n\n format = param.ClassSelector(default='0.0[0000]', class_=string_types+(TickFormatter,), doc=\"\"\"\n Allows defining a custom format string or bokeh TickFormatter.\"\"\")\n\n show_value = param.Boolean(default=False, readonly=True, precedence=-1, doc=\"\"\"\n Whether to show the widget value.\"\"\")\n\n start = param.Number(default=0., doc=\"Lower bound of the range.\")\n\n step = param.Number(default=0.1, doc=\"Slider and number input step.\")\n\n value = param.Range(default=(0, 1), doc=\"Current range value.\")\n\n value_throttled = param.Range(default=None, constant=True)\n\n _composite_type = Column\n\n def __init__(self, **params):\n if not 'width' in params and not 'sizing_mode' in params:\n params['width'] = 300\n super().__init__(**params)\n self._label = StaticText(margin=0, align='end')\n self._slider = RangeSlider(margin=(0, 0, 5, 0), show_value=False)\n self._slider.param.watch(self._sync_value, 'value')\n self._slider.param.watch(self._sync_value, 'value_throttled')\n self._start_edit = FloatInput(min_width=50, margin=0, format=self.format,\n css_classes=['slider-edit'])\n self._end_edit = FloatInput(min_width=50, margin=(0, 0, 0, 10), format=self.format,\n css_classes=['slider-edit'])\n self._start_edit.param.watch(self._sync_start_value, 'value')\n self._start_edit.param.watch(self._sync_start_value, 'value_throttled')\n self._end_edit.param.watch(self._sync_end_value, 'value')\n self._end_edit.param.watch(self._sync_end_value, 'value_throttled')\n\n sep = StaticText(value='...', margin=(0, 2, 0, 2), align='end')\n edit = Row(self._label, self._start_edit, sep, self._end_edit,\n sizing_mode='stretch_width', margin=0)\n self._composite.extend([edit, self._slider])\n self._slider.jscallback(args={'start': self._start_edit, 'end': self._end_edit}, value=\"\"\"\n let [min, max] = cb_obj.value\n start.value = min\n end.value = max\n \"\"\")\n self._start_edit.jscallback(args={'slider': self._slider}, value=\"\"\"\n if (cb_obj.value < slider.start) {\n slider.start = cb_obj.value\n } else if (cb_obj.value > slider.end) {\n slider.end = cb_obj.value\n }\n \"\"\")\n self._end_edit.jscallback(args={'slider': self._slider}, value=\"\"\"\n if (cb_obj.value < slider.start) {\n slider.start = cb_obj.value\n } else if (cb_obj.value > slider.end) {\n slider.end = cb_obj.value\n }\n \"\"\")\n self._update_editable()\n self._update_layout()\n self._update_name()\n self._update_slider()\n self._update_value()\n\n @param.depends('editable', watch=True)\n def _update_editable(self):\n self._start_edit.disabled = not self.editable[0]\n self._end_edit.disabled = not self.editable[1]\n\n @param.depends('name', watch=True)\n def _update_name(self):\n if self.name:\n label = f'{self.name}:'\n margin = (0, 10, 0, 0)\n else:\n label = ''\n margin = (0, 0, 0, 0)\n self._label.param.set_param(**{'margin': margin, 'value': label})\n\n @param.depends('width', 'height', 'sizing_mode', watch=True)\n def _update_layout(self):\n self._start_edit.sizing_mode = self.sizing_mode\n self._end_edit.sizing_mode = self.sizing_mode\n if self.sizing_mode not in ('stretch_width', 'stretch_both'):\n w = (self.width or 300)//4\n self._start_edit.width = w\n self._end_edit.width = w\n\n @param.depends('start', 'end', 'step', 'bar_color', 'direction',\n 
'show_value', 'tooltips', 'name', 'format', watch=True)\n def _update_slider(self):\n self._slider.param.set_param(**{\n 'format': self.format,\n 'start': self.start,\n 'end': self.end,\n 'step': self.step,\n 'bar_color': self.bar_color,\n 'direction': self.direction,\n 'show_value': self.show_value,\n 'tooltips': self.tooltips,\n })\n self._start_edit.step = self.step\n self._end_edit.step = self.step\n\n @param.depends('value', watch=True)\n def _update_value(self):\n self._slider.value = self.value\n self._start_edit.value = self.value[0]\n self._end_edit.value = self.value[1]\n\n def _sync_value(self, event):\n with param.edit_constant(self):\n self.param.set_param(**{event.name: event.new})\n\n def _sync_start_value(self, event):\n end = self.value[1] if event.name == 'value' else self.value_throttled[1]\n with param.edit_constant(self):\n self.param.set_param(\n **{event.name: (event.new, end)}\n )\n\n def _sync_end_value(self, event):\n start = self.value[0] if event.name == 'value' else self.value_throttled[0]\n with param.edit_constant(self):\n self.param.set_param(\n **{event.name: (start, event.new)}\n )\n"
] |
[
[
"numpy.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
napoler/bert-cn
|
[
"62c18cc03b7fa1831fccb0faa15853952dc15280"
] |
[
"bert_server/run/pregenerate_training_data.py"
] |
[
"from argparse import ArgumentParser\nfrom pathlib import Path\nfrom tqdm import tqdm, trange\nfrom tempfile import TemporaryDirectory\nimport shelve\n\nfrom random import random, randrange, randint, shuffle, choice, sample\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nimport numpy as np\nimport json\n\n\nclass DocumentDatabase:\n def __init__(self, reduce_memory=False):\n if reduce_memory:\n self.temp_dir = TemporaryDirectory()\n self.working_dir = Path(self.temp_dir.name)\n self.document_shelf_filepath = self.working_dir / 'shelf.db'\n self.document_shelf = shelve.open(str(self.document_shelf_filepath),\n flag='n', protocol=-1)\n self.documents = None\n else:\n self.documents = []\n self.document_shelf = None\n self.document_shelf_filepath = None\n self.temp_dir = None\n self.doc_lengths = []\n self.doc_cumsum = None\n self.cumsum_max = None\n self.reduce_memory = reduce_memory\n\n def add_document(self, document):\n if not document:\n return\n if self.reduce_memory:\n current_idx = len(self.doc_lengths)\n self.document_shelf[str(current_idx)] = document\n else:\n self.documents.append(document)\n self.doc_lengths.append(len(document))\n\n def _precalculate_doc_weights(self):\n self.doc_cumsum = np.cumsum(self.doc_lengths)\n self.cumsum_max = self.doc_cumsum[-1]\n\n def sample_doc(self, current_idx, sentence_weighted=True):\n # Uses the current iteration counter to ensure we don't sample the same doc twice\n if sentence_weighted:\n # With sentence weighting, we sample docs proportionally to their sentence length\n if self.doc_cumsum is None or len(self.doc_cumsum) != len(self.doc_lengths):\n self._precalculate_doc_weights()\n rand_start = self.doc_cumsum[current_idx]\n rand_end = rand_start + self.cumsum_max - self.doc_lengths[current_idx]\n sentence_index = randrange(rand_start, rand_end) % self.cumsum_max\n sampled_doc_index = np.searchsorted(self.doc_cumsum, sentence_index, side='right')\n else:\n # If we don't use sentence weighting, then every doc has an equal chance to be chosen\n sampled_doc_index = (current_idx + randrange(1, len(self.doc_lengths))) % len(self.doc_lengths)\n assert sampled_doc_index != current_idx\n if self.reduce_memory:\n return self.document_shelf[str(sampled_doc_index)]\n else:\n return self.documents[sampled_doc_index]\n\n def __len__(self):\n return len(self.doc_lengths)\n\n def __getitem__(self, item):\n if self.reduce_memory:\n return self.document_shelf[str(item)]\n else:\n return self.documents[item]\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, traceback):\n if self.document_shelf is not None:\n self.document_shelf.close()\n if self.temp_dir is not None:\n self.temp_dir.cleanup()\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):\n \"\"\"Truncates a pair of sequences to a maximum sequence length. Lifted from Google's BERT repo.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):\n \"\"\"Creates the predictions for the masked LM objective. 
This is mostly copied from the Google BERT repo, but\n with several refactors to clean it up and remove a lot of unnecessary variables.\"\"\"\n cand_indices = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n cand_indices.append(i)\n\n num_to_mask = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n shuffle(cand_indices)\n mask_indices = sorted(sample(cand_indices, num_to_mask))\n masked_token_labels = []\n for index in mask_indices:\n # 80% of the time, replace with [MASK]\n if random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = choice(vocab_list)\n masked_token_labels.append(tokens[index])\n # Once we've saved the true label for that token, we can overwrite it with the masked version\n tokens[index] = masked_token\n\n return tokens, mask_indices, masked_token_labels\n\n\ndef create_instances_from_document(\n doc_database, doc_idx, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_list):\n \"\"\"This code is mostly a duplicate of the equivalent function from Google BERT's repo.\n However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function.\n Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence\n (rather than each document) has an equal chance of being sampled as a false example for the NextSentence task.\"\"\"\n document = doc_database[doc_idx]\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if random() < short_seq_prob:\n target_seq_length = randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = randrange(1, len(current_chunk))\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n\n # Random next\n if len(current_chunk) == 1 or random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # Sample a random document, with longer docs being sampled more frequently\n random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True)\n\n random_start = randrange(0, len(random_document))\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"] + tokens_b + [\"[SEP]\"]\n # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]\n # They are 1 for the B tokens and the final [SEP]\n segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]\n\n tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)\n\n instance = {\n \"tokens\": tokens,\n \"segment_ids\": segment_ids,\n \"is_random_next\": is_random_next,\n \"masked_lm_positions\": masked_lm_positions,\n \"masked_lm_labels\": masked_lm_labels}\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--train_corpus', type=Path, required=True)\n parser.add_argument(\"--output_dir\", type=Path, required=True)\n # parser.add_argument(\"--bert_model\", type=str, required=True,\n # choices=[\"bert-base-uncased\", \"bert-large-uncased\", \"bert-base-cased\",\n # \"bert-base-multilingual\", \"bert-base-chinese\"])\n\n parser.add_argument(\"--bert_model\", type=str, required=True)\n parser.add_argument(\"--do_lower_case\", action=\"store_true\")\n\n parser.add_argument(\"--reduce_memory\", action=\"store_true\",\n help=\"Reduce memory usage for large datasets by keeping data on disc rather than in memory\")\n\n parser.add_argument(\"--epochs_to_generate\", type=int, default=3,\n help=\"Number of epochs of data to pregenerate\")\n parser.add_argument(\"--max_seq_len\", type=int, default=128)\n parser.add_argument(\"--short_seq_prob\", type=float, default=0.1,\n help=\"Probability of making a short sentence as a training example\")\n parser.add_argument(\"--masked_lm_prob\", type=float, default=0.15,\n help=\"Probability of masking each token for the LM task\")\n parser.add_argument(\"--max_predictions_per_seq\", type=int, 
default=20,\n help=\"Maximum number of tokens to mask in each sequence\")\n\n args = parser.parse_args()\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n vocab_list = list(tokenizer.vocab.keys())\n with DocumentDatabase(reduce_memory=args.reduce_memory) as docs:\n with args.train_corpus.open() as f:\n doc = []\n for line in tqdm(f, desc=\"Loading Dataset\", unit=\" lines\"):\n line = line.strip()\n if line == \"\":\n docs.add_document(doc)\n doc = []\n else:\n tokens = tokenizer.tokenize(line)\n doc.append(tokens)\n if doc:\n docs.add_document(doc) # If the last doc didn't end on a newline, make sure it still gets added\n if len(docs) <= 1:\n exit(\"ERROR: No document breaks were found in the input file! These are necessary to allow the script to \"\n \"ensure that random NextSentences are not sampled from the same document. Please add blank lines to \"\n \"indicate breaks between documents in your input file. If your dataset does not contain multiple \"\n \"documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, \"\n \"sections or paragraphs.\")\n\n args.output_dir.mkdir(exist_ok=True)\n for epoch in trange(args.epochs_to_generate, desc=\"Epoch\"):\n epoch_filename = args.output_dir / f\"epoch_{epoch}.json\"\n num_instances = 0\n with epoch_filename.open('w') as epoch_file:\n for doc_idx in trange(len(docs), desc=\"Document\"):\n doc_instances = create_instances_from_document(\n docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob,\n masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq,\n vocab_list=vocab_list)\n doc_instances = [json.dumps(instance) for instance in doc_instances]\n for instance in doc_instances:\n epoch_file.write(instance + '\\n')\n num_instances += 1\n metrics_file = args.output_dir / f\"epoch_{epoch}_metrics.json\"\n with metrics_file.open('w') as metrics_file:\n metrics = {\n \"num_training_examples\": num_instances,\n \"max_seq_len\": args.max_seq_len\n }\n metrics_file.write(json.dumps(metrics))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.cumsum",
"numpy.searchsorted"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guyongqiangx/construct
|
[
"72c80035f703d29b1dc62a3e08831d68b29a6e40",
"72c80035f703d29b1dc62a3e08831d68b29a6e40"
] |
[
"tests/test_core.py",
"construct/lib/containers.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom declarativeunittest import *\nfrom construct import *\nfrom construct.lib import *\n\n\ndef test_bytes():\n d = Bytes(4)\n common(d, b\"1234\", b\"1234\", 4)\n assert d.parse(b\"1234567890\") == b\"1234\"\n assert raises(d.parse, b\"\") == StreamError\n assert raises(d.build, b\"looooooooooooooong\") == StreamError\n assert d.build(1) == b\"\\x00\\x00\\x00\\x01\"\n assert d.build(0x01020304) == b\"\\x01\\x02\\x03\\x04\"\n\n d = Bytes(this.n)\n common(d, b\"1234\", b\"1234\", 4, n=4)\n assert d.parse(b\"1234567890\",n=4) == b\"1234\"\n assert d.build(1, n=4) == b\"\\x00\\x00\\x00\\x01\"\n assert raises(d.build, b\"\", n=4) == StreamError\n assert raises(d.build, b\"toolong\", n=4) == StreamError\n assert raises(d.sizeof) == SizeofError\n assert raises(d.sizeof, n=4) == 4\n\ndef test_greedybytes():\n common(GreedyBytes, b\"1234\", b\"1234\", SizeofError)\n\ndef test_bitwise():\n common(Bitwise(Bytes(8)), b\"\\xff\", b\"\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\", 1)\n common(Bitwise(Array(8,Bit)), b\"\\xff\", [1,1,1,1,1,1,1,1], 1)\n common(Bitwise(Array(2,Nibble)), b\"\\xff\", [15,15], 1)\n common(Bitwise(Array(1,Octet)), b\"\\xff\", [255], 1)\n\n common(Bitwise(GreedyBytes), bytes(10), bytes(80), SizeofError)\n\ndef test_bytewise():\n common(Bitwise(Bytewise(Bytes(1))), b\"\\xff\", b\"\\xff\", 1)\n common(BitStruct(\"p1\"/Nibble, \"num\"/Bytewise(Int24ub), \"p2\"/Nibble), b\"\\xf0\\x10\\x20\\x3f\", Container(p1=15, num=0x010203, p2=15), 4)\n common(Bitwise(Sequence(Nibble, Bytewise(Int24ub), Nibble)), b\"\\xf0\\x10\\x20\\x3f\", [0x0f,0x010203,0x0f], 4)\n\n common(Bitwise(Bytewise(GreedyBytes)), bytes(10), bytes(10), SizeofError)\n\ndef test_ints():\n common(Byte, b\"\\xff\", 255, 1)\n common(Short, b\"\\x00\\xff\", 255, 2)\n common(Int, b\"\\x00\\x00\\x00\\xff\", 255, 4)\n common(Long, b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\", 255, 8)\n\n common(Int8ub, b\"\\x01\", 0x01, 1)\n common(Int16ub, b\"\\x01\\x02\", 0x0102, 2)\n common(Int32ub, b\"\\x01\\x02\\x03\\x04\", 0x01020304, 4)\n common(Int64ub, b\"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\", 0x0102030405060708, 8)\n\n common(Int8sb, b\"\\x01\", 0x01, 1)\n common(Int16sb, b\"\\x01\\x02\", 0x0102, 2)\n common(Int32sb, b\"\\x01\\x02\\x03\\x04\", 0x01020304, 4)\n common(Int64sb, b\"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\", 0x0102030405060708, 8)\n common(Int8sb, b\"\\xff\", -1, 1)\n common(Int16sb, b\"\\xff\\xff\", -1, 2)\n common(Int32sb, b\"\\xff\\xff\\xff\\xff\", -1, 4)\n common(Int64sb, b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\", -1, 8)\n\n common(Int8ul, b\"\\x01\", 0x01, 1)\n common(Int16ul, b\"\\x01\\x02\", 0x0201, 2)\n common(Int32ul, b\"\\x01\\x02\\x03\\x04\", 0x04030201, 4)\n common(Int64ul, b\"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\", 0x0807060504030201, 8)\n\n common(Int8sl, b\"\\x01\", 0x01, 1)\n common(Int16sl, b\"\\x01\\x02\", 0x0201, 2)\n common(Int32sl, b\"\\x01\\x02\\x03\\x04\", 0x04030201, 4)\n common(Int64sl, b\"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\", 0x0807060504030201, 8)\n common(Int8sl, b\"\\xff\", -1, 1)\n common(Int16sl, b\"\\xff\\xff\", -1, 2)\n common(Int32sl, b\"\\xff\\xff\\xff\\xff\", -1, 4)\n common(Int64sl, b\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\", -1, 8)\n\ndef test_ints24():\n common(Int24ub, b\"\\x01\\x02\\x03\", 0x010203, 3)\n common(Int24ul, b\"\\x01\\x02\\x03\", 0x030201, 3)\n common(Int24sb, b\"\\xff\\xff\\xff\", -1, 3)\n common(Int24sl, b\"\\xff\\xff\\xff\", -1, 3)\n\n@xfail(not supportshalffloats, reason=\"Half-precision floats were introduced in 
3.6\")\ndef test_halffloats():\n common(Half, b\"\\x00\\x00\", 0., 2)\n common(Half, b\"\\x35\\x55\", 0.333251953125, 2)\n\ndef test_floats():\n common(Single, b\"\\x00\\x00\\x00\\x00\", 0., 4)\n common(Single, b\"?\\x99\\x99\\x9a\", 1.2000000476837158, 4)\n common(Double, b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\", 0., 8)\n common(Double, b\"?\\xf3333333\", 1.2, 8)\n\ndef test_formatfield():\n d = FormatField(\"<\",\"L\")\n common(d, b\"\\x01\\x02\\x03\\x04\", 0x04030201, 4)\n assert raises(d.parse, b\"\") == StreamError\n assert raises(d.parse, b\"\\x01\\x02\") == StreamError\n assert raises(d.build, 2**100) == FormatFieldError\n assert raises(d.build, 1e9999) == FormatFieldError\n assert raises(d.build, \"string not int\") == FormatFieldError\n\ndef test_formatfield_ints_randomized():\n for endianess,dtype in itertools.product(\"<>=\",\"bhlqBHLQ\"):\n d = FormatField(endianess, dtype)\n for i in range(100):\n obj = random.randrange(0, 256**d.sizeof()//2)\n assert d.parse(d.build(obj)) == obj\n data = os.urandom(d.sizeof())\n assert d.build(d.parse(data)) == data\n\ndef test_formatfield_floats_randomized():\n # there is a roundoff error because Python float is a C double\n # http://stackoverflow.com/questions/39619636/struct-unpackstruct-packfloat-has-roundoff-error\n # and analog although that was misplaced\n # http://stackoverflow.com/questions/39676482/struct-packstruct-unpackfloat-is-inconsistent-on-py3\n for endianess,dtype in itertools.product(\"<>=\",\"fd\"):\n d = FormatField(endianess, dtype)\n for i in range(100):\n x = random.random()*12345\n if dtype == \"d\":\n assert d.parse(d.build(x)) == x\n else:\n assert abs(d.parse(d.build(x)) - x) < 1e-3\n for i in range(100):\n b = os.urandom(d.sizeof())\n if not math.isnan(d.parse(b)):\n assert d.build(d.parse(b)) == b\n\ndef test_bytesinteger():\n d = BytesInteger(4, signed=True, swapped=False)\n common(d, b\"\\x01\\x02\\x03\\x04\", 0x01020304, 4)\n common(d, b\"\\xff\\xff\\xff\\xff\", -1, 4)\n assert raises(BytesInteger(this.missing).sizeof) == SizeofError\n assert raises(BytesInteger(4, signed=False).build, -1) == IntegerError\n common(BytesInteger(0), b\"\", 0, 0)\n\ndef test_bitsinteger():\n d = BitsInteger(8)\n common(d, b\"\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\", 255, 8)\n d = BitsInteger(8, signed=True)\n common(d, b\"\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\", -1, 8)\n d = BitsInteger(16, swapped=True)\n common(d, b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\", 0xff00, 16)\n assert raises(BitsInteger(this.missing).sizeof) == SizeofError\n assert raises(BitsInteger(8, signed=False).build, -1) == IntegerError\n common(BitsInteger(0), b\"\", 0, 0)\n\ndef test_varint():\n common(VarInt, b\"\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x80\\x10\", 2**123, SizeofError)\n for n in [0,1,5,100,255,256,65535,65536,2**32,2**100]:\n assert VarInt.parse(VarInt.build(n)) == n\n for n in range(0, 127):\n common(VarInt, int2byte(n), n, SizeofError)\n\n assert raises(VarInt.parse, b\"\") == StreamError\n assert raises(VarInt.build, -1) == IntegerError\n\ndef test_varint_issue_705():\n d = Struct('namelen' / VarInt, 'name' / Bytes(this.namelen))\n d.build(Container(namelen = 400, name = bytes(400)))\n\ndef test_paddedstring():\n common(PaddedString(10, \"utf8\"), b\"hello\\x00\\x00\\x00\\x00\\x00\", u\"hello\", 10)\n\n d = PaddedString(100, \"ascii\")\n assert d.parse(b\"X\"*100) == u\"X\"*100\n assert d.build(u\"X\"*100) == b\"X\"*100\n assert raises(d.build, 
u\"X\"*200) == PaddingError\n\n for e,us in [(\"utf8\",1),(\"utf16\",2),(\"utf_16_le\",2),(\"utf32\",4),(\"utf_32_le\",4)]:\n s = u\"Афон\"\n data = (s.encode(e)+bytes(100))[:100]\n common(PaddedString(100, e), data, s, 100)\n s = u\"\"\n data = bytes(100)\n common(PaddedString(100, e), data, s, 100)\n\n for e in [\"ascii\",\"utf8\",\"utf16\",\"utf-16-le\",\"utf32\",\"utf-32-le\"]:\n PaddedString(10, e).sizeof() == 10\n PaddedString(this.n, e).sizeof(n=10) == 10\n\ndef test_pascalstring():\n for e,us in [(\"utf8\",1),(\"utf16\",2),(\"utf_16_le\",2),(\"utf32\",4),(\"utf_32_le\",4)]:\n for sc in [Byte, Int16ub, Int16ul, VarInt]:\n s = u\"Афон\"\n data = sc.build(len(s.encode(e))) + s.encode(e)\n common(PascalString(sc, e), data, s)\n common(PascalString(sc, e), sc.build(0), u\"\")\n\n for e in [\"utf8\",\"utf16\",\"utf-16-le\",\"utf32\",\"utf-32-le\",\"ascii\"]:\n raises(PascalString(Byte, e).sizeof) == SizeofError\n raises(PascalString(VarInt, e).sizeof) == SizeofError\n\ndef test_cstring():\n for e,us in [(\"utf8\",1),(\"utf16\",2),(\"utf_16_le\",2),(\"utf32\",4),(\"utf_32_le\",4)]:\n s = u\"Афон\"\n common(CString(e), s.encode(e)+bytes(us), s)\n common(CString(e), bytes(us), u\"\")\n\n CString(\"utf8\").build(s) == b'\\xd0\\x90\\xd1\\x84\\xd0\\xbe\\xd0\\xbd'+b\"\\x00\"\n CString(\"utf16\").build(s) == b'\\xff\\xfe\\x10\\x04D\\x04>\\x04=\\x04'+b\"\\x00\\x00\"\n CString(\"utf32\").build(s) == b'\\xff\\xfe\\x00\\x00\\x10\\x04\\x00\\x00D\\x04\\x00\\x00>\\x04\\x00\\x00=\\x04\\x00\\x00'+b\"\\x00\\x00\\x00\\x00\"\n\n for e in [\"utf8\",\"utf16\",\"utf-16-le\",\"utf32\",\"utf-32-le\",\"ascii\"]:\n raises(CString(e).sizeof) == SizeofError\n\ndef test_greedystring():\n for e,us in [(\"utf8\",1),(\"utf16\",2),(\"utf_16_le\",2),(\"utf32\",4),(\"utf_32_le\",4)]:\n s = u\"Афон\"\n common(GreedyString(e), s.encode(e), s)\n common(GreedyString(e), b\"\", u\"\")\n\n for e in [\"utf8\",\"utf16\",\"utf-16-le\",\"utf32\",\"utf-32-le\",\"ascii\"]:\n raises(GreedyString(e).sizeof) == SizeofError\n\ndef test_string_encodings():\n # checks that \"-\" is replaced with \"_\"\n common(GreedyString(\"utf-8\"), b\"\", u\"\")\n common(GreedyString(\"utf-8\"), b'\\xd0\\x90\\xd1\\x84\\xd0\\xbe\\xd0\\xbd', u\"Афон\")\n\ndef test_flag():\n common(Flag, b\"\\x00\", False, 1)\n common(Flag, b\"\\x01\", True, 1)\n Flag.parse(b\"\\xff\") == True\n\ndef test_enum():\n d = Enum(Byte, one=1, two=2, four=4, eight=8)\n common(d, b\"\\x01\", \"one\", 1)\n common(d, b\"\\xff\", 255, 1)\n assert d.parse(b\"\\x01\") == d.one\n assert d.parse(b\"\\x01\") == \"one\"\n assert int(d.parse(b\"\\x01\")) == 1\n assert d.parse(b\"\\xff\") == 255\n assert int(d.parse(b\"\\xff\")) == 255\n assert d.build(8) == b'\\x08'\n assert d.build(255) == b\"\\xff\"\n assert d.build(d.eight) == b'\\x08'\n assert d.one == \"one\"\n assert int(d.one) == 1\n assert raises(d.build, \"unknown\") == MappingError\n assert raises(lambda: d.missing) == AttributeError\n\n@xfail(not supportsintenum, raises=AttributeError, reason=\"IntEnum introduced in 3.4, IntFlag introduced in 3.6\")\ndef test_enum_enum34():\n import enum\n class E(enum.IntEnum):\n a = 1\n class F(enum.IntEnum):\n b = 2\n common(Enum(Byte, E, F), b\"\\x01\", \"a\", 1)\n common(Enum(Byte, E, F), b\"\\x02\", \"b\", 1)\n\n@xfail(not supportsintflag, raises=AttributeError, reason=\"IntEnum introduced in 3.4, IntFlag introduced in 3.6\")\ndef test_enum_enum36():\n import enum\n class E(enum.IntEnum):\n a = 1\n class F(enum.IntFlag):\n b = 2\n common(Enum(Byte, E, F), b\"\\x01\", \"a\", 1)\n 
common(Enum(Byte, E, F), b\"\\x02\", \"b\", 1)\n\ndef test_enum_issue_298():\n st = Struct(\n \"ctrl\" / Enum(Byte,\n NAK = 0x15,\n STX = 0x02,\n ),\n Probe(),\n \"optional\" / If(this.ctrl == \"NAK\", Byte),\n )\n common(st, b\"\\x15\\xff\", Container(ctrl='NAK')(optional=255))\n common(st, b\"\\x02\", Container(ctrl='STX')(optional=None))\n\n # FlagsEnum is not affected by same bug\n st = Struct(\n \"flags\" / FlagsEnum(Byte, a=1),\n Check(lambda ctx: ctx.flags == Container(_flagsenum=True)(a=1)),\n )\n common(st, b\"\\x01\", dict(flags=Container(_flagsenum=True)(a=True)), 1)\n\n # Flag is not affected by same bug\n st = Struct(\n \"flag\" / Flag,\n Check(lambda ctx: ctx.flag == True),\n )\n common(st, b\"\\x01\", dict(flag=True), 1)\n\ndef test_enum_issue_677():\n d = Enum(Byte, one=1)\n common(d, b\"\\xff\", 255, 1)\n common(d, b\"\\x01\", EnumIntegerString.new(1, \"one\"), 1)\n assert isinstance(d.parse(b\"\\x01\"), EnumIntegerString)\n d = Enum(Byte, one=1).compile()\n common(d, b\"\\xff\", 255, 1)\n common(d, b\"\\x01\", EnumIntegerString.new(1, \"one\"), 1)\n assert isinstance(d.parse(b\"\\x01\"), EnumIntegerString)\n\n d = Struct(\"e\" / Enum(Byte, one=1))\n assert str(d.parse(b\"\\x01\")) == 'Container: \\n e = (enum) one 1'\n assert str(d.parse(b\"\\xff\")) == 'Container: \\n e = (enum) (unknown) 255'\n d = Struct(\"e\" / Enum(Byte, one=1)).compile()\n assert str(d.parse(b\"\\x01\")) == 'Container: \\n e = (enum) one 1'\n assert str(d.parse(b\"\\xff\")) == 'Container: \\n e = (enum) (unknown) 255'\n\ndef test_flagsenum():\n d = FlagsEnum(Byte, one=1, two=2, four=4, eight=8)\n common(d, b\"\\x03\", Container(_flagsenum=True)(one=True)(two=True)(four=False)(eight=False), 1)\n assert d.build({}) == b'\\x00'\n assert d.build(dict(one=True,two=True)) == b'\\x03'\n assert d.build(8) == b'\\x08'\n assert d.build(1|2) == b'\\x03'\n assert d.build(255) == b\"\\xff\"\n assert d.build(d.eight) == b'\\x08'\n assert d.build(d.one|d.two) == b'\\x03'\n assert raises(d.build, dict(unknown=True)) == MappingError\n assert raises(d.build, \"unknown\") == MappingError\n assert d.one == \"one\"\n assert d.one|d.two == \"one|two\"\n assert raises(lambda: d.missing) == AttributeError\n\n@xfail(not supportsintenum, raises=AttributeError, reason=\"IntEnum introduced in 3.4, IntFlag introduced in 3.6\")\ndef test_flagsenum_enum34():\n import enum\n class E(enum.IntEnum):\n a = 1\n class F(enum.IntEnum):\n b = 2\n common(FlagsEnum(Byte, E, F), b\"\\x01\", Container(_flagsenum=True)(a=True,b=False), 1)\n common(FlagsEnum(Byte, E, F), b\"\\x02\", Container(_flagsenum=True)(a=False,b=True), 1)\n common(FlagsEnum(Byte, E, F), b\"\\x03\", Container(_flagsenum=True)(a=True,b=True), 1)\n\n@xfail(not supportsintflag, raises=AttributeError, reason=\"IntEnum introduced in 3.4, IntFlag introduced in 3.6\")\ndef test_flagsenum_enum36():\n import enum\n class E(enum.IntEnum):\n a = 1\n class F(enum.IntFlag):\n b = 2\n common(FlagsEnum(Byte, E, F), b\"\\x01\", Container(_flagsenum=True)(a=True,b=False), 1)\n common(FlagsEnum(Byte, E, F), b\"\\x02\", Container(_flagsenum=True)(a=False,b=True), 1)\n common(FlagsEnum(Byte, E, F), b\"\\x03\", Container(_flagsenum=True)(a=True,b=True), 1)\n\ndef test_mapping():\n x = object\n d = Mapping(Byte, {x:0})\n common(d, b\"\\x00\", x, 1)\n\ndef test_struct():\n common(Struct(), b\"\", Container(), 0)\n common(Struct(\"a\"/Int16ub, \"b\"/Int8ub), b\"\\x00\\x01\\x02\", Container(a=1,b=2), 3)\n common(Struct(\"a\"/Struct(\"b\"/Byte)), b\"\\x01\", Container(a=Container(b=1)), 1)\n 
common(Struct(Const(b\"\\x00\"), Padding(1), Pass, Terminated), bytes(2), {}, SizeofError)\n assert raises(Struct(\"missingkey\"/Byte).build, {}) == KeyError\n assert raises(Struct(Bytes(this.missing)).sizeof) == SizeofError\n d = Struct(Computed(7), Const(b\"JPEG\"), Pass, Terminated)\n assert d.build(None) == d.build({})\n\ndef test_struct_nested_embedded():\n d = Struct(\"a\"/Byte, \"b\"/Int16ub, \"inner\"/Struct(\"c\"/Byte, \"d\"/Byte))\n common(d, b\"\\x01\\x00\\x02\\x03\\x04\", Container(a=1,b=2,inner=Container(c=3,d=4)), 5)\n d = Struct(\"a\"/Byte, \"b\"/Int16ub, Embedded(\"inner\"/Struct(\"c\"/Byte, \"d\"/Byte)))\n common(d, b\"\\x01\\x00\\x02\\x03\\x04\", Container(a=1,b=2,c=3,d=4), 5)\n\n@xfail(not supportskwordered, reason=\"ordered kw was introduced in 3.6\")\ndef test_struct_kwctor():\n d = Struct(a=Byte, b=Byte, c=Byte, d=Byte)\n common(d, b\"\\x01\\x02\\x03\\x04\", Container(a=1,b=2,c=3,d=4), 4)\n\ndef test_struct_proper_context():\n # adjusted to support new embedding semantics\n d1 = Struct(\n \"x\"/Byte,\n \"inner\"/Struct(\n \"y\"/Byte,\n \"a\"/Computed(this._.x+1),\n \"b\"/Computed(this.y+2),\n ),\n \"c\"/Computed(this.x+3),\n \"d\"/Computed(this.inner.y+4),\n )\n d2 = Struct(\n \"x\"/Byte,\n \"inner\"/Embedded(Struct(\n \"y\"/Byte,\n \"a\"/Computed(this.x+1), # important\n \"b\"/Computed(this.y+2), # important\n )),\n \"c\"/Computed(this.x+3),\n \"d\"/Computed(this.y+4),\n )\n assert d1.parse(b\"\\x01\\x0f\") == Container(x=1)(inner=Container(y=15)(a=2)(b=17))(c=4)(d=19)\n assert d2.parse(b\"\\x01\\x0f\") == Container(x=1)(y=15)(a=2)(b=17)(c=4)(d=19)\n\ndef test_struct_sizeof_context_nesting():\n st = Struct(\n \"a\" / Computed(1),\n \"inner\" / Struct(\n \"b\" / Computed(2),\n Check(this._.a == 1),\n Check(this.b == 2),\n ),\n Check(this.a == 1),\n Check(this.inner.b == 2),\n )\n st.sizeof()\n\ndef test_struct_issue_566():\n inner = Struct(\n Embedded(Struct(\n \"b\" / Byte,\n )),\n \"c\" / If(this._.a > 0, Byte),\n )\n outer = Struct(\n \"a\" / Byte,\n \"inner\" / inner,\n )\n outer.parse(b'\\x01\\x02\\x03') == Container(a=1)(inner=Container(b=2)(c=3))\n outer.build(Container(a=1)(inner=Container(b=2)(c=3))) == b'\\x01\\x02\\x80\\x03\\x04'\n\ndef test_sequence():\n common(Sequence(), b\"\", [], 0)\n common(Sequence(Int8ub, Int16ub), b\"\\x01\\x00\\x02\", [1,2], 3)\n common(Int8ub >> Int16ub, b\"\\x01\\x00\\x02\", [1,2], 3)\n d = Sequence(Computed(7), Const(b\"JPEG\"), Pass, Terminated)\n assert d.build(None) == d.build([None,None,None,None])\n\ndef test_sequence_nested_embedded():\n common(Sequence(Int8ub, Int16ub, Sequence(Int8ub, Int8ub)), b\"\\x01\\x00\\x02\\x03\\x04\", [1,2,[3,4]], 5)\n common(Sequence(Int8ub, Int16ub, Embedded(Sequence(Int8ub, Int8ub))), b\"\\x01\\x00\\x02\\x03\\x04\", [1,2,3,4], 5)\n\ndef test_array():\n common(Byte[0], b\"\", [], 0)\n common(Byte[4], b\"1234\", [49,50,51,52], 4)\n\n d = Array(3, Byte)\n common(d, b\"\\x01\\x02\\x03\", [1,2,3], 3)\n assert d.parse(b\"\\x01\\x02\\x03additionalgarbage\") == [1,2,3]\n assert raises(d.parse, b\"\") == StreamError\n assert raises(d.build, [1,2]) == RangeError\n assert raises(d.build, [1,2,3,4,5,6,7,8]) == RangeError\n\n d = Array(this.n, Byte)\n common(d, b\"\\x01\\x02\\x03\", [1,2,3], 3, n=3)\n assert d.parse(b\"\\x01\\x02\\x03\", n=3) == [1,2,3]\n assert d.parse(b\"\\x01\\x02\\x03additionalgarbage\", n=3) == [1,2,3]\n assert raises(d.parse, b\"\", n=3) == StreamError\n assert raises(d.build, [1,2], n=3) == RangeError\n assert raises(d.build, [1,2,3,4,5,6,7,8], n=3) == RangeError\n assert 
raises(d.sizeof) == SizeofError\n assert raises(d.sizeof, n=3) == 3\n\ndef test_array_nontellable():\n assert Array(5, Byte).parse_stream(devzero) == [0,0,0,0,0]\n\ndef test_greedyrange():\n common(GreedyRange(Byte), b\"\", [], SizeofError)\n common(GreedyRange(Byte), b\"\\x01\\x02\", [1,2], SizeofError)\n assert GreedyRange(Byte, discard=False).parse(b\"\\x01\\x02\") == [1,2]\n assert GreedyRange(Byte, discard=True).parse(b\"\\x01\\x02\") == []\n\ndef test_repeatuntil():\n d = RepeatUntil(obj_ == 9, Byte)\n common(d, b\"\\x02\\x03\\x09\", [2,3,9], SizeofError)\n assert d.parse(b\"\\x02\\x03\\x09additionalgarbage\") == [2,3,9]\n assert raises(d.parse, b\"\\x02\\x03\\x08\") == StreamError\n assert raises(d.build, [2,3,8]) == RepeatError\n\n d = RepeatUntil(lambda x,lst,ctx: lst[-2:] == [0,0], Byte)\n # d = RepeatUntil(lst_[-2:] == [0,0], Byte)\n assert d.parse(b\"\\x01\\x00\\x00\\xff\") == [1,0,0]\n assert d.build([1,0,0,4]) == b\"\\x01\\x00\\x00\"\n d = RepeatUntil(True, Byte)\n assert d.parse(b\"\\x00\") == [0]\n assert d.build([0]) == b\"\\x00\"\n\ndef test_const():\n common(Const(b\"MZ\"), b\"MZ\", b\"MZ\", 2)\n common(Const(b\"MZ\", Bytes(2)), b\"MZ\", b\"MZ\", 2)\n common(Const(255, Int32ul), b\"\\xff\\x00\\x00\\x00\", 255, 4)\n assert raises(Const(b\"MZ\").parse, b\"???\") == ConstError\n assert raises(Const(b\"MZ\").build, b\"???\") == ConstError\n assert raises(Const(255, Int32ul).parse, b\"\\x00\\x00\\x00\\x00\") == ConstError\n assert Struct(Const(b\"MZ\")).build({}) == b\"MZ\"\n # non-prefixed string literals are unicode on Python 3\n assert raises(lambda: Const(u\"no prefix string\")) == StringError\n\ndef test_computed():\n common(Computed(255), b\"\", 255, 0)\n common(Computed(lambda ctx: 255), b\"\", 255, 0)\n assert Computed(255).build(None) == b\"\"\n assert Struct(Computed(255)).build({}) == b\"\"\n assert raises(Computed(this.missing).parse, b\"\") == KeyError\n assert raises(Computed(this[\"missing\"]).parse, b\"\") == KeyError\n\ndef test_index():\n d = Array(3, Bytes(this._index+1))\n common(d, b\"abbccc\", [b\"a\", b\"bb\", b\"ccc\"])\n d = GreedyRange(Bytes(this._index+1))\n common(d, b\"abbccc\", [b\"a\", b\"bb\", b\"ccc\"])\n d = RepeatUntil(lambda o,l,ctx: ctx._index == 2, Bytes(this._index+1))\n common(d, b\"abbccc\", [b\"a\", b\"bb\", b\"ccc\"])\n\n d = Array(3, Struct(\"i\" / Index))\n common(d, b\"\", [Container(i=0),Container(i=1),Container(i=2)], 0)\n d = GreedyRange(Struct(\"i\" / Index, \"d\" / Bytes(this.i+1)))\n common(d, b\"abbccc\", [Container(i=0,d=b\"a\"),Container(i=1,d=b\"bb\"),Container(i=2,d=b\"ccc\")])\n d = RepeatUntil(lambda o,l,ctx: ctx._index == 2, Index)\n common(d, b\"\", [0,1,2])\n\ndef test_rebuild():\n d = Struct(\n \"count\" / Rebuild(Byte, len_(this.items)),\n \"items\"/Byte[this.count],\n )\n assert d.parse(b\"\\x02ab\") == Container(count=2)(items=[97,98])\n assert d.build(dict(count=None,items=[255])) == b\"\\x01\\xff\"\n assert d.build(dict(count=-1,items=[255])) == b\"\\x01\\xff\"\n assert d.build(dict(items=[255])) == b\"\\x01\\xff\"\n\ndef test_rebuild_issue_664():\n d = Struct(\n \"bytes\" / Bytes(1),\n Check(this.bytes == b\"\\x00\"),\n \"bytesinteger\" / BytesInteger(4),\n Check(this.bytesinteger == 255),\n \"pascalstring\" / PascalString(Byte, \"utf8\"),\n Check(this.pascalstring == u\"text\"),\n \"enum\" / Enum(Byte, label=255),\n Check(this.enum == \"label\"),\n \"flagsenum\" / FlagsEnum(Byte, label=255),\n Check(lambda this: this.flagsenum == Container(label=True)),\n \"upfield\" / Computed(200),\n \"nestedstruct\" / 
Struct(\n \"nestedfield\" / Computed(255),\n Check(this._.upfield == 200),\n Check(this.nestedfield == 255),\n ),\n Check(this.upfield == 200),\n Check(this.nestedstruct.nestedfield == 255),\n \"sequence\" / Sequence(Computed(1), Computed(2), Computed(3), Computed(4)),\n Check(this.sequence == [1,2,3,4]),\n \"array\" / Array(4, Byte),\n Check(this.array == [1,2,3,4]),\n \"greedyrange\" / GreedyRange(Byte),\n Check(this.greedyrange == [1,2,3,4]),\n \"repeatuntil\" / RepeatUntil(obj_ == 4, Byte),\n Check(this.repeatuntil == [1,2,3,4]),\n # Timestamp\n # Union\n # IfThenElse\n )\n obj = Container(\n bytes = 0,\n bytesinteger = 255,\n pascalstring = u\"text\",\n enum = \"label\",\n flagsenum = dict(label=True),\n # nestedstruct = dict(),\n # sequence = [1,2,3,4],\n array = [1,2,3,4],\n greedyrange = [1,2,3,4],\n repeatuntil = [1,2,3,4],\n )\n d.build(obj)\n\ndef test_default():\n d = Default(Byte, 0)\n common(d, b\"\\xff\", 255, 1)\n d.build(None) == b\"\\x00\"\n\ndef test_check():\n common(Check(True), b\"\", None, 0)\n common(Check(this.x == 255), b\"\", None, 0, x=255)\n common(Check(len_(this.a) == 3), b\"\", None, 0, a=[1,2,3])\n assert raises(Check(False).parse, b\"\") == CheckError\n assert raises(Check(this.x == 255).parse, b\"\", x=0) == CheckError\n assert raises(Check(len_(this.a) == 3).parse, b\"\", a=[]) == CheckError\n\ndef test_error():\n assert raises(Error.parse, b\"\") == ExplicitError\n assert raises(Error.build, None) == ExplicitError\n assert (\"x\"/Int8sb >> IfThenElse(this.x > 0, Int8sb, Error)).parse(b\"\\x01\\x05\") == [1,5]\n assert raises((\"x\"/Int8sb >> IfThenElse(this.x > 0, Int8sb, Error)).parse, b\"\\xff\\x05\") == ExplicitError\n\ndef test_focusedseq():\n common(FocusedSeq(\"num\", Const(b\"MZ\"), \"num\"/Byte, Terminated), b\"MZ\\xff\", 255, SizeofError)\n common(FocusedSeq(this._.s, Const(b\"MZ\"), \"num\"/Byte, Terminated), b\"MZ\\xff\", 255, SizeofError, s=\"num\")\n\n assert raises(FocusedSeq(\"missing\", Pass).parse, b\"\") == UnboundLocalError\n assert raises(FocusedSeq(\"missing\", Pass).build, {}) == UnboundLocalError\n assert raises(FocusedSeq(\"missing\", Pass).sizeof) == 0\n assert raises(FocusedSeq(this.missing, Pass).parse, b\"\") == KeyError\n assert raises(FocusedSeq(this.missing, Pass).build, {}) == KeyError\n assert raises(FocusedSeq(this.missing, Pass).sizeof) == 0\n\ndef test_pickled():\n import pickle\n obj = [(), 1, 2.3, {}, [], bytes(1), \"\"]\n data = pickle.dumps(obj)\n common(Pickled, data, obj)\n\n@xfail(not supportsnumpy, raises=ImportError, reason=\"numpy not installed?\")\ndef test_numpy():\n import numpy\n obj = numpy.array([1,2,3], dtype=numpy.int64)\n assert numpy.array_equal(Numpy.parse(Numpy.build(obj)), obj)\n\ndef test_namedtuple():\n coord = collections.namedtuple(\"coord\", \"x y z\")\n d = NamedTuple(\"coord\", \"x y z\", Array(3, Byte))\n common(d, b\"123\", coord(49,50,51), 3)\n d = NamedTuple(\"coord\", \"x y z\", GreedyRange(Byte))\n common(d, b\"123\", coord(49,50,51), SizeofError)\n d = NamedTuple(\"coord\", \"x y z\", Struct(\"x\"/Byte, \"y\"/Byte, \"z\"/Byte))\n common(d, b\"123\", coord(49,50,51), 3)\n d = NamedTuple(\"coord\", \"x y z\", Sequence(Byte, Byte, Byte))\n common(d, b\"123\", coord(49,50,51), 3)\n\n assert raises(lambda: NamedTuple(\"coord\", \"x y z\", BitStruct(\"x\"/Byte, \"y\"/Byte, \"z\"/Byte))) == NamedTupleError\n\ndef test_timestamp():\n import arrow\n d = Timestamp(Int64ub, 1, 1970)\n common(d, b'\\x00\\x00\\x00\\x00ZIz\\x00', arrow.Arrow(2018,1,1), 8)\n d = Timestamp(Int64ub, 1, 1904)\n 
common(d, b'\\x00\\x00\\x00\\x00\\xd6o*\\x80', arrow.Arrow(2018,1,1), 8)\n d = Timestamp(Int64ub, 10**-7, 1600)\n common(d, b'\\x01\\xd4\\xa2.\\x1a\\xa8\\x00\\x00', arrow.Arrow(2018,1,1), 8)\n d = Timestamp(Int32ub, \"msdos\", \"msdos\")\n common(d, b'H9\\x8c\"', arrow.Arrow(2016,1,25,17,33,4), 4)\n\ndef test_hex():\n d = Hex(Int32ub)\n common(d, b\"\\x00\\x00\\x01\\x02\", 0x0102, 4)\n obj = d.parse(b\"\\x00\\x00\\x01\\x02\")\n assert str(obj) == \"0x00000102\"\n assert str(obj) == \"0x00000102\"\n\n d = Hex(GreedyBytes)\n common(d, b\"\\x00\\x00\\x01\\x02\", b\"\\x00\\x00\\x01\\x02\")\n common(d, b\"\", b\"\")\n obj = d.parse(b\"\\x00\\x00\\x01\\x02\")\n assert str(obj) == \"unhexlify('00000102')\"\n assert str(obj) == \"unhexlify('00000102')\"\n\n d = Hex(RawCopy(Int32ub))\n common(d, b\"\\x00\\x00\\x01\\x02\", dict(data=b\"\\x00\\x00\\x01\\x02\", value=0x0102, offset1=0, offset2=4, length=4), 4)\n obj = d.parse(b\"\\x00\\x00\\x01\\x02\")\n assert str(obj) == \"unhexlify('00000102')\"\n assert str(obj) == \"unhexlify('00000102')\"\n\ndef test_hexdump():\n d = HexDump(GreedyBytes)\n common(d, b\"abcdef\", b\"abcdef\")\n common(d, b\"\", b\"\")\n obj = d.parse(b\"\\x00\\x00\\x01\\x02\")\n repr = \\\n'''hexundump(\"\"\"\n0000 00 00 01 02 ....\n\"\"\")\n'''\n pass\n assert str(obj) == repr\n assert str(obj) == repr\n\n d = HexDump(RawCopy(Int32ub))\n common(d, b\"\\x00\\x00\\x01\\x02\", dict(data=b\"\\x00\\x00\\x01\\x02\", value=0x0102, offset1=0, offset2=4, length=4), 4)\n obj = d.parse(b\"\\x00\\x00\\x01\\x02\")\n repr = \\\n'''hexundump(\"\"\"\n0000 00 00 01 02 ....\n\"\"\")\n'''\n assert str(obj) == repr\n assert str(obj) == repr\n\ndef test_regression_188():\n # Hex HexDump were not inheriting subcon flags\n d = Struct(Hex(Const(b\"MZ\")))\n assert d.parse(b\"MZ\") == Container()\n assert d.build(dict()) == b\"MZ\"\n d = Struct(HexDump(Const(b\"MZ\")))\n assert d.parse(b\"MZ\") == Container()\n assert d.build(dict()) == b\"MZ\"\n\ndef test_union():\n d = Union(None, \"a\"/Bytes(2), \"b\"/Int16ub)\n assert d.parse(b\"\\x01\\x02\") == Container(a=b\"\\x01\\x02\")(b=0x0102)\n assert raises(Union(123, Pass).parse, b\"\") == KeyError\n assert raises(Union(\"missing\", Pass).parse, b\"\") == KeyError\n assert d.build(dict(a=b\"zz\")) == b\"zz\"\n assert d.build(dict(b=0x0102)) == b\"\\x01\\x02\"\n assert raises(d.build, {}) == UnionError\n\n d = Union(None, \"a\"/Bytes(2), \"b\"/Int16ub, Pass)\n assert d.build({}) == b\"\"\n\n # build skips parsefrom, invalid or not\n assert raises(Union(123, Pass).build, {}) == b\"\"\n assert raises(Union(\"missing\", Pass).build, {}) == b\"\"\n\n assert raises(Union(None, Byte).sizeof) == SizeofError\n assert raises(Union(None, VarInt).sizeof) == SizeofError\n assert raises(Union(0, Byte, VarInt).sizeof) == SizeofError\n assert raises(Union(1, Byte, VarInt).sizeof) == SizeofError\n assert raises(Union(123, Pass).sizeof) == SizeofError\n assert raises(Union(\"missing\", Pass).sizeof) == SizeofError\n assert raises(Union(this.missing, Pass).sizeof) == SizeofError\n\n # regression check, so first subcon is not parsefrom by accident\n assert raises(Union, Byte, VarInt) == UnionError\n\ndef test_union_embedded():\n d = Union(None, \"a\"/Int16ub, Embedded(Struct(\"b\"/Int8ub, \"c\"/Int8ub))) >> Byte\n assert d.parse(b\"\\x01\\x02\\x03\") == [Container(a=0x0102, b=0x01, c=0x01), 0x01]\n\n d = Union(None, \"a\"/Int16ub, Embedded(Struct(\"b\"/Int8ub, \"c\"/Int8ub)))\n assert d.parse(b\"\\x01\\x02\") == Container(a=0x0102, b=0x01, c=0x01)\n assert 
d.build(dict(a=0x0102)) == b\"\\x01\\x02\"\n assert d.build(dict(b=0x01)) == b\"\\x01\"\n assert d.build(dict(c=0x01)) == b\"\\x01\"\n assert raises(d.build, dict()) == UnionError\n\n@xfail(not supportskwordered, reason=\"ordered kw was introduced in 3.6\")\ndef test_union_kwctor():\n d = Union(None, a=Int8ub, b=Int16ub, c=Int32ub)\n assert d.parse(b\"\\x01\\x02\\x03\\x04\") == Container(a=0x01,b=0x0102,c=0x01020304)\n assert d.build(Container(c=0x01020304)) == b\"\\x01\\x02\\x03\\x04\"\n\ndef test_union_issue_348():\n d = Union(None,\n Int8=Prefixed(Int16ub, GreedyRange(Int8ub)),\n Int16=Prefixed(Int16ub, GreedyRange(Int16ub)),\n Int32=Prefixed(Int16ub, GreedyRange(Int32ub)),\n )\n assert d.parse(b'\\x00\\x04\\x11\\x22\\x33\\x44') == {'Int16': [4386, 13124], 'Int32': [287454020], 'Int8': [17, 34, 51, 68]}\n assert d.build(dict(Int16=[4386, 13124])) == b'\\x00\\x04\\x11\\x22\\x33\\x44'\n assert d.build(dict(Int32=[287454020])) == b'\\x00\\x04\\x11\\x22\\x33\\x44'\n\ndef test_select():\n d = Select(Int32ub, Int16ub, Int8ub)\n common(d, b\"\\x00\\x00\\x00\\x07\", 7)\n assert raises(Select(Int32ub, Int16ub).parse, b\"\") == SelectError\n assert raises(Select(Byte).sizeof) == SizeofError\n\n@xfail(not supportskwordered, reason=\"ordered kw was introduced in 3.6\")\ndef test_select_kwctor():\n d = Select(a=Int8ub, b=Int16ub, c=Int32ub)\n assert d.parse(b\"\\x01\\x02\\x03\\x04\") == 0x01\n assert d.build(0x01020304) == b\"\\x01\\x02\\x03\\x04\"\n\ndef test_optional():\n d = Optional(Int32ul)\n assert d.parse(b\"\\x01\\x00\\x00\\x00\") == 1\n assert d.build(1) == b\"\\x01\\x00\\x00\\x00\"\n assert d.parse(b\"???\") == None\n assert d.build(None) == b\"\"\n assert raises(d.sizeof) == SizeofError\n\ndef test_if():\n common(If(True, Byte), b\"\\x01\", 1, 1)\n common(If(False, Byte), b\"\", None, 0)\n\ndef test_ifthenelse():\n common(IfThenElse(True, Int8ub, Int16ub), b\"\\x01\", 1, 1)\n common(IfThenElse(False, Int8ub, Int16ub), b\"\\x00\\x01\", 1, 2)\n\ndef test_switch():\n d = Switch(this.x, {1:Int8ub, 2:Int16ub, 4:Int32ub})\n common(d, b\"\\x01\", 0x01, 1, x=1)\n common(d, b\"\\x01\\x02\", 0x0102, 2, x=2)\n assert d.parse(b\"\", x=255) == None\n assert d.build(None, x=255) == b\"\"\n assert raises(d.sizeof) == SizeofError\n assert raises(d.sizeof, x=1) == 1\n\n d = Switch(this.x, {}, default=Byte)\n common(d, b\"\\x01\", 1, 1, x=255)\n\ndef test_switch_issue_357():\n inner = Struct(\n \"computed\" / Computed(4),\n )\n inner2 = Struct(\n \"computed\" / Computed(7),\n )\n st1 = Struct(\n \"a\" / inner,\n \"b\" / Switch(5, {1: inner2}, inner),\n Probe(),\n )\n st2 = Struct(\n \"a\" / inner,\n \"b\" / Switch(5, {}, inner),\n Probe(),\n )\n assert st1.parse(b\"\") == st2.parse(b\"\")\n\ndef test_embeddedswitch():\n d = EmbeddedSwitch(\n Struct(\n \"type\" / Byte,\n ),\n this.type,\n {\n 0: Struct(\"name\" / PascalString(Byte, \"utf8\")),\n 1: Struct(\"value\" / Byte),\n }\n )\n common(d, b\"\\x00\\x00\", Container(type=0, name=u\"\", value=None))\n common(d, b\"\\x01\\x00\", Container(type=1, name=None, value=0))\n\ndef test_embeddedswitch_issue_684():\n d = EmbeddedSwitch(\n Struct(\n \"type\" / Byte,\n ),\n this.type,\n {\n 1: Struct(\"value\" / Byte),\n 2: Struct(\"value\" / Byte),\n }\n )\n d.parse(b\"\\x01\\xff\") == Container(type=1, value=None)\n\ndef test_stopif():\n d = Struct(\"x\"/Byte, StopIf(this.x == 0), \"y\"/Byte)\n common(d, b\"\\x00\", Container(x=0))\n common(d, b\"\\x01\\x02\", Container(x=1,y=2))\n\n d = Sequence(\"x\"/Byte, StopIf(this.x == 0), \"y\"/Byte)\n common(d, b\"\\x00\", 
[0])\n common(d, b\"\\x01\\x02\", [1,None,2])\n\n d = GreedyRange(FocusedSeq(\"x\", \"x\"/Byte, StopIf(this.x == 0)))\n assert d.parse(b\"\\x01\\x00?????\") == [1]\n assert d.build([]) == b\"\"\n assert d.build([0]) == b\"\\x00\"\n assert d.build([1]) == b\"\\x01\"\n assert d.build([1,0,2]) == b\"\\x01\\x00\"\n\ndef test_padding():\n common(Padding(4), b\"\\x00\\x00\\x00\\x00\", None, 4)\n assert raises(Padding, 4, pattern=b\"?????\") == PaddingError\n assert raises(Padding, 4, pattern=u\"?\") == PaddingError\n\ndef test_padded():\n common(Padded(4, Byte), b\"\\x01\\x00\\x00\\x00\", 1, 4)\n assert raises(Padded, 4, Byte, pattern=b\"?????\") == PaddingError\n assert raises(Padded, 4, Byte, pattern=u\"?\") == PaddingError\n assert Padded(4, VarInt).sizeof() == 4\n assert Padded(4, Byte[this.missing]).sizeof() == 4\n\ndef test_aligned():\n common(Aligned(4, Byte), b\"\\x01\\x00\\x00\\x00\", 1, 4)\n common(Struct(\"a\"/Aligned(4, Byte), \"b\"/Byte), b\"\\x01\\x00\\x00\\x00\\x02\", Container(a=1)(b=2), 5)\n assert Aligned(4, Int8ub).build(1) == b\"\\x01\\x00\\x00\\x00\"\n assert Aligned(4, Int16ub).build(1) == b\"\\x00\\x01\\x00\\x00\"\n assert Aligned(4, Int32ub).build(1) == b\"\\x00\\x00\\x00\\x01\"\n assert Aligned(4, Int64ub).build(1) == b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\"\n d = Aligned(this.m, Byte)\n common(d, b\"\\xff\\x00\", 255, 2, m=2)\n assert raises(d.sizeof) == SizeofError\n assert raises(d.sizeof, m=2) == 2\n\ndef test_alignedstruct():\n d = AlignedStruct(4, \"a\"/Int8ub, \"b\"/Int16ub)\n common(d, b\"\\x01\\x00\\x00\\x00\\x00\\x05\\x00\\x00\", Container(a=1)(b=5), 8)\n\ndef test_bitstruct():\n d = BitStruct(\"a\"/BitsInteger(3), \"b\"/Flag, Padding(3), \"c\"/Nibble, \"d\"/BitsInteger(5))\n common(d, b\"\\xe1\\x1f\", Container(a=7)(b=False)(c=8)(d=31), 2)\n d = BitStruct(\"a\"/BitsInteger(3), \"b\"/Flag, Padding(3), \"c\"/Nibble, \"sub\"/Struct(\"d\"/Nibble, \"e\"/Bit))\n common(d, b\"\\xe1\\x1f\", Container(a=7)(b=False)(c=8)(sub=Container(d=15)(e=1)), 2)\n\ndef test_pointer():\n common(Pointer(2, Byte), b\"\\x00\\x00\\x07\", 7, 0)\n common(Pointer(lambda ctx: 2, Byte), b\"\\x00\\x00\\x07\", 7, 0)\n\n d = Struct(\n 'inner' / Struct(),\n 'x' / Pointer(0, Byte, stream=this.inner._io),\n )\n d.parse(bytes(20)) == 0\n\ndef test_peek():\n d = Peek(Int8ub)\n assert d.parse(b\"\\x01\") == 1\n assert d.parse(b\"\") == None\n assert d.build(1) == b\"\"\n assert d.build(None) == b\"\"\n assert d.sizeof() == 0\n d = Peek(VarInt)\n assert d.sizeof() == 0\n\n d = Struct(\"a\"/Peek(Int8ub), \"b\"/Int16ub)\n common(d, b\"\\x01\\x02\", Container(a=0x01)(b=0x0102), 2)\n d = Struct(Peek(\"a\"/Byte), Peek(\"b\"/Int16ub))\n d.parse(b\"\\x01\\x02\") == Container(a=0x01)(b=0x0102)\n d.build(Container(a=0x01)(b=0x0102)) == b\"\\x01\\x02\"\n d.sizeof() == 0\n\ndef test_seek():\n d = Seek(5)\n assert d.parse(b\"\") == 5\n assert d.build(None) == b\"\"\n assert (d >> Byte).parse(b\"01234x\") == [5,120]\n assert (d >> Byte).build([5,255]) == b\"\\x00\\x00\\x00\\x00\\x00\\xff\"\n assert (Bytes(10) >> d >> Byte).parse(b\"0123456789\") == [b\"0123456789\",5,ord('5')]\n assert (Bytes(10) >> d >> Byte).build([b\"0123456789\",None,255]) == b\"01234\\xff6789\"\n assert Struct(\"data\"/Bytes(10), d, \"addin\"/Byte).parse(b\"0123456789\") == Container(data=b\"0123456789\")(addin=53)\n assert Struct(\"data\"/Bytes(10), d, \"addin\"/Byte).build(dict(data=b\"0123456789\",addin=53)) == b\"01234\\x356789\"\n assert (Seek(10,1) >> Seek(-5,1) >> Bytes(1)).parse(b\"0123456789\") == [10,5,b\"5\"]\n assert 
(Seek(10,1) >> Seek(-5,1) >> Bytes(1)).build([None,None,255]) == b\"\\x00\\x00\\x00\\x00\\x00\\xff\"\n assert raises(d.sizeof) == SizeofError\n\ndef test_tell():\n assert Tell.parse(b\"\") == 0\n assert Tell.build(None) == b\"\"\n assert Tell.sizeof() == 0\n assert Struct(\"a\"/Tell, \"b\"/Byte, \"c\"/Tell).parse(b\"\\xff\") == Container(a=0)(b=255)(c=1)\n assert Struct(\"a\"/Tell, \"b\"/Byte, \"c\"/Tell).build(Container(a=0)(b=255)(c=1)) == b\"\\xff\"\n assert Struct(\"a\"/Tell, \"b\"/Byte, \"c\"/Tell).build(dict(b=255)) == b\"\\xff\"\n\ndef test_pass():\n common(Pass, b\"\", None, 0)\n common(Struct(\"empty\"/Pass), b\"\", Container(empty=None), 0)\n\ndef test_terminated():\n common(Terminated, b\"\", None, SizeofError)\n common(Struct(Terminated), b\"\", Container(), SizeofError)\n common(BitStruct(Terminated), b\"\", Container(), SizeofError)\n assert raises(Terminated.parse, b\"x\") == TerminatedError\n assert raises(Struct(Terminated).parse, b\"x\") == TerminatedError\n assert raises(BitStruct(Terminated).parse, b\"x\") == TerminatedError\n\ndef test_rawcopy():\n d = RawCopy(Byte)\n assert d.parse(b\"\\xff\") == dict(data=b\"\\xff\", value=255, offset1=0, offset2=1, length=1)\n assert d.build(dict(data=b\"\\xff\")) == b\"\\xff\"\n assert d.build(dict(value=255)) == b\"\\xff\"\n assert d.sizeof() == 1\n d = RawCopy(Padding(1))\n assert d.build(None) == b'\\x00'\n\ndef test_rawcopy_issue_289():\n # When you build from a full dict that has all the keys, the if data kicks in, and replaces the context entry with a subset of a dict it had to begin with.\n st = Struct(\n \"raw\" / RawCopy(Struct(\"x\"/Byte, \"len\"/Byte)),\n \"array\" / Byte[this.raw.value.len],\n )\n print(st.parse(b\"\\x01\\x02\\xff\\x00\"))\n print(st.build(dict(raw=dict(value=dict(x=1, len=2)), array=[0xff, 0x01])))\n print(st.build(st.parse(b\"\\x01\\x02\\xff\\x00\")))\n # this is not buildable, array is not passed and cannot be deduced from raw data\n # print(st.build(dict(raw=dict(data=b\"\\x01\\x02\\xff\\x00\"))))\n\ndef test_rawcopy_issue_358():\n # RawCopy overwritten context value with subcon return obj regardless of None\n d = Struct(\"a\"/RawCopy(Byte), \"check\"/Check(this.a.value == 255))\n assert d.build(dict(a=dict(value=255))) == b\"\\xff\"\n\ndef test_byteswapped():\n d = ByteSwapped(Bytes(5))\n common(d, b\"12345\", b\"54321\", 5)\n d = ByteSwapped(Struct(\"a\"/Byte, \"b\"/Byte))\n common(d, b\"\\x01\\x02\", Container(a=2)(b=1), 2)\n\ndef test_byteswapped_from_issue_70():\n d = ByteSwapped(BitStruct(\"flag1\"/Bit, \"flag2\"/Bit, Padding(2), \"number\"/BitsInteger(16), Padding(4)))\n assert d.parse(b'\\xd0\\xbc\\xfa') == Container(flag1=1)(flag2=1)(number=0xabcd)\n d = BitStruct(\"flag1\"/Bit, \"flag2\"/Bit, Padding(2), \"number\"/BitsInteger(16), Padding(4))\n assert d.parse(b'\\xfa\\xbc\\xd1') == Container(flag1=1)(flag2=1)(number=0xabcd)\n\ndef test_bitsswapped():\n d = BitsSwapped(Bytes(2))\n common(d, b\"\\x0f\\x01\", b\"\\xf0\\x80\", 2)\n d = Bitwise(Bytes(8))\n common(d, b\"\\xf2\", b'\\x01\\x01\\x01\\x01\\x00\\x00\\x01\\x00', 1)\n d = BitsSwapped(Bitwise(Bytes(8)))\n common(d, b\"\\xf2\", b'\\x00\\x01\\x00\\x00\\x01\\x01\\x01\\x01', 1)\n d = BitStruct(\"a\"/Nibble, \"b\"/Nibble)\n common(d, b\"\\xf1\", Container(a=15)(b=1), 1)\n d = BitsSwapped(BitStruct(\"a\"/Nibble, \"b\"/Nibble))\n common(d, b\"\\xf1\", Container(a=8)(b=15), 1)\n\ndef test_prefixed():\n d = Prefixed(Byte, Int16ul)\n assert d.parse(b\"\\x02\\xff\\xff??????\") == 65535\n assert d.build(65535) == b\"\\x02\\xff\\xff\"\n assert 
d.sizeof() == 3\n d = Prefixed(VarInt, GreedyBytes)\n assert d.parse(b\"\\x03abc??????\") == b\"abc\"\n assert d.build(b\"abc\") == b'\\x03abc'\n assert raises(d.sizeof) == SizeofError\n d = Prefixed(Byte, Sequence(Peek(Byte), Int16ub, GreedyBytes))\n assert d.parse(b\"\\x02\\x00\\xff????????\") == [0,255,b'']\n\n d = Prefixed(Byte, GreedyBytes)\n common(d, b\"\\x0a\"+bytes(10), bytes(10), SizeofError)\n d = Prefixed(Byte, GreedyString(\"utf-8\"))\n common(d, b\"\\x0a\"+bytes(10), u\"\\x00\"*10, SizeofError)\n\ndef test_prefixedarray():\n common(PrefixedArray(Byte,Byte), b\"\\x02\\x0a\\x0b\", [10,11], SizeofError)\n assert PrefixedArray(Byte, Byte).parse(b\"\\x03\\x01\\x02\\x03\") == [1,2,3]\n assert PrefixedArray(Byte, Byte).parse(b\"\\x00\") == []\n assert PrefixedArray(Byte, Byte).build([1,2,3]) == b\"\\x03\\x01\\x02\\x03\"\n assert raises(PrefixedArray(Byte, Byte).parse, b\"\") == StreamError\n assert raises(PrefixedArray(Byte, Byte).parse, b\"\\x03\\x01\") == StreamError\n assert raises(PrefixedArray(Byte, Byte).sizeof) == SizeofError\n\ndef test_fixedsized():\n d = FixedSized(10, Byte)\n common(d, b'\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 255, 10)\n d = FixedSized(-255, Byte)\n assert raises(d.parse, bytes(10)) == PaddingError\n assert raises(d.build, 0) == PaddingError\n assert raises(d.sizeof) == PaddingError\n d = FixedSized(10, GreedyBytes)\n common(d, bytes(10), bytes(10), 10)\n d = FixedSized(10, GreedyString(\"utf-8\"))\n common(d, bytes(10), u\"\\x00\"*10, 10)\n\ndef test_nullterminated():\n d = NullTerminated(Byte)\n common(d, b'\\xff\\x00', 255, SizeofError)\n d = NullTerminated(GreedyBytes, include=True)\n assert d.parse(b'\\xff\\x00') == b'\\xff\\x00'\n d = NullTerminated(GreedyBytes, include=False)\n assert d.parse(b'\\xff\\x00') == b'\\xff'\n d = NullTerminated(GreedyBytes, consume=True) >> GreedyBytes\n assert d.parse(b'\\xff\\x00') == [b'\\xff', b'']\n d = NullTerminated(GreedyBytes, consume=False) >> GreedyBytes\n assert d.parse(b'\\xff\\x00') == [b'\\xff', b'\\x00']\n d = NullTerminated(GreedyBytes, require=True)\n assert raises(d.parse, b'\\xff') == StreamError\n d = NullTerminated(GreedyBytes, require=False)\n assert d.parse(b'\\xff') == b'\\xff'\n d = NullTerminated(GreedyBytes)\n common(d, bytes(1), b\"\", SizeofError)\n d = NullTerminated(GreedyString(\"utf-8\"))\n common(d, bytes(1), u\"\", SizeofError)\n d = NullTerminated(GreedyBytes, term=bytes(2))\n common(d, b\"\\x01\\x00\\x00\\x02\\x00\\x00\", b\"\\x01\\x00\\x00\\x02\", SizeofError)\n\ndef test_nullstripped():\n d = NullStripped(GreedyBytes)\n common(d, b'\\xff', b'\\xff', SizeofError)\n assert d.parse(b'\\xff\\x00\\x00') == b'\\xff'\n assert d.build(b'\\xff') == b'\\xff'\n d = NullStripped(GreedyBytes, pad=b'\\x05')\n common(d, b'\\xff', b'\\xff', SizeofError)\n assert d.parse(b'\\xff\\x05\\x05') == b'\\xff'\n assert d.build(b'\\xff') == b'\\xff'\n d = NullStripped(GreedyString(\"utf-8\"))\n assert d.parse(bytes(10)) == u\"\"\n assert d.build(u\"\") == b\"\"\n d = NullStripped(GreedyBytes, pad=bytes(2))\n assert d.parse(bytes(10)) == b\"\"\n assert d.parse(bytes(11)) == b\"\"\n\ndef test_restreamdata():\n d = RestreamData(b\"\\x01\", Int8ub)\n common(d, b\"\", 1, 0)\n d = RestreamData(b\"\", Padding(1))\n assert d.build(None) == b''\n\n d = RestreamData(io.BytesIO(b\"\\x01\\x02\"), Int16ub)\n assert d.parse(b\"\\x01\\x02\\x00\") == 0x0102\n assert d.build(None) == b''\n\n d = RestreamData(NullTerminated(GreedyBytes), Int16ub)\n assert d.parse(b\"\\x01\\x02\\x00\") == 0x0102\n assert 
d.build(None) == b''\n\n d = RestreamData(FixedSized(2, GreedyBytes), Int16ub)\n assert d.parse(b\"\\x01\\x02\\x00\") == 0x0102\n assert d.build(None) == b''\n\n@xfail(reason=\"unknown, either StreamError or KeyError due to this.entire or this._.entire\")\ndef test_restreamdata_issue_701():\n d = Struct(\n 'entire' / GreedyBytes,\n 'ac' / RestreamData(this.entire, Struct(\n 'a' / Byte,\n Bytes(len_(this._.entire)-1),\n 'c' / Byte,\n )),\n )\n # StreamError: stream read less then specified amount, expected 1, found 0\n assert d.parse(b'\\x01GGGGGGGGGG\\x02') == Container(entire=b'\\x01GGGGGGGGGG\\x02', ac=Container(a=1,b=2))\n\n d = FocusedSeq('ac'\n 'entire' / GreedyBytes,\n 'ac' / RestreamData(this.entire, Struct(\n 'a' / Byte,\n Bytes(len_(this._.entire)-1),\n 'c' / Byte,\n )),\n )\n # KeyError: 'entire'\n assert d.parse(b'\\x01GGGGGGGGGG\\x02') == Container(a=1,b=2)\n\ndef test_transformed():\n d = Transformed(Bytes(16), bytes2bits, 2, bits2bytes, 2)\n common(d, bytes(2), bytes(16), 2)\n d = Transformed(GreedyBytes, bytes2bits, None, bits2bytes, None)\n common(d, bytes(2), bytes(16), SizeofError)\n d = Transformed(GreedyString(\"utf-8\"), bytes2bits, None, bits2bytes, None)\n common(d, bytes(2), u\"\\x00\"*16, SizeofError)\n\ndef test_transformed_issue_676():\n d = Struct(\n 'inner1' / BitStruct(\n 'a' / Default(BitsInteger(8), 0),\n ),\n 'inner2' / BitStruct(\n 'a' / Default(BitsInteger(lambda this: 8), 0),\n ),\n Probe(),\n Check(this.inner1.a == 0),\n Check(this.inner2.a == 0),\n )\n d.build({})\n\ndef test_restreamed():\n d = Restreamed(Int16ub, ident, 1, ident, 1, ident)\n common(d, b\"\\x00\\x01\", 1, 2)\n d = Restreamed(VarInt, ident, 1, ident, 1, ident)\n assert raises(d.sizeof) == SizeofError\n d = Restreamed(Bytes(2), lambda b: b*2, 1, lambda b: b[0:1], 1, lambda n: n*2)\n common(d, b\"aa\", b\"aa\", 4)\n\ndef test_restreamed_partial_read():\n d = Restreamed(Bytes(255), ident, 1, ident, 1, ident)\n assert raises(d.parse, b\"\") == StreamError\n\ndef test_processxor():\n d = ProcessXor(0, Int16ub)\n common(d, b\"\\xf0\\x0f\", 0xf00f, 2)\n d = ProcessXor(0xf0, Int16ub)\n common(d, b\"\\x00\\xff\", 0xf00f, 2)\n d = ProcessXor(bytes(10), Int16ub)\n common(d, b\"\\xf0\\x0f\", 0xf00f, 2)\n d = ProcessXor(b\"\\xf0\\xf0\\xf0\\xf0\\xf0\", Int16ub)\n common(d, b\"\\x00\\xff\", 0xf00f, 2)\n\n d = ProcessXor(0xf0, GreedyBytes)\n common(d, b\"\\x00\\xff\", b\"\\xf0\\x0f\", SizeofError)\n d = ProcessXor(b\"\\xf0\\xf0\\xf0\\xf0\\xf0\", GreedyBytes)\n common(d, b\"\\x00\\xff\", b\"\\xf0\\x0f\", SizeofError)\n d = ProcessXor(b\"X\", GreedyString(\"utf-8\"))\n common(d, b\"\\x00\", u\"X\", SizeofError)\n d = ProcessXor(b\"XXXXX\", GreedyString(\"utf-8\"))\n common(d, b\"\\x00\", u\"X\", SizeofError)\n\ndef test_processrotateleft():\n d = ProcessRotateLeft(0, 1, GreedyBytes)\n common(d, bytes(10), bytes(10))\n d = ProcessRotateLeft(0, 2, GreedyBytes)\n common(d, bytes(10), bytes(10))\n d = ProcessRotateLeft(4, 1, GreedyBytes)\n common(d, b'\\x0f\\xf0', b'\\xf0\\x0f')\n d = ProcessRotateLeft(4, 2, GreedyBytes)\n common(d, b'\\x0f\\xf0', b'\\xff\\x00')\n\ndef test_checksum():\n d = Struct(\n \"fields\" / RawCopy(Struct(\n \"a\" / Byte,\n \"b\" / Byte,\n )),\n \"checksum\" / Checksum(Bytes(64), lambda data: hashlib.sha512(data).digest(), this.fields.data),\n )\n\n c = hashlib.sha512(b\"\\x01\\x02\").digest()\n assert d.parse(b\"\\x01\\x02\"+c) == Container(fields=dict(data=b\"\\x01\\x02\", value=Container(a=1)(b=2), offset1=0, offset2=2, length=2))(checksum=c)\n assert 
d.build(dict(fields=dict(data=b\"\\x01\\x02\"))) == b\"\\x01\\x02\"+c\n assert d.build(dict(fields=dict(value=dict(a=1,b=2)))) == b\"\\x01\\x02\"+c\n\ndef test_checksum_nonbytes_issue_323():\n st = Struct(\n \"vals\" / Byte[2],\n \"checksum\" / Checksum(Byte, lambda vals: sum(vals) & 0xFF, this.vals),\n )\n assert st.parse(b\"\\x00\\x00\\x00\") == Container(vals=[0, 0])(checksum=0)\n assert raises(st.parse, b\"\\x00\\x00\\x01\") == ChecksumError\n\ndef test_compressed_zlib():\n zeros = bytes(10000)\n d = Compressed(GreedyBytes, \"zlib\")\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n d = Compressed(GreedyBytes, \"zlib\", level=9)\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n\n@xfail(not PY>=(3,2), raises=AttributeError, reason=\"gzip module was added in 3.2\")\ndef test_compressed_gzip():\n zeros = bytes(10000)\n d = Compressed(GreedyBytes, \"gzip\")\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n d = Compressed(GreedyBytes, \"gzip\", level=9)\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n\ndef test_compressed_bzip2():\n zeros = bytes(10000)\n d = Compressed(GreedyBytes, \"bzip2\")\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n d = Compressed(GreedyBytes, \"bzip2\", level=9)\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 50\n assert raises(d.sizeof) == SizeofError\n\n@xfail(not PY>=(3,3), raises=ImportError, reason=\"lzma module was added in 3.3\")\ndef test_compressed_lzma():\n zeros = bytes(10000)\n d = Compressed(GreedyBytes, \"lzma\")\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 200\n assert raises(d.sizeof) == SizeofError\n d = Compressed(GreedyBytes, \"lzma\", level=9)\n assert d.parse(d.build(zeros)) == zeros\n assert len(d.build(zeros)) < 200\n assert raises(d.sizeof) == SizeofError\n\ndef test_compressed_prefixed():\n zeros = bytes(10000)\n d = Prefixed(VarInt, Compressed(GreedyBytes, \"zlib\"))\n st = Struct(\"one\"/d, \"two\"/d)\n assert st.parse(st.build(Container(one=zeros,two=zeros))) == Container(one=zeros,two=zeros)\n assert raises(d.sizeof) == SizeofError\n\ndef test_rebuffered():\n data = b\"0\" * 1000\n assert Rebuffered(Array(1000,Byte)).parse_stream(io.BytesIO(data)) == [48]*1000\n assert Rebuffered(Array(1000,Byte), tailcutoff=50).parse_stream(io.BytesIO(data)) == [48]*1000\n assert Rebuffered(Byte).sizeof() == 1\n assert raises(Rebuffered(Byte).sizeof) == 1\n assert raises(Rebuffered(VarInt).sizeof) == SizeofError\n\ndef test_lazy():\n d = Struct(\n 'dup' / Lazy(Computed(this.exists)),\n 'exists' / Computed(1),\n )\n obj = d.parse(b'')\n assert obj.dup() == 1\n\n d = Lazy(Byte)\n x = d.parse(b'\\x00')\n assert x() == 0\n assert d.build(0) == b'\\x00'\n assert d.build(x) == b'\\x00'\n assert d.sizeof() == 1\n\ndef test_lazystruct():\n d = LazyStruct(\n \"num1\" / Int8ub,\n \"num2\" / BytesInteger(1),\n \"prefixed1\" / Prefixed(Byte, Byte),\n \"prefixed2\" / Prefixed(Byte, Byte, includelength=True),\n \"prefixedarray\" / PrefixedArray(Byte, Byte),\n )\n obj = d.parse(b\"\\x00\\x00\\x01\\x00\\x02\\x00\\x01\\x00\")\n assert obj.num1 == obj[\"num1\"] == obj[0] == 0\n assert obj.num2 == obj[\"num2\"] == obj[1] == 0\n assert obj.prefixed1 == obj[\"prefixed1\"] == 
obj[2] == 0\n assert obj.prefixed2 == obj[\"prefixed2\"] == obj[3] == 0\n assert obj.prefixedarray == obj[\"prefixedarray\"] == obj[4] == [0]\n assert len(obj) == 5\n assert list(obj.keys()) == ['num1', 'num2', 'prefixed1', 'prefixed2', 'prefixedarray']\n assert list(obj.values()) == [0, 0, 0, 0, [0]]\n assert list(obj.items()) == [('num1', 0), ('num2', 0), ('prefixed1', 0), ('prefixed2', 0), ('prefixedarray', [0])]\n assert repr(obj) == \"<LazyContainer: 5 items cached, 5 subcons>\"\n assert str(obj) == \"<LazyContainer: 5 items cached, 5 subcons>\"\n assert d.build(obj) == b\"\\x00\\x00\\x01\\x00\\x02\\x00\\x01\\x00\"\n assert d.build(Container(obj)) == b\"\\x00\\x00\\x01\\x00\\x02\\x00\\x01\\x00\"\n assert raises(d.sizeof) == SizeofError\n\ndef test_lazyarray():\n d = LazyArray(5, Int8ub)\n obj = d.parse(b\"\\x00\\x01\\x02\\x03\\x04\")\n assert repr(obj) == \"<LazyListContainer: 0 of 5 items cached>\"\n for i in range(5):\n assert obj[i] == i\n assert obj[:] == [0,1,2,3,4]\n assert obj == [0,1,2,3,4]\n assert list(obj) == [0,1,2,3,4]\n assert len(obj) == 5\n assert repr(obj) == \"<LazyListContainer: 5 of 5 items cached>\"\n assert str(obj) == \"<LazyListContainer: 5 of 5 items cached>\"\n assert d.build([0,1,2,3,4]) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(ListContainer([0,1,2,3,4])) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(obj) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(obj[:]) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.sizeof() == 5\n\n d = LazyArray(5, VarInt)\n obj = d.parse(b\"\\x00\\x01\\x02\\x03\\x04\")\n assert repr(obj) == \"<LazyListContainer: 5 of 5 items cached>\"\n for i in range(5):\n assert obj[i] == i\n assert obj[:] == [0,1,2,3,4]\n assert obj == [0,1,2,3,4]\n assert list(obj) == [0,1,2,3,4]\n assert len(obj) == 5\n assert repr(obj) == \"<LazyListContainer: 5 of 5 items cached>\"\n assert str(obj) == \"<LazyListContainer: 5 of 5 items cached>\"\n assert d.build([0,1,2,3,4]) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(ListContainer([0,1,2,3,4])) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(obj) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert d.build(obj[:]) == b\"\\x00\\x01\\x02\\x03\\x04\"\n assert raises(d.sizeof) == SizeofError\n\ndef test_lazybound():\n d = LazyBound(lambda: Byte)\n common(d, b\"\\x01\", 1)\n\n d = Struct(\n \"value\" / Byte,\n \"next\" / If(this.value > 0, LazyBound(lambda: d)),\n )\n common(d, b\"\\x05\\x09\\x00\", Container(value=5)(next=Container(value=9)(next=Container(value=0)(next=None))))\n\n d = Struct(\n \"value\" / Byte,\n \"next\" / GreedyBytes,\n )\n data = b\"\\x05\\x09\\x00\"\n while data:\n x = d.parse(data)\n data = x.next\n print(x)\n\ndef test_expradapter():\n MulDiv = ExprAdapter(Byte, obj_ * 7, obj_ // 7)\n assert MulDiv.parse(b\"\\x06\") == 42\n assert MulDiv.build(42) == b\"\\x06\"\n assert MulDiv.sizeof() == 1\n\n Ident = ExprAdapter(Byte, obj_-1, obj_+1)\n assert Ident.parse(b\"\\x02\") == 1\n assert Ident.build(1) == b\"\\x02\"\n assert Ident.sizeof() == 1\n\ndef test_exprsymmetricadapter():\n pass\n\ndef test_exprvalidator():\n One = ExprValidator(Byte, lambda obj,ctx: obj in [1,3,5])\n assert One.parse(b\"\\x01\") == 1\n assert raises(One.parse, b\"\\xff\") == ValidationError\n assert One.build(5) == b\"\\x05\"\n assert raises(One.build, 255) == ValidationError\n assert One.sizeof() == 1\n\ndef test_ipaddress_adapter_issue_95():\n class IpAddressAdapter(Adapter):\n def _encode(self, obj, context, path):\n return list(map(int, obj.split(\".\")))\n def _decode(self, obj, context, 
path):\n return \"{0}.{1}.{2}.{3}\".format(*obj)\n IpAddress = IpAddressAdapter(Byte[4])\n\n assert IpAddress.parse(b\"\\x7f\\x80\\x81\\x82\") == \"127.128.129.130\"\n assert IpAddress.build(\"127.1.2.3\") == b\"\\x7f\\x01\\x02\\x03\"\n assert IpAddress.sizeof() == 4\n\n IpAddress = ExprAdapter(Byte[4],\n encoder = lambda obj,ctx: list(map(int, obj.split(\".\"))),\n decoder = lambda obj,ctx: \"{0}.{1}.{2}.{3}\".format(*obj), )\n\n assert IpAddress.parse(b\"\\x7f\\x80\\x81\\x82\") == \"127.128.129.130\"\n assert IpAddress.build(\"127.1.2.3\") == b\"\\x7f\\x01\\x02\\x03\"\n assert IpAddress.sizeof() == 4\n\ndef test_oneof():\n assert OneOf(Byte,[4,5,6,7]).parse(b\"\\x05\") == 5\n assert OneOf(Byte,[4,5,6,7]).build(5) == b\"\\x05\"\n assert raises(OneOf(Byte,[4,5,6,7]).parse, b\"\\x08\") == ValidationError\n assert raises(OneOf(Byte,[4,5,6,7]).build, 8) == ValidationError\n\ndef test_noneof():\n assert NoneOf(Byte,[4,5,6,7]).parse(b\"\\x08\") == 8\n assert raises(NoneOf(Byte,[4,5,6,7]).parse, b\"\\x06\") == ValidationError\n\ndef test_filter():\n d = Filter(obj_ != 0, GreedyRange(Byte))\n assert d.parse(b\"\\x00\\x02\\x00\") == [2]\n assert d.build([0,1,0,2,0]) == b\"\\x01\\x02\"\n\ndef test_slicing():\n d = Slicing(Array(4,Byte), 4, 1, 3, empty=0)\n assert d.parse(b\"\\x01\\x02\\x03\\x04\") == [2,3]\n assert d.build([2,3]) == b\"\\x00\\x02\\x03\\x00\"\n assert d.sizeof() == 4\n\ndef test_indexing():\n d = Indexing(Array(4,Byte), 4, 2, empty=0)\n assert d.parse(b\"\\x01\\x02\\x03\\x04\") == 3\n assert d.build(3) == b\"\\x00\\x00\\x03\\x00\"\n assert d.sizeof() == 4\n\ndef test_probe():\n common(Probe(), b\"\", None, 0)\n common(Probe(lookahead=32), b\"\", None, 0)\n\n common(Struct(Probe()), b\"\", {}, 0)\n common(Struct(Probe(lookahead=32)), b\"\", {}, 0)\n common(Struct(\"value\"/Computed(7), Probe(this.value)), b\"\", dict(value=7), 0)\n\ndef test_debugger():\n common(Debugger(Byte), b\"\\xff\", 255, 1)\n\ndef test_repr():\n assert repr(Byte) == '<FormatField>'\n assert repr(\"num\"/Byte) == '<Renamed num <FormatField>>'\n assert repr(Default(Byte, 0)) == '<Default +nonbuild <FormatField>>'\n assert repr(Struct()) == '<Struct +nonbuild>'\n\ndef test_operators():\n common(Struct(\"new\" / (\"old\" / Byte)), b\"\\x01\", Container(new=1), 1)\n common(Struct(Renamed(Renamed(Byte, newname=\"old\"), newname=\"new\")), b\"\\x01\", Container(new=1), 1)\n\n common(Array(4, Byte), b\"\\x01\\x02\\x03\\x04\", [1,2,3,4], 4)\n common(Byte[4], b\"\\x01\\x02\\x03\\x04\", [1,2,3,4], 4)\n common(Struct(\"nums\" / Byte[4]), b\"\\x01\\x02\\x03\\x04\", Container(nums=[1,2,3,4]), 4)\n\n common(Int8ub >> Int16ub, b\"\\x01\\x00\\x02\", [1,2], 3)\n common(Int8ub >> Int16ub >> Int32ub, b\"\\x01\\x00\\x02\\x00\\x00\\x00\\x03\", [1,2,3], 7)\n common(Int8ub[2] >> Int16ub[2], b\"\\x01\\x02\\x00\\x03\\x00\\x04\", [[1,2],[3,4]], 6)\n\n common(Sequence(Embedded(Sequence(Int8ub)), Embedded(Sequence(Int16ub)) ), b\"\\x01\\x00\\x02\", [1,2], 3)\n common(Sequence(Int8ub) >> Sequence(Int16ub), b\"\\x01\\x00\\x02\", [1,2], 3)\n common(Struct(\"count\"/Byte, \"items\"/Byte[this.count], Pass, Terminated), b\"\\x03\\x01\\x02\\x03\", Container(count=3)(items=[1,2,3]), SizeofError)\n common(\"count\"/Byte + \"items\"/Byte[this.count] + Pass + Terminated, b\"\\x03\\x01\\x02\\x03\", Container(count=3)(items=[1,2,3]), SizeofError)\n common(Struct(Embedded(Struct(a=Byte)), Embedded(Struct(b=Byte)) ), b\"\\x01\\x02\", Container(a=1)(b=2), 2)\n common(Struct(a=Byte) + Struct(b=Byte), b\"\\x01\\x02\", Container(a=1)(b=2), 2)\n\n d = Byte * 
\"description\"\n assert d.docs == \"description\"\n d = \"description\" * Byte\n assert d.docs == \"description\"\n \"\"\"\n description\n \"\"\" * \\\n Byte\n assert d.docs == \"description\"\n d = Renamed(Renamed(Byte, newdocs=\"old\"), newdocs=\"new\")\n assert d.docs == \"new\"\n\ndef test_operators_issue_87():\n assert (\"string_name\" / Byte).parse(b\"\\x01\") == 1\n assert (u\"unicode_name\" / Byte).parse(b\"\\x01\") == 1\n assert (b\"bytes_name\" / Byte).parse(b\"\\x01\") == 1\n assert (None / Byte).parse(b\"\\x01\") == 1\n\ndef test_from_issue_76():\n d = Aligned(4, Struct(\"a\"/Byte, \"f\"/Bytes(lambda ctx: ctx.a)))\n common(d, b\"\\x02\\xab\\xcd\\x00\", Container(a=2)(f=b\"\\xab\\xcd\"))\n\ndef test_from_issue_60():\n Header = Struct(\n \"type\" / Int8ub,\n \"size\" / Switch(lambda ctx: ctx.type,\n {\n 0: Int8ub,\n 1: Int16ub,\n 2: Int32ub,\n }),\n \"length\" / Tell,\n )\n assert Header.parse(b\"\\x00\\x05\") == Container(type=0)(size=5)(length=2)\n assert Header.parse(b\"\\x01\\x00\\x05\") == Container(type=1)(size=5)(length=3)\n assert Header.parse(b\"\\x02\\x00\\x00\\x00\\x05\") == Container(type=2)(size=5)(length=5)\n assert Header.build(dict(type=0, size=5)) == b\"\\x00\\x05\"\n assert Header.build(dict(type=1, size=5)) == b\"\\x01\\x00\\x05\"\n assert Header.build(dict(type=2, size=5)) == b\"\\x02\\x00\\x00\\x00\\x05\"\n\n HeaderData = Struct(\n Embedded(Header),\n \"data\" / Bytes(lambda ctx: ctx.size),\n )\n assert HeaderData.parse(b\"\\x00\\x0512345\") == Container(type=0)(size=5)(length=2)(data=b\"12345\")\n assert HeaderData.parse(b\"\\x01\\x00\\x0512345\") == Container(type=1)(size=5)(length=3)(data=b\"12345\")\n assert HeaderData.parse(b\"\\x02\\x00\\x00\\x00\\x0512345\") == Container(type=2)(size=5)(length=5)(data=b\"12345\")\n assert HeaderData.build(dict(type=0, size=5, data=b\"12345\")) == b\"\\x00\\x0512345\"\n assert HeaderData.build(dict(type=1, size=5, data=b\"12345\")) == b\"\\x01\\x00\\x0512345\"\n assert HeaderData.build(dict(type=2, size=5, data=b\"12345\")) == b\"\\x02\\x00\\x00\\x00\\x0512345\"\n\ndef test_from_issue_171():\n attributes = BitStruct(\n \"attr\" / Aligned(8, Array(3, Struct(\n \"attrCode\" / BitsInteger(16),\n \"attrValue\" / Switch(this.attrCode, {\n 34: BitsInteger(8),\n 205: BitsInteger(2),\n 512: BitsInteger(2),\n }),\n ))),\n )\n blob = b\"\\x00\\x22\\x82\\x00\\xCD\\x80\\x80\\x10\"\n assert attributes.parse(blob) == Container(attr=[\n Container(attrCode=34)(attrValue=130),\n Container(attrCode=205)(attrValue=2),\n Container(attrCode=512)(attrValue=1), ])\n\ndef test_from_issue_175():\n @FuncPath\n def comp_(num_array):\n return sum(x << ((len(num_array)-1-i)*8) for i,x in enumerate(num_array))\n\n test = Struct(\n \"numArray\" / RepeatUntil(obj_ < 128, Byte),\n \"value\" / Computed(comp_(this.numArray))\n )\n assert test.parse(b'\\x87\\x0f').value == 34575\n\ndef test_from_issue_71():\n Inner = Struct(\n 'name' / PascalString(Byte, \"utf8\"),\n 'occupation' / PascalString(Byte, \"utf8\"),\n )\n Outer = Struct(\n 'struct_type' / Int16ub,\n 'payload_len' / Int16ub,\n 'payload' / RawCopy(Inner),\n 'serial' / Int16ub,\n 'checksum' / Checksum(Bytes(64),\n lambda data: hashlib.sha512(data).digest(),\n this.payload.data),\n Check(len_(this.payload.data) == this.payload_len),\n Terminated,\n )\n\n payload = Inner.build(Container(\n name=u\"unknown\",\n occupation=u\"worker\",\n ))\n Outer.build(Container(\n struct_type=9001,\n payload_len=len(payload),\n payload=Container(data=payload),\n serial=12345,\n ))\n\ndef test_from_issue_231():\n 
u = Union(0, \"raw\"/Byte[8], \"ints\"/Int[2])\n s = Struct(\"u\"/u, \"d\"/Byte[4])\n\n buildret = s.build(dict(u=dict(ints=[1,2]),d=[0,1,2,3]))\n assert buildret == b\"\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x01\\x02\\x03\"\n assert s.build(s.parse(buildret)) == buildret\n\ndef test_from_issue_246():\n NumVertices = Bitwise(Aligned(8, Struct(\n 'numVx4' / BitsInteger(4),\n 'numVx8' / If(this.numVx4 == 0, BitsInteger(8)),\n 'numVx16' / If(this.numVx4 == 0 & this.numVx8 == 255, BitsInteger(16)),\n )))\n common(NumVertices, b'\\x02\\x30', Container(numVx4=0, numVx8=35, numVx16=None))\n\n testBit = BitStruct(\n 'a' / BitsInteger(8),\n 'b' / If(this.a == 97, BitsInteger(8))\n )\n testByte = Struct(\n 'a' / Byte,\n 'b' / If(this.a == 97, Byte)\n )\n common(testBit, b'ab', Container(a=97, b=98))\n common(testByte, b'ab', Container(a=97, b=98))\n\n NumVertices = Union(None,\n 'numVx4' / Bitwise(Aligned(8, Struct('num'/ BitsInteger(4) ))),\n 'numVx8' / Bitwise(Aligned(8, Struct('num'/ BitsInteger(12)))),\n 'numVx16'/ Bitwise(Aligned(8, Struct('num'/ BitsInteger(28)))),\n )\n assert NumVertices.parse(b'\\x01\\x34\\x56\\x70') == Container(numVx4=Container(num=0))(numVx8=Container(num=19))(numVx16=Container(num=1262951))\n\ndef test_from_issue_244():\n class AddIndexes(Adapter):\n def _decode(self, obj, context, path):\n for i,con in enumerate(obj):\n con.index = i\n return obj\n\n d = AddIndexes(Struct(\"num\"/Byte)[4])\n assert d.parse(b\"abcd\") == [Container(num=97)(index=0),Container(num=98)(index=1),Container(num=99)(index=2),Container(num=100)(index=3),]\n\ndef test_from_issue_269():\n d = Struct(\"enabled\" / Byte, If(this.enabled, Padding(2)))\n assert d.build(dict(enabled=1)) == b\"\\x01\\x00\\x00\"\n assert d.build(dict(enabled=0)) == b\"\\x00\"\n d = Struct(\"enabled\" / Byte, \"pad\" / If(this.enabled, Padding(2)))\n assert d.build(dict(enabled=1)) == b\"\\x01\\x00\\x00\"\n assert d.build(dict(enabled=0)) == b\"\\x00\"\n\ndef test_hanging_issue_280():\n d = BitStruct('a'/BitsInteger(20), 'b'/BitsInteger(12))\n assert raises(d.parse, b'\\x00') == StreamError\n\ndef test_from_issue_324():\n d = Struct(\n \"vals\" / Prefixed(Byte, RawCopy(\n Struct(\"a\" / Byte[2]),\n )),\n \"checksum\" / Checksum(\n Byte,\n lambda data: sum(iterateints(data)) & 0xFF,\n this.vals.data\n ),\n )\n assert d.build(dict(vals=dict(value=dict(a=[0,1])))) == b\"\\x02\\x00\\x01\\x01\"\n assert d.build(dict(vals=dict(data=b\"\\x00\\x01\"))) == b\"\\x02\\x00\\x01\\x01\"\n\ndef test_from_issue_357():\n inner = Struct(\n \"computed\" / Computed(4),\n )\n st1 = Struct(\n \"a\" / inner,\n Check(this.a.computed == 4),\n )\n st2 = Struct(\n \"b\" / Switch(0, {}, inner),\n Check(this.b.computed == 4),\n )\n assert st1.build(dict(a={})) == b\"\"\n assert st2.build(dict(b={})) == b\"\"\n\ndef test_context_is_container():\n d = Struct(Check(lambda ctx: type(ctx) is Container))\n d.parse(b\"\")\n\ndef test_from_issue_362():\n FORMAT = Struct(\n \"my_tell\" / Tell,\n \"my_byte\" / Byte,\n )\n BIT_FORMAT = BitStruct(\n \"my_tell\" / Tell,\n \"my_bits\" / Bit[8],\n )\n for i in range(5):\n assert FORMAT.parse(b'\\x00').my_tell == 0\n for i in range(5):\n assert BIT_FORMAT.parse(b'\\x00').my_tell == 0\n\ndef test_this_expresion_compare_container():\n st = Struct(\n \"flags\" / FlagsEnum(Byte, a=1),\n Check(lambda this: this.flags == Container(_flagsenum=True)(a=1)),\n )\n common(st, b\"\\x01\", dict(flags=Container(_flagsenum=True)(a=True)), 1)\n\n@xfail(reason=\"unknown causes\")\ndef test_pickling_constructs():\n # it seems 
there are few problems:\n # - singletons still dont pickle (_pickle.PicklingError: Can't pickle <class 'construct.core.GreedyBytes'>: it's not the same object as construct.core.GreedyBytes)\n # - this expressions, ExprMixin added __get(set)state__\n # - FormatField uses a packer that needs to be re-created\n # what was fixed so far:\n # - singleton decorator adds __reduce__ to instance\n\n import pickle\n\n d = Struct(\n # - singletons still dont pickle\n \"count\" / Byte,\n # - singletons still dont pickle\n \"greedybytes\" / Prefixed(Byte, GreedyBytes),\n \"formatfield\" / FormatField(\"=\",\"Q\"),\n \"bytesinteger\" / BytesInteger(1),\n # - singletons still dont pickle\n \"varint\" / VarInt,\n \"text1\" / PascalString(Byte, \"utf8\"),\n \"text2\" / CString(\"utf8\"),\n \"enum\" / Enum(Byte, zero=0),\n \"flagsenum\" / FlagsEnum(Byte, zero=0),\n \"array1\" / Byte[5],\n # - uses this-expression\n # \"array2\" / Byte[this.count],\n \"greedyrange\" / Prefixed(Byte, GreedyRange(Byte)),\n # - its a macro around Switch, should reimplement\n # \"if1\" / IfThenElse(True, Byte, Byte),\n \"padding\" / Padding(1),\n \"peek\" / Peek(Byte),\n # - singletons still dont pickle\n \"tell\" / Tell,\n # - unknown causes\n # \"this1\" / Byte[this.count],\n # \"obj_1\" / RepeatUntil(obj_ == 0, Byte),\n # \"len_1\" / Computed(len_(this.array1)),\n )\n data = bytes(100)\n\n du = pickle.loads(pickle.dumps(d, protocol=-1))\n assert du.parse(data) == d.parse(data)\n\ndef test_exposing_members_attributes():\n d = Struct(\n \"animal\" / Enum(Byte, giraffe=1),\n )\n assert isinstance(d.animal, Renamed)\n assert isinstance(d.animal.subcon, Enum)\n assert d.animal.giraffe == \"giraffe\"\n\n d = Sequence(\n \"animal\" / Enum(Byte, giraffe=1),\n )\n assert isinstance(d.animal, Renamed)\n assert isinstance(d.animal.subcon, Enum)\n assert d.animal.giraffe == \"giraffe\"\n\n d = FocusedSeq(0,\n \"animal\" / Enum(Byte, giraffe=1),\n )\n assert isinstance(d.animal, Renamed)\n assert isinstance(d.animal.subcon, Enum)\n assert d.animal.giraffe == \"giraffe\"\n\n d = Union(None,\n \"animal\" / Enum(Byte, giraffe=1),\n )\n assert isinstance(d.animal, Renamed)\n assert isinstance(d.animal.subcon, Enum)\n assert d.animal.giraffe == \"giraffe\"\n\ndef test_exposing_members_context():\n d = Struct(\n \"count\" / Byte,\n \"data\" / Bytes(lambda this: this.count - this._subcons.count.sizeof()),\n Check(lambda this: this._subcons.count.sizeof() == 1),\n )\n common(d, b\"\\x05four\", Container(count=5, data=b\"four\"))\n\n d = Sequence(\n \"count\" / Byte,\n \"data\" / Bytes(lambda this: this.count - this._subcons.count.sizeof()),\n Check(lambda this: this._subcons.count.sizeof() == 1),\n )\n common(d, b\"\\x05four\", [5,b\"four\",None])\n\n d = FocusedSeq(\"count\",\n \"count\" / Byte,\n \"data\" / Padding(lambda this: this.count - this._subcons.count.sizeof()),\n Check(lambda this: this._subcons.count.sizeof() == 1),\n )\n common(d, b'\\x04\\x00\\x00\\x00', 4, SizeofError)\n\n d = Union(None,\n \"chars\" / Byte[4],\n \"data\" / Bytes(lambda this: this._subcons.chars.sizeof()),\n Check(lambda this: this._subcons.chars.sizeof() == 4),\n )\n assert d.parse(b\"\\x01\\x02\\x03\\x04\") == dict(chars=[1,2,3,4],data=b\"\\x01\\x02\\x03\\x04\")\n\ndef test_isparsingbuilding():\n d = Struct(\n Check(this._parsing & this._._parsing),\n Check(~this._building & ~this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.parse(b'')\n d = Struct(\n Check(~this._parsing & ~this._._parsing),\n Check(this._building & this._._building),\n 
Check(~this._sizing & ~this._._sizing),\n )\n d.build(None)\n d = Struct(\n Check(~this._parsing & ~this._._parsing),\n Check(~this._building & ~this._._building),\n Check(this._sizing & this._._sizing),\n )\n d.sizeof()\n # ---------------------------------\n d = Sequence(\n Check(this._parsing & this._._parsing),\n Check(~this._building & ~this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.parse(b'')\n d = Sequence(\n Check(~this._parsing & ~this._._parsing),\n Check(this._building & this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.build(None)\n d = Sequence(\n Check(~this._parsing & ~this._._parsing),\n Check(~this._building & ~this._._building),\n Check(this._sizing & this._._sizing),\n )\n d.sizeof()\n # ---------------------------------\n d = FocusedSeq(\"none\",\n \"none\" / Pass,\n Check(this._parsing & this._._parsing),\n Check(~this._building & ~this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.parse(b'')\n d = FocusedSeq(\"none\",\n \"none\" / Pass,\n Check(~this._parsing & ~this._._parsing),\n Check(this._building & this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.build(None)\n d = FocusedSeq(\"none\",\n \"none\" / Pass,\n Check(~this._parsing & ~this._._parsing),\n Check(~this._building & ~this._._building),\n Check(this._sizing & this._._sizing),\n )\n d.sizeof()\n # ---------------------------------\n d = Union(None,\n \"none\" / Pass,\n Check(this._parsing & this._._parsing),\n Check(~this._building & ~this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.parse(b'')\n d = Union(None,\n \"none\" / Pass,\n Check(~this._parsing & ~this._._parsing),\n Check(this._building & this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.build(dict(none=None))\n d = Union(None,\n \"none\" / Pass,\n Check(~this._parsing & ~this._._parsing),\n Check(~this._building & ~this._._building),\n Check(this._sizing & this._._sizing),\n )\n # doesnt check context because _sizeof just raises the error\n assert raises(d.sizeof) == SizeofError\n # ---------------------------------\n d = LazyStruct(\n Check(this._parsing & this._._parsing),\n Check(~this._building & ~this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.parse(b'')\n d = LazyStruct(\n Check(~this._parsing & ~this._._parsing),\n Check(this._building & this._._building),\n Check(~this._sizing & ~this._._sizing),\n )\n d.build({})\n d = LazyStruct(\n Check(~this._parsing & ~this._._parsing),\n Check(~this._building & ~this._._building),\n Check(this._sizing & this._._sizing),\n )\n d.sizeof()\n\ndef test_struct_stream():\n d = Struct(\n 'fixed' / FixedSized(10, Struct(\n 'data' / GreedyBytes,\n # check a substream\n Check(lambda this: stream_size(this._io) == 10),\n Check(lambda this: stream_iseof(this._io)),\n # checks parent original stream\n Check(lambda this: stream_size(this._._io) == 20),\n Check(lambda this: not stream_iseof(this._._io)),\n )),\n # checks mid-parsing\n Check(lambda this: stream_tell(this._io) == 10),\n Check(lambda this: stream_size(this._io) == 20),\n Check(lambda this: not stream_iseof(this._io)),\n 'rest' / GreedyBytes,\n # checks after parsed to EOF\n Check(lambda this: stream_tell(this._io) == 20),\n Check(lambda this: stream_size(this._io) == 20),\n Check(lambda this: stream_iseof(this._io)),\n Check(lambda this: stream_seek(this._io, 0, 2) == 20),\n # checks nested struct stream\n Check(lambda this: stream_tell(this.fixed._io) == 10),\n Check(lambda this: stream_size(this.fixed._io) == 
10),\n )\n d.parse(bytes(20))\n\n d = Struct()\n d.parse(bytes(20))\n d.parse_file(\"/dev/zero\")\n\ndef test_struct_root_topmost():\n d = Struct(\n 'x' / Computed(1),\n 'inner' / Struct(\n 'inner2' / Struct(\n 'x' / Computed(this._root.x),\n 'z' / Computed(this._params.z),\n 'zz' / Computed(this._root._.z),\n ),\n ),\n Probe(),\n )\n # setGlobalPrintPrivateEntries(True)\n # d.parse(b'', z=2)\n assert d.parse(b\"\", z=2) == Container(x=1, inner=Container(inner2=Container(x=1,z=2,zz=2)))\n\ndef test_parsedhook_repeatersdiscard():\n outputs = []\n def printobj(obj, ctx):\n outputs.append(obj)\n d = GreedyRange(Byte * printobj, discard=True)\n assert d.parse(b\"\\x01\\x02\\x03\") == []\n assert outputs == [1,2,3]\n\n outputs = []\n def printobj(obj, ctx):\n outputs.append(obj)\n d = Array(3, Byte * printobj, discard=True)\n assert d.parse(b\"\\x01\\x02\\x03\") == []\n assert outputs == [1,2,3]\n\n outputs = []\n def printobj(obj, ctx):\n outputs.append(obj)\n d = RepeatUntil(lambda obj,lst,ctx: ctx._index == 2, Byte * printobj, discard=True)\n assert d.parse(b\"\\x01\\x02\\x03\") == []\n assert outputs == [1,2,3]\n\n@xfail(not supportsksyexport, reason=\"KSY exporter requires yaml and unicode strings\")\ndef test_exportksy():\n d = Struct(\n \"nothing\" / Pass * \"field docstring\",\n\n \"data1\" / Bytes(10),\n \"data2\" / GreedyBytes,\n\n \"bitstruct\" / BitStruct(\n \"flag\" / Flag,\n \"padding\" / Padding(7),\n \"int32\" / Int32ub,\n \"int32le\" / BytesInteger(4),\n \"int4a\" / Nibble,\n \"int4b\" / BitsInteger(4),\n ),\n\n \"int32\" / Int32ub,\n \"float32\" / Float32b,\n \"int32le\" / BytesInteger(4, swapped=True),\n \"varint\" / VarInt,\n\n \"string1\" / PaddedString(10, \"utf8\"),\n \"string2\" / PascalString(Byte, \"utf8\"),\n \"string3\" / CString(\"utf8\"),\n \"string4\" / GreedyString(\"utf8\"),\n\n \"flag\" / Flag,\n \"enum\" / Enum(Byte, one=1, two=2),\n \"flagsenum\" / FlagsEnum(Byte, one=1, two=2),\n\n \"struct1\" / Struct(Byte, \"named\"/Byte),\n \"sequence1\" / Sequence(Byte, \"named\"/Byte),\n\n \"array2d\" / Array(5, Array(5, Byte)),\n \"greedyrange\" / GreedyRange(Byte),\n \"repeatuntil\" / RepeatUntil(obj_ == 0, Byte),\n\n \"const1\" / Const(b\"ABCD\"),\n \"const2\" / Const(1, Int32ub),\n # Computed\n # Index\n \"rebuild\" / Rebuild(Byte, 0),\n \"default\" / Default(Byte, 0),\n \"namedtuple1\" / NamedTuple(\"coord\", \"x y z\", \"x\"/Byte + \"y\"/Byte + \"z\"/Byte),\n \"namedtuple2\" / NamedTuple(\"coord\", \"x y z\", Byte >> Byte >> Byte),\n \"namedtuple3\" / NamedTuple(\"coord\", \"x y z\", Byte[3]),\n \"namedtuple4\" / NamedTuple(\"coord\", \"x y z\", GreedyRange(Byte)),\n \"timestamp1\" / Timestamp(Int32ub, 1, 1970),\n \"timestamp2\" / Timestamp(Int32ub, \"msdos\", \"msdos\"),\n \"hex\" / Hex(Int32ub),\n \"hexdump\" / HexDump(Int32ub),\n\n # Union\n \"if1\" / If(this.num == 0, Byte),\n \"ifthenelse1\" / IfThenElse(this.num == 0, Byte, Byte),\n # Switch\n\n \"padding\" / Padding(5),\n \"padded\" / Padded(5, Byte),\n\n \"pointer1\" / Pointer(0x1000, Int32ub),\n \"pointer2\" / Pointer(this.pointer1, Int32ub),\n \"pass1\" / Pass,\n # Terminated\n\n \"prefixed\" / Prefixed(Byte, GreedyBytes),\n \"prefixedarray\" / PrefixedArray(Byte, Byte),\n # Compressed\n ) * \\\n \"struct docstring\"\n print(d.export_ksy(filename=\"example_ksy.ksy\"))\n\n@xfail(reason=\"both sizeof fail because length is 1 level up than when parsing\")\ndef test_from_issue_692():\n # https://stackoverflow.com/questions/44747202/pythons-construct-sizeof-for-construct-depending-on-its-parent\n\n 
AttributeHandleValuePair = Struct(\n \"handle\" / Int16ul,\n \"value\" / GreedyBytes,\n )\n AttReadByTypeResponse = Struct(\n \"length\" / Int8ul, # The size in bytes of each handle/value pair\n \"datalist\" / Array(2, FixedSized(this.length, AttributeHandleValuePair)),\n )\n assert AttReadByTypeResponse.parse(b\"\\x04\\x01\\x02\\x03\\x04\\x01\\x02\\x03\\x04\") == Container(length=4,datalist=[dict(handle=0x0201,value=b'\\x03\\x04'),dict(handle=0x0201,value=b'\\x03\\x04')])\n assert AttReadByTypeResponse.sizeof(length=4) == 1+2*4\n\n AttributeHandleValuePair = Struct(\n \"handle\" / Int16ul,\n \"value\" / Bytes(this._.length - 2),\n )\n AttReadByTypeResponse = Struct(\n \"length\" / Int8ul, # The size in bytes of each handle/value pair\n \"datalist\" / AttributeHandleValuePair[2],\n )\n assert AttReadByTypeResponse.parse(b\"\\x04\\x01\\x02\\x03\\x04\\x01\\x02\\x03\\x04\") == Container(length=4,datalist=[dict(handle=0x0201,value=b'\\x03\\x04'),dict(handle=0x0201,value=b'\\x03\\x04')])\n assert AttReadByTypeResponse.sizeof(length=4) == 1+2*(2+4-2)\n\n@xfail(reason=\"GreedyRange seeking inside Restreamed\")\ndef test_greedyrange_issue_697():\n d = BitStruct(\n \"rest\" / Bytewise(GreedyRange(Byte)),\n )\n d.parse(bytes(5))\n\ndef test_greedybytes_issue_697():\n d = BitStruct(\n \"rest\" / Bytewise(GreedyBytes),\n )\n d.parse(bytes(5))\n\ndef test_hex_issue_709():\n # Make sure, the fix doesn't destroy already working code\n d = Hex(Bytes(1))\n obj = d.parse(b\"\\xff\")\n assert \"unhexlify('ff')\" in str(obj)\n\n d = Struct(\"x\" / Hex(Byte))\n obj = d.parse(b\"\\xff\")\n assert \"x = 0xFF\" in str(obj)\n\n d = HexDump(Bytes(1))\n obj = d.parse(b\"\\xff\")\n assert \"hexundump\" in str(obj)\n\n # The following checks only succeed after fixing the issue\n d = Struct(\"x\" / Hex(Bytes(1)))\n obj = d.parse(b\"\\xff\")\n assert \"x = unhexlify('ff')\" in str(obj)\n\n d = Struct(\"x\" / HexDump(Bytes(1)))\n obj = d.parse(b\"\\xff\")\n assert \"x = hexundump\" in str(obj)\n\n d = Struct(\"x\" / Struct(\"y\" / Hex(Bytes(1))))\n obj = d.parse(b\"\\xff\")\n assert \"y = unhexlify('ff')\" in str(obj)\n",
"from construct.lib.py3compat import *\nimport re\n\n\nglobalPrintFullStrings = False\nglobalPrintFalseFlags = False\nglobalPrintPrivateEntries = False\n\n\ndef setGlobalPrintFullStrings(enabled=False):\n r\"\"\"\n When enabled, Container __str__ produces full content of bytes and unicode strings, otherwise and by default, it produces truncated output (16 bytes and 32 characters).\n\n :param enabled: bool\n \"\"\"\n global globalPrintFullStrings\n globalPrintFullStrings = enabled\n\n\ndef setGlobalPrintFalseFlags(enabled=False):\n r\"\"\"\n When enabled, Container __str__ that was produced by FlagsEnum parsing prints all values, otherwise and by default, it prints only the values that are True.\n\n :param enabled: bool\n \"\"\"\n global globalPrintFalseFlags\n globalPrintFalseFlags = enabled\n\n\ndef setGlobalPrintPrivateEntries(enabled=False):\n r\"\"\"\n When enabled, Container __str__ shows keys like _ _index _etc, otherwise and by default, it hides those keys. __repr__ never shows private entries.\n\n :param enabled: bool\n \"\"\"\n global globalPrintPrivateEntries\n globalPrintPrivateEntries = enabled\n\n\ndef recursion_lock(retval=\"<recursion detected>\", lock_name=\"__recursion_lock__\"):\n \"\"\"Used internally.\"\"\"\n def decorator(func):\n def wrapper(self, *args, **kw):\n if getattr(self, lock_name, False):\n return retval\n setattr(self, lock_name, True)\n try:\n return func(self, *args, **kw)\n finally:\n delattr(self, lock_name)\n\n wrapper.__name__ = func.__name__\n return wrapper\n\n return decorator\n\n\nclass Container(dict):\n r\"\"\"\n Generic ordered dictionary that allows both key and attribute access, and preserves key order by insertion. Adding keys is preferred using \\*\\*entrieskw (requires Python 3.6). Equality does NOT check item order. Also provides regex searching.\n\n Example::\n\n # empty dict\n >>> Container()\n # list of pairs, not recommended\n >>> Container([ (\"name\",\"anonymous\"), (\"age\",21) ])\n # This syntax requires Python 3.6\n >>> Container(name=\"anonymous\", age=21)\n # This syntax is for internal use only\n >>> Container(name=\"anonymous\")(age=21)\n # copies another dict\n >>> Container(dict2)\n >>> Container(container2)\n\n ::\n\n >>> print(repr(obj))\n Container(text='utf8 decoded string...')(value=123)\n >>> print(obj)\n Container\n text = u'utf8 decoded string...' 
(total 22)\n value = 123\n \"\"\"\n __slots__ = [\"__keys_order__\", \"__recursion_lock__\"]\n\n def __getattr__(self, name):\n try:\n if name in self.__slots__:\n try:\n return object.__getattribute__(self, name)\n except AttributeError as e:\n if name == \"__keys_order__\":\n r = []\n object.__setattr__(self, \"__keys_order__\", r)\n return r\n else:\n raise e\n else:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n try:\n if name in self.__slots__:\n return object.__setattr__(self, name, value)\n else:\n self[name] = value\n except KeyError:\n raise AttributeError(name)\n\n def __delattr__(self, name):\n try:\n if name in self.__slots__:\n return object.__delattr__(self, name)\n else:\n del self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setitem__(self, key, value):\n if key not in self:\n self.__keys_order__.append(key)\n dict.__setitem__(self, key, value)\n\n def __delitem__(self, key):\n \"\"\"Removes an item from the Container in linear time O(n).\"\"\"\n if key in self:\n self.__keys_order__.remove(key)\n dict.__delitem__(self, key)\n\n def __init__(self, *args, **entrieskw):\n self.__keys_order__ = []\n for arg in args:\n if isinstance(arg, dict):\n for k,v in arg.items():\n self[k] = v\n else:\n for k,v in arg:\n self[k] = v\n for k,v in entrieskw.items():\n self[k] = v\n\n def __call__(self, **entrieskw):\n \"\"\"Chains adding new entries to the same container. See ctor.\"\"\"\n for k,v in entrieskw.items():\n self[k] = v\n return self\n\n def keys(self):\n return iter(self.__keys_order__)\n\n def values(self):\n return (self[k] for k in self.__keys_order__)\n\n def items(self):\n return ((k, self[k]) for k in self.__keys_order__)\n\n __iter__ = keys\n\n def clear(self):\n \"\"\"Removes all items.\"\"\"\n dict.clear(self)\n self.__keys_order__ = []\n\n def pop(self, key):\n \"\"\"Removes and returns the value for a given key, raises KeyError if not found.\"\"\"\n val = dict.pop(self, key)\n self.__keys_order__.remove(key)\n return val\n\n def popitem(self):\n \"\"\"Removes and returns the last key and value from order.\"\"\"\n k = self.__keys_order__.pop()\n v = dict.pop(self, k)\n return k, v\n\n def update(self, seqordict):\n \"\"\"Appends items from another dict/Container or list-of-tuples.\"\"\"\n if isinstance(seqordict, dict):\n seqordict = seqordict.items()\n for k,v in seqordict:\n self[k] = v\n\n def __getstate__(self):\n return self.__keys_order__\n\n def __setstate__(self, state):\n self.__keys_order__ = state\n\n def copy(self):\n return Container(self)\n\n __update__ = update\n\n __copy__ = copy\n\n def __dir__(self):\n \"\"\"For auto completion of attributes based on container values.\"\"\"\n return list(self.keys()) + list(self.__class__.__dict__) + dir(super(Container, self))\n\n def __eq__(self, other):\n if self is other:\n return True\n if not isinstance(other, dict):\n return False\n def isequal(v1, v2):\n if v1.__class__.__name__ == \"ndarray\" or v2.__class__.__name__ == \"ndarray\":\n import numpy\n return numpy.array_equal(v1, v2)\n return v1 == v2\n for k,v in self.items():\n if isinstance(k, unicodestringtype) and k.startswith(u\"_\"):\n continue\n if isinstance(k, bytestringtype) and k.startswith(b\"_\"):\n continue\n if k not in other or not isequal(v, other[k]):\n return False\n return True\n\n @recursion_lock()\n def __repr__(self):\n parts = []\n for k,v in self.items():\n if isinstance(k, str) and k.startswith(\"_\"):\n continue\n if isinstance(v, stringtypes):\n 
parts.append(str(k) + \"=\" + reprstring(v))\n else:\n parts.append(str(k) + \"=\" + repr(v))\n return \"Container(%s)\" % \", \".join(parts)\n\n @recursion_lock()\n def __str__(self):\n indentation = \"\\n \"\n text = [\"Container: \"]\n isflags = getattr(self, \"_flagsenum\", False)\n for k,v in self.items():\n if isinstance(k, str) and k.startswith(\"_\") and not globalPrintPrivateEntries:\n continue\n if isflags and not v and not globalPrintFalseFlags:\n continue\n text.extend([indentation, str(k), \" = \"])\n if v.__class__.__name__ == \"EnumInteger\":\n text.append(\"(enum) (unknown) %s\" % (v, ))\n elif v.__class__.__name__ == \"EnumIntegerString\":\n text.append(\"(enum) %s %s\" % (v, v.intvalue, ))\n elif v.__class__.__name__ in [\"HexDisplayedBytes\", \"HexDumpDisplayedBytes\"]:\n text.append(indentation.join(str(v).split(\"\\n\")))\n elif isinstance(v, bytestringtype):\n printingcap = 16\n if len(v) <= printingcap or globalPrintFullStrings:\n text.append(\"%s (total %d)\" % (reprstring(v), len(v)))\n else:\n text.append(\"%s... (truncated, total %d)\" % (reprstring(v[:printingcap]), len(v)))\n elif isinstance(v, unicodestringtype):\n printingcap = 32\n if len(v) <= printingcap or globalPrintFullStrings:\n text.append(\"%s (total %d)\" % (reprstring(v), len(v)))\n else:\n text.append(\"%s... (truncated, total %d)\" % (reprstring(v[:printingcap]), len(v)))\n else:\n text.append(indentation.join(str(v).split(\"\\n\")))\n return \"\".join(text)\n\n def _search(self, compiled_pattern, search_all):\n items = []\n for key in self.keys():\n try:\n if isinstance(self[key], (Container,ListContainer)):\n ret = self[key]._search(compiled_pattern, search_all)\n if ret is not None:\n if search_all:\n items.extend(ret)\n else:\n return ret\n elif compiled_pattern.match(key):\n if search_all:\n items.append(self[key])\n else:\n return self[key]\n except:\n pass\n if search_all:\n return items\n else:\n return None\n\n def search(self, pattern):\n \"\"\"\n Searches a container (non-recursively) using regex.\n \"\"\"\n compiled_pattern = re.compile(pattern)\n return self._search(compiled_pattern, False)\n\n def search_all(self, pattern):\n \"\"\"\n Searches a container (recursively) using regex.\n \"\"\"\n compiled_pattern = re.compile(pattern)\n return self._search(compiled_pattern, True)\n\n\nclass ListContainer(list):\n r\"\"\"\n Generic container like list. Provides pretty-printing. 
Also provides regex searching.\n\n Example::\n\n >>> ListContainer()\n >>> ListContainer([1, 2, 3])\n\n ::\n\n >>> print(repr(obj))\n [1, 2, 3]\n >>> print(obj)\n ListContainer\n 1\n 2\n 3\n \"\"\"\n\n @recursion_lock()\n def __repr__(self):\n return \"ListContainer(%s)\" % (list.__repr__(self), )\n\n @recursion_lock()\n def __str__(self):\n indentation = \"\\n \"\n text = [\"ListContainer: \"]\n for k in self:\n text.append(indentation)\n lines = str(k).split(\"\\n\")\n text.append(indentation.join(lines))\n return \"\".join(text)\n\n def _search(self, compiled_pattern, search_all):\n items = []\n for item in self:\n try:\n ret = item._search(compiled_pattern, search_all)\n except:\n continue\n if ret is not None:\n if search_all:\n items.extend(ret)\n else:\n return ret\n if search_all:\n return items\n else:\n return None\n\n def search(self, pattern):\n \"\"\"\n Searches a container (non-recursively) using regex.\n \"\"\"\n compiled_pattern = re.compile(pattern)\n return self._search(compiled_pattern, False)\n\n def search_all(self, pattern):\n \"\"\"\n Searches a container (recursively) using regex.\n \"\"\"\n compiled_pattern = re.compile(pattern)\n return self._search(compiled_pattern, True)\n"
] |
[
[
"numpy.array"
],
[
"numpy.array_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Simon0xzx/metaworld
|
[
"2d441eed70b6f5cb1f35883b0517c4bd2812268c",
"2d441eed70b6f5cb1f35883b0517c4bd2812268c"
] |
[
"metaworld/__init__.py",
"metaworld/policies/sawyer_sweep_into_v1_policy.py"
] |
[
"\"\"\"Proposal for a simple, understandable MetaWorld API.\"\"\"\nimport abc\nimport pickle\nfrom collections import OrderedDict\nfrom typing import List, NamedTuple, Type\n\nimport metaworld.envs.mujoco.env_dict as _env_dict\nimport numpy as np\n\n\nEnvName = str\n\n\nclass Task(NamedTuple):\n \"\"\"All data necessary to describe a single MDP.\n\n Should be passed into a MetaWorldEnv's set_task method.\n \"\"\"\n\n env_name: EnvName\n data: bytes # Contains env parameters like random_init and *a* goal\n\n\nclass MetaWorldEnv:\n \"\"\"Environment that requires a task before use.\n\n Takes no arguments to its constructor, and raises an exception if used\n before `set_task` is called.\n \"\"\"\n\n def set_task(self, task: Task) -> None:\n \"\"\"Set the task.\n\n Raises:\n ValueError: If task.env_name is different from the current task.\n\n \"\"\"\n\n\nclass Benchmark(abc.ABC):\n \"\"\"A Benchmark.\n\n When used to evaluate an algorithm, only a single instance should be used.\n \"\"\"\n\n @abc.abstractmethod\n def __init__(self):\n pass\n\n @property\n def train_classes(self) -> 'OrderedDict[EnvName, Type]':\n \"\"\"Get all of the environment classes used for training.\"\"\"\n return self._train_classes\n\n @property\n def test_classes(self) -> 'OrderedDict[EnvName, Type]':\n \"\"\"Get all of the environment classes used for testing.\"\"\"\n return self._test_classes\n\n @property\n def train_tasks(self) -> List[Task]:\n \"\"\"Get all of the training tasks for this benchmark.\"\"\"\n return self._train_tasks\n\n @property\n def test_tasks(self) -> List[Task]:\n \"\"\"Get all of the test tasks for this benchmark.\"\"\"\n return self._test_tasks\n\n\n_ML_OVERRIDE = dict(partially_observable=True)\n_MT_OVERRIDE = dict(partially_observable=False)\n\n_N_GOALS = 50\n\n\ndef _encode_task(env_name, data):\n return Task(env_name=env_name, data=pickle.dumps(data))\n\n\ndef _make_tasks(classes, args_kwargs, kwargs_override):\n tasks = []\n for (env_name, args) in args_kwargs.items():\n assert len(args['args']) == 0\n env_cls = classes[env_name]\n env = env_cls()\n env._freeze_rand_vec = False\n env._set_task_called = True\n rand_vecs = []\n kwargs = args['kwargs'].copy()\n del kwargs['task_id']\n env._set_task_inner(**kwargs)\n for _ in range(_N_GOALS):\n env.reset()\n rand_vecs.append(env._last_rand_vec)\n unique_task_rand_vecs = np.unique(np.array(rand_vecs), axis=0)\n assert unique_task_rand_vecs.shape[0] == _N_GOALS\n\n env.close()\n for rand_vec in rand_vecs:\n kwargs = args['kwargs'].copy()\n del kwargs['task_id']\n kwargs.update(dict(rand_vec=rand_vec, env_cls=env_cls))\n kwargs.update(kwargs_override)\n tasks.append(_encode_task(env_name, kwargs))\n return tasks\n\n\ndef _ml1_env_names():\n key_train = _env_dict.HARD_MODE_ARGS_KWARGS['train']\n key_test = _env_dict.HARD_MODE_ARGS_KWARGS['test']\n tasks = sum([list(key_train)], list(key_test))\n assert len(tasks) == 50\n return tasks\n\n\nclass ML1(Benchmark):\n\n ENV_NAMES = _ml1_env_names()\n\n def __init__(self, env_name):\n super().__init__()\n try:\n cls = _env_dict.HARD_MODE_CLS_DICT['train'][env_name]\n args_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['train'][env_name]\n except KeyError:\n cls = _env_dict.HARD_MODE_CLS_DICT['test'][env_name]\n args_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['test'][env_name]\n self._train_classes = OrderedDict([(env_name, cls)])\n self._test_classes = self._train_classes\n self._train_ = OrderedDict([(env_name, cls)])\n self._train_tasks = _make_tasks(self._train_classes,\n {env_name: args_kwargs},\n 
_ML_OVERRIDE)\n self._test_tasks = _make_tasks(self._test_classes,\n {env_name: args_kwargs},\n _ML_OVERRIDE)\n \nclass MT1(Benchmark):\n\n ENV_NAMES = _ml1_env_names()\n\n def __init__(self, env_name):\n super().__init__()\n try:\n cls = _env_dict.HARD_MODE_CLS_DICT['train'][env_name]\n args_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['train'][env_name]\n except KeyError:\n cls = _env_dict.HARD_MODE_CLS_DICT['test'][env_name]\n args_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['test'][env_name]\n self._train_classes = OrderedDict([(env_name, cls)])\n self._test_classes = OrderedDict()\n self._train_ = OrderedDict([(env_name, cls)])\n self._train_tasks = _make_tasks(self._train_classes,\n {env_name: args_kwargs},\n _MT_OVERRIDE)\n self._test_tasks = []\n\n\nclass ML10(Benchmark):\n\n def __init__(self):\n super().__init__()\n self._train_classes = _env_dict.MEDIUM_MODE_CLS_DICT['train']\n self._test_classes = _env_dict.MEDIUM_MODE_CLS_DICT['test']\n train_kwargs = _env_dict.medium_mode_train_args_kwargs\n self._train_tasks = _make_tasks(self._train_classes,\n train_kwargs,\n _ML_OVERRIDE)\n test_kwargs = _env_dict.medium_mode_test_args_kwargs\n self._test_tasks = _make_tasks(self._test_classes,\n test_kwargs,\n _ML_OVERRIDE)\n\n\nclass ML45(Benchmark):\n\n def __init__(self):\n super().__init__()\n self._train_classes = _env_dict.HARD_MODE_CLS_DICT['train']\n self._test_classes = _env_dict.HARD_MODE_CLS_DICT['test']\n train_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['train']\n self._train_tasks = _make_tasks(self._train_classes,\n train_kwargs,\n _ML_OVERRIDE)\n self._test_tasks = _make_tasks(self._test_classes,\n _env_dict.HARD_MODE_ARGS_KWARGS['test'],\n _ML_OVERRIDE)\n\n\nclass MT10(Benchmark):\n\n def __init__(self):\n super().__init__()\n self._train_classes = _env_dict.EASY_MODE_CLS_DICT\n self._test_classes = OrderedDict()\n train_kwargs = _env_dict.EASY_MODE_ARGS_KWARGS\n self._train_tasks = _make_tasks(self._train_classes,\n train_kwargs,\n _MT_OVERRIDE)\n self._test_tasks = []\n\n\nclass MT50(Benchmark):\n\n def __init__(self):\n super().__init__()\n self._train_classes = _env_dict.HARD_MODE_CLS_DICT['train'].copy()\n # We're going to modify it, so make a copy\n train_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['train'].copy()\n test_kwargs = _env_dict.HARD_MODE_ARGS_KWARGS['test']\n for (env_name, cls) in _env_dict.HARD_MODE_CLS_DICT['test'].items():\n assert env_name not in self._train_classes\n assert env_name not in train_kwargs\n self._train_classes[env_name] = cls\n train_kwargs[env_name] = test_kwargs[env_name]\n self._test_classes = OrderedDict()\n self._train_tasks = _make_tasks(self._train_classes,\n train_kwargs,\n _MT_OVERRIDE)\n self._test_tasks = []\n",
"import numpy as np\n\nfrom metaworld.policies.action import Action\nfrom metaworld.policies.policy import Policy, assert_fully_parsed, move\n\n\nclass SawyerSweepIntoV1Policy(Policy):\n\n @staticmethod\n @assert_fully_parsed\n def _parse_obs(obs):\n return {\n 'hand_pos': obs[:3],\n 'cube_pos': obs[3:6],\n 'unused_info': obs[6:],\n }\n\n def get_action(self, obs):\n o_d = self._parse_obs(obs)\n\n action = Action({\n 'delta_pos': np.arange(3),\n 'grab_effort': 3\n })\n\n action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.)\n action['grab_effort'] = self._grab_effort(o_d)\n\n return action.array\n\n @staticmethod\n def _desired_pos(o_d):\n pos_curr = o_d['hand_pos']\n pos_cube = o_d['cube_pos'] + np.array([.0, .0, .015])\n\n if np.linalg.norm(pos_curr[:2] - pos_cube[:2]) > 0.04:\n return pos_cube + np.array([0., 0., 0.3])\n elif abs(pos_curr[2] - pos_cube[2]) > 0.02:\n return pos_cube\n else:\n return np.array([.0, .8, .015])\n\n @staticmethod\n def _grab_effort(o_d):\n pos_curr = o_d['hand_pos']\n pos_cube = o_d['cube_pos']\n\n if np.linalg.norm(pos_curr[:2] - pos_cube[:2]) > 0.04 \\\n or abs(pos_curr[2] - pos_cube[2]) > 0.15:\n return -1.\n else:\n return .7\n"
] |
[
[
"numpy.array"
],
[
"numpy.arange",
"numpy.array",
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
robinupham/angular_binning
|
[
"da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87"
] |
[
"angular_binning/mask.py"
] |
[
"\"\"\"\nFunctions to do with masks and mixing matrices.\n\"\"\"\n\nimport copy\nimport time\nimport warnings\n\nimport healpy as hp\nimport matplotlib.cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymaster as nmt\n\n\ndef generate_mask(wmap_mask_path, nside, target_fsky, mask_save_path):\n \"\"\"\n Generate a Stage-IV-like mask by manipulating the WMAP temperature mask and then adding random holes until the\n target sky fraction is reached.\n\n Args:\n wmap_mask_path (str): Path to the WMAP temperature mask fits file.\n nside (int): HEALPix resolution to use.\n target_fsky (float): Sky fraction to achieve. Holes will be added at random to reach this value.\n msk_save_path (str): Path to save the final mask as a fits file.\n \"\"\"\n\n print('Loading')\n input_mask = hp.fitsfunc.read_map(wmap_mask_path, dtype=float, verbose=False)\n assert input_mask.shape == (hp.pixelfunc.nside2npix(nside), )\n assert np.amin(input_mask) == 0\n assert np.amax(input_mask) == 1\n print('Input mask fsky =', np.mean(input_mask))\n\n print('Rotating')\n rotated_mask = hp.rotator.Rotator(coord=['E', 'G']).rotate_map_alms(input_mask)\n print('Rotated mask fsky =', np.mean(rotated_mask))\n\n # Clip back to 0-1\n rotated_mask = np.clip(rotated_mask, 0, 1)\n assert np.amin(rotated_mask) == 0\n assert np.amax(rotated_mask) == 1\n\n print('Multiplying')\n dual_mask = input_mask * rotated_mask\n assert np.amin(dual_mask) == 0\n assert np.amax(dual_mask) == 1\n print('Dual mask fsky =', np.mean(dual_mask))\n\n # Iteratively take out holes until the desired fsky is reached\n mask = dual_mask\n rng = np.random.default_rng()\n npix = hp.pixelfunc.nside2npix(nside)\n while np.mean(mask) > target_fsky:\n\n # Select non-zero pixel as the centre of the hole\n have_hole_centre = False\n while not have_hole_centre:\n hole_centre = rng.integers(npix)\n if mask[hole_centre] > 0:\n have_hole_centre = True\n\n # Mask the centre\n mask[hole_centre] = 0\n\n # Mask the immediate neighbours, then their neighbours with a 50% chance, etc.\n neighbours = hole_centre\n hole_size = 0\n while hole_size < 6: # max size\n hole_size += 1\n neighbours = hp.pixelfunc.get_all_neighbours(nside, neighbours)\n mask[neighbours] = 0\n if rng.integers(2) > 0:\n break\n print('fsky = ', np.mean(mask), end='\\r')\n print()\n\n # Final checks\n assert np.all(np.isfinite(mask))\n assert np.amin(mask) == 0\n assert np.amax(mask) == 1\n\n # Plot\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # ignore mollview warnings\n hp.visufunc.mollview(mask)\n plt.show()\n\n # Save to disk\n hp.fitsfunc.write_map(mask_save_path, mask, dtype=float)\n print('Saved ' + mask_save_path)\n\n\ndef plot_mask(mask_path, save_path=None):\n \"\"\"\n Plot Mollweide projection of a mask, with colour bar.\n\n Args:\n mask_path (str): Path to mask FITS file.\n save_path (str, optional): Path to save plot to (default None). 
If None, plot is displayed.\n \"\"\"\n\n # Load mask\n mask = hp.fitsfunc.read_map(mask_path, dtype=float, verbose=False)\n\n # Calculate Mollweide projection\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # mollview's plotting code creates warnings\n mask_proj = hp.visufunc.mollview(mask, return_projected_map=True)\n plt.close()\n\n # Plot\n plt.rcParams.update({'font.size': 7})\n cmap = copy.copy(matplotlib.cm.get_cmap('cividis'))\n cmap.set_bad(color='white')\n plt.imshow(mask_proj, origin='lower', cmap=cmap, interpolation='none')\n plt.gca().axis('off')\n plt.colorbar(shrink=0.4, aspect=10)\n\n # Save or show\n if save_path is not None:\n plt.savefig(save_path, bbox_inches='tight')\n print('Saved ' + save_path)\n else:\n plt.show()\n\n\ndef get_3x2pt_mixmats(mask_path, nside, lmin, lmax_mix, lmax_out, save_path):\n \"\"\"\n Calculate all 3x2pt mixing matrices from a mask using NaMaster, and save to disk in a single file.\n\n Args:\n mask_path (str): Path to mask FITS file. If None, full sky is assumed, in which case the mixing matrices should\n be diagonal.\n nside (int): HEALPix resolution to use.\n lmin (int): Minimum l to include in mixing matrices.\n lmax_mix (int): Maximum l to include in input to mixing matrices.\n lmax_out (int): Maximum l to include in output from mixing matrices.\n save_path (str): Path to save output, as a single numpy .npz file containing all mixing matrices.\n \"\"\"\n\n # Load and rescale mask, and calculate fsky\n if mask_path is not None:\n print('Loading and rescaling mask')\n mask = hp.pixelfunc.ud_grade(hp.read_map(mask_path, dtype=float), nside)\n assert np.amin(mask) == 0\n assert np.amax(mask) == 1\n else:\n print('Full sky')\n mask = np.ones(hp.pixelfunc.nside2npix(nside))\n assert np.all(np.isfinite(mask))\n fsky = np.mean(mask)\n print(f'fsky = {fsky:.3f}')\n\n # Create NaMaster binning scheme as individual Cls\n print('Creating binning scheme')\n bins = nmt.NmtBin.from_lmax_linear(lmax_mix, 1)\n\n # Calculate mixing matrices for spin 0-0, 0-2 (equivalent to 2-0), and 2-2\n field_spin0 = nmt.NmtField(mask, None, spin=0, lite=True)\n field_spin2 = nmt.NmtField(mask, None, spin=2, lite=True)\n workspace_spin00 = nmt.NmtWorkspace()\n print(f'Calculating mixing matrix 1 / 3 at {time.strftime(\"%c\")}')\n workspace_spin00.compute_coupling_matrix(field_spin0, field_spin0, bins)\n workspace_spin02 = nmt.NmtWorkspace()\n print(f'Calculating mixing matrix 2 / 3 at {time.strftime(\"%c\")}')\n workspace_spin02.compute_coupling_matrix(field_spin0, field_spin2, bins)\n workspace_spin22 = nmt.NmtWorkspace()\n print(f'Calculating mixing matrix 3 / 3 at {time.strftime(\"%c\")}')\n workspace_spin22.compute_coupling_matrix(field_spin2, field_spin2, bins)\n\n # Extract the relevant mixing matrices\n print('Extracting mixing matrices')\n # For 0-0 there is only a single mixing matrix\n mixmats_spin00 = workspace_spin00.get_coupling_matrix()\n mixmat_nn_to_nn = mixmats_spin00\n # For 0-2 they are arranged NE->NE, NB->NE // NE->NB NB->NB, per l, so select every other row and column\n mixmats_spin02 = workspace_spin02.get_coupling_matrix()\n mixmat_ne_to_ne = mixmats_spin02[::2, ::2]\n # For 2-2 there are 4x4 elements per l, ordered EE, EB, BE, BB. 
We only need EE->EE and BB->EE,\n # so select every 4th row and the 1st and 4th columns from each block\n mixmats_spin22 = workspace_spin22.get_coupling_matrix()\n mixmat_ee_to_ee = mixmats_spin22[::4, ::4]\n mixmat_bb_to_ee = mixmats_spin22[::4, 3::4]\n\n # Check everything has the correct shape\n mixmat_shape = (lmax_mix + 1, lmax_mix + 1)\n assert mixmat_nn_to_nn.shape == mixmat_shape\n assert mixmat_ne_to_ne.shape == mixmat_shape\n assert mixmat_ee_to_ee.shape == mixmat_shape\n assert mixmat_bb_to_ee.shape == mixmat_shape\n\n # Trim to required output range\n mixmat_nn_to_nn = mixmat_nn_to_nn[lmin:(lmax_out + 1), lmin:]\n mixmat_ne_to_ne = mixmat_ne_to_ne[lmin:(lmax_out + 1), lmin:]\n mixmat_ee_to_ee = mixmat_ee_to_ee[lmin:(lmax_out + 1), lmin:]\n mixmat_bb_to_ee = mixmat_bb_to_ee[lmin:(lmax_out + 1), lmin:]\n\n # Do some final checks\n n_ell_out = lmax_out - lmin + 1\n n_ell_in = lmax_mix - lmin + 1\n mixmat_out_shape = (n_ell_out, n_ell_in)\n assert mixmat_nn_to_nn.shape == mixmat_out_shape\n assert mixmat_ne_to_ne.shape == mixmat_out_shape\n assert mixmat_ee_to_ee.shape == mixmat_out_shape\n assert mixmat_bb_to_ee.shape == mixmat_out_shape\n assert np.all(np.isfinite(mixmat_nn_to_nn))\n assert np.all(np.isfinite(mixmat_ne_to_ne))\n assert np.all(np.isfinite(mixmat_ee_to_ee))\n assert np.all(np.isfinite(mixmat_bb_to_ee))\n\n # Save to disk\n header = (f'Mixing matrices. Output from {__file__}.get_3x2pt_mixmats for mask_path = {mask_path}, '\n f'nside = {nside}, lmin = {lmin}, lmax_mix = {lmax_mix}, lmax_out = {lmax_out}, at {time.strftime(\"%c\")}')\n np.savez_compressed(save_path, mixmat_nn_to_nn=mixmat_nn_to_nn, mixmat_ne_to_ne=mixmat_ne_to_ne,\n mixmat_ee_to_ee=mixmat_ee_to_ee, mixmat_bb_to_ee=mixmat_bb_to_ee, header=header)\n print('Saved ' + save_path)\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"numpy.amax",
"numpy.isfinite",
"numpy.clip",
"numpy.amin",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"numpy.savez_compressed",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
"numpy.random.default_rng"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kaka-lin/csv-ptool
|
[
"1e0cc106fd7d58d767427791121bc9ed99fa431f"
] |
[
"app/csv_handle.py"
] |
[
"# -*- coding: utf-8 -*-\nimport csv\nimport numpy as np\n\nclass CSVHandle():\n def __init__(self):\n pass\n \n def read(self, file):\n data = []\n data_title = []\n\n with open(file, 'r') as csv_file:\n rows = csv.reader(csv_file)\n for row in rows:\n coulmn = len(row)\n data.append(row)\n\n for title in range(coulmn):\n data_title.append(str(title)) \n \n data = np.array(data)\n data_title = [data_title]\n\n return data, data_title\n \n def readHioki(self, file):\n data = []\n data_title = []\n\n with open(file, 'r') as csv_file:\n rows = csv.reader(csv_file)\n i = 0\n for row in rows:\n i += 1\n\n if i >= 12:\n data.append(row)\n elif i == 11:\n data_title.append(row)\n \n data = np.array(data)\n data_title = list(data_title)\n\n return data, data_title\n\n def readAgilent(self, file):\n data = []\n data_title = []\n\n with open(file, 'r', encoding='utf-16') as csv_file:\n rows = csv.reader(csv_file)\n i = 0\n for row in rows:\n i += 1\n\n if i >= 23:\n data.append(row)\n elif i == 22:\n data_title.append(row)\n \n \n data = np.array(data)\n data_title = list(data_title)\n\n return data, data_title\n \n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Songner/image_classfication
|
[
"c1f15b2b96544e859e14a92373eb57c6a2644a93",
"c1f15b2b96544e859e14a92373eb57c6a2644a93",
"c1f15b2b96544e859e14a92373eb57c6a2644a93"
] |
[
"ImageProcessing-Python/blog06-resize/blog06-image05.py",
"ImageProcessing-Python/blog22-fft/blog22-image03.py",
"ImageProcessing-Python/blog07-threshold/blog07-image06.py"
] |
[
"#encoding:utf-8\r\nimport cv2 \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n \r\n#读取图片\r\nimg = cv2.imread('scenery.png')\r\nsrc = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n\r\n#图像翻转\r\n#0以X轴为对称轴翻转 >0以Y轴为对称轴翻转 <0X轴Y轴翻转\r\nimg1 = cv2.flip(src, 0)\r\nimg2 = cv2.flip(src, 1)\r\nimg3 = cv2.flip(src, -1)\r\n\r\n#显示图形\r\ntitles = ['Source', 'Image1', 'Image2', 'Image3'] \r\nimages = [src, img1, img2, img3] \r\nfor i in xrange(4): \r\n plt.subplot(2,2,i+1), plt.imshow(images[i], 'gray') \r\n plt.title(titles[i]) \r\n plt.xticks([]),plt.yticks([]) \r\nplt.show() \r\n",
"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\n#读取图像\r\nimg = cv2.imread('lena.png', 0)\r\n\r\n#傅里叶变换\r\ndft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)\r\n\r\n#将频谱低频从左上角移动至中心位置\r\ndft_shift = np.fft.fftshift(dft)\r\n\r\n#频谱图像双通道复数转换为0-255区间\r\nresult = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))\r\n\r\n#显示图像\r\nplt.subplot(121), plt.imshow(img, cmap = 'gray')\r\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(122), plt.imshow(result, cmap = 'gray')\r\nplt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])\r\nplt.show()\r\n",
"#encoding:utf-8\r\nimport cv2 \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\n\r\n#读取图像\r\nimg=cv2.imread('miao.jpg')\r\nlenna_img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\nGrayImage=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) \r\n\r\n#阈值化处理\r\nret,thresh1=cv2.threshold(GrayImage,127,255,cv2.THRESH_BINARY) \r\nret,thresh2=cv2.threshold(GrayImage,127,255,cv2.THRESH_BINARY_INV) \r\nret,thresh3=cv2.threshold(GrayImage,127,255,cv2.THRESH_TRUNC) \r\nret,thresh4=cv2.threshold(GrayImage,127,255,cv2.THRESH_TOZERO) \r\nret,thresh5=cv2.threshold(GrayImage,127,255,cv2.THRESH_TOZERO_INV)\r\n\r\n#显示结果\r\ntitles = ['Gray Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV'] \r\nimages = [GrayImage, thresh1, thresh2, thresh3, thresh4, thresh5] \r\nfor i in xrange(6): \r\n plt.subplot(2,3,i+1),plt.imshow(images[i],'gray') \r\n plt.title(titles[i]) \r\n plt.xticks([]),plt.yticks([]) \r\nplt.show()\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.fft.fftshift",
"matplotlib.pyplot.subplot",
"numpy.float32",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajdillhoff/3dhpe-udd
|
[
"5a77818670060710dff2b1b617a96481e5dc20a0",
"5a77818670060710dff2b1b617a96481e5dc20a0",
"5a77818670060710dff2b1b617a96481e5dc20a0",
"5a77818670060710dff2b1b617a96481e5dc20a0"
] |
[
"model/ResNet.py",
"model/PoseNet.py",
"datasets/LHSynthDataset.py",
"model/JointLayer.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.hub import load_state_dict_from_url\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',\n 'wide_resnet50_2', 'wide_resnet101_2']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n if norm_layer is nn.GroupNorm:\n self.bn1 = norm_layer(32, planes)\n else:\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n if norm_layer is nn.GroupNorm:\n self.bn2 = norm_layer(32, planes)\n else:\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = 
self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_joints=17, num_features=4,\n zero_init_residual=False, groups=1, width_per_group=64,\n replace_stride_with_dilation=None, norm_layer=None):\n super(ResNet, self).__init__()\n self.num_joints = num_joints\n self.num_features = num_features\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n if norm_layer is nn.GroupNorm:\n self.bn1 = norm_layer(32, self.inplanes)\n else:\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_features * num_joints)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n if norm_layer is nn.GroupNorm:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(32, planes * block.expansion),\n )\n else:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, 
planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x.view(-1, self.num_joints, self.num_features)\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef resnet18(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n **kwargs)\n\n\ndef resnet34(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet50(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet101(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnet152(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n **kwargs)\n\n\ndef resnext50_32x4d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef resnext101_32x8d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 
32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\n\ndef wide_resnet50_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef wide_resnet101_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\n",
"# PoseNet.py\n# Alex Dillhoff ([email protected])\n# Model definition for the 2D heatmap prediction network.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom base import BaseModel\nfrom .TFConv2D import TFConv2D\n\n\nclass PosePrior(nn.Module):\n \"\"\"Implements the PosePrior architecture.\n\n The purpose of this network is to estimate the 3D coordinates of the hand\n using 2D heatmaps.\n\n This architecture is defined in:\n Zimmermann, C., & Brox, T. (2017).\n Learning to Estimate 3D Hand Pose from Single RGB Images.\n Retrieved from http://arxiv.org/abs/1705.01389\n \"\"\"\n\n def __init__(self, num_joints=9):\n \"\"\"Defines and initializes the network.\"\"\"\n\n super(PosePrior, self).__init__()\n self.num_joints = num_joints\n self.conv_pose_0_1 = nn.Conv2d(num_joints, 32, 3, padding=1)\n self.conv_pose_0_2 = TFConv2D(32, 32, 3, stride=2)\n self.conv_pose_1_1 = nn.Conv2d(32, 64, 3, padding=1)\n self.conv_pose_1_2 = TFConv2D(64, 64, 3, stride=2)\n self.conv_pose_2_1 = nn.Conv2d(64, 128, 3, padding=1)\n self.conv_pose_2_2 = TFConv2D(128, 128, 3, stride=2)\n\n self.fc_rel0 = nn.Linear(2048, 512)\n self.fc_rel1 = nn.Linear(512, 512)\n self.fc_xyz = nn.Linear(512, num_joints * 3)\n\n def forward(self, x):\n \"\"\"Forward pass through PosePrior.\n\n Args:\n x - (batch x num_joints x 256 x 256): 2D keypoint heatmaps.\n\n Returns:\n (batch x num_joints x 3): xyz coordinates of the hand in 3D space.\n \"\"\"\n\n s = x.shape\n x = F.leaky_relu(self.conv_pose_0_1(x))\n x = F.leaky_relu(self.conv_pose_0_2(x))\n x = F.leaky_relu(self.conv_pose_1_1(x))\n x = F.leaky_relu(self.conv_pose_1_2(x))\n x = F.leaky_relu(self.conv_pose_2_1(x))\n x = F.leaky_relu(self.conv_pose_2_2(x))\n\n # Permute before reshaping since these weights are loaded from a TF\n # model.\n x = torch.reshape(x.permute(0, 2, 3, 1), (s[0], -1))\n\n x = F.leaky_relu(self.fc_rel0(x))\n x = F.leaky_relu(self.fc_rel1(x))\n x = self.fc_xyz(x)\n\n return torch.reshape(x, (s[0], self.num_joints, 3))\n\n\nclass PoseNet(BaseModel):\n \"\"\"Implements the PoseNet architecture.\n\n This architecture is defined in:\n Zimmermann, C., & Brox, T. 
(2017).\n Learning to Estimate 3D Hand Pose from Single RGB Images.\n Retrieved from http://arxiv.org/abs/1705.01389\n \"\"\"\n\n def __init__(self, num_joints):\n \"\"\"Defines and initializes the network.\"\"\"\n\n super(PoseNet, self).__init__()\n # Stage 1\n self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)\n self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)\n self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)\n self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)\n self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)\n self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv3_4 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)\n self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)\n self.conv4_3 = nn.Conv2d(512, 256, 3, padding=1)\n self.conv4_4 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv4_5 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv4_6 = nn.Conv2d(256, 256, 3, padding=1)\n self.conv4_7 = nn.Conv2d(256, 128, 3, padding=1)\n self.conv5_1 = nn.Conv2d(128, 512, 1)\n self.conv5_2 = nn.Conv2d(512, num_joints, 1)\n self.pool = nn.MaxPool2d(2, 2)\n\n # Stage 2\n self.conv6_1 = nn.Conv2d(128 + num_joints, 128, 7, padding=3)\n self.conv6_2 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv6_3 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv6_4 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv6_5 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv6_6 = nn.Conv2d(128, 128, 1)\n self.conv6_7 = nn.Conv2d(128, num_joints, 1)\n\n # Stage 3\n self.conv7_1 = nn.Conv2d(128 + num_joints, 128, 7, padding=3)\n self.conv7_2 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv7_3 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv7_4 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv7_5 = nn.Conv2d(128, 128, 7, padding=3)\n self.conv7_6 = nn.Conv2d(128, 128, 1)\n self.conv7_7 = nn.Conv2d(128, num_joints, 1)\n\n self.pose_prior = PosePrior(num_joints)\n\n def forward(self, x):\n \"\"\"Forward pass through PoseNet.\n\n Args:\n x - [batch x 3 x 256 x 256]: Color image containing a cropped\n image of the hand.\n\n Returns:\n [batch x num_joints x 32 x 32] hand keypoint heatmaps.\n \"\"\"\n\n # Stage 1\n x = F.leaky_relu(self.conv1_1(x)) # 1\n x = F.leaky_relu(self.conv1_2(x)) # 2\n x = self.pool(x) # 3\n x = F.leaky_relu(self.conv2_1(x)) # 4\n x = F.leaky_relu(self.conv2_2(x)) # 5\n x = self.pool(x) # 6\n x = F.leaky_relu(self.conv3_1(x)) # 7\n x = F.leaky_relu(self.conv3_2(x)) # 8\n x = F.leaky_relu(self.conv3_3(x)) # 9\n x = F.leaky_relu(self.conv3_4(x)) # 10\n x = self.pool(x) # 11\n x = F.leaky_relu(self.conv4_1(x)) # 12\n x = F.leaky_relu(self.conv4_2(x)) # 13\n x = F.leaky_relu(self.conv4_3(x)) # 14\n x = F.leaky_relu(self.conv4_4(x)) # 15\n x = F.leaky_relu(self.conv4_5(x)) # 16\n x = F.leaky_relu(self.conv4_6(x)) # 17\n encoding = F.leaky_relu(self.conv4_7(x)) # 18\n x = F.leaky_relu(self.conv5_1(encoding))\n scoremap = self.conv5_2(x)\n\n # Stage 2\n x = torch.cat([scoremap, encoding], dim=1)\n x = F.leaky_relu(self.conv6_1(x))\n x = F.leaky_relu(self.conv6_2(x))\n x = F.leaky_relu(self.conv6_3(x))\n x = F.leaky_relu(self.conv6_4(x))\n x = F.leaky_relu(self.conv6_5(x))\n x = F.leaky_relu(self.conv6_6(x))\n scoremap = self.conv6_7(x)\n\n # Stage 3\n x = torch.cat([scoremap, encoding], dim=1)\n x = F.leaky_relu(self.conv7_1(x))\n x = F.leaky_relu(self.conv7_2(x))\n x = F.leaky_relu(self.conv7_3(x))\n x = F.leaky_relu(self.conv7_4(x))\n x = F.leaky_relu(self.conv7_5(x))\n x = F.leaky_relu(self.conv7_6(x))\n x = self.conv7_7(x)\n\n return 
self.pose_prior(x)\n",
"import os\nimport sys\nimport pickle\n\nimport torch\nimport torchvision\nimport scipy.io as sio\nimport numpy as np\nfrom PIL import Image\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom utils.transforms import get_point_cloud\n\n\nclass LHSynthDataset(torch.utils.data.Dataset):\n \"\"\"Synthetic Dataset using Libhand with wrist. This is the model used in\n the paper by Tompson et al.\"\"\"\n\n def __init__(self, root_dir, sample_transform=None,\n target_transform=None, num_points=1000, noise_coeff=0.0):\n \"\"\"\n Args:\n root_dir (string): Path to the data.\n sample_transform (callable, optional): Optional transform to be\n applied to the sample.\n target_transform (callable, optional): Optional transform to be\n applied to the target.\n num_points (int, optional): Number of points to sample in the\n point cloud.\n noise_coeff (float, optional): Factor of additive random noise to\n add to each sample.\n \"\"\"\n self.root_dir = root_dir\n self.sample_transform = sample_transform\n self.target_transform = target_transform\n self.num_points = num_points\n self.num_kp = 26\n self.noise_coeff = noise_coeff\n self.proj = np.array([[1.302294, 0.0, 0.0, 0.0],\n [0.0, 1.732051, 0.0, 0.0],\n [0.0, 0.0, -1.025316, -0.202532],\n [0.0, 0.0, -1.0, 1.0]])\n self.proj_inv = np.linalg.inv(self.proj)\n self.keypoint_names = ['carpals',\n 'metacarpals',\n 'finger5joint1', # 2\n 'finger5joint2',\n 'finger5joint3',\n 'finger5joint3tip',\n 'Bone',\n 'finger1joint1', # 7\n 'finger1joint2',\n 'finger1joint3',\n 'finger1joint3tip',\n 'Bone.001',\n 'finger2joint1', # 12\n 'finger2joint2',\n 'finger2joint3',\n 'finger2joint3tip',\n 'Bone.002',\n 'finger3joint1', # 17\n 'finger3joint2',\n 'finger3joint3',\n 'finger3joint3tip',\n 'Bone.003',\n 'finger4joint1', # 22\n 'finger4joint2',\n 'finger4joint3',\n 'finger4joint3tip']\n keypoint_file = os.path.join(root_dir, 'annotations.pkl')\n if os.path.isfile(keypoint_file):\n self.keypoint_gt = self.load_keypoints(keypoint_file)\n else:\n self.keypoint_gt = None\n\n original_length = len([name for name in os.listdir(os.path.join(self.root_dir, 'depth/'))])\n self.idxs = list([i for i in range(original_length)])\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, idx):\n idx = self.idxs[idx]\n # sample_name = os.path.join(self.root_dir, 'color/{}.png'.format(idx))\n depth_name = os.path.join(self.root_dir, 'depth/depth_{}.png'.format(idx))\n\n # sample = Image.open(sample_name)\n sample = Image.open(depth_name)\n w, h = sample.size\n\n kps2d = self.keypoint_gt[idx].copy()\n kps3d = self.uvd_to_xyz(kps2d.copy(), h, w)\n kps3d = torch.tensor(kps3d, dtype=torch.float32)\n\n sample = np.asarray(sample, np.float32)\n # sample = self.process_depth(sample)\n\n bbox = self.get_bbox(kps2d[1:])\n norm_size = torch.norm(kps3d[6] - kps3d[1])\n center = kps3d[1:].mean(0)\n\n sample, padding = self.crop_depth(sample, bbox)\n target = self.depth_to_pc(sample.copy(), bbox, padding)\n target = torch.tensor(target, dtype=torch.float32)\n target = self.normalize(target, center, norm_size)\n kps3d = self.normalize(kps3d, center, norm_size)\n\n if self.sample_transform:\n sample = self.sample_transform(sample)\n sample = self.normalize_depth(sample)\n\n if self.noise_coeff > 0:\n mask_idxs = sample != 1\n noise = torch.rand_like(sample) * self.noise_coeff\n sample[mask_idxs] += noise[mask_idxs]\n sample[sample > 1] = 1\n\n kps3d[:, 2] *= -1.0\n target[:, 2] *= -1.0\n\n return sample, target, 
kps3d\n\n def load_keypoints(self, annotation_path):\n \"\"\"Loads joint annotations for synthetic data.\"\"\"\n samples = 0\n with open(annotation_path, mode='rb') as file:\n try:\n num_samples = pickle.load(file)\n annotations = np.zeros((num_samples, self.num_kp, 3))\n while samples < num_samples:\n anno = pickle.load(file)\n for i, v in enumerate(anno):\n joint_idxs = self.keypoint_names.index(v)\n kp_t = np.array([float(anno[v][0]),\n float(anno[v][1]),\n float(anno[v][2]) * -1000.0])\n annotations[samples, joint_idxs] = kp_t\n samples += 1\n except EOFError:\n print(\"ERROR: EOFError\")\n\n return annotations\n\n def crop_depth(self, img, bbox):\n \"\"\"Crop the depth image to the bounding box.\n\n If the cropped image is not square, 0-value padding will be added.\n\n Args:\n img (float, H x W x D): Depth array.\n bbox (float, 6): Bounding box of the hand in image space.\n\n Returns:\n Cropped image (float, H_c x W_c x D) and the row and column\n padding size added to the image (int, 2 x 2).\n \"\"\"\n xstart = bbox[0]\n xend = bbox[1]\n ystart = bbox[2]\n yend = bbox[3]\n zstart = bbox[4]\n zend = bbox[5]\n\n cropped = img[max(ystart, 0):min(yend, img.shape[0]), max(xstart, 0):min(xend, img.shape[1])].copy()\n #\n # Crop z bound\n mask1 = np.logical_and(cropped < zstart, cropped != 0)\n mask2 = np.logical_and(cropped > zend, cropped != 0)\n cropped[mask1] = zstart\n cropped[mask2] = 0.0\n\n if cropped.shape[0] > cropped.shape[1]:\n diff = cropped.shape[0] - cropped.shape[1]\n row_pad = [0, 0]\n if diff % 2 == 1:\n col_pad = [int(diff / 2), int(diff / 2) + 1]\n else:\n col_pad = [int(diff / 2), int(diff / 2)]\n else:\n diff = cropped.shape[1] - cropped.shape[0]\n col_pad = [0, 0]\n if diff % 2 == 1:\n row_pad = [int(diff / 2), int(diff / 2) + 1]\n else:\n row_pad = [int(diff / 2), int(diff / 2)]\n\n return np.pad(cropped, (row_pad, col_pad), mode='constant', constant_values=0), (row_pad, col_pad)\n\n\n def get_bbox(self, keypoints, pad=20):\n \"\"\"Calculates a 3d bounding box.\n\n Args:\n keypoints (array): 3d keypoints of the hand in either image or 3d\n space.\n pad (int): Amount of padding to add to the bounding box for all\n sides.\n Returns:\n 6 values defining the bounding cube.\n \"\"\"\n\n joints_min = keypoints.min(0) - pad\n joints_max = keypoints.max(0) + pad\n return np.array([joints_min[0], joints_max[0],\n joints_min[1], joints_max[1],\n joints_min[2], joints_max[2]]).astype(np.int)\n\n def process_depth(self, depth, depth_min=0.1, depth_max=8.0):\n depth = depth.copy()\n # The closest value is -1\n bg_idxs = depth == 0\n depth[bg_idxs] = 0.0\n return depth\n\n def uvd_to_xyz(self, uvd_points, height, width):\n proj = np.array([[1.302294, 0.0, 0.0, 0.0],\n [0.0, 1.732051, 0.0, 0.0],\n [0.0, 0.0, -1.025316, -0.202532],\n [0.0, 0.0, -1.0, 0.0]])\n\n num_points = uvd_points.shape[0]\n\n z_vals = uvd_points[:, 2]\n half_height = height / 2\n half_width = width / 2\n #\n # Convert to HCS\n uvd_points[:, 0] = (uvd_points[:, 0] - half_width) / half_width\n uvd_points[:, 1] = ((half_height - uvd_points[:, 1]) / half_height)\n uvd_points[:, 0] *= uvd_points[:, 2]\n uvd_points[:, 1] *= uvd_points[:, 2]\n uvd_points = np.concatenate((uvd_points, np.ones((num_points, 1))), axis=1)\n\n # HCS -> World\n points_xyz = uvd_points @ self.proj_inv\n points_xyz = points_xyz[:, :3]\n points_xyz[:, 2] = z_vals\n\n return points_xyz\n\n def normalize_depth(self, depth_img):\n \"\"\"Normalize depth image to be in range [-1, 1].\n Returns a clone of the original image.\n \"\"\"\n norm_img = 
depth_img.clone()\n bg_mask = (norm_img == 0)\n fg_mask = (norm_img > 0)\n min_val = norm_img[fg_mask].min()\n max_val = norm_img[fg_mask].max()\n norm_img[fg_mask] -= min_val\n norm_img[fg_mask] /= (max_val - min_val)\n norm_img[fg_mask] *= 2.0\n norm_img[fg_mask] -= 1.0\n norm_img[bg_mask] = 1.0\n\n return norm_img\n\n def normalize(self, points, center, norm_size):\n \"\"\"Normalize a set of points centered on `center` and scaled by\n `norm_size`.\n\n Args:\n center (float, array): Location to center object.\n norm_size (float): Scale factor.\n Returns:\n Normalized array of points.\n \"\"\"\n #\n # Normalize\n norm_points = points.clone()\n norm_points -= center\n norm_points /= norm_size\n\n return norm_points\n\n def depth_to_pc(self, depth_img, bbox, padding):\n \"\"\"Transforms the depth image into a point cloud representation.\n\n Args:\n depth_img (array): depth image.\n bbox (float, array): bounding box of the hand in (u, v, d).\n padding (int, array): row and column padding added to the cropped\n image from earlier pre-processing.\n Returns:\n Point cloud representation of hand.\n \"\"\"\n\n xstart = bbox[0] - padding[1][0]\n ystart = bbox[2] - padding[0][0]\n\n # Convert to point cloud\n depth_img = torch.from_numpy(depth_img).unsqueeze(0)\n p_ndc = get_point_cloud(depth_img, self.num_points, 0)\n p_ndc = p_ndc.squeeze(0).numpy()\n p_ndc[:, 0] += xstart\n p_ndc[:, 1] += ystart\n pc = self.uvd_to_xyz(p_ndc, 480, 640)\n\n return pc\n",
"import torch\nimport torch.nn.functional as F\n\nimport utils.quaternion as quat\nfrom base import BaseModel\nfrom utils.util import register_hook\n\n\nclass JointLayer(BaseModel):\n \"\"\"Calculates rotation between two vectors.\"\"\"\n def __init__(self, offset, orientation):\n super(JointLayer, self).__init__()\n self.orientation = torch.nn.Parameter(orientation, requires_grad=False)\n self.offset = torch.nn.Parameter(offset, requires_grad=False)\n\n def forward(self, input_offsets, q_parents):\n \"\"\" Calculates the quaternion rotations which transform the model\n offsets to the input offsets.\n\n Inputs are transformed to the space shared by the model using the\n inverse of `q_parents`. Being in the same space allows a simple\n calculation of the difference in rotation.\n\n Args:\n input_offsets (N, 3): Input vectors of the keypoint w.r.t. their\n parent.\n q_parents (N, 4): Quaternion transformations which provide the\n total derived transformation up to the parent.\n \"\"\"\n model_offsets = self.offset.repeat(input_offsets.shape[0], 1)\n derived_orientation = quat.qmul(q_parents,\n self.orientation.repeat(q_parents.shape[0], 1))\n inputs_object = quat.q_rot(quat.q_inv(derived_orientation), input_offsets)\n q_diffs = quat.find_q_v(F.normalize(model_offsets),\n F.normalize(inputs_object))\n new_qs = quat.qmul(self.orientation.repeat(q_diffs.shape[0], 1),\n q_diffs)\n new_q_parents = quat.qmul(q_parents, new_qs)\n new_ps = quat.q_rot(quat.q_inv(new_q_parents), input_offsets)\n\n return new_q_parents, new_qs, new_ps\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.ReLU",
"torch.hub.load_state_dict_from_url",
"torch.nn.init.kaiming_normal_"
],
[
"torch.cat",
"torch.reshape",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
],
[
"torch.norm",
"numpy.pad",
"numpy.logical_and",
"torch.rand_like",
"numpy.linalg.inv",
"numpy.asarray",
"torch.from_numpy",
"torch.tensor",
"numpy.ones",
"numpy.array",
"numpy.zeros"
],
[
"torch.nn.functional.normalize",
"torch.nn.Parameter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
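A minimal usage sketch for the modified ResNet defined in the first code entry above. The 1-channel conv1 stem and the (num_joints, num_features) output view come straight from that code; the module name, batch size, and input resolution below are illustrative assumptions, not part of the record.

import torch
from resnet import resnet50  # hypothetical module path for the file shown above

model = resnet50(pretrained=False, num_joints=17, num_features=4)
depth = torch.randn(2, 1, 256, 256)  # single-channel depth crops; shape is an assumption
out = model(depth)                   # conv stem -> 4 residual stages -> avgpool -> fc -> view
print(out.shape)                     # torch.Size([2, 17, 4])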
sixy6e/pygmt
|
[
"63d23d4e0702edb94234f10d23d210cdf1adb54b"
] |
[
"pygmt/clib/session.py"
] |
[
"\"\"\"\nDefines the Session class to create and destroy a GMT API session and provides access to\nthe API functions. Uses ctypes to wrap most of the core functions from the C API.\n\"\"\"\nimport sys\nimport ctypes as ctp\nfrom contextlib import contextmanager\n\nfrom packaging.version import Version\nimport numpy as np\n\nfrom ..exceptions import (\n GMTCLibError,\n GMTCLibNoSessionError,\n GMTInvalidInput,\n GMTVersionError,\n)\nfrom .loading import load_libgmt\nfrom .conversion import (\n kwargs_to_ctypes_array,\n vectors_to_arrays,\n dataarray_to_matrix,\n as_c_contiguous,\n)\n\nFAMILIES = [\n \"GMT_IS_DATASET\",\n \"GMT_IS_GRID\",\n \"GMT_IS_PALETTE\",\n \"GMT_IS_MATRIX\",\n \"GMT_IS_VECTOR\",\n]\n\nVIAS = [\"GMT_VIA_MATRIX\", \"GMT_VIA_VECTOR\"]\n\nGEOMETRIES = [\n \"GMT_IS_NONE\",\n \"GMT_IS_POINT\",\n \"GMT_IS_LINE\",\n \"GMT_IS_POLYGON\",\n \"GMT_IS_PLP\",\n \"GMT_IS_SURFACE\",\n]\n\nMODES = [\"GMT_CONTAINER_ONLY\", \"GMT_OUTPUT\"]\n\nREGISTRATIONS = [\"GMT_GRID_PIXEL_REG\", \"GMT_GRID_NODE_REG\"]\n\nDTYPES = {\n \"float64\": \"GMT_DOUBLE\",\n \"float32\": \"GMT_FLOAT\",\n \"int64\": \"GMT_LONG\",\n \"int32\": \"GMT_INT\",\n \"uint64\": \"GMT_ULONG\",\n \"uint32\": \"GMT_UINT\",\n}\n\n\nclass Session:\n \"\"\"\n A GMT API session where most operations involving the C API happen.\n\n Works as a context manager (for use in a ``with`` block) to create a GMT C API\n session and destroy it in the end to clean up memory.\n\n Functions of the shared library are exposed as methods of this class. Most methods\n MUST be used with an open session (inside a ``with`` block). If creating GMT data\n structures to communicate data, put that code inside the same ``with`` block as the\n API calls that will use the data.\n\n By default, will let :mod:`ctypes` try to find the GMT shared library (``libgmt``).\n If the environment variable ``GMT_LIBRARY_PATH`` is set, will look for the shared\n library in the directory specified by it.\n\n A ``GMTVersionError`` exception will be raised if the GMT shared library reports a\n version < 6.0.0.\n\n The ``session_pointer`` attribute holds a ctypes pointer to the currently open\n session.\n\n Raises\n ------\n GMTCLibNotFoundError\n If there was any problem loading the library (couldn't find it or couldn't\n access the functions).\n GMTCLibNoSessionError\n If you try to call a method outside of a 'with' block.\n GMTVersionError\n If the minimum required version of GMT is not found.\n\n Examples\n --------\n\n >>> from pygmt.datasets import load_earth_relief\n >>> from pygmt.helpers import GMTTempFile\n >>> grid = load_earth_relief()\n >>> type(grid)\n <class 'xarray.core.dataarray.DataArray'>\n >>> # Create a session and destroy it automatically when exiting the \"with\" block.\n >>> with Session() as ses:\n ... # Create a virtual file and link to the memory block of the grid.\n ... with ses.virtualfile_from_grid(grid) as fin:\n ... # Create a temp file to use as output.\n ... with GMTTempFile() as fout:\n ... # Call the grdinfo module with the virtual file as input and the.\n ... # temp file as output.\n ... ses.call_module(\"grdinfo\", \"{} -C ->{}\".format(fin, fout.name))\n ... # Read the contents of the temp file before it's deleted.\n ... 
print(fout.read().strip())\n -180 180 -90 90 -8596 5559 1 1 361 181\n \"\"\"\n\n # The minimum version of GMT required\n required_version = \"6.0.0\"\n\n @property\n def session_pointer(self):\n \"\"\"\n The :class:`ctypes.c_void_p` pointer to the current open GMT session.\n\n Raises\n ------\n GMTCLibNoSessionError\n If trying to access without a currently open GMT session (i.e.,\n outside of the context manager).\n\n \"\"\"\n if not hasattr(self, \"_session_pointer\") or self._session_pointer is None:\n raise GMTCLibNoSessionError(\"No currently open GMT API session.\")\n return self._session_pointer\n\n @session_pointer.setter\n def session_pointer(self, session):\n \"\"\"\n Set the session void pointer.\n \"\"\"\n self._session_pointer = session\n\n @property\n def info(self):\n \"Dictionary with the GMT version and default paths and parameters.\"\n if not hasattr(self, \"_info\"):\n self._info = {\n \"version\": self.get_default(\"API_VERSION\"),\n \"padding\": self.get_default(\"API_PAD\"),\n \"binary dir\": self.get_default(\"API_BINDIR\"),\n \"share dir\": self.get_default(\"API_SHAREDIR\"),\n # This segfaults for some reason\n # 'data dir': self.get_default(\"API_DATADIR\"),\n \"plugin dir\": self.get_default(\"API_PLUGINDIR\"),\n \"library path\": self.get_default(\"API_LIBRARY\"),\n \"cores\": self.get_default(\"API_CORES\"),\n # API_IMAGE_LAYOUT not defined if GMT is not compiled with GDAL\n # \"image layout\": self.get_default(\"API_IMAGE_LAYOUT\"),\n \"grid layout\": self.get_default(\"API_GRID_LAYOUT\"),\n }\n return self._info\n\n def __enter__(self):\n \"\"\"\n Create a GMT API session and check the libgmt version.\n\n Calls :meth:`~gmt.clib.Session.create`.\n\n Raises\n ------\n GMTVersionError\n If the version reported by libgmt is less than ``Session.required_version``.\n Will destroy the session before raising the exception.\n\n \"\"\"\n self.create(\"pygmt-session\")\n # Need to store the version info because 'get_default' won't work after the\n # session is destroyed.\n version = self.info[\"version\"]\n if Version(version) < Version(self.required_version):\n self.destroy()\n raise GMTVersionError(\n \"Using an incompatible GMT version {}. Must be equal or newer than {}.\".format(\n version, self.required_version\n )\n )\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Destroy the currently open GMT API session.\n\n Calls :meth:`~gmt.clib.Session.destroy`.\n \"\"\"\n self.destroy()\n\n def __getitem__(self, name):\n \"\"\"\n Get the value of a GMT constant (C enum) from gmt_resources.h\n\n Used to set configuration values for other API calls. Wraps ``GMT_Get_Enum``.\n\n Parameters\n ----------\n name : str\n The name of the constant (e.g., ``\"GMT_SESSION_EXTERNAL\"``)\n\n Returns\n -------\n constant : int\n Integer value of the constant. Do not rely on this value because it might\n change.\n\n Raises\n ------\n GMTCLibError\n If the constant doesn't exist.\n\n \"\"\"\n c_get_enum = self.get_libgmt_func(\n \"GMT_Get_Enum\", argtypes=[ctp.c_void_p, ctp.c_char_p], restype=ctp.c_int\n )\n\n # The C lib introduced the void API pointer to GMT_Get_Enum so that it's\n # consistent with other functions. It doesn't use the pointer so we can pass in\n # None (NULL pointer). 
We can't give it the actual pointer because we need to\n # call GMT_Get_Enum when creating a new API session pointer (chicken-and-egg\n # type of thing).\n session = None\n\n value = c_get_enum(session, name.encode())\n\n if value is None or value == -99999:\n raise GMTCLibError(\"Constant '{}' doesn't exits in libgmt.\".format(name))\n\n return value\n\n def get_libgmt_func(self, name, argtypes=None, restype=None):\n \"\"\"\n Get a ctypes function from the libgmt shared library.\n\n Assigns the argument and return type conversions for the function.\n\n Use this method to access a C function from libgmt.\n\n Parameters\n ----------\n name : str\n The name of the GMT API function.\n argtypes : list\n List of ctypes types used to convert the Python input arguments for\n the API function.\n restype : ctypes type\n The ctypes type used to convert the input returned by the function\n into a Python type.\n\n Returns\n -------\n function\n The GMT API function.\n\n Examples\n --------\n\n >>> from ctypes import c_void_p, c_int\n >>> with Session() as lib:\n ... func = lib.get_libgmt_func('GMT_Destroy_Session',\n ... argtypes=[c_void_p], restype=c_int)\n >>> type(func)\n <class 'ctypes.CDLL.__init__.<locals>._FuncPtr'>\n\n \"\"\"\n if not hasattr(self, \"_libgmt\"):\n self._libgmt = load_libgmt()\n function = getattr(self._libgmt, name)\n if argtypes is not None:\n function.argtypes = argtypes\n if restype is not None:\n function.restype = restype\n return function\n\n def create(self, name):\n \"\"\"\n Create a new GMT C API session.\n\n This is required before most other methods of :class:`pygmt.clib.Session` can be\n called.\n\n .. warning::\n\n Usage of :class:`~gmt.clib.Session` as a context manager in a ``with`` block\n is preferred over calling :meth:`~gmt.clib.Session.create` and\n :meth:`~gmt.clib.Session.destroy` manually.\n\n Calls ``GMT_Create_Session`` and generates a new ``GMTAPI_CTRL`` struct, which\n is a :class:`ctypes.c_void_p` pointer. Sets the ``session_pointer`` attribute to\n this pointer.\n\n Remember to terminate the current session using :meth:`pygmt.clib.Session.destroy`\n before creating a new one.\n\n Parameters\n ----------\n name : str\n A name for this session. Doesn't really affect the outcome.\n\n \"\"\"\n try:\n # Won't raise an exception if there is a currently open session\n self.session_pointer # pylint: disable=pointless-statement\n # In this case, fail to create a new session until the old one is destroyed\n raise GMTCLibError(\n \"Failed to create a GMT API session: There is a currently open session.\"\n \" Must destroy it fist.\"\n )\n # If the exception is raised, this means that there is no open session and we're\n # free to create a new one.\n except GMTCLibNoSessionError:\n pass\n\n c_create_session = self.get_libgmt_func(\n \"GMT_Create_Session\",\n argtypes=[ctp.c_char_p, ctp.c_uint, ctp.c_uint, ctp.c_void_p],\n restype=ctp.c_void_p,\n )\n\n # Capture the output printed by GMT into this list. Will use it later to\n # generate error messages for the exceptions raised by API calls.\n self._error_log = []\n\n @ctp.CFUNCTYPE(ctp.c_int, ctp.c_void_p, ctp.c_char_p)\n def print_func(file_pointer, message): # pylint: disable=unused-argument\n \"\"\"\n Callback function that the GMT C API will use to print log and error\n messages. 
We'll capture the messages and print them to stderr so that they\n will show up on the Jupyter notebook.\n \"\"\"\n message = message.decode().strip()\n self._error_log.append(message)\n # flush to make sure the messages are printed even if we have a crash.\n print(message, file=sys.stderr, flush=True)\n return 0\n\n # Need to store a copy of the function because ctypes doesn't and it will be\n # garbage collected otherwise\n self._print_callback = print_func\n\n padding = self[\"GMT_PAD_DEFAULT\"]\n session_type = self[\"GMT_SESSION_EXTERNAL\"]\n\n session = c_create_session(name.encode(), padding, session_type, print_func)\n\n if session is None:\n raise GMTCLibError(\n \"Failed to create a GMT API session:\\n{}\".format(self._error_message)\n )\n\n self.session_pointer = session\n\n @property\n def _error_message(self):\n \"\"\"\n A string with all error messages emitted by the C API.\n\n Only includes messages with the string ``\"[ERROR]\"`` in them.\n \"\"\"\n msg = \"\"\n if hasattr(self, \"_error_log\"):\n msg = \"\\n\".join(line for line in self._error_log if \"[ERROR]\" in line)\n return msg\n\n def destroy(self):\n \"\"\"\n Destroy the currently open GMT API session.\n\n .. warning::\n\n Usage of :class:`~gmt.clib.Session` as a context manager in a ``with`` block\n is preferred over calling :meth:`~gmt.clib.Session.create` and\n :meth:`~gmt.clib.Session.destroy` manually.\n\n Calls ``GMT_Destroy_Session`` to terminate and free the memory of a registered\n ``GMTAPI_CTRL`` session (the pointer for this struct is stored in the\n ``session_pointer`` attribute).\n\n Always use this method after you are done using a C API session. The session\n needs to be destroyed before creating a new one. Otherwise, some of the\n configuration files might be left behind and can influence subsequent API calls.\n\n Sets the ``session_pointer`` attribute to ``None``.\n \"\"\"\n c_destroy_session = self.get_libgmt_func(\n \"GMT_Destroy_Session\", argtypes=[ctp.c_void_p], restype=ctp.c_int\n )\n\n status = c_destroy_session(self.session_pointer)\n if status:\n raise GMTCLibError(\n \"Failed to destroy GMT API session:\\n{}\".format(self._error_message)\n )\n\n self.session_pointer = None\n\n def get_default(self, name):\n \"\"\"\n Get the value of a GMT default parameter (library version, paths, etc).\n\n Possible default parameter names include:\n\n * ``\"API_VERSION\"``: The GMT version\n * ``\"API_PAD\"``: The grid padding setting\n * ``\"API_BINDIR\"``: The binary file directory\n * ``\"API_SHAREDIR\"``: The share directory\n * ``\"API_DATADIR\"``: The data directory\n * ``\"API_PLUGINDIR\"``: The plugin directory\n * ``\"API_LIBRARY\"``: The core library path\n * ``\"API_CORES\"``: The number of cores\n * ``\"API_IMAGE_LAYOUT\"``: The image/band layout\n * ``\"API_GRID_LAYOUT\"``: The grid layout\n\n Parameters\n ----------\n name : str\n The name of the default parameter (e.g., ``\"API_VERSION\"``)\n\n Returns\n -------\n value : str\n The default value for the parameter.\n\n Raises\n ------\n GMTCLibError\n If the parameter doesn't exist.\n\n \"\"\"\n c_get_default = self.get_libgmt_func(\n \"GMT_Get_Default\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_char_p],\n restype=ctp.c_int,\n )\n\n # Make a string buffer to get a return value\n value = ctp.create_string_buffer(10000)\n\n status = c_get_default(self.session_pointer, name.encode(), value)\n\n if status != 0:\n raise GMTCLibError(\n \"Error getting default value for '{}' (error code {}).\".format(\n name, status\n )\n )\n\n return 
value.value.decode()\n\n def call_module(self, module, args):\n \"\"\"\n Call a GMT module with the given arguments.\n\n Makes a call to ``GMT_Call_Module`` from the C API using mode\n ``GMT_MODULE_CMD`` (arguments passed as a single string).\n\n Most interactions with the C API are done through this function.\n\n Parameters\n ----------\n module : str\n Module name (``'coast'``, ``'basemap'``, etc).\n args : str\n String with the command line arguments that will be passed to the\n module (for example, ``'-R0/5/0/10 -JM'``).\n\n Raises\n ------\n GMTCLibError\n If the returned status code of the function is non-zero.\n\n \"\"\"\n c_call_module = self.get_libgmt_func(\n \"GMT_Call_Module\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_int, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n mode = self[\"GMT_MODULE_CMD\"]\n status = c_call_module(\n self.session_pointer, module.encode(), mode, args.encode()\n )\n if status != 0:\n raise GMTCLibError(\n \"Module '{}' failed with status code {}:\\n{}\".format(\n module, status, self._error_message\n )\n )\n\n def create_data(self, family, geometry, mode, **kwargs):\n \"\"\"\n Create an empty GMT data container.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). See the\n ``data_families`` attribute for valid names.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). See the\n ``data_geometries`` attribute for valid names.\n mode : str\n A valid GMT data mode (e.g., ``'GMT_OUTPUT'``). See the\n ``data_modes`` attribute for valid names.\n dim : list of 4 integers\n The dimensions of the dataset. See the documentation for the GMT C\n API function ``GMT_Create_Data`` (``src/gmt_api.c``) for the full\n range of options regarding 'dim'. If ``None``, will pass in the\n NULL pointer.\n ranges : list of 4 floats\n The dataset extent. Also a bit of a complicated argument. See the C\n function documentation. It's called ``range`` in the C function but\n it would conflict with the Python built-in ``range`` function.\n inc : list of 2 floats\n The increments between points of the dataset. See the C function\n documentation.\n registration : int\n The node registration (what the coordinates mean). Can be\n ``'GMT_GRID_PIXEL_REG'`` or ``'GMT_GRID_NODE_REG'``. Defaults to\n ``'GMT_GRID_NODE_REG'``.\n pad : int\n The grid padding. 
Defaults to ``GMT_PAD_DEFAULT``.\n\n Returns\n -------\n data_ptr : int\n A ctypes pointer (an integer) to the allocated ``GMT_Dataset``\n object.\n\n \"\"\"\n c_create_data = self.get_libgmt_func(\n \"GMT_Create_Data\",\n argtypes=[\n ctp.c_void_p, # API\n ctp.c_uint, # family\n ctp.c_uint, # geometry\n ctp.c_uint, # mode\n ctp.POINTER(ctp.c_uint64), # dim\n ctp.POINTER(ctp.c_double), # range\n ctp.POINTER(ctp.c_double), # inc\n ctp.c_uint, # registration\n ctp.c_int, # pad\n ctp.c_void_p,\n ], # data\n restype=ctp.c_void_p,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n mode_int = self._parse_constant(\n mode, valid=MODES, valid_modifiers=[\"GMT_GRID_IS_GEO\"]\n )\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n registration_int = self._parse_constant(\n kwargs.get(\"registration\", \"GMT_GRID_NODE_REG\"), valid=REGISTRATIONS\n )\n\n # Convert dim, ranges, and inc to ctypes arrays if given (will be None\n # if not given to represent NULL pointers)\n dim = kwargs_to_ctypes_array(\"dim\", kwargs, ctp.c_uint64 * 4)\n ranges = kwargs_to_ctypes_array(\"ranges\", kwargs, ctp.c_double * 4)\n inc = kwargs_to_ctypes_array(\"inc\", kwargs, ctp.c_double * 2)\n\n # Use a NULL pointer (None) for existing data to indicate that the\n # container should be created empty. Fill it in later using put_vector\n # and put_matrix.\n data_ptr = c_create_data(\n self.session_pointer,\n family_int,\n geometry_int,\n mode_int,\n dim,\n ranges,\n inc,\n registration_int,\n self._parse_pad(family, kwargs),\n None,\n )\n\n if data_ptr is None:\n raise GMTCLibError(\"Failed to create an empty GMT data pointer.\")\n\n return data_ptr\n\n def _parse_pad(self, family, kwargs):\n \"\"\"\n Parse and return an appropriate value for pad if none is given.\n\n Pad is a bit tricky because, for matrix types, pad control the matrix\n ordering (row or column major). Using the default pad will set it to\n column major and mess things up with the numpy arrays.\n \"\"\"\n pad = kwargs.get(\"pad\", None)\n if pad is None:\n if \"MATRIX\" in family:\n pad = 0\n else:\n pad = self[\"GMT_PAD_DEFAULT\"]\n return pad\n\n def _parse_constant(self, constant, valid, valid_modifiers=None):\n \"\"\"\n Parse a constant, convert it to an int, and validate it.\n\n The GMT C API takes certain defined constants, like ``'GMT_IS_GRID'``,\n that need to be validated and converted to integer values using\n :meth:`pygmt.clib.Session.__getitem__`.\n\n The constants can also take a modifier by appending another constant\n name, e.g. ``'GMT_IS_GRID|GMT_VIA_MATRIX'``. The two parts must be\n converted separately and their values are added.\n\n If valid modifiers are not given, then will assume that modifiers are\n not allowed. In this case, will raise a\n :class:`~gmt.exceptions.GMTInvalidInput` exception if given a modifier.\n\n Parameters\n ----------\n constant : str\n The name of a valid GMT API constant, with an optional modifier.\n valid : list of str\n A list of valid values for the constant. 
Will raise a\n :class:`~gmt.exceptions.GMTInvalidInput` exception if the given\n value is not on the list.\n \"\"\"\n parts = constant.split(\"|\")\n name = parts[0]\n nmodifiers = len(parts) - 1\n if nmodifiers > 1:\n raise GMTInvalidInput(\n \"Only one modifier is allowed in constants, {} given: '{}'\".format(\n nmodifiers, constant\n )\n )\n if nmodifiers > 0 and valid_modifiers is None:\n raise GMTInvalidInput(\n \"Constant modifiers not allowed since valid values were not \"\n + \"given: '{}'\".format(constant)\n )\n if name not in valid:\n raise GMTInvalidInput(\n \"Invalid constant argument '{}'. Must be one of {}.\".format(\n name, str(valid)\n )\n )\n if (\n nmodifiers > 0\n and valid_modifiers is not None\n and parts[1] not in valid_modifiers\n ):\n raise GMTInvalidInput(\n \"Invalid constant modifier '{}'. Must be one of {}.\".format(\n parts[1], str(valid_modifiers)\n )\n )\n integer_value = sum(self[part] for part in parts)\n return integer_value\n\n def _check_dtype_and_dim(self, array, ndim):\n \"\"\"\n Check that a numpy array has the given dimensions and is a valid data\n type.\n\n Parameters\n ----------\n array : numpy array\n The array to be tested.\n ndim : int\n The desired dimension of the array.\n\n Returns\n -------\n gmt_type : int\n The GMT constant value representing this data type.\n\n Raises\n ------\n GMTCLibError\n If the array has the wrong dimensions or is an unsupported data\n type.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> data = np.array([1, 2, 3], dtype='float64')\n >>> with Session() as ses:\n ... gmttype = ses._check_dtype_and_dim(data, ndim=1)\n ... gmttype == ses[\"GMT_DOUBLE\"]\n True\n >>> data = np.ones((5, 2), dtype='float32')\n >>> with Session() as ses:\n ... gmttype = ses._check_dtype_and_dim(data, ndim=2)\n ... gmttype == ses['GMT_FLOAT']\n True\n\n \"\"\"\n if array.dtype.name not in DTYPES:\n raise GMTInvalidInput(\n \"Unsupported numpy data type '{}'.\".format(array.dtype.name)\n )\n if array.ndim != ndim:\n raise GMTInvalidInput(\n \"Expected a numpy 1d array, got {}d.\".format(array.ndim)\n )\n return self[DTYPES[array.dtype.name]]\n\n def put_vector(self, dataset, column, vector):\n \"\"\"\n Attach a numpy 1D array as a column on a GMT dataset.\n\n Use this functions to attach numpy array data to a GMT dataset and pass\n it to GMT modules. Wraps ``GMT_Put_Vector``.\n\n The dataset must be created by :meth:`~gmt.clib.Session.create_data`\n first. Use ``family='GMT_IS_DATASET|GMT_VIA_VECTOR'``.\n\n Not at all numpy dtypes are supported, only: float64, float32, int64,\n int32, uint64, and uint32.\n\n .. warning::\n The numpy array must be C contiguous in memory. If it comes from a\n column slice of a 2d array, for example, you will have to make a\n copy. Use :func:`numpy.ascontiguousarray` to make sure your vector\n is contiguous (it won't copy if it already is).\n\n Parameters\n ----------\n dataset : :class:`ctypes.c_void_p`\n The ctypes void pointer to a ``GMT_Dataset``. Create it with\n :meth:`~gmt.clib.Session.create_data`.\n column : int\n The column number of this vector in the dataset (starting from 0).\n vector : numpy 1d-array\n The array that will be attached to the dataset. 
Must be a 1d C\n contiguous array.\n\n Raises\n ------\n GMTCLibError\n If given invalid input or ``GMT_Put_Vector`` exits with status !=\n 0.\n\n \"\"\"\n c_put_vector = self.get_libgmt_func(\n \"GMT_Put_Vector\",\n argtypes=[ctp.c_void_p, ctp.c_void_p, ctp.c_uint, ctp.c_uint, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n gmt_type = self._check_dtype_and_dim(vector, ndim=1)\n vector_pointer = vector.ctypes.data_as(ctp.c_void_p)\n status = c_put_vector(\n self.session_pointer, dataset, column, gmt_type, vector_pointer\n )\n if status != 0:\n raise GMTCLibError(\n \" \".join(\n [\n \"Failed to put vector of type {}\".format(vector.dtype),\n \"in column {} of dataset.\".format(column),\n ]\n )\n )\n\n def put_matrix(self, dataset, matrix, pad=0):\n \"\"\"\n Attach a numpy 2D array to a GMT dataset.\n\n Use this functions to attach numpy array data to a GMT dataset and pass\n it to GMT modules. Wraps ``GMT_Put_Matrix``.\n\n The dataset must be created by :meth:`~gmt.clib.Session.create_data`\n first. Use ``|GMT_VIA_MATRIX'`` in the family.\n\n Not at all numpy dtypes are supported, only: float64, float32, int64,\n int32, uint64, and uint32.\n\n .. warning::\n The numpy array must be C contiguous in memory. Use\n :func:`numpy.ascontiguousarray` to make sure your vector is\n contiguous (it won't copy if it already is).\n\n Parameters\n ----------\n dataset : :class:`ctypes.c_void_p`\n The ctypes void pointer to a ``GMT_Dataset``. Create it with\n :meth:`~gmt.clib.Session.create_data`.\n matrix : numpy 2d-array\n The array that will be attached to the dataset. Must be a 2d C\n contiguous array.\n pad : int\n The amount of padding that should be added to the matrix. Use when\n creating grids for modules that require padding.\n\n Raises\n ------\n GMTCLibError\n If given invalid input or ``GMT_Put_Matrix`` exits with status !=\n 0.\n\n \"\"\"\n c_put_matrix = self.get_libgmt_func(\n \"GMT_Put_Matrix\",\n argtypes=[ctp.c_void_p, ctp.c_void_p, ctp.c_uint, ctp.c_int, ctp.c_void_p],\n restype=ctp.c_int,\n )\n\n gmt_type = self._check_dtype_and_dim(matrix, ndim=2)\n matrix_pointer = matrix.ctypes.data_as(ctp.c_void_p)\n status = c_put_matrix(\n self.session_pointer, dataset, gmt_type, pad, matrix_pointer\n )\n if status != 0:\n raise GMTCLibError(\"Failed to put matrix of type {}.\".format(matrix.dtype))\n\n def write_data(self, family, geometry, mode, wesn, output, data):\n \"\"\"\n Write a GMT data container to a file.\n\n The data container should be created by\n :meth:`~gmt.clib.Session.create_data`.\n\n Wraps ``GMT_Write_Data`` but only allows writing to a file. So the\n ``method`` argument is omitted.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). See the\n ``data_families`` attribute for valid names. Don't use the\n ``GMT_VIA_VECTOR`` or ``GMT_VIA_MATRIX`` constructs for this. Use\n ``GMT_IS_VECTOR`` and ``GMT_IS_MATRIX`` instead.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). See the\n ``data_geometries`` attribute for valid names.\n mode : str\n How the data is to be written to the file. This option varies\n depending on the given family. See the GMT API documentation for\n details.\n wesn : list or numpy array\n [xmin, xmax, ymin, ymax, zmin, zmax] of the data. 
Must have 6\n elements.\n output : str\n The output file name.\n data : :class:`ctypes.c_void_p`\n Pointer to the data container created by\n :meth:`~gmt.clib.Session.create_data`.\n\n Raises\n ------\n GMTCLibError\n For invalid input arguments or if the GMT API functions returns a\n non-zero status code.\n\n \"\"\"\n c_write_data = self.get_libgmt_func(\n \"GMT_Write_Data\",\n argtypes=[\n ctp.c_void_p,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.POINTER(ctp.c_double),\n ctp.c_char_p,\n ctp.c_void_p,\n ],\n restype=ctp.c_int,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n status = c_write_data(\n self.session_pointer,\n family_int,\n self[\"GMT_IS_FILE\"],\n geometry_int,\n self[mode],\n (ctp.c_double * 6)(*wesn),\n output.encode(),\n data,\n )\n if status != 0:\n raise GMTCLibError(\"Failed to write dataset to '{}'\".format(output))\n\n @contextmanager\n def open_virtual_file(self, family, geometry, direction, data):\n \"\"\"\n Open a GMT Virtual File to pass data to and from a module.\n\n GMT uses a virtual file scheme to pass in data to API modules. Use it\n to pass in your GMT data structure (created using\n :meth:`~gmt.clib.Session.create_data`) to a module that expects an input\n or output file.\n\n Use in a ``with`` block. Will automatically close the virtual file when\n leaving the ``with`` block. Because of this, no wrapper for\n ``GMT_Close_VirtualFile`` is provided.\n\n Parameters\n ----------\n family : str\n A valid GMT data family name (e.g., ``'GMT_IS_DATASET'``). Should\n be the same as the one you used to create your data structure.\n geometry : str\n A valid GMT data geometry name (e.g., ``'GMT_IS_POINT'``). Should\n be the same as the one you used to create your data structure.\n direction : str\n Either ``'GMT_IN'`` or ``'GMT_OUT'`` to indicate if passing data to\n GMT or getting it out of GMT, respectively.\n data : int\n The ctypes void pointer to your GMT data structure.\n\n Yields\n ------\n vfname : str\n The name of the virtual file that you can pass to a GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import os\n >>> import numpy as np\n >>> x = np.array([0, 1, 2, 3, 4])\n >>> y = np.array([5, 6, 7, 8, 9])\n >>> with Session() as lib:\n ... family = 'GMT_IS_DATASET|GMT_VIA_VECTOR'\n ... geometry = 'GMT_IS_POINT'\n ... dataset = lib.create_data(\n ... family=family,\n ... geometry=geometry,\n ... mode='GMT_CONTAINER_ONLY',\n ... dim=[2, 5, 1, 0], # columns, lines, segments, type\n ... )\n ... lib.put_vector(dataset, column=0, vector=x)\n ... lib.put_vector(dataset, column=1, vector=y)\n ... # Add the dataset to a virtual file\n ... vfargs = (family, geometry, 'GMT_IN', dataset)\n ... with lib.open_virtual_file(*vfargs) as vfile:\n ... # Send the output to a temp file so that we can read it\n ... with GMTTempFile() as ofile:\n ... args = '{} ->{}'.format(vfile, ofile.name)\n ... lib.call_module('info', args)\n ... 
print(ofile.read().strip())\n <vector memory>: N = 5 <0/4> <5/9>\n\n \"\"\"\n c_open_virtualfile = self.get_libgmt_func(\n \"GMT_Open_VirtualFile\",\n argtypes=[\n ctp.c_void_p,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_uint,\n ctp.c_void_p,\n ctp.c_char_p,\n ],\n restype=ctp.c_int,\n )\n\n c_close_virtualfile = self.get_libgmt_func(\n \"GMT_Close_VirtualFile\",\n argtypes=[ctp.c_void_p, ctp.c_char_p],\n restype=ctp.c_int,\n )\n\n family_int = self._parse_constant(family, valid=FAMILIES, valid_modifiers=VIAS)\n geometry_int = self._parse_constant(geometry, valid=GEOMETRIES)\n direction_int = self._parse_constant(\n direction,\n valid=[\"GMT_IN\", \"GMT_OUT\"],\n valid_modifiers=[\"GMT_IS_REFERENCE\", \"GMT_IS_DUPLICATE\"],\n )\n\n buff = ctp.create_string_buffer(self[\"GMT_STR16\"])\n\n status = c_open_virtualfile(\n self.session_pointer, family_int, geometry_int, direction_int, data, buff\n )\n\n if status != 0:\n raise GMTCLibError(\"Failed to create a virtual file.\")\n\n vfname = buff.value.decode()\n\n try:\n yield vfname\n finally:\n status = c_close_virtualfile(self.session_pointer, vfname.encode())\n if status != 0:\n raise GMTCLibError(\"Failed to close virtual file '{}'.\".format(vfname))\n\n @contextmanager\n def virtualfile_from_vectors(self, *vectors):\n \"\"\"\n Store 1d arrays as columns of a table inside a virtual file.\n\n Use the virtual file name to pass in the data in your vectors to a GMT module.\n\n Context manager (use in a ``with`` block). Yields the virtual file name that you\n can pass as an argument to a GMT module call. Closes the virtual file upon exit\n of the ``with`` block.\n\n Use this instead of creating the data container and virtual file by hand with\n :meth:`~gmt.clib.Session.create_data`, :meth:`~gmt.clib.Session.put_vector`, and\n :meth:`~gmt.clib.Session.open_virtual_file`.\n\n If the arrays are C contiguous blocks of memory, they will be passed without\n copying to GMT. If they are not (e.g., they are columns of a 2D array), they\n will need to be copied to a contiguous block.\n\n Parameters\n ----------\n vectors : 1d arrays\n The vectors that will be included in the array. All must be of the same\n size.\n\n Yields\n ------\n fname : str\n The name of virtual file. Pass this as a file name argument to a GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import numpy as np\n >>> import pandas as pd\n >>> x = [1, 2, 3]\n >>> y = np.array([4, 5, 6])\n >>> z = pd.Series([7, 8, 9])\n >>> with Session() as ses:\n ... with ses.virtualfile_from_vectors(x, y, z) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... ses.call_module('info', '{} ->{}'.format(fin, fout.name))\n ... print(fout.read().strip())\n <vector memory>: N = 3 <1/3> <4/6> <7/9>\n\n \"\"\"\n # Conversion to a C-contiguous array needs to be done here and not in put_matrix\n # because we need to maintain a reference to the copy while it is being used by\n # the C API. Otherwise, the array would be garbage collected and the memory\n # freed. Creating it in this context manager guarantees that the copy will be\n # around until the virtual file is closed. 
The conversion is implicit in\n # vectors_to_arrays.\n arrays = vectors_to_arrays(vectors)\n\n columns = len(arrays)\n rows = len(arrays[0])\n if not all(len(i) == rows for i in arrays):\n raise GMTInvalidInput(\"All arrays must have same size.\")\n\n family = \"GMT_IS_DATASET|GMT_VIA_VECTOR\"\n geometry = \"GMT_IS_POINT\"\n\n dataset = self.create_data(\n family, geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[columns, rows, 1, 0]\n )\n\n for col, array in enumerate(arrays):\n self.put_vector(dataset, column=col, vector=array)\n\n with self.open_virtual_file(family, geometry, \"GMT_IN\", dataset) as vfile:\n yield vfile\n\n @contextmanager\n def virtualfile_from_matrix(self, matrix):\n \"\"\"\n Store a 2d array as a table inside a virtual file.\n\n Use the virtual file name to pass in the data in your matrix to a GMT module.\n\n Context manager (use in a ``with`` block). Yields the virtual file name that you\n can pass as an argument to a GMT module call. Closes the virtual file upon exit\n of the ``with`` block.\n\n The virtual file will contain the array as a ``GMT_MATRIX`` pretending to be a\n ``GMT_DATASET``.\n\n **Not meant for creating ``GMT_GRID``**. The grid requires more metadata than\n just the data matrix. Use :meth:`~gmt.clib.Session.virtualfile_from_grid`\n instead.\n\n Use this instead of creating the data container and virtual file by hand with\n :meth:`~gmt.clib.Session.create_data`, :meth:`~gmt.clib.Session.put_matrix`, and\n :meth:`~gmt.clib.Session.open_virtual_file`\n\n The matrix must be C contiguous in memory. If it is not (e.g., it is a slice of\n a larger array), the array will be copied to make sure it is.\n\n Parameters\n ----------\n matrix : 2d array\n The matrix that will be included in the GMT data container.\n\n Yields\n ------\n fname : str\n The name of virtual file. Pass this as a file name argument to a GMT module.\n\n Examples\n --------\n\n >>> from pygmt.helpers import GMTTempFile\n >>> import numpy as np\n >>> data = np.arange(12).reshape((4, 3))\n >>> print(data)\n [[ 0 1 2]\n [ 3 4 5]\n [ 6 7 8]\n [ 9 10 11]]\n >>> with Session() as ses:\n ... with ses.virtualfile_from_matrix(data) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... ses.call_module('info', '{} ->{}'.format(fin, fout.name))\n ... print(fout.read().strip())\n <matrix memory>: N = 4 <0/9> <1/10> <2/11>\n\n \"\"\"\n # Conversion to a C-contiguous array needs to be done here and not in put_matrix\n # because we need to maintain a reference to the copy while it is being used by\n # the C API. Otherwise, the array would be garbage collected and the memory\n # freed. Creating it in this context manager guarantees that the copy will be\n # around until the virtual file is closed.\n matrix = as_c_contiguous(matrix)\n rows, columns = matrix.shape\n\n family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\"\n geometry = \"GMT_IS_POINT\"\n\n dataset = self.create_data(\n family, geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[columns, rows, 1, 0]\n )\n\n self.put_matrix(dataset, matrix)\n\n with self.open_virtual_file(family, geometry, \"GMT_IN\", dataset) as vfile:\n yield vfile\n\n @contextmanager\n def virtualfile_from_grid(self, grid):\n \"\"\"\n Store a grid in a virtual file.\n\n Use the virtual file name to pass in the data in your grid to a GMT module.\n Grids must be :class:`xarray.DataArray` instances.\n\n Context manager (use in a ``with`` block). Yields the virtual file name that you\n can pass as an argument to a GMT module call. 
Closes the virtual file upon exit\n of the ``with`` block.\n\n The virtual file will contain the grid as a ``GMT_MATRIX`` with extra metadata.\n\n Use this instead of creating a data container and virtual file by hand with\n :meth:`~gmt.clib.Session.create_data`, :meth:`~gmt.clib.Session.put_matrix`, and\n :meth:`~gmt.clib.Session.open_virtual_file`\n\n The grid data matrix must be C contiguous in memory. If it is not (e.g., it is a\n slice of a larger array), the array will be copied to make sure it is.\n\n Parameters\n ----------\n grid : :class:`xarray.DataArray`\n The grid that will be included in the virtual file.\n\n Yields\n ------\n fname : str\n The name of virtual file. Pass this as a file name argument to a GMT module.\n\n Examples\n --------\n\n >>> from pygmt.datasets import load_earth_relief\n >>> from pygmt.helpers import GMTTempFile\n >>> data = load_earth_relief(resolution='60m')\n >>> print(data.shape)\n (181, 361)\n >>> print(data.lon.values.min(), data.lon.values.max())\n -180.0 180.0\n >>> print(data.lat.values.min(), data.lat.values.max())\n -90.0 90.0\n >>> print(data.values.min(), data.values.max())\n -8596.0 5559.0\n >>> with Session() as ses:\n ... with ses.virtualfile_from_grid(data) as fin:\n ... # Send the output to a file so that we can read it\n ... with GMTTempFile() as fout:\n ... args = '{} -L0 -Cn ->{}'.format(fin, fout.name)\n ... ses.call_module('grdinfo', args)\n ... print(fout.read().strip())\n -180 180 -90 90 -8596 5559 1 1 361 181\n >>> # The output is: w e s n z0 z1 dx dy n_columns n_rows\n\n \"\"\"\n # Conversion to a C-contiguous array needs to be done here and not in put_matrix\n # because we need to maintain a reference to the copy while it is being used by\n # the C API. Otherwise, the array would be garbage collected and the memory\n # freed. Creating it in this context manager guarantees that the copy will be\n # around until the virtual file is closed. The conversion is implicit in\n # dataarray_to_matrix.\n matrix, region, inc = dataarray_to_matrix(grid)\n family = \"GMT_IS_GRID|GMT_VIA_MATRIX\"\n geometry = \"GMT_IS_SURFACE\"\n gmt_grid = self.create_data(\n family, geometry, mode=\"GMT_CONTAINER_ONLY\", ranges=region, inc=inc\n )\n self.put_matrix(gmt_grid, matrix)\n args = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", gmt_grid)\n with self.open_virtual_file(*args) as vfile:\n yield vfile\n\n def extract_region(self):\n \"\"\"\n Extract the WESN bounding box of the currently active figure.\n\n Retrieves the information from the PostScript file, so it works for\n country codes as well.\n\n Returns\n -------\n * wesn : 1d array\n A 1D numpy array with the west, east, south, and north dimensions\n of the current figure.\n\n Examples\n --------\n\n >>> import pygmt\n >>> fig = pygmt.Figure()\n >>> fig.coast(region=[0, 10, -20, -10], projection=\"M6i\", frame=True,\n ... land='black')\n >>> with Session() as lib:\n ... wesn = lib.extract_region()\n >>> print(', '.join(['{:.2f}'.format(x) for x in wesn]))\n 0.00, 10.00, -20.00, -10.00\n\n Using ISO country codes for the regions (for example ``'US.HI'`` for\n Hawaii):\n\n >>> fig = pygmt.Figure()\n >>> fig.coast(region='US.HI', projection=\"M6i\", frame=True,\n ... land='black')\n >>> with Session() as lib:\n ... 
wesn = lib.extract_region()\n >>> print(', '.join(['{:.2f}'.format(x) for x in wesn]))\n -164.71, -154.81, 18.91, 23.58\n\n The country codes can have an extra argument that rounds the region a\n multiple of the argument (for example, ``'US.HI+r5'`` will round the\n region to multiples of 5):\n\n >>> fig = pygmt.Figure()\n >>> fig.coast(region='US.HI+r5', projection=\"M6i\", frame=True,\n ... land='black')\n >>> with Session() as lib:\n ... wesn = lib.extract_region()\n >>> print(', '.join(['{:.2f}'.format(x) for x in wesn]))\n -165.00, -150.00, 15.00, 25.00\n\n \"\"\"\n c_extract_region = self.get_libgmt_func(\n \"GMT_Extract_Region\",\n argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.POINTER(ctp.c_double)],\n restype=ctp.c_int,\n )\n\n wesn = np.empty(4, dtype=np.float64)\n wesn_pointer = wesn.ctypes.data_as(ctp.POINTER(ctp.c_double))\n # The second argument to GMT_Extract_Region is a file pointer to a\n # PostScript file. It's only valid in classic mode. Use None to get a\n # NULL pointer instead.\n status = c_extract_region(self.session_pointer, None, wesn_pointer)\n if status != 0:\n raise GMTCLibError(\"Failed to extract region from current figure.\")\n return wesn\n"
] |
[
[
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CiaranWelsh/large_study
|
[
"e8266dafec9e17dea7eb8a16a6ee38a6bb84d1ee",
"e8266dafec9e17dea7eb8a16a6ee38a6bb84d1ee"
] |
[
"GSS2375_WB_NewDur_Grant/quick_analysis.py",
"large_study/regression_analysis.py"
] |
[
"import pandas\r\n\r\n\r\nf = r'C:\\Users\\Ciaran\\Documents\\LargeStudy\\GSS2375_WB_NewDur_Grant\\GSS2375_RNA.xlsx'\r\n\r\ndata = pandas.read_excel(f)\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.figure()\r\nplt.hist(data['Yield(ug)'],bins=50)\r\nplt.show()",
"from . import parse\nimport os, glob, pandas, numpy\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport seaborn\nfrom scipy.stats.mstats import pearsonr\n\n\"\"\"\nTime Lag Regression\n======================\n\nI've been switching between Python and R but R is annoying so I've gone back to Python. Initially \nI created a couple of R functions to perform linear regression between a single predictor \nand a response variable (i.e. COL1A1 and CTGF). Each data set was split by treatments and cell\ntypes in order to compare coefficients and pearsons correlation. This has worked fairly well. \n\nI now want to introduce a time lag parameter. The idea is that I want to correlate a predictor\nvariable at some time in the past, specified by dt with the response variable. \n\n\nPlan\n====\n1) isolate the two variables of interest (COL1A1 and CTGF for instance)\nDo control, treated and treated/control. Not baseline. Therefore\nremove baseline and added TGF/Control column\n\n2) Interpolate the data using cubic spline for example. specify resolution.\n3) add time delay to the predictor variable\n4) Calculate pearsons coef. Also build in mutual information options for comparison\n5) Find a way of maximizing pearsons or MI accross range of time delay\n\n\n\"\"\"\n\n\n\n\nclass TimeLagRegression(object):\n def __init__(self, data, x, y, interpolation_kind='cubic', step_size=0.1):\n self.data = data\n self.x = x\n self.y = y\n self.interpolation_kind = interpolation_kind\n self.step_size = step_size\n\n ## remove any anomolies (id'ed with PCA)\n self.data = self.remove_anomolies()\n\n ## isolate the x and y variabels\n self.data = self.get_variables()\n\n ## remove baseline\n self.data = self.drop_baseline()\n\n ## unstack the data so that time is along columns\n self.data = self.unstack_data()\n\n ## calculate TGFb / control and add to dataframe\n self.data = self.calc_fc()\n\n ## interpolate the data\n self.interp_data = self.interpolate()\n\n self.do()\n\n # self.time_delay_data = self.calculate_time_delay_data(1)\n\n def remove_anomolies(self):\n \"\"\"\n remove any anomolies such as repeat 6, TGFb, 8h cell line F\n :return:\n \"\"\"\n return self.data.query(\"cell_line != 'F' or treatment != 'TGFb' or replicate != 6 or time == 8\")\n\n def get_variables(self):\n return self.data.query(\"Assay in ['{}', '{}']\".format(self.x, self.y))\n\n def drop_baseline(self):\n \"\"\"\n baseline has too few time points to interpolate and is\n thereby dropped.\n :return:\n \"\"\"\n return self.data.query('treatment != \"Baseline\"')\n\n def unstack_data(self):\n \"\"\"\n get data so that time is along columns\n :return:\n \"\"\"\n df = self.data.unstack()\n return df['Norm2ref']\n\n def calc_fc(self):\n \"\"\"\n into TGFb, control and TGFb/control\n :return:\n \"\"\"\n data = self.data\n data.index = data.index.swaplevel(0, 1)\n fc = data.loc['TGFb'] / data.loc['Control']\n fc['treatment'] = ['TGFb / Control'] * fc.shape[0]\n fc = fc.reset_index()\n fc = fc.set_index(['treatment', 'cell_line', 'replicate', 'Assay'])\n return pandas.concat([data, fc])\n\n def interpolate1timecourse(self, x, y, **kwargs):\n \"\"\"\n Interpolate a time course using scipy.interpolation.interp1d\n :param data: vector of data points to interpolate\n :param kwargs: passed on to interp1d\n :return:\n \"\"\"\n f = interp1d(x, y, kind=kwargs.get('kind'))\n x_new = numpy.arange(x[0], x[-1], step=0.1)\n y_new = f(x_new)\n return pandas.DataFrame(y_new, index=x_new).transpose()\n\n def interpolate(self):\n df_list = 
[]\n for label, df in self.data.groupby(level=['treatment', 'cell_line', 'replicate', 'Assay']):\n x = list(df.loc[label].index)\n y = df.loc[label].values\n interp = self.interpolate1timecourse(x, y, kind=self.interpolation_kind)\n interp.index = df.index\n interp.columns.name = 'time'\n df_list.append(interp)\n return pandas.concat(df_list)\n\n def plot_interpolated(self):\n \"\"\"\n\n :return:\n \"\"\"\n data = self.interp_data.stack().reset_index()\n print(data.head())\n x = data.query(\"Assay == '{}'\".format(self.x))\n y = data.query(\"Assay == '{}'\".format(self.y))\n print(x.shape)\n print(y.shape)\n for label, df, in data.groupby(by=['treatment', 'cell_line']):\n plt.figure()\n seaborn.tsplot(data=df, time='time', unit='replicate', condition='Assay', value=0)\n plt.title('{}_{}'.format(label[0], label[1]))\n plt.show()\n\n def calculate_time_delay_data(self, dt):\n \"\"\"\n Example Data:\n\n time: 0, 1, 2, 3, 4h\n x: 4, 5, 6, 7, 8\n y: 7, 5, 4, 3, 2\n\n time: 0, 1, 2, 3, 4h\n x: 4, 5, 6, 7, 8\n y: 5, 4, 3, 2\n\n :param dt:\n :return:\n \"\"\"\n x = self.interp_data.query('Assay == \"{}\"'.format(self.x))\n y = self.interp_data.query('Assay == \"{}\"'.format(self.y))\n\n ##introduce the time delay\n new_x = [i + dt for i in list(x.columns)]\n x.columns = new_x\n\n ## find intersection between x.columns and y.columns\n x_intersect_y = list(set(x.columns).intersection(set(y.columns)))\n\n x_delay = x[x_intersect_y]\n y = y[x_intersect_y]\n\n return x, y\n\n\n def do(self):\n \"\"\"\n iterate over cell line and treatment.\n For each:\n calculate pearsons correlation\n :return:\n \"\"\"\n\n x, y = self.calculate_time_delay_data(1)\n # print pearsonr(x, y)\n for label, df in x.groupby(level=['treatment', 'cell_line']):\n print(df)\n\n\n def pearsons_correlation(self, x, y):\n return pearsonr(x, y)\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n dire = r'/home/b3053674/Documents/LargeStudy/GSS2375_WB_NewDur_Grant'\n dire = r'C:\\Users\\Ciaran\\Documents\\large_study\\GSS2375_WB_NewDur_Grant'\n design_file = os.path.join(dire, 'new_design.csv')\n\n data_file = os.path.join(dire, 'DataFromWaferGen2.csv')\n\n df = pandas.read_csv(data_file, index_col=[0, 1, 2, 3, 4])\n df = pandas.DataFrame(df['Norm2ref'])\n\n TLR = TimeLagRegression(df, 'CTGF', 'COL1A1')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_excel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.arange",
"pandas.DataFrame",
"scipy.stats.mstats.pearsonr",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
KamilDeja/BinPlay
|
[
"a8626e0bd85ed2f0c064b0c78c95a0bc8c0eb14e"
] |
[
"main.py"
] |
[
"import sys\nimport argparse\nimport copy\nimport random\nimport torch\nimport torch.utils.data as data\nfrom random import shuffle\nfrom collections import OrderedDict\n\nimport continual_benchmark.dataloaders.base\nimport continual_benchmark.agents as agents\nimport continual_benchmark.dataloaders as dataloaders\nfrom continual_benchmark.dataloaders.datasetGen import SplitGen, PermutedGen\n\nfrom vae_experiments import models_definition\nfrom vae_experiments import training_functions\nfrom vae_experiments import vae_utils\n\nfrom visualise import *\n\n\nexp_values = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101]\nparts = len(exp_values)\n\n\ndef run(args):\n if not os.path.exists('outputs'):\n os.mkdir('outputs')\n\n train_dataset, val_dataset = dataloaders.base.__dict__[args.dataset](args.dataroot, args.skip_normalization,\n args.train_aug)\n if args.n_permutation > 0:\n train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,\n args.n_permutation,\n remap_class=not args.no_class_remap)\n else:\n train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,\n first_split_sz=args.first_split_size,\n other_split_sz=args.other_split_size,\n rand_split=args.rand_split,\n remap_class=not args.no_class_remap)\n\n # Calculate constants\n n_classes = train_dataset.number_classes\n\n labels_tasks = {}\n for task_name, task in train_dataset_splits.items():\n labels_tasks[int(task_name)] = task.dataset.class_list\n\n n_tasks = len(labels_tasks)\n\n n_channels = val_dataset.dataset[0][0].size()[0]\n in_size = val_dataset.dataset[0][0].size()[1]\n\n agent_config = {'lr': args.base_lr,\n 'momentum': args.base_momentum,\n 'nesterov': args.base_nesterov,\n 'weight_decay': args.base_weight_decay,\n 'base_schedule': args.base_schedule,\n 'base_model_type': args.base_model_type,\n 'base_model_name': args.base_model_name,\n 'base_model_weights': args.base_model_weights,\n 'out_dim': {'All': args.base_force_out_dim} if args.base_force_out_dim > 0 else task_output_space,\n 'optimizer': args.base_optimizer,\n 'base_print_freq': args.base_print_freq,\n 'score_generated_images_by_freezed_classifier': args.score_generated_images_by_freezed_classifier,\n 'gpuid': args.gpuid}\n\n agent = agents.default.NormalNN(agent_config, n_channels=n_channels, in_size=in_size, n_classes=n_classes,\n d=args.base_model_d, model_bn=args.base_model_bn,max_pool=args.base_max_pool, n_conv=args.base_n_conv,\n dropout_rate=args.base_dropout_rate)\n\n # Decide split ordering\n task_names = sorted(list(task_output_space.keys()), key=int)\n print('Task order:', task_names)\n if args.rand_split_order:\n shuffle(task_names)\n print('Shuffled task order:', task_names)\n acc_table = OrderedDict()\n test_acc_table = OrderedDict()\n\n # Prepare VAE\n local_vae = models_definition.VAE(latent_size=args.gen_latent_size, d=args.gen_d, p_coding=args.gen_p_coding,\n n_dim_coding=args.gen_n_dim_coding, device=device, n_channels=n_channels,\n in_size=in_size).to(device)\n\n print(local_vae)\n class_table = torch.zeros(n_tasks, n_classes, dtype=torch.long)\n global_classes_list = []\n global_n_codes = []\n\n for task_id in range(len(task_names)):\n print(\"######### Task number {} #########\".format(task_id))\n task_name = task_names[task_id]\n\n # VAE\n print(\"Train local VAE model\")\n n_codes = len(train_dataset_splits[task_name])\n train_dataset_loader = 
torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],\n batch_size=args.gen_batch_size, shuffle=True, drop_last=False)\n data_loader_stable = torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],\n batch_size=args.gen_batch_size, shuffle=False, drop_last=False)\n data_loader_total = torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],\n batch_size=n_codes, shuffle=False, drop_last=False)\n global_n_codes.append(n_codes)\n start_id = int(np.sum(global_n_codes[:task_id]))\n codes_range = range(start_id, start_id + n_codes)\n codes_rep = torch.Tensor()\n\n for exp_value in exp_values:\n codes = codes_range * np.array(\n exp_value ** np.floor(args.gen_latent_size // parts * np.log(2) / np.log(exp_value)),\n dtype=np.longlong) % 2 ** args.gen_latent_size // parts\n codes = torch.tensor(\n models_definition.unpackbits(np.array(codes, dtype=np.longlong), args.gen_latent_size // parts)).float()\n codes_rep = torch.cat([codes_rep, codes], 1)\n\n if args.gen_load_pretrained_models:\n codes_rep = (codes_rep.repeat([args.gen_batch_size, 1, 1]) * 2 - 1)\n else:\n codes_rep = (codes_rep.repeat([args.gen_batch_size, 1, 1]).to(device) * 2 - 1)\n\n if args.gen_load_pretrained_models:\n local_vae.load_state_dict(torch.load(args.gen_pretrained_models_dir + f'model{task_id}_local_vae'))\n global_classes_list = np.load(args.gen_pretrained_models_dir + f'model{task_id}_classes.npy')\n else:\n dataloader_with_codes = training_functions.train_local_generator(local_vae, train_dataset_loader,\n data_loader_stable,\n data_loader_total,\n global_classes_list, task_id, codes_rep,\n args.gen_batch_size,\n n_epochs_pre=args.gen_ae_pre_epochs,\n n_epochs=args.gen_ae_epochs)\n print(\"Done training local VAE model\")\n del codes_rep\n\n if not task_id:\n # First task, initializing global decoder as local_vae's decoder\n curr_global_decoder = copy.deepcopy(local_vae.decoder)\n else:\n print(\"Train global VAE model\")\n # Retraining global decoder with previous global decoder and local_vae\n if args.gen_load_pretrained_models:\n curr_global_decoder = models_definition.Decoder(local_vae.latent_size, args.gen_d*4,\n p_coding=local_vae.p_coding,\n n_dim_coding=local_vae.n_dim_coding,\n device=local_vae.device,\n n_channels=n_channels, in_size=in_size).to(\n local_vae.device)\n curr_global_decoder.load_state_dict(\n torch.load(args.gen_pretrained_models_dir + f'model{task_id}_curr_decoder'))\n else:\n curr_global_decoder = training_functions.train_global_decoder(curr_global_decoder, local_vae,\n dataloader_with_codes, task_id=task_id,\n codes_rep=None, total_n_codes=n_codes,\n global_n_codes=global_n_codes,\n global_classes_list=global_classes_list,\n d=args.gen_d,\n n_epochs=args.gen_ae_epochs,\n batch_size=args.gen_batch_size,\n n_channels=n_channels, in_size=in_size)\n torch.cuda.empty_cache()\n\n # Plotting results for already learned tasks\n if not args.gen_load_pretrained_models:\n vae_utils.plot_results(args.experiment_name, curr_global_decoder, task_id, n_codes, global_n_codes,\n global_classes_list)\n vae_utils.plot_results(args.experiment_name, local_vae.decoder, task_id, n_codes, global_n_codes,\n global_classes_list, 5, \"_local_vae\")\n torch.save(curr_global_decoder.state_dict(), f\"results/{args.experiment_name}/model{task_id}_curr_decoder\")\n torch.save(local_vae.state_dict(), f\"results/{args.experiment_name}/model{task_id}_local_vae\")\n torch.save(agent.model.state_dict(), f\"results/{args.experiment_name}/model{task_id}_classifier\")\n 
np.save(f\"results/{args.experiment_name}/model{task_id}_classes\", global_classes_list)\n\n # Classifier\n train_loader = data.DataLoader(train_dataset_splits[task_name],\n batch_size=args.base_batch_size,\n shuffle=True,\n num_workers=args.workers)\n\n val_loader = data.DataLoader(val_dataset_splits[task_name],\n batch_size=args.base_batch_size,\n shuffle=True,\n num_workers=args.workers)\n\n agent.learn_batch(train_loader, val_loader, curr_global_decoder, local_vae, class_table, global_classes_list,\n task_id, n_codes, global_n_codes, args.new_task_data_processing)\n\n # Classifier validation\n acc_table[task_name] = OrderedDict()\n for j in range(task_id + 1):\n agent.active_neurons = torch.zeros((1, 4000))\n val_name = task_names[j]\n print('validation split name:', val_name)\n val_data = val_dataset_splits[val_name] if not args.base_eval_on_train_set else train_dataset_splits[val_name]\n val_loader = data.DataLoader(val_data,\n batch_size=args.base_batch_size,\n shuffle=True,\n num_workers=args.workers)\n acc_table[val_name][task_name] = agent.validation(val_loader)\n\n return acc_table, task_names, test_acc_table\n\n\ndef get_args(argv):\n parser = argparse.ArgumentParser()\n\n # General\n parser.add_argument('--experiment_name', type=str, default='default_run', help='Name of current experiment')\n parser.add_argument('--rpath', type=str, default='results/', help='Directory to save results')\n parser.add_argument('--gpuid', nargs=\"+\", type=int, default=[0],\n help=\"The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only\")\n parser.add_argument('--repeat', type=int, default=1, help=\"Repeat the experiment N times\")\n parser.add_argument('--seed', type=int, required=False,\n help=\"Random seed. If defined all random operations will be reproducible\")\n\n # Data\n parser.add_argument('--dataroot', type=str, default='data', help=\"The root folder of dataset or downloaded data\")\n parser.add_argument('--dataset', type=str, default='MNIST', help=\"MNIST(default)|FashionMNIST|CIFAR10|CIFAR100\")\n parser.add_argument('--n_permutation', type=int, default=0, help=\"Enable permuted tests when >0\")\n parser.add_argument('--first_split_size', type=int, default=2)\n parser.add_argument('--other_split_size', type=int, default=2)\n parser.add_argument('--rand_split', dest='rand_split', default=False, action='store_true',\n help=\"Randomize the classes in splits\")\n parser.add_argument('--rand_split_order', dest='rand_split_order', default=False, action='store_true',\n help=\"Randomize the order of splits\")\n parser.add_argument('--no_class_remap', dest='no_class_remap', default=False, action='store_true',\n help=\"Avoid the dataset with a subset of classes doing the remapping. Ex: [2,5,6 ...] -> [0,1,2 ...]\")\n parser.add_argument('--skip_normalization', action='store_true', help='Loads dataset without normalization')\n parser.add_argument('--train_aug', dest='train_aug', default=False, action='store_true',\n help=\"Allow data augmentation during training\")\n parser.add_argument('--workers', type=int, default=0, help=\"#Thread for dataloader\")\n\n # Learning options\n parser.add_argument('--new_task_data_processing', type=str,\n choices=['original', 'original_through_vae', 'generated'],\n default='original', help=\"Determines train data for base network.\")\n parser.add_argument('--score_generated_images_by_freezed_classifier', default=True, action='store_true',\n help=\"Score generated images by freezed classifier. 
If false - generator prompts the labels\")\n\n # Base network - currently classfier\n parser.add_argument('--base_batch_size', type=int, default=100)\n parser.add_argument('--base_model_type', type=str, default='mlp',\n help=\"The type (lenet|resnet|cifar_net) of backbone network\")\n parser.add_argument('--base_model_name', type=str, default='MLP', help=\"The name of actual model for the backbone\")\n parser.add_argument('--base_force_out_dim', type=int, default=2,\n help=\"Set 0 to let the task decide the required output dimension\")\n parser.add_argument('--base_schedule', nargs=\"+\", type=int, default=[2],\n help=\"The list of epoch numbers to reduce learning rate by factor of 0.1. Last number is the end epoch\")\n parser.add_argument('--base_print_freq', type=float, default=100, help=\"Print the log at every x iteration\")\n parser.add_argument('--base_model_weights', type=str, default=None,\n help=\"The path to the file for the model weights (*.pth).\")\n parser.add_argument('--base_eval_on_train_set', dest='base_eval_on_train_set', default=False, action='store_true',\n help=\"Force the evaluation on train set\")\n\n parser.add_argument('--base_model_d', type=int, default=64, help=\"Size of base network\")\n parser.add_argument('--base_model_bn', default=True, help=\"Use batch norm in base network\")\n parser.add_argument('--base_max_pool', default=False, help=\"Use max pooling in base network\")\n parser.add_argument('--base_n_conv', type=int, default=3, help=\"Num of convs in base network\")\n parser.add_argument('--base_dropout_rate', type=float, default=0.4, help=\"Dropout rate in base network\")\n\n parser.add_argument('--base_optimizer', type=str, default='Adam',\n help=\"SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...\")\n parser.add_argument('--base_lr', type=float, default=0.01, help=\"Learning rate for base network\")\n parser.add_argument('--base_nesterov', action='store_true', help='Whether to use nesterov momentum in base network')\n parser.add_argument('--base_momentum', type=float, default=0)\n parser.add_argument('--base_weight_decay', type=float, default=0)\n\n # Generative network - currently binary latent autoencoder\n parser.add_argument('--gen_batch_size', type=int, default=50)\n parser.add_argument('--gen_n_dim_coding', type=int, default=10,\n help=\"Number of bits used to code task id in binary autoencoder\")\n parser.add_argument('--gen_p_coding', type=int, default=307,\n help=\"Prime number used to calculated codes in binary autoencoder\")\n parser.add_argument('--gen_latent_size', type=int, default=200, help=\"Latent size in binary autoencoder\")\n parser.add_argument('--gen_d', type=int, default=32, help=\"Size of binary autoencoder\")\n parser.add_argument('--gen_ae_pre_epochs', type=int, default=20,\n help=\"Number of epochs to train autoencoder before freezing the codes\")\n parser.add_argument('--gen_ae_epochs', type=int, default=200, help=\"Number of epochs to train autoencoder\")\n parser.add_argument('--gen_load_pretrained_models', default=False, help=\"Load pretrained generative models\")\n parser.add_argument('--gen_pretrained_models_dir', type=str, default=\"results/pretrained_models\",\n help=\"Directory of pretrained generative models\")\n\n args = parser.parse_args(argv)\n\n return args\n\n\nif __name__ == '__main__':\n args = get_args(sys.argv[1:])\n\n torch.cuda.set_device(0)\n device = torch.device(\"cuda\")\n\n if args.seed:\n print(\"Using manual seed = {}\".format(args.seed))\n\n random.seed(args.seed)\n 
torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n print(\"WARNING: Not using manual seed - your experiments will not be reproducible\")\n\n acc_val, acc_test = {}, {}\n os.makedirs(f\"{args.rpath}{args.experiment_name}\", exist_ok=True)\n with open(f\"{args.rpath}{args.experiment_name}/args.txt\", \"w\") as text_file:\n text_file.write(str(args))\n for r in range(args.repeat):\n acc_val[r], _, acc_test[r] = run(args)\n np.save(f\"{args.rpath}{args.experiment_name}/acc_val.npy\", acc_val)\n np.save(f\"{args.rpath}{args.experiment_name}/acc_test.npy\", acc_test)\n plot_final_results([args.experiment_name])\n"
] |
[
[
"torch.Tensor",
"torch.cuda.set_device",
"torch.zeros",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.load",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BruceCherniak/blospray
|
[
"b17cf9d88551b3d8eb3794692d841252a1d55760"
] |
[
"render_ospray/__init__.py"
] |
[
"# ======================================================================== #\n# BLOSPRAY - OSPRay as a Blender render engine #\n# Paul Melis, SURFsara <[email protected]> #\n# Render engine definition #\n# ======================================================================== #\n# Copyright 2018-2019 SURFsara #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# ======================================================================== #\n\nbl_info = {\n \"name\": \"OSPRay\",\n \"author\": \"Paul Melis\",\n \"version\": (0, 0, 3),\n \"blender\": (2, 80, 0),\n \"location\": \"Render > Engine > OSPRay\",\n \"description\": \"OSPRay integration for blender\",\n \"warning\": \"Alpha/Beta quality\",\n \"category\": \"Render\"}\n \nif \"bpy\" in locals():\n import importlib\n if 'ui' in locals():\n importlib.reload(ui)\n if 'properties' in locals():\n importlib.reload(properties)\n if 'connection' in locals():\n importlib.reload(connection)\n #imp.reload(render)\n #imp.reload(update_files)\n \nimport sys, logging, socket, threading, time, traceback, weakref\nfrom math import tan, atan, degrees\nfrom queue import Queue\nfrom select import select\nfrom struct import unpack\n\nimport bpy, bgl\nimport numpy\n\nfrom .common import send_protobuf, receive_protobuf, OSP_FB_RGBA32F\nfrom .sync import BlenderCamera, sync_view\nfrom .connection import Connection\nfrom .messages_pb2 import (\n ClientMessage,\n RenderResult,\n WorldSettings, CameraSettings, LightSettings, RenderSettings,\n)\n\n# bpy.app.background\n\nHOST = 'localhost'\nPORT = 5909\n\ndef setup_logging(logger_name, logfile, console=True):\n\n # Format\n formatter = logging.Formatter('%(asctime)s - %(name)15s - %(levelname)-5s [%(thread)08x] %(message)s') \n \n logger = logging.getLogger(logger_name)\n logger.propagate = False\n logger.setLevel(logging.ERROR)\n\n # Log all to file, truncates existing file\n file_handler = logging.FileHandler(logfile, mode='w')\n file_handler.setLevel(logging.ERROR)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Log info and higher to console\n if console:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.ERROR)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info('------- Logging started ----------')\n\n return logger\n\nsetup_logging('blospray', 'blospray.log')\n\n\nclass ReceiveRenderResultThread(threading.Thread):\n\n \"\"\"\n Thread to handle receiving of interactive render results \n (i.e. 
framebuffer)\n \"\"\"\n\n def __init__(self, engine, connection, result_queue, log, num_samples, initial_reduction_factor):\n threading.Thread.__init__(self)\n self.engine_ref = weakref.ref(engine)\n self.connection = connection\n self.result_queue = result_queue\n self.log = log\n\n self.num_samples = num_samples\n self.initial_reduction_factor = initial_reduction_factor\n\n self._cancel = threading.Event()\n\n def cancel(self):\n \"\"\"Cancel rendering, to be sent by outside thread\"\"\" \n self._cancel.set()\n\n def run(self):\n\n # Start rendering on the server\n self.log.debug('(RRR thread) Sending START_RENDERING to server')\n client_message = ClientMessage()\n client_message.type = ClientMessage.START_RENDERING\n client_message.string_value = \"interactive\"\n client_message.uint_value = self.num_samples \n client_message.uint_value2 = self.initial_reduction_factor\n self.connection.send_protobuf(client_message)\n\n # Loop to get results until the render is either done or canceled\n\n sock = self.connection.sock # XXX \n rsocks = [sock]\n incoming_data = []\n\n framebuffer = None\n fbview = None\n\n # h = receive protobuf length header\n # r = receive RenderResult protobuf\n # f = receive framebuffer data\n mode = 'h' \n bytes_left = 4 \n\n self.log.debug('(RRR thread) Entering receive loop')\n while True:\n\n if self._cancel.is_set(): \n self.log.info('(RRR thread) Got request to cancel thread, sending CANCEL_RENDERING to server')\n client_message = ClientMessage()\n client_message.type = ClientMessage.CANCEL_RENDERING\n self.connection.send_protobuf(client_message) \n self._cancel.clear()\n\n # Check for new incoming data\n r, w, e = select(rsocks, [], [], 0.001)\n\n if len(r) == 0:\n continue\n\n # There's new data available, read some\n\n if mode == 'f':\n #print('bufsize', len(fbview), 'bytes_left', bytes_left)\n n = sock.recv_into(fbview, bytes_left)\n if n == 0:\n # XXX\n self.log.error('(RRR thread) Connection reset by peer, exiting')\n break\n bytes_left -= n\n assert bytes_left >= 0\n fbview = fbview[n:]\n\n else:\n d = sock.recv(bytes_left)\n if d == '':\n # XXX\n self.log.error('(RRR thread) Connection reset by peer, exiting')\n break \n bytes_left -= len(d)\n assert bytes_left >= 0\n\n # Next step?\n\n if mode == 'h': \n\n assert bytes_left == 0 # Assume we got the 4 byte length header in one recv()\n bytes_left = unpack('<I', d)[0]\n assert bytes_left > 0\n mode = 'r'\n\n elif mode == 'r':\n\n incoming_data.append(d)\n\n if bytes_left > 0:\n continue\n \n message = b''.join(incoming_data)\n incoming_data = []\n\n render_result = RenderResult()\n render_result.ParseFromString(message)\n\n self.log.debug('(RRR thread): Render result of type %s' % render_result.type)\n self.log.debug('(RRR thread): %s' % render_result)\n\n if render_result.type == RenderResult.FRAME: \n # XXX can keep buffer if res didn't change\n self.log.debug('allocating empty %d x %d' % (render_result.width, render_result.height))\n \n mode = 'f' \n bytes_left = render_result.file_size\n\n framebuffer = numpy.empty(bytes_left, dtype=numpy.uint8)\n fbview = memoryview(framebuffer)\n \n else:\n # DONE, CANCELED\n self.result_queue.put((render_result, None))\n\n #mode = 'h'\n #bytes_left = 4\n\n # XXX why not break on DONE as well?\n #if render_result.type == RenderResult.CANCELED: \n # break \n\n break\n\n elif mode == 'f':\n\n if bytes_left > 0:\n continue\n\n mode = 'h'\n bytes_left = 4\n\n # Got complete frame buffer, let engine know\n self.result_queue.put((render_result, framebuffer)) \n\n engine = 
self.engine_ref()\n if engine is not None:\n try: \n self.log.debug('(RRR thread) Tagging for view_draw()')\n engine.tag_redraw()\n except ReferenceError:\n # StructRNA of type OsprayRenderEngine has been removed\n break\n engine = None\n else:\n # Engine has gone away\n break\n \n self.log.info('(RRR thread) Done')\n\n \nclass OsprayRenderEngine(bpy.types.RenderEngine):\n bl_idname = \"OSPRAY\"\n bl_label = \"OSPRay\"\n # See ./source/blender/makesrna/intern/rna_render.c\n # scripts/startup/nodeitems_builtins.py defines the builtin nodes AND\n # for which renderers they are shown\n bl_use_preview = False # Availability of material preview renders\n #bl_use_shading_nodes = True # No longer available in 2.8, see 095df1ac217f3e43667f94ab189a67175bcd7af5\n bl_use_shading_nodes_custom = False # If True will hide cycles shading nodes\n #bl_use_eevee_viewport = True\n \n # Init is called whenever a new render engine instance is created. Multiple\n # instances may exist at the same time, for example for a viewport and final\n # render. \n def __init__(self):\n self.log = logging.getLogger('blospray')\n\n self.log.info('OsprayRenderEngine.__init__() [%s]' % self)\n super(OsprayRenderEngine, self).__init__()\n\n self.connection = None\n self.render_output_connection = None\n\n self.first_view_update = True\n self.rendering_active = False \n self.receive_render_result_thread = None\n\n self.viewport_width = self.viewport_height = None\n \n self.last_view_matrix = None\n self.last_ortho_view_height = None\n self.last_view_camera_zoom = None\n self.last_view_camera_offset = None\n\n self.draw_data = None \n \n # When the render engine instance is destroyed, this is called. Clean up any\n # render engine data here, for example stopping running render threads. \n def __del__(self):\n print('OsprayRenderEngine.__del__()')\n logging.getLogger('blospray').info('[%s] OsprayRenderEngine.__del__() [%s]' % (time.asctime(), self))\n\n if hasattr(self, 'receive_render_result_thread') and self.receive_render_result_thread is not None:\n self.cancel_render_thread()\n \n # XXX doesn't work, apparently self.connection is no longer available here?\n if hasattr(self, 'connection') and self.connection is not None: \n self.connection.close()\n\n def connect(self, depsgraph):\n assert self.connection is None\n ospray = depsgraph.scene.ospray \n self.connection = Connection(self, ospray.host, ospray.port) \n return self.connection.connect()\n\n def connect_render_output(self, depsgraph):\n assert self.render_output_connection is None\n ospray = depsgraph.scene.ospray \n self.render_output_connection = Connection(self, ospray.host, ospray.port) \n assert self.render_output_connection.connect()\n self.render_output_connection.request_render_output()\n\n def start_render_thread(self):\n assert self.receive_render_result_thread is None\n self.log.debug('Starting render thread')\n self.render_result_queue = Queue()\n self.receive_render_result_thread = ReceiveRenderResultThread(self, self.connection, self.render_result_queue, self.log, \n self.num_samples, self.initial_reduction_factor)\n self.receive_render_result_thread.start()\n\n def cancel_render_thread(self):\n assert self.receive_render_result_thread is not None\n self.log.debug('cancel_render_thread(): Waiting for render thread to cancel')\n t0= time.time()\n self.receive_render_result_thread.cancel()\n self.receive_render_result_thread.join()\n t1 = time.time()\n self.log.info('***************** CANCEL AND JOIN: %f' % (t1-t0))\n self.receive_render_result_thread = 
None\n self.log.debug('cancel_render_thread(): Render thread canceled') \n \n # Final (and preview) render\n \n def update(self, data, depsgraph):\n \"\"\"\n Export scene data for final or material preview render\n \n Note that this method is always called, even when re-rendering\n exactly the same scene or moving just the camera.\n \"\"\"\n self.log.info('OsprayRenderEngine.update() [%s]' % self) \n\n assert not self.is_preview\n\n self.update_succeeded = False\n \n if not self.connect(depsgraph): \n self.report({'ERROR'}, 'Failed to connect to server')\n return \n\n try:\n self.connection.update(data, depsgraph)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info() \n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n self.log.exception('Exception while updating scene on server')\n self.report({'ERROR'}, 'Exception while updating scene on server: %s' % sys.exc_info()[0])\n return \n\n # XXX if we fail connecting here there's no way to \n # signal to blender that it should *not* subsequently call render().\n # Seems update() and render() are always called as a pair. \n # Strange, why have two separate methods then?\n # So we use self.update_succeeded to handle it ourselves.\n\n self.update_succeeded = True\n \n # This is the method called by Blender for both final renders (F12) and\n # small preview for materials, world and lights. \n def render(self, depsgraph):\n \"\"\"Render scene into an image\"\"\"\n self.log.info('OsprayRenderEngine.render() [%s]' % self)\n\n if not self.update_succeeded:\n return\n \n try:\n self.connection.render(depsgraph)\n self.connection.close() \n self.connection = None\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info() \n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n self.log.exception('Exception while rendering scene on server')\n self.report({'ERROR'}, 'Exception while rendering scene on server: %s' % sys.exc_info()[0])\n\n # Interactive render\n\n def _print_depsgraph_updates(self, depsgraph):\n print('--- DEPSGRAPH UPDATES '+'-'*50)\n\n types = ['ACTION', 'ARMATURE', 'BRUSH', 'CAMERA', 'CACHEFILE', 'CURVE', 'FONT', 'GREASEPENCIL', 'COLLECTION', 'IMAGE', 'KEY', 'LIGHT', 'LIBRARY', 'LINESTYLE', 'LATTICE', 'MASK', 'MATERIAL', 'META', 'MESH', 'MOVIECLIP', 'NODETREE', 'OBJECT', 'PAINTCURVE', 'PALETTE', 'PARTICLE', 'LIGHT_PROBE', 'SCENE', 'SOUND', 'SPEAKER', 'TEXT', 'TEXTURE', 'WINDOWMANAGER', 'WORLD', 'WORKSPACE']\n for t in types:\n if depsgraph.id_type_updated(t):\n print(\"Type %s updated\" % t)\n\n print()\n \n for update in depsgraph.updates:\n print('Datablock \"%s\" updated (%s)' % (update.id.name, type(update.id)))\n if update.is_updated_geometry:\n print('-- geometry was updated')\n if update.is_updated_transform:\n print('-- transform was updated')\n \n print('-'*50)\n\n def update_scene_from_depsgraph(self, depsgraph):\n self._print_depsgraph_updates(depsgraph) \n\n for update in depsgraph.updates:\n print('Datablock \"%s\" updated (%s)' % (update.id.name, type(update.id)))\n if update.is_updated_geometry:\n print('-- geometry was updated')\n if update.is_updated_transform:\n print('-- transform was updated')\n\n datablock = update.id\n print(datablock)\n print(dir(datablock))\n\n if isinstance(datablock, bpy.types.Material):\n self.connection.send_updated_material(None, depsgraph, datablock, True)\n elif isinstance(datablock, bpy.types.Object): \n if datablock.type == 'LIGHT':\n self.connection.send_updated_light(None, depsgraph, datablock)\n\n # For viewport renders, this 
method gets called once at the start and\n # whenever the scene or 3D viewport changes. This method is where data\n # should be read from Blender in the same thread. Typically a render\n # thread will be started to do the work while keeping Blender responsive. \n def view_update(self, context, depsgraph):\n \"\"\"Update on data changes for viewport render\"\"\"\n self.log.info('OsprayRenderEngine.view_update() [%s]' % self) \n\n scene = depsgraph.scene\n ospray = scene.ospray\n render = scene.render\n #world = scene.world\n region = context.region \n\n restart_rendering = False \n\n if self.first_view_update: \n\n self.log.debug('view_update(): FIRST')\n\n assert self.receive_render_result_thread is None\n\n # Open connection\n if not self.connect(depsgraph): \n self.log.info('ERROR(view_update): Failed to connect to BLOSPRAY server') \n return\n\n # Renderer type\n self.connection.send_updated_renderer_type(scene.ospray.renderer)\n\n # Render settings\n render_settings = RenderSettings()\n render_settings.renderer = scene.ospray.renderer \n render_settings.max_depth = scene.ospray.max_depth\n render_settings.min_contribution = scene.ospray.min_contribution\n render_settings.variance_threshold = scene.ospray.variance_threshold\n if scene.ospray.renderer == 'scivis':\n render_settings.ao_samples = scene.ospray.ao_samples\n render_settings.ao_radius = scene.ospray.ao_radius\n render_settings.ao_intensity = scene.ospray.ao_intensity\n elif scene.ospray.renderer == 'pathtracer':\n render_settings.roulette_depth = scene.ospray.roulette_depth\n render_settings.max_contribution = scene.ospray.max_contribution\n render_settings.geometry_lights = scene.ospray.geometry_lights\n\n self.connection.send_updated_render_settings(render_settings) \n\n # Framebuffer settings\n viewport_width, viewport_height = region.width, region.height\n self.viewport_width = viewport_width\n self.viewport_height = viewport_height\n # Reduction factor is passed with START_RENDERING\n self.connection.send_updated_framebuffer_settings('interactive', viewport_width, viewport_height, OSP_FB_RGBA32F)\n\n # Send complete (visible) scene\n # XXX put exception handler around whole block above\n try:\n self.log.info('Sending initial scene')\n self.connection.update(None, depsgraph)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info() \n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n self.log.exception('Exception while updating scene on server')\n self.report({'ERROR'}, 'Exception sending initial scene to server: %s' % sys.exc_info()[0])\n return \n\n self.first_view_update = False\n\n restart_rendering = True\n\n else:\n # Cancel render thread and wait for it to finish\n self.log.debug('view_update(): canceling render thread')\n\n if self.receive_render_result_thread is not None:\n self.log.debug('view_update(): canceling render thread')\n self.cancel_render_thread()\n\n # Update scene on server\n self.log.debug('view_update(): SUBSEQUENT')\n\n self.update_scene_from_depsgraph(depsgraph)\n \n restart_rendering = True\n\n if restart_rendering:\n self.log.info('view_update(): restarting rendering')\n assert self.receive_render_result_thread is None\n # Start thread to handle results\n self.num_samples = ospray.viewport_samples\n self.initial_reduction_factor = ospray.reduction_factor\n self.start_render_thread()\n\n # For viewport renders, this method is called whenever Blender redraws\n # the 3D viewport. 
The renderer is expected to quickly draw the render\n # with OpenGL, and not perform other expensive work.\n # Blender will draw overlays for selection and editing on top of the\n # rendered image automatically. \n def view_draw(self, context, depsgraph):\n \"\"\"\n Draw viewport render\n\n Note: some changes in blender do not cause a view_update(),\n but only a view_draw():\n - Resizing the 3D editor that's in interactive rendering mode\n \"\"\"\n self.log.info('OsprayRenderEngine.view_draw() [%s]' % self) \n\n restart_rendering = False\n update_camera = False \n\n assert len(depsgraph.updates) == 0\n\n scene = depsgraph.scene\n ospray = scene.ospray\n region = context.region\n assert region.type == 'WINDOW' \n assert context.space_data.type == 'VIEW_3D'\n \n region_data = context.region_data\n space_data = context.space_data \n\n # Get viewport dimensions\n viewport_width, viewport_height = viewport_dimensions = region.width, region.height \n \n if viewport_width != self.viewport_width or viewport_height != self.viewport_height:\n self.log.info('view_draw(): viewport size changed to %d x %d' % (viewport_width,viewport_height)) \n\n if self.receive_render_result_thread is not None:\n self.log.debug('view_draw(): canceling render thread')\n self.cancel_render_thread()\n\n self.viewport_width = viewport_width\n self.viewport_height = viewport_height\n # Reduction factor is passed with START_RENDERING\n self.connection.send_updated_framebuffer_settings('interactive', viewport_width, viewport_height, OSP_FB_RGBA32F) \n restart_rendering = True\n update_camera = True\n\n # Camera view \n # XXX clipping and focal length change should trigger camera update\n\n view_matrix = region_data.view_matrix\n if update_camera or view_matrix != self.last_view_matrix or \\\n (region_data.view_perspective == 'ORTHO' and region_data.view_distance != self.last_ortho_view_height) or \\\n (region_data.view_perspective == 'CAMERA' and (region_data.view_camera_zoom != self.last_view_camera_zoom or list(region_data.view_camera_offset) != self.last_view_camera_offset)):\n \n self.log.info('view_draw(): view matrix changed, or camera updated')\n\n if self.receive_render_result_thread is not None:\n self.log.debug('view_draw(): canceling render thread')\n self.cancel_render_thread()\n\n self.connection.send_updated_camera_for_interactive_view(scene.render, region_data, space_data, self.viewport_width, self.viewport_height)\n \n self.last_view_matrix = view_matrix.copy()\n self.last_ortho_view_height = region_data.view_distance\n self.last_view_camera_zoom = region_data.view_camera_zoom\n self.last_view_camera_offset = list(region_data.view_camera_offset)\n \n restart_rendering = True\n\n # Restart rendering if needed\n\n if restart_rendering:\n self.log.info('view_draw(): restarting rendering')\n self.num_samples = ospray.viewport_samples\n self.initial_reduction_factor = ospray.reduction_factor\n self.start_render_thread()\n\n # Bind shader that converts from scene linear to display space\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA);\n self.bind_display_space_shader(scene) \n\n # Check for incoming render results\n\n while self.render_result_queue.qsize() > 0:\n\n render_result, framebuffer = self.render_result_queue.get() \n\n if render_result.type == RenderResult.FRAME:\n self.log.info('FRAME') \n\n rf = render_result.reduction_factor\n if rf > 1:\n self.update_stats('', 'Rendering sample %d/%d (reduced %dx)' % (render_result.sample, self.num_samples, rf))\n else:\n 
self.update_stats('', 'Rendering sample %d/%d' % (render_result.sample, self.num_samples))\n \n image_dimensions = render_result.width, render_result.height\n fbpixels = framebuffer.view(numpy.float32)\n\n if not self.draw_data or self.draw_data.image_dimensions != image_dimensions or self.draw_data.viewport_dimensions != viewport_dimensions:\n self.log.info('Creating new CustomDrawData(viewport = %s, image = %s)' % (viewport_dimensions, image_dimensions))\n self.draw_data = CustomDrawData(viewport_dimensions, image_dimensions, fbpixels)\n else:\n self.log.info('Updating pixels of existing CustomDraw')\n self.draw_data.update_pixels(fbpixels)\n\n elif render_result.type == RenderResult.DONE:\n self.log.info('DONE')\n self.rendering_active = False\n self.update_stats('', 'Rendering Done')\n\n elif render_result.type == RenderResult.CANCELED:\n self.log.info('CANCELED')\n # Thread will have exited by itself already\n self.receive_render_result_thread = None\n\n if self.draw_data is not None: \n self.draw_data.draw()\n\n self.unbind_display_space_shader()\n bgl.glDisable(bgl.GL_BLEND) \n \n # Nodes\n\n if False:\n \n def update_script_node(self, node):\n \"\"\"Compile shader script node\"\"\"\n self.log.debug('OsprayRenderEngine.update_script_node() [%s]' % self)\n \n\n\n# Based on https://docs.blender.org/api/current/bpy.types.RenderEngine.html\nclass CustomDrawData:\n\n def __init__(self, viewport_dimensions, image_dimensions, pixels):\n self.log = logging.getLogger('blospray')\n\n self.log.info('CustomDrawData.__init__(viewport_dimensions=%s, image_dimensions=%s, fbpixels=%s) [%s]' % \\\n (viewport_dimensions, image_dimensions, pixels.shape, self)) \n \n viewport_width, viewport_height = self.viewport_dimensions = viewport_dimensions\n image_width, image_height = self.image_dimensions = image_dimensions\n \n assert pixels is not None\n pixels = bgl.Buffer(bgl.GL_FLOAT, image_width * image_height * 4, pixels)\n\n # Generate texture\n self.texture = bgl.Buffer(bgl.GL_INT, 1)\n bgl.glGenTextures(1, self.texture)\n\n bgl.glActiveTexture(bgl.GL_TEXTURE0)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])\n bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA16F, image_width, image_height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, pixels)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)\n\n # Bind shader that converts from scene linear to display space,\n # use the scene's color management settings.\n shader_program = bgl.Buffer(bgl.GL_INT, 1)\n bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program)\n\n # Generate vertex array\n self.vertex_array = bgl.Buffer(bgl.GL_INT, 1)\n bgl.glGenVertexArrays(1, self.vertex_array)\n bgl.glBindVertexArray(self.vertex_array[0])\n\n texturecoord_location = bgl.glGetAttribLocation(shader_program[0], \"texCoord\")\n position_location = bgl.glGetAttribLocation(shader_program[0], \"pos\")\n\n bgl.glEnableVertexAttribArray(texturecoord_location)\n bgl.glEnableVertexAttribArray(position_location)\n\n # Generate geometry buffers for drawing textured quad\n position = [0.0, 0.0, viewport_width, 0.0, viewport_width, viewport_height, 0.0, viewport_height]\n position = bgl.Buffer(bgl.GL_FLOAT, len(position), position)\n texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord)\n\n self.vertex_buffer = bgl.Buffer(bgl.GL_INT, 2)\n\n bgl.glGenBuffers(2, 
self.vertex_buffer)\n bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[0])\n bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW)\n bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)\n\n bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[1])\n bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW)\n bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)\n\n bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0)\n bgl.glBindVertexArray(0)\n\n def __del__(self):\n self.log.info('[%s] CustomDrawData.__del__() [%s]' % (time.asctime(), self)) \n bgl.glDeleteBuffers(2, self.vertex_buffer)\n bgl.glDeleteVertexArrays(1, self.vertex_array)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)\n bgl.glDeleteTextures(1, self.texture)\n\n def update_pixels(self, pixels):\n self.log.info('CustomDrawData.update_pixels(%d x %d, %d) [%s]' % (self.image_dimensions[0], self.image_dimensions[1], pixels.shape[0], self)) \n image_width, image_height = self.image_dimensions\n assert pixels.shape[0] == image_width*image_height*4\n pixels = bgl.Buffer(bgl.GL_FLOAT, image_width * image_height * 4, pixels)\n bgl.glActiveTexture(bgl.GL_TEXTURE0) \n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])\n # XXX glTexSubImage2D\n bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA16F, image_width, image_height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, pixels)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR) \n bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)\n\n def draw(self):\n self.log.info('CustomDrawData.draw() [%s]' % self) \n bgl.glActiveTexture(bgl.GL_TEXTURE0)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])\n bgl.glBindVertexArray(self.vertex_array[0])\n bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4);\n bgl.glBindVertexArray(0)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)\n\n\n\n\n\nclasses = (\n OsprayRenderEngine,\n)\n\ndef register():\n from bpy.utils import register_class\n \n from . import operators\n from . import properties\n from . import ui\n from . import nodes\n \n properties.register()\n operators.register()\n ui.register()\n nodes.register()\n \n for cls in classes:\n register_class(cls)\n \n \ndef unregister():\n from bpy.utils import unregister_class\n \n from . import properties\n from . import operators\n from . import ui\n from . import nodes\n \n properties.unregister()\n operators.unregister()\n ui.unregister()\n nodes.unregister()\n \n for cls in classes:\n unregister_class(cls)\n \n \nif __name__ == \"__main__\":\n register() \n \n"
] |
[
[
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
quantology/sqwrl
|
[
"d5f2d822ee3c2343a7b37bca1a8c4328993fe79c"
] |
[
"sqwrl/__init__.py"
] |
[
"\"\"\"\nTODO - basic:\n - tbl.index.name setting\n - tbl adding data - setting columns, appending, etc.\nTODO - groupby:\n - groupby options - groupby indexing (esp for expr groupbys)\n - groupby push out VirtualTables\n - groupby aggregate multiple agg types, dict agg\n - groupby transform / apply?\nTODO - joins:\n - https://pandas.pydata.org/pandas-docs/stable/merging.html\n - test all hows\n - pd.concat (row-wise: UNION, UNION ALL)\n - pd.merge (https://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging)\n - todo: move df.join to pd.merge (more general)\n\n\"\"\"\n\nimport copy\nimport operator\nfrom functools import wraps, partialmethod, reduce\nfrom collections.abc import Iterable\nfrom warnings import warn\nimport numbers\n\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.dialects import mssql, postgresql\nimport sympy\nfrom toolz import assoc, valfilter\n#from odo.backends.sql import types as sa_types\n#from odo.backends.sql import discover_typeengine\nimport datashape\n\n__version__ = \"0.1.0\"\n\n# -------------------------------------\n# COPYING FROM ODO TO REMOVE DEPENDENCY\n# from odo https://github.com/blaze/odo/blob/master/odo/backends/sql.py\n\nsa_types = {\n 'int64': sa.BigInteger,\n 'int32': sa.Integer,\n 'int': sa.Integer,\n 'int16': sa.SmallInteger,\n 'float32': sa.REAL,\n 'float64': sa.FLOAT,\n 'float': sa.FLOAT,\n 'real': sa.FLOAT,\n 'string': sa.Text,\n 'date': sa.Date,\n 'time': sa.Time,\n 'datetime': sa.DateTime,\n 'bool': sa.Boolean,\n \"timedelta[unit='D']\": sa.Interval(second_precision=0, day_precision=9),\n \"timedelta[unit='h']\": sa.Interval(second_precision=0, day_precision=0),\n \"timedelta[unit='m']\": sa.Interval(second_precision=0, day_precision=0),\n \"timedelta[unit='s']\": sa.Interval(second_precision=0, day_precision=0),\n \"timedelta[unit='ms']\": sa.Interval(second_precision=3, day_precision=0),\n \"timedelta[unit='us']\": sa.Interval(second_precision=6, day_precision=0),\n \"timedelta[unit='ns']\": sa.Interval(second_precision=9, day_precision=0),\n # ??: sa.types.LargeBinary,\n}\n\nsa_revtypes = dict(map(reversed, sa_types.items()))\n\n# Subclass mssql.TIMESTAMP subclass for use when differentiating between\n# mssql.TIMESTAMP and sa.TIMESTAMP.\n# At the time of this writing, (mssql.TIMESTAMP == sa.TIMESTAMP) is True,\n# which causes a collision when defining the sa_revtypes mappings.\n#\n# See:\n# https://bitbucket.org/zzzeek/sqlalchemy/issues/4092/type-problem-with-mssqltimestamp\nclass MSSQLTimestamp(mssql.TIMESTAMP):\n pass\n\n# Assign the custom subclass as the type to use instead of `mssql.TIMESTAMP`.\nmssql.base.ischema_names['TIMESTAMP'] = MSSQLTimestamp\n\nsa_revtypes.update({\n sa.DATETIME: datashape.datetime_,\n sa.TIMESTAMP: datashape.datetime_,\n sa.FLOAT: datashape.float64,\n sa.DATE: datashape.date_,\n sa.BIGINT: datashape.int64,\n sa.INTEGER: datashape.int_,\n sa.BIGINT: datashape.int64,\n sa.types.NullType: datashape.string,\n sa.REAL: datashape.float32,\n sa.Float: datashape.float64,\n mssql.BIT: datashape.bool_,\n mssql.DATETIMEOFFSET: datashape.string,\n mssql.MONEY: datashape.float64,\n mssql.SMALLMONEY: datashape.float32,\n mssql.UNIQUEIDENTIFIER: datashape.string,\n # The SQL Server TIMESTAMP value doesn't correspond to the ISO Standard\n # It is instead just a binary(8) value with no relation to dates or times\n MSSQLTimestamp: datashape.bytes_,\n})\n\nprecision_types = {\n sa.Float,\n 
postgresql.base.DOUBLE_PRECISION\n}\n\ndef precision_to_dtype(precision):\n \"\"\"\n Maps a float or double precision attribute to the desired dtype.\n The mappings are as follows:\n [1, 24] -> float32\n [25, 53] -> float64\n Values outside of those ranges raise a ``ValueError``.\n Parameter\n ---------\n precision : int\n A double or float precision. e.g. the value returned by\n `postgresql.base.DOUBLE_PRECISION(precision=53).precision`\n Returns\n -------\n dtype : datashape.dtype (float32|float64)\n The dtype to use for columns of the specified precision.\n \"\"\"\n if isinstance(precision, numbers.Integral):\n if 1 <= precision <= 24:\n return float32\n elif 25 <= precision <= 53:\n return float64\n raise ValueError(\"{} is not a supported precision\".format(precision))\n\n# interval types are special cased in discover_typeengine so remove them from\n# sa_revtypes\nsa_revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), sa_revtypes)\n\ndef discover_typeengine(typ):\n if isinstance(typ, sa.Interval):\n if typ.second_precision is None and typ.day_precision is None:\n return datashape.TimeDelta(unit='us')\n elif typ.second_precision == 0 and typ.day_precision == 0:\n return datashape.TimeDelta(unit='s')\n\n if typ.second_precision in units_of_power and not typ.day_precision:\n units = units_of_power[typ.second_precision]\n elif typ.day_precision > 0:\n units = 'D'\n else:\n raise ValueError('Cannot infer INTERVAL type with parameters'\n 'second_precision=%d, day_precision=%d' %\n (typ.second_precision, typ.day_precision))\n return datashape.TimeDelta(unit=units)\n if type(typ) in precision_types and typ.precision is not None:\n return precision_to_dtype(typ.precision)\n if typ in sa_revtypes:\n return datashape.dshape(sa_revtypes[typ])[0]\n if type(typ) in sa_revtypes:\n return sa_revtypes[type(typ)]\n if isinstance(typ, sa.Numeric):\n return datashape.Decimal(precision=typ.precision, scale=typ.scale)\n if isinstance(typ, (sa.String, sa.Unicode)):\n return datashape.String(typ.length, 'U8')\n else:\n for k, v in sa_revtypes.items():\n if isinstance(k, type) and (isinstance(typ, k) or\n hasattr(typ, 'impl') and\n isinstance(typ.impl, k)):\n return v\n if k == typ:\n return v\n raise NotImplementedError(\"No SQL-datashape match for type %s\" % typ)\n\n# -------------------------------------\n# END COPYING FROM ODO\n# -------------------------------------\n\ndef is_striter(val):\n return isinstance(val, Iterable) and all(isinstance(el, str) for el in val)\n\ndef is_iter_notstr(val):\n return isinstance(val, Iterable) and not isinstance(val, str)\n\ndef and_(*args):\n return reduce(operator.and_, args)\n\ndef _dtype(type_name):\n if type_name == \"string\":\n type_name = \"object\"\n return np.dtype(type_name)\n\nclass DB:\n def __init__(self, engine, verbose=False, check=\"auto\",\n autoindex=True):\n if isinstance(engine, str):\n engine = sa.create_engine(engine, echo=verbose)\n else:\n engine.echo = verbose\n self.engine = engine\n if check == \"auto\":\n try:\n from IPython import get_ipython\n check = get_ipython() is not None\n except ImportError:\n check = False\n self.check = check\n self.autoindex = autoindex\n\n @property\n def metadata(self):\n return sa.MetaData().reflect(bind=self.engine)\n\n @property\n def tables(self):\n return self.engine.table_names()\n\n def __iter__(self):\n return iter(self.tables)\n def __contains__(self, k):\n return k in self.tables\n def __len__(self):\n return len(self.tables)\n\n def __getitem__(self, k):\n assert not self.check or k in 
self\n return Table(self.engine, k, check=self.check,\n index=self.autoindex)\n \n def __setitem__(self, k, v):\n if k not in self:\n metadata, _ = Table.from_df(v, k)\n metadata.create_all(self.engine)\n self[k].append(v)\n else:\n raise NotImplementedError()\n\n_colobjtypes = {\n str: sa.String\n}\n\ndef to_sqlalchemy_type(s):\n if s.dtype.name in sa_types:\n return sa_types[s.dtype.name]\n el = s.iloc[0]\n if type(el).__name__ in sa_types:\n return sa_types[s.dtype.name]\n for k, v in _colobjtypes.items():\n if isinstance(el, k):\n return v\n raise TypeError(\"unknown type: %s / %s\" % (s.dtype.name, type(el)))\n\n_numeric_types = [typ for typ in sa_types if any(\n typ.startswith(numtyp) for numtyp in ['bool', 'float', 'int', 'timedelta'])]\n\n\nclass VirtualTable:\n def __init__(self, engine, salc, check=True,\n whereclause=None, from_i=None, to_i=None, \n sort_by=[], # (by, asc) tuples\n index=True, columns=None):\n self.engine = engine\n self.sa = salc\n self._whereclause = whereclause\n self._from_i = from_i\n self._to_i = to_i\n self._sort_by = sort_by\n\n if isinstance(index, (str, Expression)):\n index = [index]\n if index == True: # auto-detect\n self._ix = [c.name for c in self.sa_columns if c.primary_key]\n self._ixdata = [c for c in self.sa_columns if c.primary_key]\n elif is_striter(index):\n self._ix = list(index)\n self._ixdata = [self.sa_colmap[col] for col in self._ix]\n elif index == False or index is None:\n self._ix = []\n self._ixdata = []\n elif all(isinstance(ix, Expression) for ix in index):\n self._ix = [c.name for c in index]\n self._ixdata = list(index)\n\n if columns is None:\n self._columns = [c.name for c in self.sa_columns if not c.name in self._ix]\n self._coldata = [c for c in self.sa_columns if not c.name in self._ix]\n elif is_striter(columns):\n self._columns = list(columns)\n self._coldata = [self.sa_colmap[col] for col in self._columns]\n elif all(isinstance(col, Expression) for col in columns):\n self._columns = [c.name for c in columns]\n self._coldata = list(columns)\n\n def copy(self, **new_attrs):\n new = copy.copy(self)\n for k, v in new_attrs.items():\n setattr(new, k, v)\n return new\n\n ## column stuffs\n @property\n def sa_columns(self):\n cols = self.sa.columns\n self.__dict__['sa_columns'] = cols\n return cols\n @property\n def sa_colmap(self):\n colmap = {c.name: c for c in self.sa_columns}\n self.__dict__['sa_colmap'] = colmap\n return colmap\n @property\n def columns(self):\n return self._columns\n @columns.setter\n def columns(self, column_names):\n assert len(column_names) == len(self._coldata)\n self._columns = column_names\n def _colmatches(self, col, singleton=False, required=False):\n matches = [datum for name, datum in zip(self._columns, self._coldata)\n if col == name]\n if required and not matches:\n raise KeyError(\"key %r not found among %r\" % (col, self._columns))\n if singleton:\n if len(matches) > 1:\n raise KeyError(\"ambiguous key %r among %r\" % (col, self._columns))\n matches = matches[0] if matches else None\n return matches\n def rename(self, columns=None):\n if columns is not None:\n if isinstance(columns, Mapping):\n new_cols = [columns.get(col, col) for col in self._columns]\n elif isinstance(columns, Callable):\n new_cols = [columns(col) for col in self._columns]\n else:\n raise TypeError(\"unknown mapper type: %s\" % (type(columns)))\n return self.copy(_columns=new_cols)\n return self\n @property\n def coltypes(self):\n cols = [c for c in self.sa_columns if not c.name in self._ix]\n return 
pd.Series([str(discover_typeengine(c.type)) for c in cols],\n index=[c.name for c in cols])\n @property\n def dtypes(self):\n return self.coltypes.map(_dtype)\n\n def iteritems(self):\n yield from zip(self._columns, self._coldata)\n items = iteritems\n def keys(self):\n yield from self._columns\n __iter__ = keys\n def __getitem__(self, k):\n if isinstance(k, str):\n colmatches = self._colmatches(k, required=True)\n if len(colmatches) == 1:\n return Expression(self, colmatches[0], k)\n else:\n return self.copy(_columns=[k]*len(colmatches), _coldata=colmatches)\n elif is_striter(k):\n new_columns = []\n new_coldata = []\n for el in k:\n colmatches = self._colmatches(el, required=True)\n new_columns += [el] * len(colmatches)\n new_coldata += colmatches\n return self.copy(_columns=new_columns, _coldata=new_coldata)\n elif isinstance(k, slice):\n return self.islice(k)\n elif isinstance(k, Expression):\n return self.where(k)\n return self._loc(k)\n\n ## indexing\n @property\n def index(self):\n if len(self._ix) == 0:\n return None\n if len(self._ix) == 1:\n return Expression(self, self._ixdata[0], self._ix[0])\n else:\n # multindex...return dataframe??\n return self.copy(_columns=list(_ix), _coldata=list(_ixdata))\n\n def reset_index(self, drop=False):\n if drop:\n return self.copy(_ix=[], _ixdata=[])\n return self.copy(_ix=[], _ixdata=[], _columns=self._columns + self._ix,\n _coldata=self._coldata + self._ixdata)\n\n def set_index(self, keys, drop=True, append=False):\n if isinstance(keys, (str, Expression)):\n keys = [keys]\n new_ix = list(self._ix) if append else []\n new_ixdata = list(self._ixdata) if append else []\n new_columns = list(self._columns)\n new_coldata = list(self._coldata)\n for k in keys:\n if isinstance(k, str):\n new_ixdata.append(self._colmatches(k, singleton=True, required=True))\n new_ix.append(k)\n if drop:\n ix = new_columns.index(k)\n new_columns.pop(ix)\n new_coldata.pop(ix)\n elif isinstance(k, Expression):\n new_ixdata.append(k)\n new_ix.append(k.name)\n return self.copy(_ix=new_ix, _ixdata=new_ixdata,\n _columns=new_columns, _coldata=new_coldata)\n\n ## location\n def _lookup(self, k):\n result = self.where(self.index == k).df\n if len(result) == 1: # and not isinstance(k, sa.sql.elements.ClauseElement):\n return result.iloc[0]\n elif len(result) == 0:\n raise KeyError(\"%r not found in %s\" % (k, self.index))\n return result\n def _loc(self, k):\n # actually returns a dataframe/series for lookups\n # .loc[normal loc, columns??]\n if isinstance(k, tuple) and len(k) == 2:\n condition, cols = k\n if isinstance(cols, str) or is_striter(cols):\n return self._loc(condition)[cols]\n if isinstance(k, slice):\n # slice (greater than: less than)\n if k.step is not None:\n return self._loc(slice(k.start, k.stop))[::k.step]\n if k.start is None and k.stop is not None:\n return self.where(self.index <= k.stop)\n if k.start is not None and k.stop is None:\n return self.where(self.index >= k.start)\n if k.start is not None and k.stop is not None:\n return self.where(self.index >= k.start & self.index <= k.stop)\n return self\n if isinstance(k, Expression):\n # boolean array?\n return self.where(k)\n elif is_iter_notstr(k):\n # list of elements\n results = [self._lookup(el) for el in k]\n result = pd.concat([pd.DataFrame([r]) if isinstance(r, pd.Series) else r\n for r in results])\n result.index.name = self.index.name # ???\n dtypes = dict(zip(self.columns, self.dtypes))\n for col in result.columns:\n result[col] = result[col].astype(dtypes[col])\n return result\n #if 
all(isinstance(result, pd.Series) for result in results):\n # return pd.DataFrame([self._lookup(el) for el in k])\n #return pd.concat([self._lookup(el) for el in k]) # if some dfs in mix...\n else:\n # single element?\n return self._lookup(k)\n def islice(self, from_i=None, to_i=None, step=None):\n # !? compound with where?\n if isinstance(from_i, slice) and to_i is None and step is None:\n return self.islice(from_i.start, from_i.stop, from_i.step)\n if step is not None:\n assert step == -1 and self._sort_by\n sort_by = [(by, not asc) for by, asc in self._sort_by]\n else:\n sort_by = self._sort_by\n # negative indexes:\n if (from_i is not None and from_i < 0) or (to_i is not None and to_i < 0):\n l = len(self)\n if from_i is not None and from_i < 0:\n from_i += l\n if to_i is not None and to_i < 0:\n to_i += l\n base_from = 0 if self._from_i is None else self._from_i\n base_to = float('inf') if self._to_i is None else self._to_i\n new_from = base_from + (from_i or 0)\n new_to = base_to if to_i is None else min(base_to, base_from + to_i)\n if new_to == float('inf'):\n new_to = None\n return self.copy(_from_i=new_from or None, _to_i=new_to, _sort_by=sort_by)\n @property\n def iloc(self):\n return Indexer(self.islice)\n @property\n def loc(self):\n return Indexer(self._loc)\n def where(self, where):\n if self._from_i or self._to_i:\n warn(\"wheres on slices not accurately implemented, use at your own risk\")\n if self._whereclause is not None:\n where = self._whereclause & where\n return self.copy(_whereclause=where)\n def head(self, n=5):\n return self.islice(0, n)\n def tail(self, n=5):\n return self.islice(-n)\n\n ## sorting\n def sort_values(self, by, ascending=True):\n if self._from_i or self._to_i:\n warn(\"sorts on slices not accurately implemented, use at your own risk\")\n if isinstance(by, (str, Expression)):\n by = [by]\n ascending = [ascending]\n elif ascending in {True, False}:\n ascending = [ascending] * len(by)\n sort_by = list(self._sort_by)\n for k, asc in zip(reversed(by), reversed(ascending)):\n if isinstance(k, str):\n colmatch = self._colmatches(k, singleton=True, required=True)\n sort_by.insert(0, (Expression(self, colmatch, k), asc))\n elif isinstance(k, Expression):\n sort_by.insert(0, (k, asc))\n else:\n raise TypeError(\"unknown type for sort: %s\" % type(k))\n return self.copy(_sort_by=sort_by)\n\n def sort_index(self, ascending=True):\n if self._from_i or self._to_i:\n warn(\"sorts on slices not accurately implemented, use at your own risk\")\n return self.sort_values([Expression(self, datum, ix) for ix, datum in\n zip(self._ix, self._ixdata)], ascending=ascending)\n\n def _query_sorted_by(self, q, by, ascending=True):\n if isinstance(by, (str, Expression)):\n by = [by]\n ascending = [ascending]\n elif ascending in {True, False}:\n ascending = [ascending] * len(by)\n order_by = []\n for k, asc in zip(by, ascending):\n if isinstance(k, str):\n k = self._colmatches(k, singleton=True, required=True)\n elif isinstance(k, Expression):\n k = k.sa\n else:\n raise TypeError(\"unknown by type: %s\" % type(k))\n order_by.append(k if asc else k.desc())\n return q.order_by(*order_by)\n\n ## query interactions\n def connect(self):\n return self.engine.connect()\n def _select_query(self, what, where=None, from_i=None, to_i=None, groupby=None,\n sort_by=None, sort_ascending=True):\n if sort_by is not None:\n return self.sort_values(by=sort_by, ascending=sort_ascending)._select_query(\n what, where=where, from_i=from_i, to_i=to_i, groupby=groupby)\n if where is not None:\n return 
self.where(where)._select_query(what, from_i=from_i, to_i=to_i, groupby=groupby)\n if from_i is not None or to_i is not None:\n return self.islice(from_i, to_i)._select_query(what, groupby=groupby)\n q = sa.select(what).select_from(self.sa)\n # WHERE\n if self._whereclause is not None:\n q = q.where(self._whereclause.sa)\n # LIMIT\n if self._to_i is not None:\n q = q.limit(self._to_i - (self._from_i or 0))\n # OFFSET\n if self._from_i is not None and self._from_i > 0:\n q = q.offset(self._from_i)\n # SORT\n if self._sort_by is not None:\n q = q.order_by(*[by.sa if asc else by.sa.desc() for by, asc in self._sort_by])\n if groupby is not None:\n q = q.group_by(*groupby)\n return q\n\n def select_row(self, what, **kwargs):\n singleton = not isinstance(what, list)\n if singleton:\n what = [what]\n with self.connect() as conn:\n q = self._select_query(what, **kwargs)\n resp = conn.execute(q).fetchone()\n return resp[0] if singleton else resp\n\n def iterselect(self, what, **kwargs):\n what_dedup = [el for i, el in enumerate(what) if el not in what[:i]]\n ixs = [what_dedup.index(el) for el in what]\n with self.connect() as conn:\n q = self._select_query(what_dedup, **kwargs)\n #yield from conn.execute(q)\n for row in conn.execute(q):\n yield tuple(row[i] for i in ixs)\n\n def itertuples(self, index=True, name=\"Pandas\"):\n names = self._ix + self._columns if index else self._columns\n data = self._ixdata + self._coldata if index else self._coldata\n typ = namedtuple(name, names)\n for row in self.iterselect(data):\n yield typ(*row)\n def iterrows(self):\n n_ix = len(self.ix)\n for row in self.iterselect(self._ixdata + self._coldata):\n # !?! multiindex?\n yield row[:n_ix], pd.Series(row[n_ix:], index=self._columns)\n def to_dataframe(self):\n names = self._ix + self._columns\n data = self._ixdata + self._coldata\n df = pd.DataFrame.from_records(list(self.iterselect(data)), columns=list(range(len(names))))\n if len(self._ix) == 1:\n df.set_index(0, inplace=True)\n df.index.name = self._ix[0]\n elif self._ix:\n df.set_index(list(range(len(self._ix))), inplace=True)\n df.index.names = self._ix\n df.columns = self._columns\n if self._from_i is not None and not self._ix:\n df.index += self._from_i\n return df\n @property\n def data(self):\n return self.to_dataframe()\n @property\n def df(self):\n return self.to_dataframe()\n def __len__(self):\n return self.select_row(sa.func.count()) # count(self.sa) ...\n ## other\n def insert(self, rows):\n ins = self.sa.insert()\n with self.connect() as conn:\n conn.execute(ins, rows)\n def append(self, df):\n if df.index.name is None:\n rows = [row.to_dict() for _, row in df.iterrows()]\n else:\n rows = [assoc(row.to_dict(), df.index.name, ix) for ix, row in df.iterrows()]\n self.insert(rows)\n\n def _agg_pairwise(self, how):\n how = {}.get(how, how)\n cols = self.columns\n fn = getattr(func, how)\n resp = self.select_row([fn(self[col1].sa, self[col2].sa)\n for col1 in cols for col2 in cols])\n result = pd.DataFrame.from_records([resp[i * len(cols):(i + 1) * len(cols)]\n for i in range(len(cols))],\n index=cols, columns=cols)\n return result\n\n def aggregate(self, how, axis=None, skipna=None):\n how = {\"mean\": \"avg\", \"std\": \"stddev\", \"var\": \"variance\"}.get(how, how)\n #assert how in {\"min\", \"max\", \"avg\", \"sum\"}\n fn = getattr(func, how)\n if axis in {None, 0}:\n cols = self.columns\n vals = self.select_row([fn(self[col].sa) for col in cols])\n return pd.Series(vals, index=cols)\n elif axis == 1:\n agg_sa = fn(*[self[col].sa for col in 
self.columns])\n return Expression(self, agg_sa, how)\n else:\n raise ValueError(\"axis not in {None, 0, 1}: %s\" % axis)\n\n def nunique(self, dropna=True):\n cols = self.columns\n vals = self.select_row([func.count(self[col].sa.distinct()) for col in cols])\n return pd.Series(vals, index=cols)\n def groupby(self, by=None, axis=0, level=None, as_index=True,\n sort=True, group_keys=True, squeeze=False, **kwargs):\n return GroupBy(self, by, sort=sort, as_index=as_index)\n def _repr_html_(self):\n df = self.head().df\n if len(self) > len(df):\n df = df.append(pd.Series(\"...\", index=df.columns, name=\"...\"))\n return df._repr_html_()\n def alias(self, name=None):\n new_sa = self.sa.alias(name=name)\n new_cols = new_sa.columns\n new_ixdata = [getattr(new_cols, c.name) for c in self._ixdata]\n new_coldata = [getattr(new_cols, c.name) for c in self._coldata]\n # !?!? derived columns?\n return self.copy(sa=new_sa, _ixdata=new_ixdata, _coldata=new_coldata)\n def join(self, other, on=None, how=\"left\", lsuffix='', rsuffix='', sort=False):\n assert how in {'left', 'right', 'outer', 'inner'}\n if how == \"right\":\n return other.join(self, on=on, how=\"left\", lsuffix=rsuffix, rsuffix=lsuffix, sort=sort)\n alias_self = self.alias()\n alias_other = other.alias()\n if on is None:\n assert set(alias_self._ix) == set(alias_other._ix), \"mismatched indexes\"\n on_clause = and_(*[ixdata == alias_other._ixdata[alias_other._ix.index(ix)]\n for ix, ixdata in zip(alias_self._ix, alias_self._ixdata)])\n else:\n if isinstance(on, str):\n on = [on]\n on_clause = and_(*[alias_self[col].sa == alias_other[col].sa for col in on])\n \n col_overlap = set(alias_self.columns) & set(alias_other.columns)\n if col_overlap:\n assert lsuffix or rsuffix, \"columns overlap but no suffix specified\"\n self_columns = [str(col) + lsuffix if col in col_overlap else col\n for col in alias_self.columns]\n other_columns = [str(col) + rsuffix if col in col_overlap else col\n for col in alias_other.columns]\n new_cols = self_columns + other_columns\n else:\n new_cols = alias_self.columns + alias_other.columns\n # TODO: select the right columns from self and other, not just table selection\n # - ?? only if columns have been selected???\n new_sa = alias_self.sa.join(alias_other.sa, on_clause, isouter=(how != \"inner\"), full=(how == \"outer\"))\n # TODO: test all hows\n # ?? 
error in primary keys with new table creation?\n new_table = VirtualTable(self.engine, new_sa, index=False)\n for col in new_table._coldata:\n pass\n #new_table.columns = new_cols\n #onlen = len(alias_self._ix) if on is None else len(on)\n #new_table._ix, new_table._ixdata = new_table._ix[:onlen], new_table._ixdata[:onlen]\n if sort:\n new_table = new_table.sort_index()\n return new_table\n\nclass Table(VirtualTable):\n @staticmethod\n def from_df(df, name, metadata=None):\n metadata = sa.MetaData() if metadata is None else metadata\n cols = [sa.Column(col, to_sqlalchemy_type(df[col])) for col in df.columns]\n if df.index.name is not None:\n ix = df.index.to_series()\n cols = [sa.Column(ix.name, to_sqlalchemy_type(ix), primary_key=ix.is_unique)] + cols\n return metadata, sa.Table(name, metadata, *cols)\n\n def __init__(self, engine, table, **kwargs):\n salc = sa.Table(table, sa.MetaData(), autoload=True, autoload_with=engine)\n super().__init__(engine, salc, **kwargs)\n\nclass Expression:\n def __init__(self, table, salc, name):\n self.table = table\n self.sa = salc\n self.name = name\n def copy(self, **new_attrs):\n new = copy.copy(self)\n for k, v in new_attrs.items():\n setattr(new, k, v)\n return new\n def __repr__(self):\n return \"<%s(%s)>\" % (self.__class__.__name__, repr(self.sa))\n def __len__(self):\n with self.table.connect() as conn:\n q = self.table._select_query([sa.func.count(self.sa)])\n return conn.execute(q).fetchone()[0]\n def __iter__(self):\n with self.table.connect() as conn:\n q = self.table._select_query([self.sa])\n return iter(val for (val,) in conn.execute(q))\n def iteritems(self):\n with self.table.connect() as conn:\n if self.table._ix:\n ixs = self.table._ixdata\n q = self.table._select_query(ixs + [self.sa])\n if len(ixs) == 1:\n return iter(conn.execute(q))\n return iter((row[:-1], row[-1]) for row in conn.execute(q))\n else:\n from_i = self.table._from_i or 0\n q = self.table._select_query([self.sa])\n return iter((i, val) for\n (i, (val,)) in enumerate(conn.execute(q), from_i))\n def __getitem__(self, k):\n if isinstance(k, slice) or isinstance(k, Expression):\n return self.copy(table=self.table[k])\n raise TypeError(\"unrecognized key type: %s\" % type(k))\n def to_series(self):\n tbl = self.table\n vals = []\n ixs = []\n for ix, val in self.iteritems():\n vals.append(val)\n ixs.append(ix)\n if len(tbl._ix) < 2:\n name = tbl._ix[0] if tbl._ix else None\n ix = pd.Index(ixs, name=name)\n else:\n ix = pd.MultiIndex.from_tuples(ixs, names=tbl._ix)\n return pd.Series(vals, index=ix, name=self.name)\n @property\n def data(self):\n return self.to_series()\n @property\n def s(self):\n return self.data\n @property\n def dtype(self):\n return np.dtype(str(discover_typeengine(self.sa.type)))\n @property\n def iloc(self):\n return Indexer(self.islice)\n def _lookup(self, k):\n tbl = self.table\n select = self.copy(table=tbl.where(tbl.index == k))\n result = select.s\n if len(result) == 0:\n raise KeyError(\"%r not found in %s\" % (k, tbl.index))\n return result\n def _loc(self, k):\n # actually returns a series/values for lookups\n if isinstance(k, (slice, Expression)):\n return self.copy(table=self.table._loc(k))\n elif is_iter_notstr(k):\n # list of elements\n return pd.concat([self._lookup(el) for el in k])\n else:\n # single element\n result = self._lookup(k)\n return result.iloc[0] if len(result) == 1 else result\n @property\n def loc(self):\n return Indexer(self._loc)\n def aggregate(self, how, axis=None, skipna=None):\n how = {\"mean\": \"avg\", \"std\": 
\"stddev\", \"var\": \"variance\"}.get(how, how)\n assert axis in {0, None}\n fn = getattr(func, how)\n return self.table.select_row(fn(self.sa))\n\n def nunique(self, dropna=True):\n return len(self.unique())\n def isnull(self):\n return (self == None)\n isna = isnull\n def notnull(self):\n return (self != None)\n notna = notnull\n def sort_values(self, ascending=True):\n assert ascending in {True, False}\n return self.copy(table=self.table.sort_values(self, ascending=ascending))\n def nlargest(self, n=5):\n return self.sort_values(ascending=False).head(n)\n def nsmallest(self, n=5):\n return self.sort_values(ascending=True).head(n)\n def groupby(self, by=None, axis=0, level=None, as_index=True,\n sort=True, group_keys=True, squeeze=False, **kwargs):\n return GroupBy(self, by, sort=sort, as_index=as_index)\n\n# operator overloading\nfor opname in [\"lt\", \"le\", \"gt\", \"eq\", \"ge\", \"ne\",\n \"mul\", \"add\", \"sub\", \"truediv\", \"pow\",\n \"and_\", \"or_\"]:\n op = getattr(operator, opname)\n def fn(self, other, op=op):\n if hasattr(other, \"sa\"):\n new_sa = op(self.sa, other.sa)\n new_name = self.name if other.name == self.name else None\n else:\n new_sa = op(self.sa, other)\n new_name = self.name\n return Expression(self.table, new_sa, new_name)\n setattr(Expression, \"__%s__\" % opname.strip(\"_\"), fn)\n\n# pass-through to underlying table\nfor method in [\"head\", \"tail\", \"islice\", \"sort_index\", \"where\"]:\n tbl_fn = getattr(Table, method)\n @wraps(tbl_fn)\n def fn(self, *args, tbl_fn=tbl_fn, **kwargs):\n return self.copy(table=tbl_fn(self.table, *args, **kwargs))\n setattr(Expression, method, fn)\nfor sql_func in [\"rank\"]:\n op = getattr(func, sql_func)\n def fn(self, op=op):\n return Expression(self.table, op(self.sa), self.name)\n setattr(Expression, sql_func, fn)\nfor sql_method in [\"startswith\", \"endswith\", \"in_\"]:\n method = getattr(sa.sql.operators.ColumnOperators, sql_method)\n @wraps(method)\n def fn(self, *args, _method=method, **kwargs):\n return Expression(self.table, _method(self.sa, *args, **kwargs), self.name)\n setattr(Expression, sql_method, fn)\nfor sql_method in [\"distinct\"]:\n method = getattr(sa.sql.operators.ColumnOperators, sql_method)\n @wraps(method)\n def fn(self, *args, _method=method, **kwargs):\n return Expression(self.table.reset_index(), _method(self.sa, *args, **kwargs), self.name)\n setattr(Expression, sql_method, fn)\nExpression.isin = Expression.in_\nExpression.unique = Expression.distinct\n\nclass Indexer:\n def __init__(self, getter, setter=None):\n self.getter = getter\n self.setter = setter\n def __getitem__(self, k):\n return self.getter(k)\n\nclass GroupBy:\n def __init__(self, base, by, sort=True, as_index=True):\n assert isinstance(base, (Table, Expression))\n self.base = base\n if isinstance(by, (str, Expression)):\n by = [by]\n self.by = [base[k] if isinstance(k, str) else k for k in by]\n self.sort = sort\n self.as_index = as_index\n def __getitem__(self, k):\n if isinstance(self.base, Table):\n if isinstance(k, str) or is_striter(k):\n return GroupBy(self.base[k], self.by)\n raise TypeError(\"unrecognized key type %s for groupby base type %s\" %\n (type(k), type(self.base)))\n @property\n def table(self):\n return self.base if isinstance(self.base, Table) else self.base.table\n\n def get_group(self, group):\n singleton = len(self.by) == 1\n if singleton and (isinstance(group, str) or not isinstance(group, Iterable)):\n group = [group]\n condition = and_(*[by_el == group_el for by_el, group_el in zip(self.by, 
group)])\n return self.base.where(condition)\n\n @property\n def groups(self):\n by = [by.sa for by in self.by]\n singleton = len(self.by) == 1\n groups = list(self.table.iterselect(by, groupby=by))\n return {group[0] if singleton else group:\n and_(*[by_el == group_el for by_el, group_el in zip(self.by, group)])\n for group in groups}\n\n def __len__(self):\n by = [by.sa for by in self.by]\n q = self.table._select_query(by, groupby=by).count()\n with self.table.connect() as conn:\n return conn.execute(q).fetchone()[0]\n\n def __iter__(self):\n by = [by.sa for by in self.by]\n singleton = len(self.by) == 1\n sort_by = self.by if self.sort else None\n for group in self.table.iterselect(by, groupby=by, sort_by=sort_by):\n condition = and_(*[by_el == group_el for by_el, group_el in zip(self.by, group)])\n yield group[0] if singleton else group, self.base.where(condition)\n def apply(self, func, *args, **kwargs):\n return pd.concat([func(data.data, *args, **kwargs) for _, data in self])\n def transform(self, func, *args, **kwargs):\n return pd.concat([func(data.data, *args, **kwargs) for _, data in self])\n def size(self):\n bynames = [by.name for by in self.by]\n by = [by.sa for by in self.by]\n vals = []\n ixs = []\n for row in self.table.iterselect(by + [sa.func.count()], groupby=by): # TODO !!, sort_by=sort_by):\n vals.append(row[-1])\n ixs.append(row[:-1])\n if len(bynames) < 2:\n ix = pd.Index(ixs, name=bynames[0])\n else:\n ix = pd.MultiIndex.from_tuples(ixs, names=bynames)\n return pd.Series(vals, index=ix)\n\n def aggregate(self, how, as_df=True):\n # TODO: multiple hows, how dicts...\n how = {\"mean\": \"avg\", \"std\": \"stddev\", \"var\": \"variance\"}.get(how, how)\n valid_types = _numeric_types if how in {\"avg\", \"stddev\", \"variance\", \"sum\"} else sa_types\n fn = getattr(func, how)\n by = [by.sa for by in self.by]\n bynames = [by.name for by in self.by]\n # TODO: return as synthetic table?\n # class VirtualTable - has a base for queries (sa), and a bunch of columns\n if isinstance(self.base, Table):\n colnames = [col for col, dtype in zip(self.base.columns, self.base.coltypes) if dtype in valid_types\n and not col in bynames]\n salc = [by.sa for by in self.by] + [fn(self.base[col].sa) for col in colnames]\n else:\n colnames = [self.base.name]\n salc = [by.sa for by in self.by] + [fn(self.base.sa)]\n ix = self.by if self.as_index else None\n sort_by = self.by if self.sort else None\n if not as_df:\n new_q = self.table._select_query(salc, groupby=by, sort_by=sort_by)\n new_sa = new_q #.from_self()\n vt = VirtualTable(self.table.engine, new_sa)\n vt._ixdata, vt._coldata = vt._coldata[:len(bynames)], vt._coldata[len(bynames):]\n vt._ix, vt._columns = bynames, colnames\n if not self.as_index:\n vt = vt.reset_index()[bynames + colnames]\n return vt\n\n df = pd.DataFrame.from_records(list(self.table.iterselect(salc, groupby=by, sort_by=sort_by)),\n columns=list(range(len(salc))))\n if self.as_index:\n df.set_index(list(range(len(self.by))), inplace=True)\n df.index.names = bynames\n df.columns = colnames\n else:\n df.columns = bynames + colnames\n if not isinstance(self.base, Table) and self.as_index:\n return df[colnames[0]]\n return df\n agg = aggregate\n\nfor agg_fn in [\"min\", \"max\", \"mean\", \"sum\", \"std\", \"var\", \"count\"]:\n def wrapped(self, axis=None, skipna=None, how=agg_fn):\n return self.aggregate(how, axis=axis, skipna=skipna)\n wrapped.__name__ = agg_fn\n setattr(Table, agg_fn, wrapped)\n setattr(Expression, agg_fn, wrapped)\n setattr(GroupBy, agg_fn, 
partialmethod(GroupBy.aggregate, how=agg_fn))\n\nfor pair_agg_fn in [\"corr\", \"cov\"]:\n def wrapped(self, how=pair_agg_fn):\n return self._agg_pairwise(how)\n wrapped.__name__ = pair_agg_fn\n setattr(Table, pair_agg_fn, wrapped)\n"
] |
[
[
"pandas.Series",
"pandas.MultiIndex.from_tuples",
"pandas.Index",
"numpy.dtype",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
FrancisCrickInstitute/ActiveUnetSegmentation
|
[
"c54ecb3c9d693ca2b7eead4174921f8ab003af64"
] |
[
"src/unetsl/comparisons.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport skimage, numpy \nimport imageio\nimport scipy.ndimage.filters as filters\nimport collections\nimport skimage.morphology\nimport skimage.filters\nimport re\nimport io\n\nimport unetsl.data\n\nDEFAULT_CUTOFFS = ( 0.5, 0.8, 0.95)\n\npoint_kernel = numpy.array( \n [\n [ 1, 1, 1],\n [ 1, 10, 1],\n [ 1, 1, 1]\n ] )\n\nendpoint_kernel = numpy.array( \n [\n [ 1, 1, 1],\n [ 1, 0, 1],\n [ 1, 1, 1]\n ] )\n\n\ndef getEndPoints(skeleton):\n points = filters.convolve( skeleton, point_kernel, mode='constant', cval=1.0)\n points = (points==11)*1\n \n return numpy.where(points)\n\ndef non_fix(seg):\n return skimage.morphology.skeletonize(seg)*1.0\n\ndef easy_fix(skeleton):\n \"\"\"\n \n finds easy to fix points and eg two broken lines separated by \n 1px.\n \n \"\"\"\n broken = filters.convolve( skeleton, point_kernel, mode='constant', cval=1.0)\n broken = (broken==11)*1\n \n ends = filters.convolve(broken, endpoint_kernel, mode='constant', cval=0.0)\n \n BORDER_FIX=True\n \n if BORDER_FIX:\n ends[0:1,:] = (ends[0:1, :]==1)*2\n ends[ ends.shape[-2] - 1:, : ] = (ends[ ends.shape[-2] - 1:, : ] !=0 )*2\n ends[:, 0:1] = (ends[ :, 0 : 1 ] !=0 )*2\n ends[:, ends.shape[-1] -1:] = (ends[:, ends.shape[-1] -1:] !=0 )*2\n \n fillers = 1*(ends==2)\n skeleton += fillers\n skeleton = (skeleton>0)*1.0\n skeleton = skimage.morphology.skeletonize(skeleton)*1.0\n \n \n \n return skeleton\n\ndef getColor(ji, cutoffs):\n bad = numpy.array([5, 5, 5],dtype=\"uint8\")\n \n semi_bad = numpy.array( [ 10, 0, 55 ], dtype = \"uint8\" )\n dsb = numpy.array([10, 10, 10 ],dtype=\"uint8\")\n \n decent = numpy.array( [ 125, 0, 20 ], dtype = \"uint8\" )\n dd = numpy.array([125, 20, 20 ],dtype=\"uint8\")\n \n good = numpy.array([255, 165, 0],dtype=\"uint8\")\n gd = numpy.array([0, 90, 30 ],dtype=\"uint8\")\n \n if ji>cutoffs[-1]:\n delta = ( ji - cutoffs[-1] )/ ( 1.0 - cutoffs[-1] )\n return numpy.array([good + gd*delta], dtype=\"uint8\")\n elif ji>cutoffs[-2]:\n delta = ( ji - cutoffs[-2] )/ ( cutoffs[-1] - cutoffs[-2] )\n return numpy.array([decent + dd*delta], dtype=\"uint8\")\n elif ji>cutoffs[-3]:\n delta = ( ji - cutoffs[-3] )/ ( cutoffs[-2] - cutoffs[-3] )\n return numpy.array([semi_bad + dsb*delta], dtype=\"uint8\")\n else:\n return bad\n\ndef postProcess(img):\n \"\"\"\n blurs membrane errors.\n \"\"\"\n white = numpy.array((255, 255, 255))\n me = numpy.all((img == white), axis=2)*1000\n me[0, :] = 0\n me[me.shape[0]-1, :] = 0\n me[:, me.shape[1]-1] = 0\n me[:, 0] = 0\n print(\"membrane \", me.shape)\n me_blr = ( \n filters.gaussian_filter1d(me, sigma=3, axis=0) + \n filters.gaussian_filter1d(me, sigma=3, axis=1) +\n filters.gaussian_filter(me, sigma=2) )\n me_blr[numpy.where(me_blr>255) == 255]\n out = img + numpy.reshape( me_blr, (*me_blr.shape, 1))\n out[numpy.where(out>255)] = 255 \n return out\n\n\ndef jaccardIndex(pred, truth, label_image=None, cutoffs=None):\n \n p_labels = skimage.measure.label(pred, background=1, connectivity=1)\n t_labels = skimage.measure.label(truth, background=1, connectivity=1)\n \n outer = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))\n t_sizes = collections.defaultdict(lambda: 0)\n p_sizes = collections.defaultdict(lambda: 0)\n \n for i in range(t_labels.shape[0]):\n for j in range(t_labels.shape[1]):\n nA = t_labels[i,j]\n nB = p_labels[i,j]\n \n outer[nA][nB] += 1\n t_sizes[nA] += 1\n p_sizes[nB] += 1\n \n \n \n \n membrane = outer[0][0]*1.0/(t_sizes[0] + p_sizes[0] - outer[0][0]) \n best = {}\n for key in outer:\n ts = t_sizes[key]\n lm = 
outer[key]\n for ik in lm:\n lm[ik] = lm[ik]*1.0/(p_sizes[ik] + ts - lm[ik])\n \n best[key] = max(lm.values())\n \n #if outer[0] != membrane:\n # print(\".\")\n #this fails some times!? \n best[0] = membrane\n \n #label_image[:, :, 0] = p_labels\n #label_image[:, :, 1] = (pred != 0) * 255\n #label_image[:, :, 2] = pred[:, :]\n \n if label_image is not None:\n for i in range(pred.shape[0]):\n for j in range(pred.shape[1]):\n t_label = t_labels[i,j]\n p_label = p_labels[i,j]\n if t_label == 0:\n if p_label == 0:\n label_image[i,j] = [0x0, 0x0, 0x0]\n else:\n label_image[i,j] = [0xff, 0xff, 0xff]\n elif p_label == 0:\n label_image[i,j] = [0, 0, 0]\n else:\n ji = outer[t_label][p_label]\n label_image[i, j] = getColor(ji, cutoffs)\n \n \n return best\n\ndef watershedFix(frame):\n thresh = (frame==0)*1\n \n #local_maxi = skimage.feature.peak_local_max(frame, indices=False, footprint=numpy.ones((5, 5)), labels=thresh)\n \n lbled = skimage.measure.label(thresh, background=0, connectivity=1)\n #print(lbled.shape, lbled.dtype)\n ws = skimage.morphology.watershed(-thresh, lbled)\n s = skimage.morphology.skeletonize(skimage.filters.sobel(ws)!=0)*1\n return s\n\n\ndef createJIImageComparison(prediction, truth, slice_index=0, cutoffs=None):\n \"\"\"\n prediction : name of the prediction to be compared.\n truth : expected output.\n slice_index : sliced that will be renered.\n cutoffs : for creating different color levels.\n \"\"\"\n pimg = skimage.io.imread(prediction)\n timg = skimage.io.imread(truth)\n \n if pimg.shape != timg.shape:\n pimg = numpy.reshape(pimg, timg.shape)\n \n p_slice = pimg[slice_index]\n p_skeleton = 1.0*(p_slice!=0)\n \n t_slice = timg[slice_index]\n t_skeleton = 1.0*(t_slice!=0)\n \n #p_skeleton = watershedFix(p_skeleton)\n #fix-em efore doing the regions\n p_skeleton = non_fix(p_skeleton)\n t_skeleton = non_fix(t_skeleton)\n \n p_skeleton = easy_fix(p_skeleton)\n t_skeleton = easy_fix(t_skeleton)\n \n ji_image = numpy.zeros((t_slice.shape[0], t_slice.shape[1], 3), dtype=\"uint8\")\n jaccardIndex(p_skeleton, t_skeleton, ji_image, cutoffs=cutoffs)\n ji_image = postProcess(ji_image)\n op = io.BytesIO();\n imageio.imsave(op, ji_image, \"png\")\n op.seek(0)\n return op;\n\ndef getEpoch(prediction):\n pat = re.compile(\"-e(\\\\d+)\")\n mo = pat.search(prediction)\n if mo:\n return int(mo.group(1))\n else:\n return -1\n \ndef compare(prediction, truth, ji_image_name=None, cutoffs=None):\n \"\"\"\n \n Takes a prediction and the known ground truth and creates metrics for\n evaluation.\n \n \"\"\"\n pimg = skimage.io.imread(prediction)\n timg = skimage.io.imread(truth)\n \n if pimg.shape != timg.shape:\n pimg = numpy.reshape(pimg, timg.shape)\n \n membrane = 0.0\n cutoffs = cutoffs\n regions = [0.0]*len(cutoffs)\n ends = 0\n mem_cor = 0.0\n over_mem = 0.0\n ji_stack = []\n ji_values = [] \n for i in range(pimg.shape[0]):\n p_slice = pimg[i]\n p_skeleton = 1.0*(p_slice!=0)\n \n t_slice = timg[i]\n t_skeleton = 1.0*(t_slice!=0)\n \n pos = (p_skeleton*t_skeleton)\n over = p_skeleton - pos\n t_sum = numpy.sum(t_skeleton)*1.0\n mem_cor += numpy.sum(pos)/t_sum\n over_mem += numpy.sum(over)/t_sum\n \n #fix-em efore doing the regions\n p_skeleton = non_fix(p_skeleton)\n t_skeleton = non_fix(t_skeleton)\n \n p_skeleton = easy_fix(p_skeleton)\n t_skeleton = easy_fix(t_skeleton)\n \n ji_image = None\n if ji_image_name is not None:\n ji_image = numpy.zeros((t_slice.shape[0], t_slice.shape[1], 3), dtype=\"uint8\")\n ji_stack.append(ji_image)\n ji = jaccardIndex(p_skeleton, t_skeleton, ji_image, 
cutoffs = cutoffs)\n ji_values.append(ji)\n \n mem = ji[0]\n ji[0] = 0\n regs = [0.0]*len(cutoffs)\n for k in ji:\n v = ji[k]\n for i, c in enumerate(cutoffs):\n if v>c:\n regs[i] += 1\n \n ends += len(getEndPoints(p_skeleton)[0])\n membrane += mem\n for i in range(len(regions)):\n regions[i] += regs[i]/(len(ji)-1)\n mem_cor = mem_cor/pimg.shape[0]\n over_mem = over_mem/pimg.shape[0]\n \n if ji_image_name is not None:\n skimage.io.imsave(ji_image_name, numpy.array(ji_stack , dtype=\"uint8\"))\n \n membrane = membrane/pimg.shape[0]\n for i, r in enumerate(regions):\n regions[i] = r/pimg.shape[0]\n epoch = getEpoch(prediction)\n values = [epoch, ends, mem_cor, over_mem, membrane]\n values += regions\n \n return values, ji_values\n\ndef evaluateSkeleton(prediction, ji_stack=None, cutoffs=DEFAULT_CUTOFFS):\n \"\"\"\n \n Takes a prediction and the known ground truth and creates metrics for\n evaluation.\n \n \"\"\"\n ji_stack = []\n pimg, tags = unetsl.data.loadImage(prediction)\n #channel, z, y, x\n pimg = pimg[0]\n \n membrane = 0.0\n cutoffs = cutoffs\n regions = [0.0]*len(cutoffs)\n ends = 0\n mem_cor = 0.0\n over_mem = 0.0\n ji_values = []\n \n p_slice = None\n #first channel is the skeleton\n for slc in range(pimg.shape[0] - 1):\n \n if p_slice is None:\n #only do it once.\n p_slice = pimg[slc]\n p_skeleton = 1.0*(p_slice!=0)\n #fix-em efore doing the regions\n p_skeleton = non_fix(p_skeleton)\n p_skeleton = easy_fix(p_skeleton)\n \n next_slice = pimg[slc + 1]\n next_skeleton = 1.0*(next_slice!=0)\n next_skeleton = non_fix(next_skeleton)\n next_skeleton = easy_fix(next_skeleton)\n \n ji_image = None\n if ji_stack is not None:\n ji_image = numpy.zeros((p_slice.shape[0], p_slice.shape[1], 3), dtype=\"uint8\")\n ji_stack.append(ji_image)\n\n ji = jaccardIndex(p_skeleton, next_skeleton, ji_image, cutoffs = cutoffs)\n ji_values.append(ji)\n \n mem = ji[0]\n ji[0] = 0\n regs = [0.0]*len(cutoffs)\n for k in ji:\n v = ji[k]\n for i, c in enumerate(cutoffs):\n if v>c:\n regs[i] += 1\n \n ends += len(getEndPoints(p_skeleton)[0])\n membrane += mem\n for i in range(len(regions)):\n regions[i] += regs[i]/(len(ji)-1)\n p_slice = next_slice\n p_skeleton = next_skeleton\n \n mem_cor = mem_cor/pimg.shape[0]\n over_mem = over_mem/pimg.shape[0]\n \n \n membrane = membrane/pimg.shape[0]\n for i, r in enumerate(regions):\n regions[i] = r/pimg.shape[0]\n epoch = getEpoch(prediction)\n values = [epoch, ends, mem_cor, over_mem, membrane]\n values += regions\n if ji_stack is not None:\n label_name=\"labelled/%s\"%prediction.replace(\".tif\", \"-labelled.tif\")\n skimage.io.imsave(\n label_name, \n numpy.array(\n numpy.sum(ji_stack, axis=0)/len(ji_stack),\n dtype=\"uint8\"\n )\n )\n \n return values, ji_values"
] |
[
[
"numpy.reshape",
"numpy.all",
"scipy.ndimage.filters.gaussian_filter",
"scipy.ndimage.filters.gaussian_filter1d",
"scipy.ndimage.filters.convolve",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
tigerneil/scattertext
|
[
"23351895ada347fae300bf910c2c77f47ac58a35",
"23351895ada347fae300bf910c2c77f47ac58a35",
"23351895ada347fae300bf910c2c77f47ac58a35"
] |
[
"scattertext/features/FeatsFromGeneralInquirer.py",
"scattertext/characteristic/DenseRankCharacteristicness.py",
"scattertext/ScatterChartData.py"
] |
[
"from collections import Counter\nfrom re import split\nfrom sys import version_info\n\nimport pandas as pd\n\nfrom scattertext.Common import GENERAL_INQUIRER_URL\nfrom scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc\n\n\n\nclass FeatsFromGeneralInquirer(FeatsFromSpacyDoc):\n\tdef __init__(self,\n\t use_lemmas=False,\n\t entity_types_to_censor=set(),\n\t tag_types_to_censor=set(),\n\t strip_final_period=False,\n\t **kwargs):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tempath_analyze_function: function (default=empath.Empath().analyze)\n\t\t\tFunction that produces a dictionary mapping Empath categories to\n\n\t\tOther parameters from FeatsFromSpacyDoc.__init__\n\t\t'''\n\t\tself._lexicon_df = self._download_and_parse_general_inquirer()\n\t\tsuper(FeatsFromGeneralInquirer, self).__init__(use_lemmas,\n\t\t entity_types_to_censor,\n\t\t tag_types_to_censor,\n\t\t strip_final_period)\n\n\tdef _download_and_parse_general_inquirer(self):\n\t\tdf = pd.read_csv(GENERAL_INQUIRER_URL, sep='\\t')\n\t\treturn (df.T[2:-4].apply(lambda x: list(df\n\t\t .Entry\n\t\t .apply(lambda x: x.split('#')[0])\n\t\t .loc[x.dropna().index]\n\t\t .drop_duplicates()\n\t\t .apply(str.lower)),\n\t\t axis=1)\n\t\t\t.apply(pd.Series)\n\t\t\t.stack()\n\t\t\t.reset_index()[['level_0', 0]]\n\t\t\t.rename(columns={'level_0': 'cat', 0: 'term'})\n\t\t\t.set_index('term'))\n\n\tdef _analyze(self, doc):\n\t\ttext_df = (pd.DataFrame(pd.Series(Counter(t for t in split(r\"(\\W)\", doc.lower()) if t.strip())))\n\t\t\t.join(self._lexicon_df)\n\t\t\t.dropna()\n\t\t\t.groupby('cat')\n\t\t\t.sum()\n\t\t\t)\n\t\treturn text_df\n\n\tdef get_doc_metadata(self, doc, prefix=''):\n\t\ttopic_counter = Counter()\n\t\tif version_info[0] >= 3:\n\t\t\tdoc = str(doc)\n\t\tfor topic_category, score in self._analyze(doc).to_dict()[0].items():\n\t\t\ttopic_counter[prefix + topic_category] = int(score)\n\t\treturn topic_counter\n\n\tdef has_metadata_term_list(self):\n\t\treturn True\n\n\tdef get_top_model_term_lists(self):\n\t\treturn self._lexicon_df.reset_index().groupby('cat')['term'].apply(list).to_dict()\n",
"import pandas as pd\nfrom scipy.stats import rankdata\n\nfrom scattertext.Scalers import scale\nfrom scattertext.frequencyreaders.DefaultBackgroundFrequencies import DefaultBackgroundFrequencies\nfrom scattertext.termranking import AbsoluteFrequencyRanker\nfrom scattertext.termscoring.RankDifference import RankDifference\n\n\nclass CharacteristicScorer(object):\n\tdef __init__(self,\n\t term_ranker=AbsoluteFrequencyRanker,\n\t background_frequencies=DefaultBackgroundFrequencies,\n\t rerank_ranks=False):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tterm_ranker : TermRanker, default is OncePerDocFrequencyRanker\n\t\tbackground_frequencies : BackgroundFrequencies\n\t\trerank_ranks : bool, False by default\n\t\t\torders scores from 0 to 1 by their dense rank\n\t\t'''\n\t\tself.term_ranker = term_ranker\n\t\tself.background_frequencies = background_frequencies\n\t\tself.rerank_ranks = rerank_ranks\n\n\tdef get_scores(self, corpus):\n\t\traise Exception()\n\n\tdef _rerank_scores(self, scores):\n\t\tranks = rankdata(scores, 'dense')\n\t\tranks = ranks / ranks.max()\n\t\treturn ranks, 0.5\n\n\nclass DenseRankCharacteristicness(CharacteristicScorer):\n\tdef get_scores(self, corpus):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tcorpus\n\n\t\tReturns\n\t\t-------\n\t\tfloat, pd.Series\n\t\tfloat: point on x-axis at even characteristicness\n\t\tpd.Series: term -> value between 0 and 1, sorted by score in a descending manner\n\t\tBackground scores from corpus\n\t\t'''\n\t\tterm_ranks = self.term_ranker(corpus).get_ranks()\n\n\t\tfreq_df = pd.DataFrame({\n\t\t\t'corpus': term_ranks.sum(axis=1),\n\t\t\t'standard': self.background_frequencies.get_background_frequency_df()['background']}\n\t\t).dropna()\n\t\tcorpus_rank = rankdata(freq_df.corpus, 'dense')\n\t\tstandard_rank = rankdata(freq_df.standard, 'dense')\n\t\tscores = corpus_rank/corpus_rank.max() - standard_rank/standard_rank.max()\n\n\n\t\t#scores = RankDifference().get_scores(bg['corpus'], bg['bg']).sort_values()\n\t\t# import pdb; pdb.set_trace()\n\t\tif self.rerank_ranks:\n\t\t\trank_scores, zero_marker = self._rerank_scores(scores)\n\t\t\tfreq_df['score'] = pd.Series(rank_scores, index=freq_df.index)\n\t\telse:\n\t\t\tif scores.min() < 0 and scores.max() > 0:\n\t\t\t\tzero_marker = -scores.min() / (scores.max() - scores.min())\n\t\t\telif scores.min() > 0:\n\t\t\t\tzero_marker = 0\n\t\t\telse:\n\t\t\t\tzero_marker = 1\n\t\t\tfreq_df['score'] = scale(scores)\n\t\treturn zero_marker, freq_df.sort_values(by='score', ascending=False)['score']\n",
"import numpy as np\n\nfrom scattertext.termranking import AbsoluteFrequencyRanker\n\n\nclass ScatterChartData(object):\n\tdef __init__(self,\n\t minimum_term_frequency=3,\n\t minimum_not_category_term_frequency=0,\n\t jitter=None,\n\t seed=0,\n\t pmi_threshold_coefficient=3,\n\t max_terms=None,\n\t filter_unigrams=False,\n\t term_ranker=AbsoluteFrequencyRanker,\n\t use_non_text_features=False,\n\t term_significance=None,\n\t terms_to_include=None):\n\t\t'''\n\n\t\tParameters\n\t\t----------\n\t\tterm_doc_matrix : TermDocMatrix\n\t\t\tThe term doc matrix to use for the scatter chart.\n\t\tminimum_term_frequency : int, optional\n\t\t\tMinimum times an ngram has to be seen to be included. Default is 3.\n\t\tminimum_not_category_term_frequency : int, optional\n\t\t If an n-gram does not occur in the category, minimum times it\n\t\t must been seen to be included. Default is 0.\n\t\tjitter : float, optional\n\t\t\tMaximum amount of noise to be added to points, 0.2 is a lot. Default is None to disable jitter.\n\t\tseed : float, optional\n\t\t\tRandom seed. Default 0\n\t\tpmi_threshold_coefficient : int\n\t\t\tFilter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 3\n\t\tmax_terms : int, optional\n\t\t\tMaximum number of terms to include in visualization\n\t\tfilter_unigrams : bool, optional\n\t\t\tIf True, remove unigrams that are part of bigrams. Default is False.\n\t\tterm_ranker : TermRanker, optional\n\t\t\tTermRanker class for determining term frequency ranks.\n\t\tuse_non_text_features : bool, default = False\n\t\t\tUse non-BoW features (e.g., Empath) instead of text features\n\t\tterm_significance : TermSignificance instance or None\n\t\t\tWay of getting significance scores. If None, p values will not be added.\n\t\tterms_to_include : set or None\n\t\t\tOnly annotate these terms in chart\n\t\t'''\n\t\tself.jitter = jitter\n\t\tself.minimum_term_frequency = minimum_term_frequency\n\t\tself.minimum_not_category_term_frequency = minimum_not_category_term_frequency\n\t\tself.seed = seed\n\t\tself.pmi_threshold_coefficient = pmi_threshold_coefficient\n\t\tself.filter_unigrams = filter_unigrams\n\t\tself.term_ranker = term_ranker\n\t\tself.max_terms = max_terms\n\t\tself.use_non_text_features = use_non_text_features\n\t\tself.term_significance = term_significance\n\t\tself.terms_to_include = terms_to_include\n\t\tnp.random.seed(seed)"
] |
[
[
"pandas.read_csv"
],
[
"pandas.Series",
"scipy.stats.rankdata"
],
[
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
powerbi1/keras-tuner
|
[
"cfc6e20956cb8554ee29ef2a1ba4635da7d0228b"
] |
[
"kerastuner/applications/xception.py"
] |
[
"# Copyright 2019 The Keras Tuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras import layers\n\nfrom kerastuner.engine import hypermodel\n\n\nclass HyperXception(hypermodel.HyperModel):\n \"\"\"An Xception HyperModel.\"\"\"\n\n def __init__(self,\n include_top=True,\n input_shape=None,\n input_tensor=None,\n classes=None):\n super(HyperXception, self).__init__()\n if include_top and classes is None:\n raise ValueError('You must specify `classes` when '\n '`include_top=True`')\n\n if input_shape is None and input_tensor is None:\n raise ValueError('You must specify either `input_shape` '\n 'or `input_tensor`.')\n\n self.include_top = include_top\n self.input_shape = input_shape\n self.input_tensor = input_tensor\n self.classes = classes\n\n def build(self, hp):\n activation = hp.Choice('activation', ['relu', 'selu'])\n\n # Model definition.\n if self.input_tensor is not None:\n inputs = tf.keras.utils.get_source_inputs(self.input_tensor)\n x = self.input_tensor\n else:\n inputs = layers.Input(shape=self.input_shape)\n x = inputs\n\n # Initial conv2d.\n conv2d_num_filters = hp.Choice(\n 'conv2d_num_filters', [32, 64, 128], default=64)\n kernel_size = hp.Choice('kernel_size', [3, 5])\n initial_strides = hp.Choice('initial_strides', [2])\n x = conv(x,\n conv2d_num_filters,\n kernel_size=kernel_size,\n activation=activation,\n strides=initial_strides)\n\n # Separable convs.\n sep_num_filters = hp.Range(\n 'sep_num_filters', 128, 768, step=128, default=256)\n num_residual_blocks = hp.Range('num_residual_blocks', 2, 8, default=4)\n for _ in range(num_residual_blocks):\n x = residual(x,\n sep_num_filters,\n activation=activation,\n max_pooling=False)\n # Exit flow.\n x = residual(x,\n 2*sep_num_filters,\n activation=activation,\n max_pooling=True)\n\n pooling = hp.Choice('pooling', ['avg', 'flatten', 'max'])\n if pooling == 'flatten':\n x = layers.Flatten()(x)\n elif pooling == 'avg':\n x = layers.GlobalAveragePooling2D()(x)\n else:\n x = layers.GlobalMaxPooling2D()(x)\n\n if self.include_top:\n # Dense\n num_dense_layers = hp.Range('num_dense_layers', 1, 3)\n dropout_rate = hp.Linear(\n 'dropout_rate', 0.0, 0.6, resolution=0.1, default=0.5)\n dense_use_bn = hp.Choice('dense_use_bn', [True, False])\n for _ in range(num_dense_layers):\n x = dense(x,\n self.classes,\n activation=activation,\n batchnorm=dense_use_bn,\n dropout_rate=dropout_rate)\n output = layers.Dense(self.classes, activation='softmax')(x)\n model = keras.Model(inputs, output, name='Xception')\n\n model.compile(\n optimizer=keras.optimizers.Adam(\n hp.Choice('learning_rate', [1e-3, 1e-4, 1e-5])),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n else:\n return keras.Model(inputs, x, name='Xception')\n\n\ndef sep_conv(x, num_filters, kernel_size=(3, 3), activation='relu'):\n if activation == 'selu':\n x = layers.SeparableConv2D(num_filters, kernel_size,\n activation='selu',\n padding='same',\n 
kernel_initializer='lecun_normal')(x)\n elif activation == 'relu':\n x = layers.SeparableConv2D(num_filters, kernel_size,\n padding='same',\n use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n else:\n ValueError('Unkown activation function: %s' % (activation,))\n return x\n\n\ndef residual(x, num_filters,\n kernel_size=(3, 3),\n activation='relu',\n pool_strides=(2, 2),\n max_pooling=True):\n \"Residual block.\"\n if max_pooling:\n res = layers.Conv2D(num_filters, kernel_size=(\n 1, 1), strides=pool_strides, padding='same')(x)\n elif num_filters != keras.backend.int_shape(x)[-1]:\n res = layers.Conv2D(num_filters, kernel_size=(1, 1), padding='same')(x)\n else:\n res = x\n\n x = sep_conv(x, num_filters, kernel_size, activation)\n x = sep_conv(x, num_filters, kernel_size, activation)\n if max_pooling:\n x = layers.MaxPooling2D(\n kernel_size, strides=pool_strides, padding='same')(x)\n\n x = layers.add([x, res])\n return x\n\n\ndef conv(x, num_filters,\n kernel_size=(3, 3), activation='relu', strides=(2, 2)):\n \"2d convolution block.\"\n if activation == 'selu':\n x = layers.Conv2D(num_filters, kernel_size,\n strides=strides, activation='selu',\n padding='same', kernel_initializer='lecun_normal',\n bias_initializer='zeros')(x)\n elif activation == 'relu':\n x = layers.Conv2D(num_filters, kernel_size,\n strides=strides, padding='same', use_bias=False)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n else:\n msg = 'Unkown activation function: %s' % activation\n ValueError(msg)\n return x\n\n\ndef dense(x, dims, activation='relu', batchnorm=True, dropout_rate=0):\n if activation == 'selu':\n x = layers.Dense(dims, activation='selu',\n kernel_initializer='lecun_normal',\n bias_initializer='zeros')(x)\n if dropout_rate:\n x = layers.AlphaDropout(dropout_rate)(x)\n elif activation == 'relu':\n x = layers.Dense(dims, activation='relu')(x)\n if batchnorm:\n x = layers.BatchNormalization()(x)\n if dropout_rate:\n x = layers.Dropout(dropout_rate)(x)\n else:\n msg = 'Unkown activation function: %s' % activation\n ValueError(msg)\n return x\n"
] |
[
[
"tensorflow.keras.utils.get_source_inputs",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.SeparableConv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.AlphaDropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
coldenheart/123
|
[
"798768bba7dfaef051a46d8e1df48bc671de5213"
] |
[
"python/contrib/SentimentAnalysis/models/check_output.py"
] |
[
"# coding=utf-8\r\n\r\n# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport random\r\nimport argparse\r\nimport pickle\r\n\r\nfrom handle_data import dataLoader, CreatVocab\r\nfrom driver.Config import Configurable\r\nfrom handle_data.CreatVocab import *\r\nfrom handle_data.batch_iter import *\r\n\r\n\r\ndef read_bin(config):\r\n try:\r\n file_name = config.decode_path.split('.')[0] + \".bin\"\r\n feature_arr = np.fromfile(file_name, dtype=np.int32).reshape(\r\n config.sentence_max_length, config.batch_size)\r\n except IOError as except_err:\r\n print(except_err)\r\n return 1\r\n else:\r\n print(feature_arr)\r\n print(feature_arr.shape)\r\n return 0\r\n\r\n\r\ndef read_output_bin(config):\r\n try:\r\n file_name = \"../output/\" + config.decode_path + \"_output_0\" + \".bin\"\r\n print(file_name)\r\n logits_arr = np.fromfile(file_name, dtype=np.float32).reshape(\r\n config.batch_size, -1)\r\n except IOError as except_err:\r\n print(except_err)\r\n return 1\r\n else:\r\n print(logits_arr)\r\n print(logits_arr.shape)\r\n return 0, logits_arr\r\n\r\nif __name__ == \"__main__\":\r\n random.seed(233)\r\n np.random.seed(233)\r\n\r\n # vocab_file=os.path.join('chinese_L-12_H-768_A-12', 'vocab.txt')\r\n predecode_path = './data/test.txt' # 解码之前的文件路径\r\n dev_data, sentence_length = dataLoader.decoder_sentence(predecode_path)\r\n\r\n parse = argparse.ArgumentParser()\r\n parse.add_argument('--config_file', type=str, default='default.ini')\r\n parse.add_argument('--thread', type=int, default=1)\r\n parse.add_argument('--use_cuda', action='store_true', default=False)\r\n\r\n parse.add_argument('-bert_config_file',\r\n type=str,\r\n default=os.path.join('chinese_L-12_H-768_A-12',\r\n 'bert_config.json'))\r\n parse.add_argument('-vocab_file',\r\n type=str,\r\n default=os.path.join('chinese_L-12_H-768_A-12',\r\n 'vocab.txt'),\r\n help='bert_vocab')\r\n parse.add_argument(\r\n '-max_seq_length',\r\n type=int,\r\n default=202,\r\n help=\r\n 'The maximum total input sequence length after WordPiece tokenization.'\r\n )\r\n parse.add_argument(\r\n '-warmup_proportion',\r\n type=float,\r\n default=0.1,\r\n help='Proportion of training to perform linear learning rate warmup for '\r\n 'E.g., 0.1 = 10% of training.')\r\n parse.add_argument('-do_lower_case',\r\n type=bool,\r\n default=True,\r\n help='Whether to lower case the input text.')\r\n\r\n args, extra_args = parse.parse_known_args()\r\n config_ = Configurable(args.config_file, extra_args)\r\n\r\n id1 = 0\r\n id2 = 0\r\n id3 = 0\r\n id3, logits_arr_ = read_output_bin(config_)\r\n predicts = np.argmax(logits_arr_, axis=1) #shape = (batch_size,)\r\n print(predicts)\r\n print(predicts.shape)\r\n\r\n if id1 == 0 and id2 == 0 and id3 == 0:\r\n print('success!')\r\n else:\r\n print('faild!')\r\n"
] |
[
[
"numpy.fromfile",
"numpy.argmax",
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
daivuong7696/tensorflow-open_nsfw
|
[
"153a24b5e0a7e0acaf84998f983f46207b2dc44a"
] |
[
"classify_nsfw.py"
] |
[
"#!/usr/bin/env python\nimport sys\nimport os\nimport argparse\nimport tensorflow as tf\nfrom shutil import copyfile\n\nfrom model import OpenNsfwModel, InputType\nfrom image_utils import create_tensorflow_image_loader\nfrom image_utils import create_yahoo_image_loader\n\nimport numpy as np\n\n\nIMAGE_LOADER_TENSORFLOW = \"tensorflow\"\nIMAGE_LOADER_YAHOO = \"yahoo\"\n\ndef classify_to_folder(args, images_names, images_scores):\n for i in range(len(images_names)):\n source = os.path.join(args.input_file, images_names[i])\n dest = os.path.join(args.input_file, str(round(images_scores[i][1], 1)))\n if not os.path.isdir(dest):\n os.mkdir(dest)\n copyfile(source, os.path.join(dest, images_names[i]))\n\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"input_file\", help=\"Path to the input image.\\\n Only jpeg images are supported.\")\n parser.add_argument(\"-m\", \"--model_weights\", required=True,\n help=\"Path to trained model weights file\")\n\n parser.add_argument(\"-l\", \"--image_loader\",\n default=IMAGE_LOADER_YAHOO,\n help=\"image loading mechanism\",\n choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])\n\n parser.add_argument(\"-t\", \"--input_type\",\n default=InputType.TENSOR.name.lower(),\n help=\"input type\",\n choices=[InputType.TENSOR.name.lower(),\n InputType.BASE64_JPEG.name.lower()])\n\n args = parser.parse_args()\n\n model = OpenNsfwModel()\n\n with tf.Session() as sess:\n\n input_type = InputType[args.input_type.upper()]\n model.build(weights_path=args.model_weights, input_type=input_type)\n\n fn_load_image = None\n\n if input_type == InputType.TENSOR:\n if args.image_loader == IMAGE_LOADER_TENSORFLOW:\n fn_load_image = create_tensorflow_image_loader(sess)\n else:\n fn_load_image = create_yahoo_image_loader()\n elif input_type == InputType.BASE64_JPEG:\n import base64\n fn_load_image = lambda filename: np.array([base64.urlsafe_b64encode(open(filename, \"rb\").read())])\n\n sess.run(tf.global_variables_initializer())\n images = []\n images_names = []\n for i in os.listdir(args.input_file):\n images_names.append(i)\n image_path = os.path.join(args.input_file, i)\n image = fn_load_image(image_path)\n if images == []:\n images = image\n print(image_path)\n else:\n images = np.concatenate((images, image), axis=0)\n image = images\n\n predictions = \\\n sess.run(model.predictions,\n feed_dict={model.input: image})\n\n classify_to_folder(args, images_names, predictions)\n #for i in range(len(images_names)):\n # print(\"Results for '{}'\".format(images_names[i]))\n # print(\"\\tSFW score:\\t{}\\n\\tNSFW score:\\t{}\".format(*predictions[i]))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"
] |
[
[
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"tensorflow.Session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
oneandwholly/keras
|
[
"dc9db6b494a037db15967d8585a8941be46c0b0e"
] |
[
"keras/callbacks.py"
] |
[
"\"\"\"Callbacks: utilities called at certain points during model training.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport csv\nimport six\n\nimport numpy as np\nimport time\nimport json\nimport warnings\nimport io\n\nfrom collections import deque\nfrom collections import OrderedDict\nfrom collections import Iterable\nfrom .utils.generic_utils import Progbar\nfrom . import backend as K\nfrom .engine.training_utils import standardize_input_data\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\n\nclass CallbackList(object):\n \"\"\"Container abstracting a list of callbacks.\n\n # Arguments\n callbacks: List of `Callback` instances.\n queue_length: Queue length for keeping\n running statistics over callback execution time.\n \"\"\"\n\n def __init__(self, callbacks=None, queue_length=10):\n callbacks = callbacks or []\n self.callbacks = [c for c in callbacks]\n self.queue_length = queue_length\n\n def append(self, callback):\n self.callbacks.append(callback)\n\n def set_params(self, params):\n for callback in self.callbacks:\n callback.set_params(params)\n\n def set_model(self, model):\n for callback in self.callbacks:\n callback.set_model(model)\n\n def on_epoch_begin(self, epoch, logs=None):\n \"\"\"Called at the start of an epoch.\n\n # Arguments\n epoch: integer, index of epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_epoch_begin(epoch, logs)\n self._delta_t_batch = 0.\n self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)\n self._delta_ts_batch_end = deque([], maxlen=self.queue_length)\n\n def on_epoch_end(self, epoch, logs=None):\n \"\"\"Called at the end of an epoch.\n\n # Arguments\n epoch: integer, index of epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_epoch_end(epoch, logs)\n\n def on_batch_begin(self, batch, logs=None):\n \"\"\"Called right before processing a batch.\n\n # Arguments\n batch: integer, index of batch within the current epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n t_before_callbacks = time.time()\n for callback in self.callbacks:\n callback.on_batch_begin(batch, logs)\n self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)\n delta_t_median = np.median(self._delta_ts_batch_begin)\n if (self._delta_t_batch > 0. and\n delta_t_median > 0.95 * self._delta_t_batch and\n delta_t_median > 0.1):\n warnings.warn('Method on_batch_begin() is slow compared '\n 'to the batch update (%f). Check your callbacks.'\n % delta_t_median)\n self._t_enter_batch = time.time()\n\n def on_batch_end(self, batch, logs=None):\n \"\"\"Called at the end of a batch.\n\n # Arguments\n batch: integer, index of batch within the current epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n if not hasattr(self, '_t_enter_batch'):\n self._t_enter_batch = time.time()\n self._delta_t_batch = time.time() - self._t_enter_batch\n t_before_callbacks = time.time()\n for callback in self.callbacks:\n callback.on_batch_end(batch, logs)\n self._delta_ts_batch_end.append(time.time() - t_before_callbacks)\n delta_t_median = np.median(self._delta_ts_batch_end)\n if (self._delta_t_batch > 0. and\n (delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):\n warnings.warn('In your callbacks, method `on_batch_end()` '\n 'is slow compared to a model step '\n '(%f vs %f). 
Check your callbacks.'\n % (delta_t_median, self._delta_t_batch))\n\n def on_train_begin(self, logs=None):\n \"\"\"Called at the beginning of training.\n\n # Arguments\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_train_begin(logs)\n\n def on_train_end(self, logs=None):\n \"\"\"Called at the end of training.\n\n # Arguments\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_train_end(logs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n\nclass Callback(object):\n \"\"\"Abstract base class used to build new callbacks.\n\n # Properties\n params: dict. Training parameters\n (eg. verbosity, batch size, number of epochs...).\n model: instance of `keras.models.Model`.\n Reference of the model being trained.\n\n The `logs` dictionary that callback methods\n take as argument will contain keys for quantities relevant to\n the current batch or epoch.\n\n Currently, the `.fit()` method of the `Sequential` model class\n will include the following quantities in the `logs` that\n it passes to its callbacks:\n\n on_epoch_end: logs include `acc` and `loss`, and\n optionally include `val_loss`\n (if validation is enabled in `fit`), and `val_acc`\n (if validation and accuracy monitoring are enabled).\n on_batch_begin: logs include `size`,\n the number of samples in the current batch.\n on_batch_end: logs include `loss`, and optionally `acc`\n (if accuracy monitoring is enabled).\n \"\"\"\n\n def __init__(self):\n self.validation_data = None\n self.model = None\n\n def set_params(self, params):\n self.params = params\n\n def set_model(self, model):\n self.model = model\n\n def on_epoch_begin(self, epoch, logs=None):\n pass\n\n def on_epoch_end(self, epoch, logs=None):\n pass\n\n def on_batch_begin(self, batch, logs=None):\n pass\n\n def on_batch_end(self, batch, logs=None):\n pass\n\n def on_train_begin(self, logs=None):\n pass\n\n def on_train_end(self, logs=None):\n pass\n\n\nclass BaseLogger(Callback):\n \"\"\"Callback that accumulates epoch averages of metrics.\n\n This callback is automatically applied to every Keras model.\n\n # Arguments\n stateful_metrics: Iterable of string names of metrics that\n should *not* be averaged over an epoch.\n Metrics in this list will be logged as-is in `on_epoch_end`.\n All others will be averaged in `on_epoch_end`.\n \"\"\"\n\n def __init__(self, stateful_metrics=None):\n if stateful_metrics:\n self.stateful_metrics = set(stateful_metrics)\n else:\n self.stateful_metrics = set()\n\n def on_epoch_begin(self, epoch, logs=None):\n self.seen = 0\n self.totals = {}\n\n def on_batch_end(self, batch, logs=None):\n logs = logs or {}\n batch_size = logs.get('size', 0)\n self.seen += batch_size\n\n for k, v in logs.items():\n if k in self.stateful_metrics:\n self.totals[k] = v\n else:\n if k in self.totals:\n self.totals[k] += v * batch_size\n else:\n self.totals[k] = v * batch_size\n\n def on_epoch_end(self, epoch, logs=None):\n if logs is not None:\n for k in self.params['metrics']:\n if k in self.totals:\n # Make value available to next callbacks.\n if k in self.stateful_metrics:\n logs[k] = self.totals[k]\n else:\n logs[k] = self.totals[k] / self.seen\n\n\nclass TerminateOnNaN(Callback):\n \"\"\"Callback that terminates training when a NaN loss is encountered.\n \"\"\"\n\n def on_batch_end(self, batch, logs=None):\n logs = logs or {}\n loss = logs.get('loss')\n if loss is not None:\n if np.isnan(loss) or np.isinf(loss):\n print('Batch %d: Invalid loss, 
terminating training' % (batch))\n self.model.stop_training = True\n\n\nclass ProgbarLogger(Callback):\n \"\"\"Callback that prints metrics to stdout.\n\n # Arguments\n count_mode: One of \"steps\" or \"samples\".\n Whether the progress bar should\n count samples seen or steps (batches) seen.\n stateful_metrics: Iterable of string names of metrics that\n should *not* be averaged over an epoch.\n Metrics in this list will be logged as-is.\n All others will be averaged over time (e.g. loss, etc).\n\n # Raises\n ValueError: In case of invalid `count_mode`.\n \"\"\"\n\n def __init__(self, count_mode='samples',\n stateful_metrics=None):\n super(ProgbarLogger, self).__init__()\n if count_mode == 'samples':\n self.use_steps = False\n elif count_mode == 'steps':\n self.use_steps = True\n else:\n raise ValueError('Unknown `count_mode`: ' + str(count_mode))\n if stateful_metrics:\n self.stateful_metrics = set(stateful_metrics)\n else:\n self.stateful_metrics = set()\n\n def on_train_begin(self, logs=None):\n self.verbose = self.params['verbose']\n self.epochs = self.params['epochs']\n\n def on_epoch_begin(self, epoch, logs=None):\n if self.verbose:\n print('Epoch %d/%d' % (epoch + 1, self.epochs))\n if self.use_steps:\n target = self.params['steps']\n else:\n target = self.params['samples']\n self.target = target\n self.progbar = Progbar(target=self.target,\n verbose=self.verbose,\n stateful_metrics=self.stateful_metrics)\n self.seen = 0\n\n def on_batch_begin(self, batch, logs=None):\n if self.seen < self.target:\n self.log_values = []\n\n def on_batch_end(self, batch, logs=None):\n logs = logs or {}\n batch_size = logs.get('size', 0)\n if self.use_steps:\n self.seen += 1\n else:\n self.seen += batch_size\n\n for k in self.params['metrics']:\n if k in logs:\n self.log_values.append((k, logs[k]))\n\n # Skip progbar update for the last batch;\n # will be handled by on_epoch_end.\n if self.verbose and self.seen < self.target:\n self.progbar.update(self.seen, self.log_values)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n for k in self.params['metrics']:\n if k in logs:\n self.log_values.append((k, logs[k]))\n if self.verbose:\n self.progbar.update(self.seen, self.log_values)\n\n\nclass History(Callback):\n \"\"\"Callback that records events into a `History` object.\n\n This callback is automatically applied to\n every Keras model. 
The `History` object\n gets returned by the `fit` method of models.\n \"\"\"\n\n def on_train_begin(self, logs=None):\n self.epoch = []\n self.history = {}\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epoch.append(epoch)\n for k, v in logs.items():\n self.history.setdefault(k, []).append(v)\n\n\nclass ModelCheckpoint(Callback):\n \"\"\"Save the model after every epoch.\n\n `filepath` can contain named formatting options,\n which will be filled the value of `epoch` and\n keys in `logs` (passed in `on_epoch_end`).\n\n For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,\n then the model checkpoints will be saved with the epoch number and\n the validation loss in the filename.\n\n # Arguments\n filepath: string, path to save the model file.\n monitor: quantity to monitor.\n verbose: verbosity mode, 0 or 1.\n save_best_only: if `save_best_only=True`,\n the latest best model according to\n the quantity monitored will not be overwritten.\n mode: one of {auto, min, max}.\n If `save_best_only=True`, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if True, then only the model's weights will be\n saved (`model.save_weights(filepath)`), else the full model\n is saved (`model.save(filepath)`).\n period: Interval (number of epochs) between checkpoints.\n \"\"\"\n\n def __init__(self, filepath, monitor='val_loss', verbose=0,\n save_best_only=False, save_weights_only=False,\n mode='auto', period=1):\n super(ModelCheckpoint, self).__init__()\n self.monitor = monitor\n self.verbose = verbose\n self.filepath = filepath\n self.save_best_only = save_best_only\n self.save_weights_only = save_weights_only\n self.period = period\n self.epochs_since_last_save = 0\n\n if mode not in ['auto', 'min', 'max']:\n warnings.warn('ModelCheckpoint mode %s is unknown, '\n 'fallback to auto mode.' % (mode),\n RuntimeWarning)\n mode = 'auto'\n\n if mode == 'min':\n self.monitor_op = np.less\n self.best = np.Inf\n elif mode == 'max':\n self.monitor_op = np.greater\n self.best = -np.Inf\n else:\n if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):\n self.monitor_op = np.greater\n self.best = -np.Inf\n else:\n self.monitor_op = np.less\n self.best = np.Inf\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epochs_since_last_save += 1\n if self.epochs_since_last_save >= self.period:\n self.epochs_since_last_save = 0\n filepath = self.filepath.format(epoch=epoch + 1, **logs)\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn('Can save best model only with %s available, '\n 'skipping.' 
% (self.monitor), RuntimeWarning)\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n print('\\nEpoch %05d: %s improved from %0.5f to %0.5f,'\n ' saving model to %s'\n % (epoch + 1, self.monitor, self.best,\n current, filepath))\n self.best = current\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True)\n else:\n self.model.save(filepath, overwrite=True)\n else:\n if self.verbose > 0:\n print('\\nEpoch %05d: %s did not improve from %0.5f' %\n (epoch + 1, self.monitor, self.best))\n else:\n if self.verbose > 0:\n print('\\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True)\n else:\n self.model.save(filepath, overwrite=True)\n\n\nclass EarlyStopping(Callback):\n \"\"\"Stop training when a monitored quantity has stopped improving.\n\n # Arguments\n monitor: quantity to be monitored.\n min_delta: minimum change in the monitored quantity\n to qualify as an improvement, i.e. an absolute\n change of less than min_delta, will count as no\n improvement.\n patience: number of epochs with no improvement\n after which training will be stopped.\n verbose: verbosity mode.\n mode: one of {auto, min, max}. In `min` mode,\n training will stop when the quantity\n monitored has stopped decreasing; in `max`\n mode it will stop when the quantity\n monitored has stopped increasing; in `auto`\n mode, the direction is automatically inferred\n from the name of the monitored quantity.\n baseline: Baseline value for the monitored quantity to reach.\n Training will stop if the model doesn't show improvement\n over the baseline.\n restore_best_weights: whether to restore model weights from\n the epoch with the best value of the monitored quantity.\n If False, the model weights obtained at the last step of\n training are used.\n \"\"\"\n\n def __init__(self,\n monitor='val_loss',\n min_delta=0,\n patience=0,\n verbose=0,\n mode='auto',\n baseline=None,\n restore_best_weights=False):\n super(EarlyStopping, self).__init__()\n\n self.monitor = monitor\n self.baseline = baseline\n self.patience = patience\n self.verbose = verbose\n self.min_delta = min_delta\n self.wait = 0\n self.stopped_epoch = 0\n self.restore_best_weights = restore_best_weights\n self.best_weights = None\n\n if mode not in ['auto', 'min', 'max']:\n warnings.warn('EarlyStopping mode %s is unknown, '\n 'fallback to auto mode.' 
% mode,\n RuntimeWarning)\n mode = 'auto'\n\n if mode == 'min':\n self.monitor_op = np.less\n elif mode == 'max':\n self.monitor_op = np.greater\n else:\n if 'acc' in self.monitor:\n self.monitor_op = np.greater\n else:\n self.monitor_op = np.less\n\n if self.monitor_op == np.greater:\n self.min_delta *= 1\n else:\n self.min_delta *= -1\n\n def on_train_begin(self, logs=None):\n # Allow instances to be re-used\n self.wait = 0\n self.stopped_epoch = 0\n if self.baseline is not None:\n self.best = self.baseline\n else:\n self.best = np.Inf if self.monitor_op == np.less else -np.Inf\n\n def on_epoch_end(self, epoch, logs=None):\n current = self.get_monitor_value(logs)\n if current is None:\n return\n\n if self.monitor_op(current - self.min_delta, self.best):\n self.best = current\n self.wait = 0\n if self.restore_best_weights:\n self.best_weights = self.model.get_weights()\n else:\n self.wait += 1\n if self.wait >= self.patience:\n self.stopped_epoch = epoch\n self.model.stop_training = True\n if self.restore_best_weights:\n if self.verbose > 0:\n print('Restoring model weights from the end of '\n 'the best epoch')\n self.model.set_weights(self.best_weights)\n\n def on_train_end(self, logs=None):\n if self.stopped_epoch > 0 and self.verbose > 0:\n print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))\n\n def get_monitor_value(self, logs):\n monitor_value = logs.get(self.monitor)\n if monitor_value is None:\n warnings.warn(\n 'Early stopping conditioned on metric `%s` '\n 'which is not available. Available metrics are: %s' %\n (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning\n )\n return monitor_value\n\n\nclass RemoteMonitor(Callback):\n \"\"\"Callback used to stream events to a server.\n\n Requires the `requests` library.\n Events are sent to `root + '/publish/epoch/end/'` by default. Calls are\n HTTP POST, with a `data` argument which is a\n JSON-encoded dictionary of event data.\n If send_as_json is set to True, the content type of the request will be\n application/json. Otherwise the serialized JSON will be send within a form\n\n # Arguments\n root: String; root url of the target server.\n path: String; path relative to `root` to which the events will be sent.\n field: String; JSON field under which the data will be stored.\n The field is used only if the payload is sent within a form\n (i.e. 
send_as_json is set to False).\n headers: Dictionary; optional custom HTTP headers.\n send_as_json: Boolean; whether the request should be send as\n application/json.\n \"\"\"\n\n def __init__(self,\n root='http://localhost:9000',\n path='/publish/epoch/end/',\n field='data',\n headers=None,\n send_as_json=False):\n super(RemoteMonitor, self).__init__()\n\n self.root = root\n self.path = path\n self.field = field\n self.headers = headers\n self.send_as_json = send_as_json\n\n def on_epoch_end(self, epoch, logs=None):\n if requests is None:\n raise ImportError('RemoteMonitor requires '\n 'the `requests` library.')\n logs = logs or {}\n send = {}\n send['epoch'] = epoch\n for k, v in logs.items():\n if isinstance(v, (np.ndarray, np.generic)):\n send[k] = v.item()\n else:\n send[k] = v\n try:\n if self.send_as_json:\n requests.post(self.root + self.path, json=send, headers=self.headers)\n else:\n requests.post(self.root + self.path,\n {self.field: json.dumps(send)},\n headers=self.headers)\n except requests.exceptions.RequestException:\n warnings.warn('Warning: could not reach RemoteMonitor '\n 'root server at ' + str(self.root))\n\n\nclass LearningRateScheduler(Callback):\n \"\"\"Learning rate scheduler.\n\n # Arguments\n schedule: a function that takes an epoch index as input\n (integer, indexed from 0) and current learning rate\n and returns a new learning rate as output (float).\n verbose: int. 0: quiet, 1: update messages.\n \"\"\"\n\n def __init__(self, schedule, verbose=0):\n super(LearningRateScheduler, self).__init__()\n self.schedule = schedule\n self.verbose = verbose\n\n def on_epoch_begin(self, epoch, logs=None):\n if not hasattr(self.model.optimizer, 'lr'):\n raise ValueError('Optimizer must have a \"lr\" attribute.')\n lr = float(K.get_value(self.model.optimizer.lr))\n try: # new API\n lr = self.schedule(epoch, lr)\n except TypeError: # old API for backward compatibility\n lr = self.schedule(epoch)\n if not isinstance(lr, (float, np.float32, np.float64)):\n raise ValueError('The output of the \"schedule\" function '\n 'should be float.')\n K.set_value(self.model.optimizer.lr, lr)\n if self.verbose > 0:\n print('\\nEpoch %05d: LearningRateScheduler setting learning '\n 'rate to %s.' % (epoch + 1, lr))\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n logs['lr'] = K.get_value(self.model.optimizer.lr)\n\n\nclass TensorBoard(Callback):\n \"\"\"TensorBoard basic visualizations.\n\n [TensorBoard](https://www.tensorflow.org/get_started/summaries_and_tensorboard)\n is a visualization tool provided with TensorFlow.\n\n This callback writes a log for TensorBoard, which allows\n you to visualize dynamic graphs of your training and test\n metrics, as well as activation histograms for the different\n layers in your model.\n\n If you have installed TensorFlow with pip, you should be able\n to launch TensorBoard from the command line:\n ```sh\n tensorboard --logdir=/full_path_to_your_logs\n ```\n\n When using a backend other than TensorFlow, TensorBoard will still work\n (if you have TensorFlow installed), but the only feature available will\n be the display of the losses and metrics plots.\n\n # Arguments\n log_dir: the path of the directory where to save the log\n files to be parsed by TensorBoard.\n histogram_freq: frequency (in epochs) at which to compute activation\n and weight histograms for the layers of the model. If set to 0,\n histograms won't be computed. 
Validation data (or split) must be\n specified for histogram visualizations.\n write_graph: whether to visualize the graph in TensorBoard.\n The log file can become quite large when\n write_graph is set to True.\n write_grads: whether to visualize gradient histograms in TensorBoard.\n `histogram_freq` must be greater than 0.\n batch_size: size of batch of inputs to feed to the network\n for histograms computation.\n write_images: whether to write model weights to visualize as\n image in TensorBoard.\n embeddings_freq: frequency (in epochs) at which selected embedding\n layers will be saved. If set to 0, embeddings won't be computed.\n Data to be visualized in TensorBoard's Embedding tab must be passed\n as `embeddings_data`.\n embeddings_layer_names: a list of names of layers to keep eye on. If\n None or empty list all the embedding layer will be watched.\n embeddings_metadata: a dictionary which maps layer name to a file name\n in which metadata for this embedding layer is saved. See the\n [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)\n about metadata files format. In case if the same metadata file is\n used for all embedding layers, string can be passed.\n embeddings_data: data to be embedded at layers specified in\n `embeddings_layer_names`. Numpy array (if the model has a single\n input) or list of Numpy arrays (if the model has multiple inputs).\n Learn [more about embeddings](\n https://www.tensorflow.org/programmers_guide/embedding).\n update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, writes\n the losses and metrics to TensorBoard after each batch. The same\n applies for `'epoch'`. If using an integer, let's say `10000`,\n the callback will write the metrics and losses to TensorBoard every\n 10000 samples. Note that writing too frequently to TensorBoard\n can slow down your training.\n \"\"\"\n\n def __init__(self, log_dir='./logs',\n histogram_freq=0,\n batch_size=32,\n write_graph=True,\n write_grads=False,\n write_images=False,\n embeddings_freq=0,\n embeddings_layer_names=None,\n embeddings_metadata=None,\n embeddings_data=None,\n update_freq='epoch'):\n super(TensorBoard, self).__init__()\n global tf, projector\n try:\n import tensorflow as tf\n from tensorflow.contrib.tensorboard.plugins import projector\n except ImportError:\n raise ImportError('You need the TensorFlow module installed to '\n 'use TensorBoard.')\n\n if K.backend() != 'tensorflow':\n if histogram_freq != 0:\n warnings.warn('You are not using the TensorFlow backend. '\n 'histogram_freq was set to 0')\n histogram_freq = 0\n if write_graph:\n warnings.warn('You are not using the TensorFlow backend. '\n 'write_graph was set to False')\n write_graph = False\n if write_images:\n warnings.warn('You are not using the TensorFlow backend. '\n 'write_images was set to False')\n write_images = False\n if embeddings_freq != 0:\n warnings.warn('You are not using the TensorFlow backend. 
'\n 'embeddings_freq was set to 0')\n embeddings_freq = 0\n\n self.log_dir = log_dir\n self.histogram_freq = histogram_freq\n self.merged = None\n self.write_graph = write_graph\n self.write_grads = write_grads\n self.write_images = write_images\n self.embeddings_freq = embeddings_freq\n self.embeddings_layer_names = embeddings_layer_names\n self.embeddings_metadata = embeddings_metadata or {}\n self.batch_size = batch_size\n self.embeddings_data = embeddings_data\n if update_freq == 'batch':\n # It is the same as writing as frequently as possible.\n self.update_freq = 1\n else:\n self.update_freq = update_freq\n self.samples_seen = 0\n self.samples_seen_at_last_write = 0\n\n def set_model(self, model):\n self.model = model\n if K.backend() == 'tensorflow':\n self.sess = K.get_session()\n if self.histogram_freq and self.merged is None:\n for layer in self.model.layers:\n for weight in layer.weights:\n mapped_weight_name = weight.name.replace(':', '_')\n tf.summary.histogram(mapped_weight_name, weight)\n if self.write_grads:\n grads = model.optimizer.get_gradients(model.total_loss,\n weight)\n\n def is_indexed_slices(grad):\n return type(grad).__name__ == 'IndexedSlices'\n grads = [\n grad.values if is_indexed_slices(grad) else grad\n for grad in grads]\n tf.summary.histogram('{}_grad'.format(mapped_weight_name),\n grads)\n if self.write_images:\n w_img = tf.squeeze(weight)\n shape = K.int_shape(w_img)\n if len(shape) == 2: # dense layer kernel case\n if shape[0] > shape[1]:\n w_img = tf.transpose(w_img)\n shape = K.int_shape(w_img)\n w_img = tf.reshape(w_img, [1,\n shape[0],\n shape[1],\n 1])\n elif len(shape) == 3: # convnet case\n if K.image_data_format() == 'channels_last':\n # switch to channels_first to display\n # every kernel as a separate image\n w_img = tf.transpose(w_img, perm=[2, 0, 1])\n shape = K.int_shape(w_img)\n w_img = tf.reshape(w_img, [shape[0],\n shape[1],\n shape[2],\n 1])\n elif len(shape) == 1: # bias case\n w_img = tf.reshape(w_img, [1,\n shape[0],\n 1,\n 1])\n else:\n # not possible to handle 3D convnets etc.\n continue\n\n shape = K.int_shape(w_img)\n assert len(shape) == 4 and shape[-1] in [1, 3, 4]\n tf.summary.image(mapped_weight_name, w_img)\n\n if hasattr(layer, 'output'):\n if isinstance(layer.output, list):\n for i, output in enumerate(layer.output):\n tf.summary.histogram('{}_out_{}'.format(layer.name, i),\n output)\n else:\n tf.summary.histogram('{}_out'.format(layer.name),\n layer.output)\n self.merged = tf.summary.merge_all()\n\n if self.write_graph:\n self.writer = tf.summary.FileWriter(self.log_dir,\n self.sess.graph)\n else:\n self.writer = tf.summary.FileWriter(self.log_dir)\n\n if self.embeddings_freq and self.embeddings_data is not None:\n self.embeddings_data = standardize_input_data(self.embeddings_data,\n model.input_names)\n\n embeddings_layer_names = self.embeddings_layer_names\n\n if not embeddings_layer_names:\n embeddings_layer_names = [layer.name for layer in self.model.layers\n if type(layer).__name__ == 'Embedding']\n self.assign_embeddings = []\n embeddings_vars = {}\n\n self.batch_id = batch_id = tf.placeholder(tf.int32)\n self.step = step = tf.placeholder(tf.int32)\n\n for layer in self.model.layers:\n if layer.name in embeddings_layer_names:\n embedding_input = self.model.get_layer(layer.name).output\n embedding_size = np.prod(embedding_input.shape[1:])\n embedding_input = tf.reshape(embedding_input,\n (step, int(embedding_size)))\n shape = (self.embeddings_data[0].shape[0], int(embedding_size))\n embedding = 
tf.Variable(tf.zeros(shape),\n name=layer.name + '_embedding')\n embeddings_vars[layer.name] = embedding\n batch = tf.assign(embedding[batch_id:batch_id + step],\n embedding_input)\n self.assign_embeddings.append(batch)\n\n self.saver = tf.train.Saver(list(embeddings_vars.values()))\n\n if not isinstance(self.embeddings_metadata, str):\n embeddings_metadata = self.embeddings_metadata\n else:\n embeddings_metadata = {layer_name: self.embeddings_metadata\n for layer_name in embeddings_vars.keys()}\n\n config = projector.ProjectorConfig()\n\n for layer_name, tensor in embeddings_vars.items():\n embedding = config.embeddings.add()\n embedding.tensor_name = tensor.name\n\n if layer_name in embeddings_metadata:\n embedding.metadata_path = embeddings_metadata[layer_name]\n\n projector.visualize_embeddings(self.writer, config)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n\n if not self.validation_data and self.histogram_freq:\n raise ValueError(\"If printing histograms, validation_data must be \"\n \"provided, and cannot be a generator.\")\n if self.embeddings_data is None and self.embeddings_freq:\n raise ValueError(\"To visualize embeddings, embeddings_data must \"\n \"be provided.\")\n if self.validation_data and self.histogram_freq:\n if epoch % self.histogram_freq == 0:\n\n val_data = self.validation_data\n tensors = (self.model.inputs +\n self.model.targets +\n self.model.sample_weights)\n\n if self.model.uses_learning_phase:\n tensors += [K.learning_phase()]\n\n assert len(val_data) == len(tensors)\n val_size = val_data[0].shape[0]\n i = 0\n while i < val_size:\n step = min(self.batch_size, val_size - i)\n if self.model.uses_learning_phase:\n # do not slice the learning phase\n batch_val = [x[i:i + step] for x in val_data[:-1]]\n batch_val.append(val_data[-1])\n else:\n batch_val = [x[i:i + step] for x in val_data]\n assert len(batch_val) == len(tensors)\n feed_dict = dict(zip(tensors, batch_val))\n result = self.sess.run([self.merged], feed_dict=feed_dict)\n summary_str = result[0]\n self.writer.add_summary(summary_str, epoch)\n i += self.batch_size\n\n if self.embeddings_freq and self.embeddings_data is not None:\n if epoch % self.embeddings_freq == 0:\n # We need a second forward-pass here because we're passing\n # the `embeddings_data` explicitly. This design allows to pass\n # arbitrary data as `embeddings_data` and results from the fact\n # that we need to know the size of the `tf.Variable`s which\n # hold the embeddings in `set_model`. 
At this point, however,\n # the `validation_data` is not yet set.\n\n # More details in this discussion:\n # https://github.com/keras-team/keras/pull/7766#issuecomment-329195622\n\n embeddings_data = self.embeddings_data\n n_samples = embeddings_data[0].shape[0]\n\n i = 0\n while i < n_samples:\n step = min(self.batch_size, n_samples - i)\n batch = slice(i, i + step)\n\n if type(self.model.input) == list:\n feed_dict = {_input: embeddings_data[idx][batch]\n for idx, _input in enumerate(self.model.input)}\n else:\n feed_dict = {self.model.input: embeddings_data[0][batch]}\n\n feed_dict.update({self.batch_id: i, self.step: step})\n\n if self.model.uses_learning_phase:\n feed_dict[K.learning_phase()] = False\n\n self.sess.run(self.assign_embeddings, feed_dict=feed_dict)\n self.saver.save(self.sess,\n os.path.join(self.log_dir,\n 'keras_embedding.ckpt'),\n epoch)\n\n i += self.batch_size\n\n if self.update_freq == 'epoch':\n index = epoch\n else:\n index = self.samples_seen\n self._write_logs(logs, index)\n\n def _write_logs(self, logs, index):\n for name, value in logs.items():\n if name in ['batch', 'size']:\n continue\n summary = tf.Summary()\n summary_value = summary.value.add()\n if isinstance(value, np.ndarray):\n summary_value.simple_value = value.item()\n else:\n summary_value.simple_value = value\n summary_value.tag = name\n self.writer.add_summary(summary, index)\n self.writer.flush()\n\n def on_train_end(self, _):\n self.writer.close()\n\n def on_batch_end(self, batch, logs=None):\n if self.update_freq != 'epoch':\n self.samples_seen += logs['size']\n samples_seen_since = self.samples_seen - self.samples_seen_at_last_write\n if samples_seen_since >= self.update_freq:\n self._write_logs(logs, self.samples_seen)\n self.samples_seen_at_last_write = self.samples_seen\n\n\nclass ReduceLROnPlateau(Callback):\n \"\"\"Reduce learning rate when a metric has stopped improving.\n\n Models often benefit from reducing the learning rate by a factor\n of 2-10 once learning stagnates. This callback monitors a\n quantity and if no improvement is seen for a 'patience' number\n of epochs, the learning rate is reduced.\n\n # Example\n\n ```python\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n patience=5, min_lr=0.001)\n model.fit(X_train, Y_train, callbacks=[reduce_lr])\n ```\n\n # Arguments\n monitor: quantity to be monitored.\n factor: factor by which the learning rate will\n be reduced. new_lr = lr * factor\n patience: number of epochs with no improvement\n after which learning rate will be reduced.\n verbose: int. 0: quiet, 1: update messages.\n mode: one of {auto, min, max}. 
In `min` mode,\n lr will be reduced when the quantity\n monitored has stopped decreasing; in `max`\n mode it will be reduced when the quantity\n monitored has stopped increasing; in `auto`\n mode, the direction is automatically inferred\n from the name of the monitored quantity.\n min_delta: threshold for measuring the new optimum,\n to only focus on significant changes.\n cooldown: number of epochs to wait before resuming\n normal operation after lr has been reduced.\n min_lr: lower bound on the learning rate.\n \"\"\"\n\n def __init__(self, monitor='val_loss', factor=0.1, patience=10,\n verbose=0, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0,\n **kwargs):\n super(ReduceLROnPlateau, self).__init__()\n\n self.monitor = monitor\n if factor >= 1.0:\n raise ValueError('ReduceLROnPlateau '\n 'does not support a factor >= 1.0.')\n if 'epsilon' in kwargs:\n min_delta = kwargs.pop('epsilon')\n warnings.warn('`epsilon` argument is deprecated and '\n 'will be removed, use `min_delta` instead.')\n self.factor = factor\n self.min_lr = min_lr\n self.min_delta = min_delta\n self.patience = patience\n self.verbose = verbose\n self.cooldown = cooldown\n self.cooldown_counter = 0 # Cooldown counter.\n self.wait = 0\n self.best = 0\n self.mode = mode\n self.monitor_op = None\n self._reset()\n\n def _reset(self):\n \"\"\"Resets wait counter and cooldown counter.\n \"\"\"\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.' % (self.mode),\n RuntimeWarning)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0\n\n def on_train_begin(self, logs=None):\n self._reset()\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n logs['lr'] = K.get_value(self.model.optimizer.lr)\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn(\n 'Reduce LR on plateau conditioned on metric `%s` '\n 'which is not available. Available metrics are: %s' %\n (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning\n )\n\n else:\n if self.in_cooldown():\n self.cooldown_counter -= 1\n self.wait = 0\n\n if self.monitor_op(current, self.best):\n self.best = current\n self.wait = 0\n elif not self.in_cooldown():\n self.wait += 1\n if self.wait >= self.patience:\n old_lr = float(K.get_value(self.model.optimizer.lr))\n if old_lr > self.min_lr:\n new_lr = old_lr * self.factor\n new_lr = max(new_lr, self.min_lr)\n K.set_value(self.model.optimizer.lr, new_lr)\n if self.verbose > 0:\n print('\\nEpoch %05d: ReduceLROnPlateau reducing '\n 'learning rate to %s.' % (epoch + 1, new_lr))\n self.cooldown_counter = self.cooldown\n self.wait = 0\n\n def in_cooldown(self):\n return self.cooldown_counter > 0\n\n\nclass CSVLogger(Callback):\n \"\"\"Callback that streams epoch results to a csv file.\n\n Supports all values that can be represented as a string,\n including 1D iterables such as np.ndarray.\n\n # Example\n\n ```python\n csv_logger = CSVLogger('training.log')\n model.fit(X_train, Y_train, callbacks=[csv_logger])\n ```\n\n # Arguments\n filename: filename of the csv file, e.g. 'run/log.csv'.\n separator: string used to separate elements in the csv file.\n append: True: append if file exists (useful for continuing\n training). 
False: overwrite existing file,\n \"\"\"\n\n def __init__(self, filename, separator=',', append=False):\n self.sep = separator\n self.filename = filename\n self.append = append\n self.writer = None\n self.keys = None\n self.append_header = True\n if six.PY2:\n self.file_flags = 'b'\n self._open_args = {}\n else:\n self.file_flags = ''\n self._open_args = {'newline': '\\n'}\n super(CSVLogger, self).__init__()\n\n def on_train_begin(self, logs=None):\n if self.append:\n if os.path.exists(self.filename):\n with open(self.filename, 'r' + self.file_flags) as f:\n self.append_header = not bool(len(f.readline()))\n mode = 'a'\n else:\n mode = 'w'\n self.csv_file = io.open(self.filename,\n mode + self.file_flags,\n **self._open_args)\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n\n def handle_value(k):\n is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n if isinstance(k, six.string_types):\n return k\n elif isinstance(k, Iterable) and not is_zero_dim_ndarray:\n return '\"[%s]\"' % (', '.join(map(str, k)))\n else:\n return k\n\n if self.keys is None:\n self.keys = sorted(logs.keys())\n\n if self.model.stop_training:\n # We set NA so that csv parsers do not fail for this last epoch.\n logs = dict([(k, logs[k] if k in logs else 'NA') for k in self.keys])\n\n if not self.writer:\n class CustomDialect(csv.excel):\n delimiter = self.sep\n fieldnames = ['epoch'] + self.keys\n if six.PY2:\n fieldnames = [unicode(x) for x in fieldnames]\n self.writer = csv.DictWriter(self.csv_file,\n fieldnames=fieldnames,\n dialect=CustomDialect)\n if self.append_header:\n self.writer.writeheader()\n\n row_dict = OrderedDict({'epoch': epoch})\n row_dict.update((key, handle_value(logs[key])) for key in self.keys)\n self.writer.writerow(row_dict)\n self.csv_file.flush()\n\n def on_train_end(self, logs=None):\n self.csv_file.close()\n self.writer = None\n\n\nclass LambdaCallback(Callback):\n r\"\"\"Callback for creating simple, custom callbacks on-the-fly.\n\n This callback is constructed with anonymous functions that will be called\n at the appropriate time. Note that the callbacks expects positional\n arguments, as:\n\n - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:\n `epoch`, `logs`\n - `on_batch_begin` and `on_batch_end` expect two positional arguments:\n `batch`, `logs`\n - `on_train_begin` and `on_train_end` expect one positional argument:\n `logs`\n\n # Arguments\n on_epoch_begin: called at the beginning of every epoch.\n on_epoch_end: called at the end of every epoch.\n on_batch_begin: called at the beginning of every batch.\n on_batch_end: called at the end of every batch.\n on_train_begin: called at the beginning of model training.\n on_train_end: called at the end of model training.\n\n # Example\n\n ```python\n # Print the batch number at the beginning of every batch.\n batch_print_callback = LambdaCallback(\n on_batch_begin=lambda batch,logs: print(batch))\n\n # Stream the epoch loss to a file in JSON format. 
The file content\n # is not well-formed JSON but rather has a JSON object per line.\n import json\n json_log = open('loss_log.json', mode='wt', buffering=1)\n json_logging_callback = LambdaCallback(\n on_epoch_end=lambda epoch, logs: json_log.write(\n json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\\n'),\n on_train_end=lambda logs: json_log.close()\n )\n\n # Terminate some processes after having finished model training.\n processes = ...\n cleanup_callback = LambdaCallback(\n on_train_end=lambda logs: [\n p.terminate() for p in processes if p.is_alive()])\n\n model.fit(...,\n callbacks=[batch_print_callback,\n json_logging_callback,\n cleanup_callback])\n ```\n \"\"\"\n\n def __init__(self,\n on_epoch_begin=None,\n on_epoch_end=None,\n on_batch_begin=None,\n on_batch_end=None,\n on_train_begin=None,\n on_train_end=None,\n **kwargs):\n super(LambdaCallback, self).__init__()\n self.__dict__.update(kwargs)\n if on_epoch_begin is not None:\n self.on_epoch_begin = on_epoch_begin\n else:\n self.on_epoch_begin = lambda epoch, logs: None\n if on_epoch_end is not None:\n self.on_epoch_end = on_epoch_end\n else:\n self.on_epoch_end = lambda epoch, logs: None\n if on_batch_begin is not None:\n self.on_batch_begin = on_batch_begin\n else:\n self.on_batch_begin = lambda batch, logs: None\n if on_batch_end is not None:\n self.on_batch_end = on_batch_end\n else:\n self.on_batch_end = lambda batch, logs: None\n if on_train_begin is not None:\n self.on_train_begin = on_train_begin\n else:\n self.on_train_begin = lambda logs: None\n if on_train_end is not None:\n self.on_train_end = on_train_end\n else:\n self.on_train_end = lambda logs: None\n"
] |
[
[
"tensorflow.zeros",
"numpy.greater",
"numpy.less",
"tensorflow.summary.image",
"tensorflow.squeeze",
"tensorflow.Summary",
"numpy.isnan",
"numpy.median",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"tensorflow.summary.histogram",
"tensorflow.summary.FileWriter",
"tensorflow.transpose",
"tensorflow.assign",
"tensorflow.reshape",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"numpy.prod",
"numpy.isinf"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
snakers4/deepspeech.pytorch
|
[
"4c9e9648fe56247169d73248340f3a60572d3f5b"
] |
[
"train.py"
] |
[
"import os\nimport gc\nimport json\nimport time\nimport tqdm\nimport argparse\nimport datetime\n\nimport torch.distributed as dist\nimport torch.utils.data.distributed\nfrom warpctc_pytorch import CTCLoss\n\nfrom novograd import (AdamW,\n Novograd)\nfrom linknet import (SemsegLoss,\n MaskSimilarity)\nfrom decoder import GreedyDecoder\nfrom model import DeepSpeech, supported_rnns\nfrom data.utils import reduce_tensor, get_cer_wer\nfrom data.data_loader_aug import (SpectrogramDataset,\n BucketingSampler,\n BucketingLenSampler,\n DistributedBucketingSampler)\n\n\nimport torch\nimport warnings\nfrom torch._six import inf\n\ntq = tqdm.tqdm\n\nVISIBLE_DEVICES = os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',') or ['0']\n\nparser = argparse.ArgumentParser(description='DeepSpeech training')\nparser.add_argument('--train-manifest', metavar='DIR',\n help='path to train manifest csv', default='data/train_manifest.csv')\nparser.add_argument('--cache-dir', metavar='DIR',\n help='path to save temp audio', default='data/cache/')\nparser.add_argument('--train-val-manifest', metavar='DIR',\n help='path to train validation manifest csv', default='')\nparser.add_argument('--val-manifest', metavar='DIR',\n help='path to validation manifest csv', default='data/val_manifest.csv')\nparser.add_argument('--curriculum', metavar='DIR',\n help='path to curriculum file', default='')\nparser.add_argument('--use-curriculum', action='store_true', default=False)\nparser.add_argument('--curriculum-ratio', default=0.5, type=float)\nparser.add_argument('--cl-point', default=0.1, type=float)\nparser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')\nparser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')\nparser.add_argument('--val-batch-size', default=20, type=int, help='Batch size for training')\nparser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')\n\nparser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')\nparser.add_argument('--phonemes-path', default='phonemes_ru.json', help='Contains all phonemes for the Russian language')\nparser.add_argument('--use-bpe', dest='use_bpe', action='store_true', help='Use sentencepiece BPE tokens')\nparser.add_argument('--sp-model', dest='sp_model', default='data/spm_train_v05_cleaned_asr_10s_phoneme.model',\n type=str, help='Pre-trained sentencepiece model')\n\nparser.add_argument('--use-phonemes', action='store_true', default=False)\nparser.add_argument('--phonemes-only', action='store_true', default=False)\nparser.add_argument('--omit-spaces', action='store_true', default=False)\nparser.add_argument('--subword-regularization', action='store_true', default=False)\n\nparser.add_argument('--batch-similar-lens', dest='batch_similar_lens', action='store_true',\n help='Force usage of sampler that batches items with similar duration together')\n\nparser.add_argument('--pytorch-mel', action='store_true', help='Use pytorch based STFT + MEL')\nparser.add_argument('--pytorch-stft', action='store_true', help='Use pytorch based STFT')\nparser.add_argument('--denoise', action='store_true', help='Train a denoising head')\n\n\nparser.add_argument('--use-attention', action='store_true', help='Use attention based decoder instead of CTC')\nparser.add_argument('--double-supervision', action='store_true', help='Use both CTC and attention in sequence')\nparser.add_argument('--naive-split', action='store_true', help='Use a naive DS2 inspired 
syllable split')\nparser.add_argument('--grapheme-phoneme', action='store_true', help='Use both phonemes and graphemes with BPE to train from scratch')\n\nparser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')\nparser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')\nparser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')\nparser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')\nparser.add_argument('--cnn-width', default=256, type=int, help='w2l-like network width')\nparser.add_argument('--kernel-size', default=7, type=int, help='cnn kernel size')\n\nparser.add_argument('--hidden-layers', default=6, type=int, help='Number of RNN layers')\n\nparser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')\nparser.add_argument('--decoder-layers', default=4, type=int)\nparser.add_argument('--decoder-girth', default=1, type=int)\n\nparser.add_argument('--dropout', default=0, type=float, help='Fixed dropout for CNN based models')\nparser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')\nparser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')\nparser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')\nparser.add_argument('--optimizer', default='sgd', help='Optimizer - sgd or adam')\nparser.add_argument('--weight-decay', default=0, help='Weight decay for SGD', type=float)\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--batch-norm-momentum', default=0.1, type=float, help='BatchNorm momentum')\n\nparser.add_argument('--max-norm', default=100, type=int, help='Norm cutoff to prevent explosion of gradients')\nparser.add_argument('--norm-warmup-epochs', default=1000, type=int, help='Do gradient clipping only before some epoch')\nparser.add_argument('--gradient-accumulation-steps', default=1, type=int, help='Accumulate gradients for some time first')\n\nparser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing applied to learning rate every epoch')\nparser.add_argument('--checkpoint-anneal', default=1.0, type=float,\n help='Annealing applied to learning rate every checkpoint')\nparser.add_argument('--silent', dest='silent', action='store_true', help='Turn off progress tracking per iteration')\nparser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')\nparser.add_argument('--checkpoint-per-samples', default=0, type=int, help='Save checkpoint per samples. 
0 means never save')\nparser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')\nparser.add_argument('--enorm', dest='enorm', action='store_true', help='Turn on enorm ( https://github.com/facebookresearch/enorm )')\nparser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')\nparser.add_argument('--log-dir', default='visualize/deepspeech_final', help='Location of tensorboard log')\nparser.add_argument('--log-params', dest='log_params', action='store_true', help='Log parameter values and gradients')\nparser.add_argument('--id', default='Deepspeech training', help='Identifier for visdom/tensorboard run')\nparser.add_argument('--save-folder', default='models/', help='Location to save epoch models')\nparser.add_argument('--continue-from', default='', help='Continue from checkpoint model')\nparser.add_argument('--norm', default='max_frame', action=\"store\",\n help='Normalize sounds. Choices: \"mean\", \"frame\", \"max_frame\", \"none\"')\nparser.add_argument('--finetune', dest='finetune', action='store_true',\n help='Finetune the model from checkpoint \"continue_from\"')\nparser.add_argument('--augment', dest='augment', action='store_true', help='Use random tempo and gain perturbations.')\nparser.add_argument('--noise-dir', default=None,\n help='Directory to inject noise into audio. If default, noise Inject not added')\nparser.add_argument('--noise-prob', default=0.4, type=float, help='Probability of noise being added per sample')\nparser.add_argument('--aug-type', default=0, type=int, help='Type of augs to use')\nparser.add_argument('--aug-prob-8khz', default=0, type=float, help='Probability of dropping half of stft frequencies, robustness to 8kHz audio')\nparser.add_argument('--aug-prob-spect', default=0, type=float, help='Probability of applying spectrogram based augmentations')\nparser.add_argument('--noise-min', default=0.0,\n help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)\nparser.add_argument('--noise-max', default=0.5,\n help='Maximum noise levels to sample from. 
Maximum 1.0', type=float)\nparser.add_argument('--no-shuffle', dest='no_shuffle', action='store_true',\n help='Turn off shuffling and sample from dataset based on sequence length (smallest to largest)')\nparser.add_argument('--no-sortaGrad', dest='no_sorta_grad', action='store_true',\n help='Turn off ordering of dataset on sequence length for the first epoch.')\nparser.add_argument('--reverse-sort', dest='reverse_sort', action='store_true',\n help='Turn off reverse ordering of dataset on sequence length for the first epoch.')\nparser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,\n help='Turn off bi-directional RNNs, introduces lookahead convolution')\nparser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='gloo', type=str, help='distributed backend')\nparser.add_argument('--world-size', default=1, type=int,\n help='number of distributed processes')\nparser.add_argument('--rank', default=0, type=int,\n help='The rank of this process')\nparser.add_argument('--gpu-rank', default=None,\n help='If using distributed parallel for multi-gpu, sets the GPU for the process')\nparser.add_argument('--data-parallel', dest='data_parallel', action='store_true',\n help='Use data parallel')\n\nparser.add_argument('--use-lookahead', dest='use_lookahead', action='store_true',\n help='Use look ahead optimizer')\n\n\ntorch.manual_seed(123456)\ntorch.cuda.manual_seed_all(123456)\n\n\ndef to_np(x):\n return x.data.cpu().numpy()\n\n\ndef clip_grad_norm_(parameters, max_norm, norm_type=2):\n r\"\"\"Clips gradient norm of an iterable of parameters.\n\n The norm is computed over all gradients together, as if they were\n concatenated into a single vector. Gradients are modified in-place.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n total_norm = total_norm ** (1. / norm_type)\n clip_coef = max_norm / (total_norm + 1e-6)\n # print(clip_coef)\n if clip_coef < 1:\n for p in parameters:\n p.grad.data.mul_(clip_coef)\n return total_norm\n\n\ndef calc_grad_norm(parameters, max_norm, norm_type=2):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n else:\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n total_norm = total_norm ** (1. 
/ norm_type)\n clip_coef = max_norm / (total_norm + 1e-6)\n return clip_coef\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass MultipleOptimizer(object):\n def __init__(self, op):\n self.optimizers = op\n\n def zero_grad(self):\n for op in self.optimizers:\n op.zero_grad()\n\n def step(self):\n for op in self.optimizers:\n op.step()\n\n def state_dict(self):\n out = [op.state_dict() for op in self.optimizers]\n return out\n\n def load_state_dict(self,\n states):\n assert len(states) == len(self.optimizers)\n for i in range(len(self.optimizers)):\n self.optimizers[i].load_state_dict(states[i])\n\n\ndef build_optimizer(args_,\n parameters_=None,\n model=None):\n # import aggmo\n # return aggmo.AggMo(model.parameters(), args_.lr, betas=[0, 0.6, 0.9])\n if args_.weight_decay > 0:\n print('Using weight decay {} for SGD'.format(args_.weight_decay))\n\n if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:\n import itertools\n\n adam_lr = 1e-4 # / 10\n sgd_lr = args_.lr\n\n print('Using double supervision, SGD with clipping for CTC, ADAM for s2s')\n print('SGD LR {} / ADAM LR {}'.format(sgd_lr, adam_lr))\n\n if 'transformer' in args.rnn_type:\n print('Using transformer-type double optimizer')\n params_ctc = [model.rnns.layers.parameters()]\n params_adam = [model.rnns.decoder.parameters(),\n model.fc.parameters()]\n else:\n params_ctc = [model.rnns.layers.parameters(),\n model.rnns.ctc_decoder.parameters(),\n model.rnns.ctc_fc.parameters()]\n params_adam = [model.rnns.s2s_decoder.parameters()]\n\n ctc_optimizer = torch.optim.SGD(itertools.chain(*params_ctc),\n lr=args_.lr,\n momentum=args_.momentum,\n nesterov=True)\n s2s_optimizer = torch.optim.Adam(itertools.chain(*params_adam),\n lr=adam_lr)\n\n return MultipleOptimizer([ctc_optimizer, s2s_optimizer])\n elif args_.optimizer == 'sgd':\n print('Using SGD')\n try:\n base_optimizer = torch.optim.SGD(parameters_, lr=args_.lr,\n momentum=args_.momentum, nesterov=True,\n weight_decay=args_.weight_decay)\n if args_.use_lookahead:\n print('Using SGD + Lookahead')\n from lookahead import Lookahead\n return Lookahead(base_optimizer=base_optimizer,\n k=5,\n alpha=0.5)\n return base_optimizer\n except:\n # wo nesterov\n return torch.optim.SGD(parameters_, lr=args_.lr,\n momentum=args_.momentum, nesterov=False,\n weight_decay=args_.weight_decay)\n elif args_.optimizer=='adam':\n print('Using ADAM')\n return torch.optim.Adam(parameters_, lr=args_.lr)\n elif args_.optimizer=='novograd':\n print('Using Novograd')\n return Novograd(parameters_, lr=args_.lr)\n elif args_.optimizer=='adamw':\n print('Using ADAMW')\n return AdamW(parameters_, lr=args_.lr)\n\nviz = None\ntensorboard_writer = None\n\n\nclass PlotWindow:\n def __init__(self, title, suffix, log_x=False, log_y=False):\n self.loss_results = torch.Tensor(10000)\n self.cer_results = torch.Tensor(10000)\n self.wer_results = torch.Tensor(10000)\n self.epochs = torch.arange(1, 10000)\n self.viz_window = None\n self.tb_subplot='/'+suffix\n\n global viz, tensorboard_writer\n hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]\n\n self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss', 'WER', 
'CER'])\n self.opts['layoutopts'] = {'plotly': {}}\n if log_x:\n self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}\n if log_y:\n self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}\n\n if args.visdom and is_leader:\n if viz is None:\n from visdom import Visdom\n viz = Visdom()\n\n if args.tensorboard and is_leader:\n os.makedirs(args.log_dir, exist_ok=True)\n if tensorboard_writer is None:\n from tensorboardX import SummaryWriter\n tensorboard_writer = SummaryWriter(args.log_dir)\n\n def plot_history(self, position):\n global viz, tensorboard_writer\n\n if is_leader and args.visdom:\n # Add previous scores to visdom graph\n x_axis = self.epochs[0:position]\n y_axis = torch.stack(\n (self.loss_results[0:position],\n self.wer_results[0:position],\n self.cer_results[0:position]),\n dim=1)\n self.viz_window = viz.line(\n X=x_axis,\n Y=y_axis,\n opts=self.opts,\n )\n if is_leader and args.tensorboard:\n # Previous scores to tensorboard logs\n for i in range(position):\n values = {\n 'Avg Train Loss': self.loss_results[i],\n 'Avg WER': self.wer_results[i],\n 'Avg CER': self.cer_results[i]\n }\n tensorboard_writer.add_scalars(args.id+self.tb_subplot,\n values, i + 1)\n\n def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):\n global viz, tensorboard_writer\n\n if args.visdom and is_leader:\n x_axis = self.epochs[0:epoch + 1]\n y_axis = torch.stack(\n (self.loss_results[0:epoch + 1],\n self.wer_results[0:epoch + 1],\n self.cer_results[0:epoch + 1]), dim=1)\n if self.viz_window is None:\n self.viz_window = viz.line(\n X=x_axis,\n Y=y_axis,\n opts=self.opts,\n )\n else:\n viz.line(\n X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1), # Visdom fix\n Y=y_axis,\n win=self.viz_window,\n update='replace',\n )\n if args.tensorboard and is_leader:\n values = {\n 'Avg Train Loss': avg_loss,\n 'Avg WER': wer_avg,\n 'Avg CER': cer_avg\n }\n tensorboard_writer.add_scalars(args.id+self.tb_subplot,\n values,\n epoch + 1)\n if args.log_params:\n for tag, value in model.named_parameters():\n tag = tag.replace('.', '/')\n tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)\n tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)\n\n\nclass LRPlotWindow:\n def __init__(self, title, suffix, log_x=False, log_y=False):\n self.loss_results = torch.Tensor(10000)\n self.epochs = torch.Tensor(10000)\n self.viz_window = None\n self.suffix = suffix\n self.tb_subplot='/'+suffix\n\n global viz, tensorboard_writer\n hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]\n\n self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss'])\n self.opts['layoutopts'] = {'plotly': {}}\n if log_x:\n self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}\n if log_y:\n self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}\n\n if args.visdom and is_leader:\n if viz is None:\n from visdom import Visdom\n viz = Visdom()\n\n if args.tensorboard and is_leader:\n os.makedirs(args.log_dir, exist_ok=True)\n if tensorboard_writer is None:\n from tensorboardX import SummaryWriter\n tensorboard_writer = SummaryWriter(args.log_dir)\n\n def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):\n global viz, tensorboard_writer\n\n if args.visdom and is_leader:\n x_axis = self.epochs[0:epoch + 1]\n y_axis = torch.stack((\n self.loss_results[0:epoch + 1],\n ), dim=1)\n if self.viz_window is None:\n self.viz_window = viz.line(\n X=x_axis,\n Y=y_axis,\n opts=self.opts,\n )\n else:\n viz.line(\n X=x_axis,\n 
Y=y_axis,\n win=self.viz_window,\n update='replace',\n )\n if args.tensorboard and is_leader:\n values = {\n 'Avg Train Loss': avg_loss,\n }\n tensorboard_writer.add_scalars(args.id+self.tb_subplot,\n values, epoch + 1)\n if args.log_params:\n for tag, value in model.named_parameters():\n tag = tag.replace('.', '/')\n tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)\n tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)\n\n\ndef get_lr():\n if args.use_lookahead:\n return optimizer.optimizer.state_dict()['param_groups'][0]['lr']\n if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:\n # SGD state\n optim_state = optimizer.optimizers[0].state_dict()\n else:\n optim_state = optimizer.state_dict()\n return optim_state['param_groups'][0]['lr']\n\n\ndef set_lr(lr):\n print('Learning rate annealed to: {lr:.6g}'.format(lr=lr))\n if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:\n # ADAM's LR typically is set 10x lower than SGD\n sgd_optim_state = optimizer.optimizers[0].state_dict()\n sgd_optim_state['param_groups'][0]['lr'] = lr\n optimizer.optimizers[0].load_state_dict(sgd_optim_state)\n\n adam_optim_state = optimizer.optimizers[1].state_dict()\n # always fixed for adam\n adam_optim_state['param_groups'][0]['lr'] = 1e-4\n optimizer.optimizers[1].load_state_dict(adam_optim_state)\n elif args.use_lookahead:\n optim_state = optimizer.optimizer.state_dict()\n optim_state['param_groups'][0]['lr'] = lr\n optimizer.optimizer.load_state_dict(optim_state)\n else:\n optim_state = optimizer.state_dict()\n optim_state['param_groups'][0]['lr'] = lr\n optimizer.load_state_dict(optim_state)\n\n\ndef check_model_quality(epoch, checkpoint, train_loss, train_cer, train_wer):\n gc.collect()\n torch.cuda.empty_cache()\n\n val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0\n num_chars, num_words, num_losses = 0, 0, 0\n model.eval()\n\n with torch.no_grad():\n for i, data in tq(enumerate(test_loader), total=len(test_loader)):\n # use if full phoneme decoding will be required\n if False:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n phoneme_targets,\n phoneme_target_sizes) = data\n elif args.denoise:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n mask_targets) = data\n else:\n inputs, targets, filenames, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n\n # unflatten targets\n split_targets = []\n offset = 0\n for size in target_sizes:\n split_targets.append(targets[offset:offset + size])\n offset += size\n\n if args.use_attention:\n batch_size = inputs.size(0)\n max_len = max(target_sizes)\n # use CTC blank as pad token\n # ctc blank has an index of zero\n trg = torch.zeros(batch_size,\n max_len)\n assert len(target_sizes) == batch_size\n for _, split_target in enumerate(split_targets):\n trg[_, :target_sizes[_]] = split_target\n trg = trg.long().to(device)\n # trg_teacher_forcing = trg[:, :-1]\n trg_val = trg\n\n inputs = inputs.to(device)\n\n if args.use_phonemes or args.grapheme_phoneme:\n (logits, probs,\n output_sizes,\n phoneme_logits, phoneme_probs) = model(inputs, input_sizes)\n elif args.denoise:\n logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)\n elif args.use_attention:\n logits, output_sizes = model(inputs,\n lengths=input_sizes)\n # for our purposes they are the same\n probs = logits\n elif args.double_supervision:\n ctc_logits, s2s_logits, output_sizes = 
model(inputs,\n lengths=input_sizes)\n # s2s decoder is the final decoder\n probs = s2s_logits\n else:\n logits, probs, output_sizes = model(inputs, input_sizes)\n\n if args.use_attention:\n # this is kind of murky\n # you can calculate this using teacher forcing unrolling\n # or you can just assume\n # that the smart network will produce outputs of similar length to gt\n short_logits = logits[:, :trg_val.size(1), :].contiguous()\n loss = criterion(short_logits.view(-1,\n short_logits.size(-1)),\n trg_val.contiguous().view(-1))\n loss = loss / sum(target_sizes) # average the loss by number of tokens\n loss = loss.to(device)\n elif args.double_supervision:\n # do not bother with loss here\n loss = 0\n loss_value = 0\n else:\n loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)\n loss = loss / inputs.size(0) # average the loss by minibatch\n\n inf = float(\"inf\")\n if args.distributed:\n loss_value = reduce_tensor(loss, args.world_size).item()\n elif args.double_supervision:\n pass\n else:\n loss_value = loss.item()\n if loss_value == inf or loss_value == -inf:\n print(\"WARNING: received an inf loss, setting loss value to 1000\")\n loss_value = 1000\n loss_value = float(loss_value)\n val_loss_sum = (val_loss_sum * 0.998 + loss_value * 0.002) # discount earlier losses\n val_loss_sum += loss_value\n num_losses += 1\n\n decoded_output, _ = decoder.decode(probs, output_sizes,\n use_attention=args.use_attention or args.double_supervision)\n\n target_strings = decoder.convert_to_strings(split_targets)\n for x in range(len(target_strings)):\n transcript, reference = decoded_output[x][0], target_strings[x][0]\n wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)\n if x < 1:\n print(\"CER: {:6.2f}% WER: {:6.2f}% Filename: {}\".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))\n print('Reference:', reference, '\\nTranscript:', transcript)\n\n times_used = test_dataset.curriculum[filenames[x]]['times_used']+1\n test_dataset.update_curriculum(filenames[x],\n reference, transcript,\n None,\n cer / cer_ref, wer / wer_ref,\n times_used=times_used)\n val_wer_sum += wer\n val_cer_sum += cer\n num_words += wer_ref\n num_chars += cer_ref\n\n if args.double_supervision:\n del inputs, targets, input_percentages, input_sizes\n del probs, output_sizes, target_sizes, loss\n del ctc_logits, s2s_logits\n del split_targets\n else:\n del inputs, targets, input_percentages, input_sizes\n del logits, probs, output_sizes, target_sizes, loss\n del split_targets\n\n if args.cuda:\n torch.cuda.synchronize()\n\n val_wer = 100 * val_wer_sum / num_words\n val_cer = 100 * val_cer_sum / num_chars\n\n print('Validation Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=val_wer, cer=val_cer))\n\n val_loss = val_loss_sum / num_losses\n plots.loss_results[epoch] = train_loss\n plots.wer_results[epoch] = train_wer\n plots.cer_results[epoch] = train_cer\n plots.epochs[epoch] = epoch + 1\n\n checkpoint_plots.loss_results[checkpoint] = val_loss\n checkpoint_plots.wer_results[checkpoint] = val_wer\n checkpoint_plots.cer_results[checkpoint] = val_cer\n checkpoint_plots.epochs[checkpoint] = checkpoint + 1\n\n plots.plot_progress(epoch, train_loss, train_cer, train_wer)\n checkpoint_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)\n\n if args.checkpoint_anneal != 1.0:\n global lr_plots\n lr_plots.loss_results[checkpoint] = val_loss\n lr_plots.epochs[checkpoint] = get_lr()\n zero_loss = lr_plots.loss_results == 0\n 
lr_plots.loss_results[zero_loss] = val_loss\n lr_plots.epochs[zero_loss] = get_lr()\n lr_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)\n\n # only if trainval manifest provided\n # separate scope not to mess with general flow too much\n if args.train_val_manifest != '':\n calculate_trainval_quality_metrics(checkpoint,\n epoch,\n trainval_loader,\n trainval_checkpoint_plots)\n\n return val_wer, val_cer\n\n\ndef calculate_trainval_quality_metrics(checkpoint,\n epoch,\n loader,\n plots_handle):\n val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0\n num_chars, num_words, num_losses = 0, 0, 0\n model.eval()\n with torch.no_grad():\n for i, data in tq(enumerate(loader), total=len(loader)):\n # use if full phoneme decoding will be required\n if False:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n phoneme_targets,\n phoneme_target_sizes) = data\n elif args.denoise:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n mask_targets) = data\n else:\n inputs, targets, filenames, input_percentages, target_sizes = data\n\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n\n # unflatten targets\n split_targets = []\n offset = 0\n for size in target_sizes:\n split_targets.append(targets[offset:offset + size])\n offset += size\n\n if args.use_attention:\n batch_size = inputs.size(0)\n max_len = max(target_sizes)\n # use CTC blank as pad token\n # ctc blank has an index of zero\n trg = torch.zeros(batch_size,\n max_len)\n assert len(target_sizes) == batch_size\n for _, split_target in enumerate(split_targets):\n trg[_, :target_sizes[_]] = split_target\n trg = trg.long().to(device)\n # trg_teacher_forcing = trg[:, :-1]\n trg_val = trg\n\n inputs = inputs.to(device)\n\n if args.use_phonemes:\n (logits, probs,\n output_sizes,\n phoneme_logits, phoneme_probs) = model(inputs, input_sizes)\n elif args.denoise:\n logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)\n elif args.use_attention:\n logits, output_sizes = model(inputs,\n lengths=input_sizes)\n # for our purposes they are the same\n probs = logits\n elif args.double_supervision:\n ctc_logits, s2s_logits, output_sizes = model(inputs,\n lengths=input_sizes)\n # s2s decoder is the final decoder\n probs = s2s_logits\n else:\n logits, probs, output_sizes = model(inputs, input_sizes)\n\n if args.use_attention:\n # this is kind of murky\n # you can calculate this using teacher forcing unrolling\n # or you can just assume\n # that the smart network will produce outputs of similar length to gt\n\n # some edge cases in annotation also may cause this to fail miserably\n # hence a failsafe\n max_loss_len = min(trg_val.size(1),\n logits.size(1))\n short_logits = logits[:, :max_loss_len, :].contiguous()\n short_trg = trg_val[:, :max_loss_len].contiguous()\n\n loss = criterion(short_logits.view(-1,\n short_logits.size(-1)),\n short_trg.view(-1))\n loss = loss / sum(target_sizes) # average the loss by number of tokens\n loss = loss.to(device)\n elif args.double_supervision:\n # do not bother with loss here\n loss = 0\n loss_value = 0\n else:\n loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)\n loss = loss / inputs.size(0) # average the loss by minibatch\n\n inf = float(\"inf\")\n if args.distributed:\n loss_value = reduce_tensor(loss, args.world_size).item()\n elif args.double_supervision:\n pass\n else:\n loss_value = loss.item()\n if loss_value == inf or loss_value == -inf:\n print(\"WARNING: received an inf loss, setting loss value to 
1000\")\n loss_value = 1000\n loss_value = float(loss_value)\n val_loss_sum = (val_loss_sum * 0.998 + loss_value * 0.002) # discount earlier losses\n val_loss_sum += loss_value\n num_losses += 1\n\n decoded_output, _ = decoder.decode(probs, output_sizes,\n use_attention=args.use_attention or args.double_supervision)\n\n target_strings = decoder.convert_to_strings(split_targets)\n for x in range(len(target_strings)):\n transcript, reference = decoded_output[x][0], target_strings[x][0]\n wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)\n if x < 1:\n print(\"CER: {:6.2f}% WER: {:6.2f}% Filename: {}\".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))\n print('Reference:', reference, '\\nTranscript:', transcript)\n\n times_used = trainval_dataset.curriculum[filenames[x]]['times_used']+1\n trainval_dataset.update_curriculum(filenames[x],\n reference, transcript,\n None,\n cer / cer_ref, wer / wer_ref,\n times_used=times_used)\n\n val_wer_sum += wer\n val_cer_sum += cer\n num_words += wer_ref\n num_chars += cer_ref\n\n if args.double_supervision:\n del inputs, targets, input_percentages, input_sizes\n del probs, output_sizes, target_sizes, loss\n del ctc_logits, s2s_logits\n del split_targets\n else:\n del inputs, targets, input_percentages, input_sizes\n del logits, probs, output_sizes, target_sizes, loss\n del split_targets\n\n if args.cuda:\n torch.cuda.synchronize()\n\n val_wer = 100 * val_wer_sum / num_words\n val_cer = 100 * val_cer_sum / num_chars\n\n print('TrainVal Summary Epoch: [{0}]\\t'\n 'Average WER {wer:.3f}\\t'\n 'Average CER {cer:.3f}\\t'.format(epoch + 1, wer=val_wer, cer=val_cer))\n\n val_loss = val_loss_sum / num_losses\n\n plots_handle.loss_results[checkpoint] = val_loss\n plots_handle.wer_results[checkpoint] = val_wer\n plots_handle.cer_results[checkpoint] = val_cer\n plots_handle.epochs[checkpoint] = checkpoint + 1\n plots_handle.plot_progress(checkpoint, val_loss, val_cer, val_wer)\n\n\ndef save_validation_curriculums(save_folder,\n checkpoint,\n epoch,\n iteration=0):\n if iteration>0:\n test_path = '%s/test_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)\n else:\n test_path = '%s/test_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)\n print(\"Saving test curriculum to {}\".format(test_path))\n test_dataset.save_curriculum(test_path)\n\n if args.train_val_manifest != '':\n if iteration>0:\n trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)\n else:\n trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)\n print(\"Saving trainval curriculum to {}\".format(trainval_path))\n trainval_dataset.save_curriculum(trainval_path)\n\n\nclass Trainer:\n def __init__(self):\n self.end = time.time()\n self.train_wer = 0\n self.train_cer = 0\n self.num_words = 0\n self.num_chars = 0\n\n def reset_scores(self):\n self.train_wer = 0\n self.train_cer = 0\n self.num_words = 0\n self.num_chars = 0\n\n def get_cer(self):\n return 100. * self.train_cer / (self.num_chars or 1)\n\n def get_wer(self):\n return 100. 
* self.train_wer / (self.num_words or 1)\n\n def train_batch(self, epoch, batch_id, data):\n if args.use_phonemes:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n phoneme_targets,\n phoneme_target_sizes) = data\n elif args.denoise:\n (inputs,\n targets,\n filenames,\n input_percentages,\n target_sizes,\n mask_targets) = data\n\n mask_targets = mask_targets.squeeze(1).to(device)\n elif args.double_supervision:\n (inputs,\n targets, s2s_targets,\n filenames, input_percentages,\n target_sizes, s2s_target_sizes) = data\n else:\n inputs, targets, filenames, input_percentages, target_sizes = data\n input_sizes = input_percentages.mul_(int(inputs.size(3))).int()\n\n # measure data loading time\n data_time.update(time.time() - self.end)\n\n inputs = inputs.to(device)\n input_sizes = input_sizes.to(device)\n\n split_targets = []\n offset = 0\n for size in target_sizes:\n split_targets.append(targets[offset:offset + size])\n offset += size\n\n if args.double_supervision:\n split_s2s_targets = []\n offset = 0\n for size in s2s_target_sizes:\n split_s2s_targets.append(s2s_targets[offset:offset + size])\n offset += size\n\n batch_size = inputs.size(0)\n max_len = max(s2s_target_sizes)\n # use CTC blank as pad token\n # ctc blank has an index of zero\n trg = torch.zeros(batch_size,\n max_len)\n assert len(s2s_target_sizes) == batch_size\n for _, split_target in enumerate(split_s2s_targets):\n trg[_,:s2s_target_sizes[_]] = split_target\n trg = trg.long().to(device)\n trg_teacher_forcing = trg[:, :-1]\n trg_y = trg[:, 1:]\n\n if args.use_attention:\n batch_size = inputs.size(0)\n max_len = max(target_sizes)\n # use CTC blank as pad token\n # ctc blank has an index of zero\n trg = torch.zeros(batch_size,\n max_len)\n assert len(target_sizes) == batch_size\n for _, split_target in enumerate(split_targets):\n trg[_,:target_sizes[_]] = split_target\n trg = trg.long().to(device)\n trg_teacher_forcing = trg[:, :-1]\n trg_y = trg[:, 1:]\n\n if args.use_phonemes:\n (logits, probs,\n output_sizes,\n phoneme_logits, phoneme_probs) = model(inputs, input_sizes)\n elif args.denoise:\n logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)\n elif args.use_attention:\n logits, output_sizes = model(inputs,\n lengths=input_sizes,\n trg=trg_teacher_forcing)\n # for our purposes they are the same\n probs = logits\n elif args.double_supervision:\n ctc_logits, s2s_logits, output_sizes = model(inputs,\n lengths=input_sizes,\n trg=trg_teacher_forcing)\n # s2s decoder is the final decoder\n probs = s2s_logits\n # (batch x sequence x channels) => (seqLength x batch x outputDim)\n ctc_logits = ctc_logits.transpose(0, 1)\n else:\n logits, probs, output_sizes = model(inputs, input_sizes)\n\n if args.double_supervision:\n assert ctc_logits.is_cuda\n assert s2s_logits.is_cuda\n else:\n assert logits.is_cuda\n assert probs.is_cuda\n assert output_sizes.is_cuda\n\n decoded_output, _ = decoder.decode(probs, output_sizes,\n use_attention=args.use_attention or args.double_supervision)\n\n if args.double_supervision:\n target_strings = decoder.convert_to_strings(split_s2s_targets)\n else:\n target_strings = decoder.convert_to_strings(split_targets)\n\n for x in range(len(target_strings)):\n transcript, reference = decoded_output[x][0], target_strings[x][0]\n wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)\n # accessing dict should be fast\n times_used = train_dataset.curriculum[filenames[x]]['times_used']+1\n train_dataset.update_curriculum(filenames[x],\n reference, 
transcript,\n None,\n cer / cer_ref, wer / wer_ref,\n times_used=times_used)\n\n self.train_wer += wer\n self.train_cer += cer\n self.num_words += wer_ref\n self.num_chars += cer_ref\n\n if args.use_phonemes:\n phoneme_logits = phoneme_logits.transpose(0, 1) # TxNxH\n\n if not args.use_attention and not args.double_supervision:\n logits = logits.transpose(0, 1) # TxNxH\n\n if not args.double_supervision:\n if torch.isnan(logits).any(): # and args.nan == 'zero':\n # work around bad data\n print(\"WARNING: Working around NaNs in data\")\n logits[torch.isnan(logits)] = 0\n\n if args.use_phonemes:\n # output_sizes should be the same\n # for phoneme and non-phonemes\n loss = criterion(logits,\n targets,\n output_sizes.cpu(),\n target_sizes) + criterion(phoneme_logits,\n phoneme_targets,\n output_sizes.cpu(),\n phoneme_target_sizes)\n loss = loss / inputs.size(0) # average the loss by minibatch\n loss = loss.to(device)\n elif args.denoise:\n ctc_loss = 0\n \"\"\"\n ctc_loss = criterion(logits,\n targets,\n output_sizes.cpu(),\n target_sizes).to(device) / inputs.size(0)\n \"\"\"\n mask_loss = 50.0 * mask_criterion(mask_logits,\n mask_targets).to(device)\n\n if torch.isnan(mask_loss):\n print('Nan loss detected')\n return 102\n\n loss = ctc_loss + mask_loss\n\n inf = float(\"inf\")\n if args.distributed:\n loss_value = reduce_tensor(loss, args.world_size).item()\n else:\n loss_value = loss.item() * args.gradient_accumulation_steps\n\n ctc_loss_value = ctc_loss # .item()\n if ctc_loss_value == inf or ctc_loss_value == -inf:\n print(\"WARNING: received an inf CTC loss, setting loss value to 1000\")\n ctc_loss_value = 1000\n loss_value = 1000\n elif args.use_attention:\n loss = criterion(logits.contiguous().view(-1,\n logits.size(-1)),\n trg_y.contiguous().view(-1))\n loss = loss / sum(target_sizes) # average the loss by number of tokens\n if args.gradient_accumulation_steps > 1: # average loss by accumulation steps\n loss = loss / args.gradient_accumulation_steps\n loss = loss.to(device)\n elif args.double_supervision:\n ctc_loss = ctc_criterion(ctc_logits,\n targets,\n output_sizes.cpu(),\n target_sizes)\n ctc_loss = ctc_loss / inputs.size(0) # average the loss by minibatch\n ctc_loss = ctc_loss.to(device)\n\n s2s_loss = s2s_criterion(s2s_logits.contiguous().view(-1,\n s2s_logits.size(-1)),\n trg_y.contiguous().view(-1))\n # average the loss by number of tokens\n # multiply by 10 for weight\n s2s_loss = 10 * s2s_loss / sum(s2s_target_sizes)\n s2s_loss = s2s_loss.to(device)\n\n loss = ctc_loss + s2s_loss\n\n inf = float(\"inf\")\n if args.distributed:\n loss_value = reduce_tensor(loss, args.world_size).item()\n else:\n loss_value = loss.item() * args.gradient_accumulation_steps\n\n ctc_loss_value = ctc_loss.item()\n if ctc_loss_value == inf or ctc_loss_value == -inf:\n print(\"WARNING: received an inf CTC loss, setting loss value to 1000\")\n ctc_loss_value = 1000\n loss_value = 1000\n else:\n loss = criterion(logits, targets, output_sizes.cpu(), target_sizes)\n loss = loss / inputs.size(0) # average the loss by minibatch\n if args.gradient_accumulation_steps > 1: # average loss by accumulation steps\n loss = loss / args.gradient_accumulation_steps\n loss = loss.to(device)\n\n if not args.denoise:\n inf = float(\"inf\")\n if args.distributed:\n loss_value = reduce_tensor(loss, args.world_size).item()\n else:\n loss_value = loss.item() * args.gradient_accumulation_steps\n\n if loss_value == inf or loss_value == -inf:\n print(\"WARNING: received an inf loss, setting loss value to 1000\")\n loss_value 
= 1000\n\n loss_value = float(loss_value)\n losses.update(loss_value, inputs.size(0))\n\n if args.denoise:\n mask_accuracy.update(mask_metric(mask_logits, mask_targets).item(),\n inputs.size(0))\n mask_losses.update(mask_loss.item(),\n inputs.size(0))\n ctc_losses.update(ctc_loss_value,\n inputs.size(0))\n elif args.double_supervision:\n ctc_losses.update(ctc_loss_value,\n inputs.size(0))\n s2s_losses.update(s2s_loss.item(),\n inputs.size(0))\n\n # update_curriculum\n\n if (batch_id + 1) % args.gradient_accumulation_steps == 0:\n # compute gradient\n optimizer.zero_grad()\n loss.backward()\n\n # try just lr reduction\n # instead of gradient clipping\n lr_clipping = False\n\n # spare time by doing clipping\n # only once each N epochs\n if args.max_norm > 0:\n if epoch < args.norm_warmup_epochs:\n if lr_clipping:\n raise ValueError('LEGACY')\n clip_coef = calc_grad_norm(model.parameters(),\n args.max_norm)\n underlying_lr = get_lr()\n set_lr(underlying_lr * clip_coef)\n else:\n clip_grad_norm_(model.parameters(),\n args.max_norm)\n else:\n raise ValueError('LEGACY')\n # clip only when gradients explode\n if loss_value == inf or loss_value == -inf:\n clip_grad_norm_(model.parameters(),\n args.max_norm)\n\n # if torch.isnan(logits).any():\n # # work around bad data\n # print(\"WARNING: Skipping NaNs in backward step\")\n # SGD step\n optimizer.step()\n if lr_clipping:\n set_lr(underlying_lr)\n if args.enorm:\n enorm.step()\n\n # measure elapsed time\n batch_time.update(time.time() - self.end)\n if not args.silent:\n if args.denoise:\n print('GPU-{0} Epoch {1} [{2}/{3}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\\t'\n 'Data {data_time.val:.2f} ({data_time.avg:.2f})\\t'\n 'Loss {loss.val:.2f} ({loss.avg:.2f})\\t'\n 'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\\t'\n 'Mask Loss {mask_losses.val:.2f} ({mask_losses.avg:.2f})\\t'\n 'Mask {mask_accuracy.val:.2f} ({mask_accuracy.avg:.2f})\\t'.format(\n args.gpu_rank or VISIBLE_DEVICES[0],\n epoch + 1, batch_id + 1, len(train_sampler),\n batch_time=batch_time, data_time=data_time, loss=losses,\n mask_losses=mask_losses, ctc_losses=ctc_losses,\n mask_accuracy=mask_accuracy))\n elif args.double_supervision:\n print('GPU-{0} Epoch {1} [{2}/{3}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\\t'\n 'Data {data_time.val:.2f} ({data_time.avg:.2f})\\t'\n 'Loss {loss.val:.2f} ({loss.avg:.2f})\\t'\n 'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\\t'\n 'S2S Loss {s2s_losses.val:.2f} ({s2s_losses.avg:.2f})\\t'.format(\n args.gpu_rank or VISIBLE_DEVICES[0],\n epoch + 1, batch_id + 1, len(train_sampler),\n batch_time=batch_time, data_time=data_time, loss=losses,\n ctc_losses=ctc_losses, s2s_losses=s2s_losses))\n else:\n print('GPU-{0} Epoch {1} [{2}/{3}]\\t'\n 'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\\t'\n 'Data {data_time.val:.2f} ({data_time.avg:.2f})\\t'\n 'Loss {loss.val:.2f} ({loss.avg:.2f})\\t'.format(\n args.gpu_rank or VISIBLE_DEVICES[0],\n epoch + 1, batch_id + 1, len(train_sampler),\n batch_time=batch_time, data_time=data_time, loss=losses))\n\n if args.double_supervision:\n del inputs, targets, input_percentages, input_sizes\n del probs, output_sizes, target_sizes, loss, ctc_loss, s2s_loss\n del s2s_targets, s2s_target_sizes\n del ctc_logits, s2s_logits\n else:\n del inputs, targets, input_percentages, input_sizes\n del logits, probs, output_sizes, target_sizes, loss\n return loss_value\n\n\ndef init_train_set(epoch, from_iter):\n #train_dataset.set_curriculum_epoch(epoch, sample=True)\n 
train_dataset.set_curriculum_epoch(epoch,\n sample=args.use_curriculum,\n sample_size=args.curriculum_ratio,\n cl_point=args.cl_point)\n global train_loader, train_sampler\n if not args.distributed:\n if args.batch_similar_lens:\n print('Using BucketingLenSampler')\n train_sampler = BucketingLenSampler(train_dataset, batch_size=args.batch_size)\n else:\n train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)\n train_sampler.bins = train_sampler.bins[from_iter:]\n else:\n train_sampler = DistributedBucketingSampler(train_dataset,\n batch_size=args.batch_size,\n num_replicas=args.world_size,\n rank=args.rank)\n train_loader = AudioDataLoader(train_dataset,\n num_workers=args.num_workers,\n batch_sampler=train_sampler,\n pin_memory=True)\n\n if (not args.no_shuffle and epoch != 0) or args.no_sorta_grad:\n print(\"Shuffling batches for the following epochs\")\n train_sampler.shuffle(epoch)\n\n\ndef train(from_epoch, from_iter, from_checkpoint):\n print('Starting training with id=\"{}\" at GPU=\"{}\" with lr={}'.format(args.id, args.gpu_rank or VISIBLE_DEVICES[0],\n get_lr()))\n checkpoint_per_batch = 1+(args.checkpoint_per_samples-1) // args.batch_size if args.checkpoint_per_samples > 0 else 0\n trainer = Trainer()\n checkpoint = from_checkpoint\n best_score = None\n for epoch in range(from_epoch, args.epochs):\n init_train_set(epoch, from_iter=from_iter)\n trainer.reset_scores()\n total_loss = 0\n num_losses = 1\n model.train()\n trainer.end = time.time()\n start_epoch_time = time.time()\n\n for i, data in enumerate(train_loader, start=from_iter):\n if i >= len(train_sampler) + start_iter:\n break\n total_loss += trainer.train_batch(epoch, i, data)\n num_losses += 1\n\n if (i + 1) % 50 == 0:\n # deal with GPU memory fragmentation\n gc.collect()\n torch.cuda.empty_cache()\n\n if checkpoint_per_batch > 0 and is_leader:\n if (i + 1) % checkpoint_per_batch == 0:\n file_path = '%s/checkpoint_%04d_epoch_%02d_iter_%05d.model' % (save_folder, checkpoint + 1, epoch + 1, i + 1)\n print(\"Saving checkpoint model to %s\" % file_path)\n if args.use_lookahead:\n _optimizer = optimizer.optimizer\n else:\n _optimizer = optimizer\n torch.save(DeepSpeech.serialize(model, optimizer=_optimizer, epoch=epoch,\n iteration=i,\n loss_results=plots.loss_results,\n wer_results=plots.wer_results,\n cer_results=plots.cer_results,\n checkpoint=checkpoint,\n checkpoint_loss_results=checkpoint_plots.loss_results,\n checkpoint_wer_results=checkpoint_plots.wer_results,\n checkpoint_cer_results=checkpoint_plots.cer_results,\n trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,\n trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,\n trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,\n avg_loss=total_loss / num_losses), file_path)\n train_dataset.save_curriculum(file_path + '.csv')\n del _optimizer\n\n check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())\n save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, i + 1)\n checkpoint += 1\n\n gc.collect()\n torch.cuda.empty_cache()\n\n model.train()\n if args.checkpoint_anneal != 1:\n print(\"Checkpoint:\", checkpoint)\n set_lr(get_lr() / args.checkpoint_anneal)\n\n trainer.end = time.time()\n\n epoch_time = time.time() - start_epoch_time\n\n print('Training Summary Epoch: [{0}]\\t'\n 'Time taken (s): {epoch_time:.0f}\\t'\n 'Average Loss {loss:.3f}\\t'.format(epoch + 1, epoch_time=epoch_time, loss=total_loss / num_losses))\n\n from_iter = 0 # 
Reset start iteration for next epoch\n\n if trainer.num_chars == 0:\n continue\n\n wer_avg, cer_avg = check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())\n new_score = wer_avg + cer_avg\n checkpoint += 1\n\n if args.checkpoint and is_leader: # checkpoint after the end of each epoch\n file_path = '%s/model_checkpoint_%04d_epoch_%02d.model' % (save_folder, checkpoint+1, epoch + 1)\n if args.use_lookahead:\n _optimizer = optimizer.optimizer\n else:\n _optimizer = optimizer\n torch.save(DeepSpeech.serialize(model,\n optimizer=_optimizer,\n epoch=epoch,\n loss_results=plots.loss_results,\n wer_results=plots.wer_results,\n cer_results=plots.cer_results,\n checkpoint=checkpoint,\n checkpoint_loss_results=checkpoint_plots.loss_results,\n checkpoint_wer_results=checkpoint_plots.wer_results,\n checkpoint_cer_results=checkpoint_plots.cer_results,\n trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,\n trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,\n trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,\n ), file_path)\n train_dataset.save_curriculum(file_path + '.csv')\n save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, 0)\n del _optimizer\n\n # anneal lr\n print(\"Checkpoint:\", checkpoint)\n set_lr(get_lr() / args.learning_anneal)\n\n if (best_score is None or new_score < best_score) and is_leader:\n print(\"Found better validated model, saving to %s\" % args.model_path)\n if args.use_lookahead:\n _optimizer = optimizer.optimizer\n else:\n _optimizer = optimizer\n torch.save(DeepSpeech.serialize(model,\n optimizer=_optimizer,\n epoch=epoch,\n loss_results=plots.loss_results,\n wer_results=plots.wer_results,\n cer_results=plots.cer_results,\n checkpoint=checkpoint,\n checkpoint_loss_results=checkpoint_plots.loss_results,\n checkpoint_wer_results=checkpoint_plots.wer_results,\n checkpoint_cer_results=checkpoint_plots.cer_results,\n trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,\n trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,\n trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,\n ),\n args.model_path)\n train_dataset.save_curriculum(args.model_path + '.csv')\n del _optimizer\n best_score = new_score\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n assert args.use_phonemes + args.denoise + args.grapheme_phoneme < 2\n assert args.double_supervision + args.use_attention < 2\n # упячка, я идиот, убейте меня кто-нибудь\n if args.use_phonemes:\n from data.data_loader_aug import AudioDataLoaderPhoneme as AudioDataLoader\n elif args.denoise:\n from data.data_loader_aug import AudioDataLoaderDenoise as AudioDataLoader\n elif args.double_supervision:\n from data.data_loader_aug import AudioDataLoaderDouble as AudioDataLoader\n else:\n from data.data_loader_aug import AudioDataLoader\n\n if args.double_supervision:\n from data.data_loader_aug import AudioDataLoader as AudioDataLoaderVal\n else:\n AudioDataLoaderVal = AudioDataLoader\n\n args.distributed = args.world_size > 1\n args.model_path = os.path.join(args.save_folder, 'best.model')\n\n is_leader = True\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n if args.distributed:\n if args.gpu_rank:\n torch.cuda.set_device(int(args.gpu_rank))\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n is_leader = args.rank == 0 # Only the first proc should save models\n\n 
save_folder = args.save_folder\n os.makedirs(save_folder, exist_ok=True)\n\n plots = PlotWindow(args.id, 'train_loss_epochs', log_y=True)\n checkpoint_plots = PlotWindow(args.id, 'test_loss_checks', log_y=True)\n if args.train_val_manifest != '':\n trainval_checkpoint_plots = PlotWindow(args.id, 'val_loss_checks', log_y=True)\n else:\n # set all properties to None for easy backwards compatibility\n trainval_checkpoint_plots = t = type('test', (object,), {})()\n trainval_checkpoint_plots.loss_results = None\n trainval_checkpoint_plots.wer_results = None\n trainval_checkpoint_plots.cer_results = None\n lr_plots = LRPlotWindow(args.id, 'lr_finder', log_x=True)\n\n total_avg_loss, start_epoch, start_iter, start_checkpoint = 0, 0, 0, 0\n if args.use_phonemes:\n with open(args.phonemes_path) as phoneme_file:\n phoneme_map = {l: i for i, l\n in enumerate(json.load(phoneme_file))}\n if args.continue_from: # Starting from previous model\n print(\"Loading checkpoint model %s\" % args.continue_from)\n package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)\n # package['dropout']=0.2\n model = DeepSpeech.load_model_package(package)\n # start with non-phoneme model, continue with phonemes\n labels = DeepSpeech.get_labels(model)\n audio_conf = DeepSpeech.get_audio_conf(model)\n\n # in case you need to resume and change audio conf manually\n \"\"\"\n audio_conf = dict(sample_rate=args.sample_rate,\n window_size=args.window_size,\n window_stride=args.window_stride,\n window=args.window,\n noise_dir=args.noise_dir,\n noise_prob=args.noise_prob,\n noise_levels=(args.noise_min, args.noise_max),\n aug_prob_8khz=args.aug_prob_8khz,\n aug_prob_spect=args.aug_prob_spect)\n\n if args.use_phonemes:\n audio_conf['phoneme_count'] = len(phoneme_map)\n audio_conf['phoneme_map'] = phoneme_map\n \"\"\"\n\n if args.use_phonemes and package.get('phoneme_count', 0) == 0:\n model = DeepSpeech.add_phonemes_to_model(model,\n len(phoneme_map))\n audio_conf['phoneme_count'] = len(phoneme_map)\n audio_conf['phoneme_map'] = phoneme_map\n model.phoneme_count = len(phoneme_map)\n\n if args.denoise and package.get('denoise', False) == False:\n model = DeepSpeech.add_denoising_to_model(model)\n print('Model transformed to a denoising one')\n audio_conf['denoise'] = True\n audio_conf['noise_prob'] = args.noise_prob\n audio_conf['aug_type'] = args.aug_type\n audio_conf['pytorch_stft'] = True\n print('Changed audio conf params')\n\n if args.use_attention:\n if args.use_bpe:\n from data.bpe_labels import Labels as BPELabels\n labels = BPELabels(sp_model=args.sp_model,\n use_phonemes=False,\n s2s_decoder=args.use_attention)\n # list instead of string\n labels = labels.label_list\n\n model = DeepSpeech.add_s2s_decoder_to_model(model,\n labels=labels)\n print('Model transformed to a model with full s2s decoder')\n\n # REMOVE LATER\n # audio_conf['noise_dir'] = '../data/augs/*.wav'\n # audio_conf['noise_prob'] = 0.1\n\n if args.double_supervision or 'transformer' in args.rnn_type:\n optimizer = build_optimizer(args,\n model=model)\n else:\n parameters = model.parameters()\n optimizer = build_optimizer(args,\n parameters_=parameters)\n\n if not args.finetune: # Don't want to restart training\n model = model.to(device)\n # when adding phonemes, optimizer state is not full\n try:\n optimizer.load_state_dict(package['optim_dict'])\n # set_lr(args.lr)\n print('Current LR {}'.format(\n optimizer.state_dict()['param_groups'][0]['lr']\n ))\n except:\n if args.double_supervision or 'transformer' in args.rnn_type or 
args.grapheme_phoneme:\n optim_state = package['optim_dict'][0]\n lr = optim_state['param_groups'][0]['lr']\n print('Just setting the SGD LR {}'.format(lr))\n set_lr(lr)\n else:\n print('Just changing the LR in the optimizer')\n # set_lr(package['optim_dict']['param_groups'][0]['lr'])\n set_lr(args.lr)\n\n start_epoch = int(package.get('epoch', 1)) - 1 # Index start at 0 for training\n start_iter = package.get('iteration', None)\n start_checkpoint = package.get('checkpoint', 0) or 0\n if start_iter is None:\n start_epoch += 1 # We saved model after epoch finished, start at the next epoch.\n start_iter = 0\n else:\n start_iter += 1\n total_avg_loss = int(package.get('avg_loss', 0))\n plots.loss_results = package['loss_results']\n plots.cer_results = package['cer_results']\n plots.wer_results = package['wer_results']\n if package.get('checkpoint_cer_results') is not None:\n checkpoint_plots.loss_results = package.get('checkpoint_loss_results', torch.Tensor(10000))\n checkpoint_plots.cer_results = package.get('checkpoint_cer_results', torch.Tensor(10000))\n checkpoint_plots.wer_results = package.get('checkpoint_wer_results', torch.Tensor(10000))\n if package['cer_results'] is not None and start_epoch > 0:\n plots.plot_history(start_epoch)\n if package.get('checkpoint_cer_results') is not None and start_checkpoint > 0:\n checkpoint_plots.plot_history(start_checkpoint)\n\n if args.train_val_manifest != '':\n if package.get('trainval_checkpoint_cer_results') is not None:\n trainval_checkpoint_plots.loss_results = package.get('trainval_checkpoint_loss_results', torch.Tensor(10000))\n trainval_checkpoint_plots.cer_results = package.get('trainval_checkpoint_cer_results', torch.Tensor(10000))\n trainval_checkpoint_plots.wer_results = package.get('trainval_checkpoint_wer_results', torch.Tensor(10000))\n if package.get('trainval_checkpoint_cer_results') is not None and start_checkpoint > 0:\n trainval_checkpoint_plots.plot_history(start_checkpoint)\n else:\n if args.use_bpe:\n from data.bpe_labels import Labels as BPELabels\n labels = BPELabels(sp_model=args.sp_model,\n use_phonemes=args.phonemes_only,\n s2s_decoder=args.use_attention or args.double_supervision,\n double_supervision=False,\n naive_split=args.naive_split,\n omit_spaces=args.omit_spaces,\n subword_regularization=args.subword_regularization)\n # list instead of string\n labels = labels.label_list\n # in case of double supervision just use the longer\n # i.e. 
s2s = blank(pad) + base_num + space + eos + sos\n # ctc = blank(pad) + base_num + space + 2\n # len(ctc) = len(s2s) - 1\n else:\n with open(args.labels_path) as label_file:\n # labels is a string\n labels = str(''.join(json.load(label_file)))\n\n assert args.pytorch_stft != args.pytorch_mel\n\n audio_conf = dict(sample_rate=args.sample_rate,\n window_size=args.window_size,\n window_stride=args.window_stride,\n window=args.window,\n noise_dir=args.noise_dir,\n noise_prob=args.noise_prob,\n noise_levels=(args.noise_min, args.noise_max),\n aug_prob_8khz=args.aug_prob_8khz,\n aug_prob_spect=args.aug_prob_spect,\n use_bpe=args.use_bpe,\n sp_model=args.sp_model,\n aug_type=args.aug_type,\n pytorch_mel=args.pytorch_mel,\n pytorch_stft=args.pytorch_stft,\n denoise=args.denoise)\n\n if args.use_phonemes:\n audio_conf['phoneme_count'] = len(phoneme_map)\n audio_conf['phoneme_map'] = phoneme_map\n\n rnn_type = args.rnn_type.lower()\n assert rnn_type in supported_rnns, \"rnn_type should be either lstm, rnn or gru\"\n model = DeepSpeech(rnn_hidden_size=args.hidden_size,\n cnn_width=args.cnn_width,\n nb_layers=args.hidden_layers,\n labels=labels,\n rnn_type=rnn_type,\n audio_conf=audio_conf,\n bidirectional=args.bidirectional,\n bnm=args.batch_norm_momentum,\n dropout=args.dropout,\n phoneme_count=len(phoneme_map) if args.use_phonemes else 0,\n decoder_layers=args.decoder_layers,\n kernel_size=args.kernel_size,\n decoder_girth=args.decoder_girth)\n if args.use_lookahead:\n model = model.to(device)\n\n if args.double_supervision or 'transformer' in args.rnn_type:\n optimizer = build_optimizer(args,\n model=model)\n else:\n parameters = model.parameters()\n optimizer = build_optimizer(args,\n parameters_=parameters)\n\n # enorm = ENorm(model.named_parameters(), optimizer, c=1)\n if args.use_attention:\n criterion = torch.nn.NLLLoss(reduction='sum',\n ignore_index=0) # use ctc blank token as pad token\n elif args.double_supervision:\n ctc_criterion = CTCLoss()\n s2s_criterion = torch.nn.NLLLoss(reduction='sum',\n ignore_index=0) # use ctc blank token as pad token\n else:\n criterion = CTCLoss()\n\n if args.denoise:\n mask_criterion = SemsegLoss(bce_weight=1.0,\n dice_weight=0.0,\n mse_weight=0.0)\n mask_metric = MaskSimilarity(thresholds=[0.05, 0.1, 0.15])\n\n # if double supervision used, s2s head is the last one\n # and actually partakes in the decoding\n decoder = GreedyDecoder(labels,\n cut_after_eos_token=args.use_attention or args.double_supervision,\n eos_token=']')\n\n print('Label length {}'.format(len(labels)))\n print(labels)\n\n print('Audio conf')\n print(audio_conf)\n train_dataset = SpectrogramDataset(audio_conf=audio_conf, cache_path=args.cache_dir,\n manifest_filepath=args.train_manifest,\n labels=labels, normalize=args.norm, augment=args.augment,\n curriculum_filepath=args.curriculum,\n use_attention=args.use_attention,\n double_supervision=args.double_supervision,\n naive_split=args.naive_split,\n phonemes_only=args.phonemes_only,\n omit_spaces=args.omit_spaces,\n subword_regularization=args.subword_regularization)\n test_audio_conf = {**audio_conf,\n 'noise_prob': 0,\n 'aug_prob_8khz':0,\n 'aug_prob_spect':0,\n 'phoneme_count':0,\n 'phoneme_map':None}\n\n print('Test audio conf')\n print(test_audio_conf)\n # no augs on test\n # on test, even in case of double supervision\n # we just need s2s data to validate\n test_dataset = SpectrogramDataset(audio_conf=test_audio_conf,\n cache_path=args.cache_dir,\n manifest_filepath=args.val_manifest,\n labels=labels, normalize=args.norm, 
augment=False,\n use_attention=args.use_attention or args.double_supervision,\n double_supervision=False,\n naive_split=args.naive_split,\n phonemes_only=args.phonemes_only,\n omit_spaces=args.omit_spaces,\n subword_regularization=False) # turn off augs on val\n\n # if file is specified\n # separate train validation wo domain shift\n # also wo augs\n # on test, even in case of double supervision\n # we just need s2s data to validate\n if args.train_val_manifest != '':\n trainval_dataset = SpectrogramDataset(audio_conf=test_audio_conf,\n cache_path=args.cache_dir,\n manifest_filepath=args.train_val_manifest,\n labels=labels, normalize=args.norm, augment=False,\n use_attention=args.use_attention or args.double_supervision,\n double_supervision=False,\n naive_split=args.naive_split,\n phonemes_only=args.phonemes_only,\n omit_spaces=args.omit_spaces,\n subword_regularization=False) # turn off augs on val\n\n if args.reverse_sort:\n # XXX: A hack to test max memory load.\n train_dataset.ids.reverse()\n\n test_loader = AudioDataLoaderVal(test_dataset,\n batch_size=args.val_batch_size,\n num_workers=args.num_workers)\n\n if args.train_val_manifest != '':\n trainval_loader = AudioDataLoaderVal(trainval_dataset,\n batch_size=args.val_batch_size,\n num_workers=args.num_workers)\n\n if not args.use_lookahead:\n model = model.to(device)\n if args.distributed:\n device_id = [int(args.gpu_rank)] if args.rank else None\n model = torch.nn.parallel.DistributedDataParallel(model,\n device_ids=device_id)\n elif args.data_parallel:\n model = torch.nn.DataParallel(model).to(device)\n print('Using DP')\n\n print(model)\n print(\"Number of parameters: %d\" % DeepSpeech.get_param_size(model))\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n if args.denoise:\n mask_accuracy = AverageMeter()\n mask_losses = AverageMeter()\n ctc_losses = AverageMeter()\n\n if args.double_supervision:\n ctc_losses = AverageMeter()\n s2s_losses = AverageMeter()\n\n train(start_epoch, start_iter, start_checkpoint)\n"
] |
[
[
"torch.optim.Adam",
"torch.nn.NLLLoss",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.Tensor",
"torch.load",
"torch.zeros",
"torch.manual_seed",
"torch.isnan",
"torch.cuda.empty_cache",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.arange",
"torch.device",
"torch.optim.SGD",
"torch.nn.DataParallel",
"torch.nn.parallel.DistributedDataParallel",
"torch.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sdpython/mathenjeu
|
[
"97fc9140ef89ac9c3c6ba46803121fd5d23eb8d1"
] |
[
"_unittests/ut_datalog/test_datalog.py"
] |
[
"\"\"\"\n@brief test tree node (time=2s)\n\"\"\"\nimport os\nimport unittest\nimport datetime\nimport pandas\nfrom pyquickhelper.pycode import ExtTestCase\nfrom mathenjeu.datalog import enumerate_qcmlog, enumerate_qcmlogdf\n\n\nclass TestLocalAppData(ExtTestCase):\n\n def test_datalog(self):\n this = os.path.abspath(os.path.dirname(__file__))\n logs = [os.path.join(this, \"data\", \"QCMApp.log\")]\n obs = list(enumerate_qcmlog(logs))\n exp = {'person_id': 'c241c15008614ea67480', 'alias': 'xavierd',\n 'time': datetime.datetime(2018, 12, 12, 17, 56, 29, 989000),\n 'qtime': 'begin'}\n self.assertEqual(obs[0], exp)\n exp = {'person_id': '8a8c40ad28eb1206efd5',\n 'alias': 'xavierg',\n 'time': datetime.datetime(2018, 12, 12, 23, 10, 37, 527000),\n 'qtime': 'end',\n 'simple_french_qcm-8-ANS': ' ',\n 'simple_french_qcm-8-b': 'ok',\n 'game': 'simple_french_qcm',\n 'qn': '8',\n 'next': 'None',\n 'simple_french_qcm-8-nbvisit': 1.0,\n 'simple_french_qcm-8-good': 0,\n 'simple_french_qcm-8-duration': datetime.timedelta(seconds=1, microseconds=422000)}\n self.assertEqual(obs[-1], exp)\n\n def test_datalog_df(self):\n this = os.path.abspath(os.path.dirname(__file__))\n logs = [os.path.join(this, \"data\", \"QCMApp.log\")]\n dfs = list(enumerate_qcmlogdf(logs))\n self.assertEqual(len(dfs), 5)\n merged = pandas.concat(dfs, sort=False)\n self.assertEqual(merged.shape[0], 5)\n self.assertEqual(merged.shape[1], 58)\n values = list(merged[\"simple_french_qcm-8-ANS\"])\n self.assertIn(\" Prout\", values)\n # print(merged.T)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"pandas.concat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BjoernBiltzinger/threeML
|
[
"fc3d989173b1613a199633455f260e67fdb50369",
"fc3d989173b1613a199633455f260e67fdb50369"
] |
[
"threeML/test/test_time_series.py",
"threeML/catalogs/Swift.py"
] |
[
"import os\nimport numpy as np\nimport pytest\nfrom threeML.io.file_utils import within_directory\nfrom threeML.utils.time_interval import TimeIntervalSet\nfrom threeML.utils.time_series.event_list import EventListWithDeadTime, EventList\nfrom threeML.utils.data_builders.time_series_builder import TimeSeriesBuilder\nfrom threeML.io.file_utils import within_directory\nfrom threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike\nfrom threeML.plugins.OGIPLike import OGIPLike\nfrom conftest import get_test_datasets_directory\nimport astropy.io.fits as fits\n\ndatasets_directory = get_test_datasets_directory()\n\n\ndef test_event_list_constructor():\n dummy_times = np.linspace(0, 10, 10)\n dummy_energy = np.zeros_like(dummy_times)\n start = 0\n stop = 10\n\n evt_list = EventList(arrival_times=dummy_times,\n measurement=dummy_energy,\n n_channels=1,\n start_time=start,\n stop_time=stop)\n\n # should only have 10 events\n\n assert evt_list.n_events == 10\n\n with pytest.raises(RuntimeError):\n evt_list.bins\n\n with pytest.raises(AttributeError):\n evt_list.text_bins\n\n assert evt_list.poly_intervals is None\n\n with pytest.raises(AttributeError):\n evt_list.tmax_list\n\n with pytest.raises(AttributeError):\n evt_list.tmin_list\n\n assert evt_list.polynomials is None\n\n assert evt_list._instrument == 'UNKNOWN'\n\n assert evt_list._mission == 'UNKNOWN'\n\n\ndef test_unbinned_fit():\n with within_directory(datasets_directory):\n start, stop = 0, 50\n\n poly = [1]\n\n arrival_times = np.loadtxt('test_event_data.txt')\n\n evt_list = EventListWithDeadTime(arrival_times=arrival_times,\n measurement=np.zeros_like(arrival_times),\n n_channels=1,\n start_time=arrival_times[0],\n stop_time=arrival_times[-1],\n dead_time=np.zeros_like(arrival_times)\n )\n\n evt_list.set_polynomial_fit_interval(\"%f-%f\" % (start + 1, stop - 1), unbinned=True)\n\n results = evt_list.get_poly_info()['coefficients']\n\n evt_list.set_active_time_intervals(\"0-1\")\n\n assert evt_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])\n\n assert evt_list._poly_counts.sum() > 0\n\n evt_list.__repr__()\n\n\ndef test_binned_fit():\n with within_directory(datasets_directory):\n start, stop = 0, 50\n\n poly = [1]\n\n arrival_times = np.loadtxt('test_event_data.txt')\n\n evt_list = EventListWithDeadTime(arrival_times=arrival_times,\n measurement=np.zeros_like(arrival_times),\n n_channels=1,\n start_time=arrival_times[0],\n stop_time=arrival_times[-1],\n dead_time=np.zeros_like(arrival_times)\n )\n\n evt_list.set_polynomial_fit_interval(\"%f-%f\" % (start + 1, stop - 1), unbinned=False)\n\n evt_list.set_active_time_intervals(\"0-1\")\n\n results = evt_list.get_poly_info()['coefficients']\n\n assert evt_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])\n\n assert evt_list._poly_counts.sum() > 0\n\n evt_list.__repr__()\n\n\ndef test_read_gbm_cspec():\n with within_directory(datasets_directory):\n data_dir = os.path.join('gbm', 'bn080916009')\n\n nai3 = TimeSeriesBuilder.from_gbm_cspec_or_ctime('NAI3',\n os.path.join(data_dir, \"glg_cspec_n3_bn080916009_v01.pha\"),\n rsp_file=os.path.join(data_dir,\n \"glg_cspec_n3_bn080916009_v00.rsp2\"),\n poly_order=-1)\n\n nai3.set_active_time_interval('0-1')\n nai3.set_background_interval('-200--10', '100-200')\n\n\n speclike = nai3.to_spectrumlike()\n\n assert isinstance(speclike, DispersionSpectrumLike)\n\n assert not speclike.background_spectrum.is_poisson\n\n speclike = nai3.to_spectrumlike(extract_measured_background=True)\n\n assert isinstance(speclike, 
DispersionSpectrumLike)\n\n assert speclike.background_spectrum.is_poisson\n\n nai3.write_pha_from_binner('test_from_nai3', start=0, stop=2, overwrite=True)\n\n\ndef test_read_gbm_tte():\n with within_directory(datasets_directory):\n data_dir = os.path.join('gbm', 'bn080916009')\n\n nai3 = TimeSeriesBuilder.from_gbm_tte('NAI3',\n os.path.join(data_dir, \"glg_tte_n3_bn080916009_v01.fit.gz\"),\n rsp_file=os.path.join(data_dir, \"glg_cspec_n3_bn080916009_v00.rsp2\"),\n poly_order=-1)\n\n nai3.set_active_time_interval('0-1')\n nai3.set_background_interval('-20--10', '100-200')\n\n\n\n\n speclike = nai3.to_spectrumlike()\n\n assert isinstance(speclike, DispersionSpectrumLike)\n\n\n assert not speclike.background_spectrum.is_poisson\n\n speclike = nai3.to_spectrumlike(extract_measured_background=True)\n\n assert isinstance(speclike, DispersionSpectrumLike)\n\n assert speclike.background_spectrum.is_poisson\n\n\n\n # test binning\n\n\n # should not have bins yet\n\n\n\n with pytest.raises(RuntimeError):\n nai3.bins\n\n # First catch the errors\n\n\n # This is without specifying the correct options name\n\n\n\n\n\n with pytest.raises(RuntimeError):\n nai3.create_time_bins(start=0, stop=10, method='constant')\n\n with pytest.raises(RuntimeError):\n nai3.create_time_bins(start=0, stop=10, method='significance')\n\n with pytest.raises(RuntimeError):\n nai3.create_time_bins(start=0, stop=10, method='constant', p0=.1)\n\n with pytest.raises(RuntimeError):\n nai3.create_time_bins(start=0, stop=10, method='significance', dt=1)\n\n # now incorrect options\n\n with pytest.raises(RuntimeError):\n nai3.create_time_bins(start=0, stop=10, method='not_a_method')\n\n # Now test values\n\n\n\n nai3.create_time_bins(start=0, stop=10, method='constant', dt=1)\n\n assert len(nai3.bins) == 10\n\n assert nai3.bins.argsort() == range(len(nai3.bins))\n\n nai3.create_time_bins(start=0, stop=10, method='bayesblocks', p0=.1)\n\n assert nai3.bins.argsort() == range(len(nai3.bins))\n\n assert len(nai3.bins) == 5\n\n nai3.create_time_bins(start=0, stop=10, method='significance', sigma=40)\n\n assert nai3.bins.argsort() == range(len(nai3.bins))\n\n assert len(nai3.bins) == 5\n\n nai3.view_lightcurve(use_binner=True)\n\n nai3.write_pha_from_binner('test_from_nai3', overwrite=True)\n\n\ndef test_reading_of_written_pha():\n with within_directory(datasets_directory):\n # check the number of items written\n\n with fits.open('test_from_nai3.rsp') as f:\n # 2 ext + 5 rsp ext\n assert len(f) == 7\n\n # make sure we can read spectrum number\n\n _ = OGIPLike('test', observation='test_from_nai3.pha', spectrum_number=1)\n _ = OGIPLike('test', observation='test_from_nai3.pha', spectrum_number=2)\n\n os.remove('test_from_nai3.pha')\n\n\ndef test_read_lle():\n with within_directory(datasets_directory):\n data_dir = 'lat'\n\n lle = TimeSeriesBuilder.from_lat_lle('lle', os.path.join(data_dir, \"gll_lle_bn080916009_v10.fit\"),\n os.path.join(data_dir, \"gll_pt_bn080916009_v10.fit\"),\n rsp_file=os.path.join(data_dir, \"gll_cspec_bn080916009_v10.rsp\"),\n poly_order=-1)\n\n lle.view_lightcurve()\n\n lle.set_active_time_interval(\"0-10\")\n\n lle.set_background_interval(\"-150-0\", \"100-250\")\n\n speclike = lle.to_spectrumlike()\n\n assert isinstance(speclike, DispersionSpectrumLike)\n\n # will test background with lle data\n\n\n old_coefficients, old_errors = lle.get_background_parameters()\n\n old_tmin_list = lle._time_series.poly_intervals\n\n lle.save_background('temp_lle', overwrite=True)\n\n lle = TimeSeriesBuilder.from_lat_lle('lle', 
os.path.join(data_dir, \"gll_lle_bn080916009_v10.fit\"),\n os.path.join(data_dir, \"gll_pt_bn080916009_v10.fit\"),\n rsp_file=os.path.join(data_dir, \"gll_cspec_bn080916009_v10.rsp\"),\n restore_background='temp_lle.h5')\n\n new_coefficients, new_errors = lle.get_background_parameters()\n\n new_tmin_list = lle._time_series.poly_intervals\n\n assert new_coefficients == old_coefficients\n\n assert new_errors == old_errors\n\n assert old_tmin_list == new_tmin_list\n",
"import numpy as np\nimport pandas as pd\nimport re\nimport urllib2\n\nimport astropy.table as astro_table\n\nfrom threeML.catalogs.VirtualObservatoryCatalog import VirtualObservatoryCatalog\nfrom threeML.exceptions.custom_exceptions import custom_warnings\nfrom threeML.config.config import threeML_config\nfrom threeML.io.get_heasarc_table_as_pandas import get_heasarc_table_as_pandas\nfrom threeML.io.rich_display import display\n\nimport astropy.time as astro_time\n\n\n_gcn_match = re.compile(\"^\\d{4}GCN\\D?\\.*(\\d*)\\.*\\d\\D$\")\n_trigger_name_match = re.compile(\"^GRB \\d{6}[A-Z]$\")\n\nclass SwiftGRBCatalog(VirtualObservatoryCatalog):\n def __init__(self, update=False):\n \"\"\"\n The Swift GRB catalog. Search for GRBs by trigger\n number, location, T90, and date range.\n\n :param update: force update the XML VO table\n \"\"\"\n\n self._update = update\n\n super(SwiftGRBCatalog, self).__init__('swiftgrb',\n threeML_config['catalogs']['Swift']['Swift GRB catalog'],\n 'Swift GRB catalog')\n\n\n # collect all the instruments also seeing the GRBs\n self._build_other_obs_instruments()\n\n def apply_format(self, table):\n new_table = table['name',\n 'ra', 'dec',\n 'trigger_time',\n 'redshift',\n 'bat_t90',\n 'bat_detection',\n 'xrt_detection',\n 'xrt_flare',\n 'uvot_detection',\n 'radio_detection',\n 'opt_detection'\n\n ]\n\n new_table['ra'].format = '5.3f'\n new_table['dec'].format = '5.3f'\n\n return new_table.group_by('trigger_time')\n\n def _get_vo_table_from_source(self):\n\n self._vo_dataframe = get_heasarc_table_as_pandas('swiftgrb',\n update=self._update,\n cache_time_days=1.)\n\n def _source_is_valid(self, source):\n\n warn_string = \"The trigger %s is not valid. Must be in the form GRB080916009\" % source\n\n match = _trigger_name_match.match(source)\n\n if match is None:\n\n custom_warnings.warn(warn_string)\n\n answer = False\n\n else:\n\n answer = True\n\n return answer\n\n\n\n def _build_other_obs_instruments(self):\n \"\"\"\n builds a list of all the other instruments that observed Swift GRBs\n\n :return:\n \"\"\"\n\n obs_inst_ = map(np.unique, [np.asarray(self._vo_dataframe.other_obs),\n np.asarray(self._vo_dataframe.other_obs2),\n np.asarray(self._vo_dataframe.other_obs3),\n np.asarray(self._vo_dataframe.other_obs4)])\n\n self._other_observings_instruments = filter(lambda x: x != '', np.unique(np.concatenate(obs_inst_)))\n\n @property\n def other_observing_instruments(self):\n\n return self._other_observings_instruments\n\n def query_other_observing_instruments(self, *instruments):\n \"\"\"\n search for observations that were also seen by the requested instrument.\n to see what instruments are available, use the .other_observing_instruments call\n\n\n :param instruments: other instruments\n :return:\n \"\"\"\n\n all_queries = []\n\n for instrument in instruments:\n\n assert instrument in self._other_observings_instruments, \"Other instrument choices include %s\" % (\n ' ,'.join(self._other_observings_instruments))\n\n\n\n query_string = ' other_obs == \"%s\" | other_obs2 == \"%s\" |other_obs3 == \"%s\" |other_obs4 == \"%s\"' %tuple([instrument]*4)\n\n result = self._vo_dataframe.query(query_string)\n\n all_queries.append(result)\n\n query_results = pd.concat(all_queries)\n\n table = astro_table.Table.from_pandas(query_results)\n\n name_column = astro_table.Column(name='name', data=query_results.index)\n table.add_column(name_column, index=0)\n\n out = self.apply_format(table)\n\n self._last_query_results = query_results\n\n\n return out\n\n @staticmethod\n def 
_get_fermiGBM_trigger_number_from_gcn(gcn_url):\n \"\"\"\n this is a custom function that parses GBM GCNs to find the burst number\n that can later be used to download GBM data. It contains a lot of regex statements\n to handle the variability in the GCNs\n\n\n :param gcn_url: url to gbm gcn\n :return:\n \"\"\"\n\n data = urllib2.urlopen(gcn_url)\n\n string = ''.join(data.readlines()).replace('\\n', '')\n try:\n\n trigger_number = re.search(\"trigger *\\d* */ *(\\d{9}|\\d{6}\\.\\d{3})\", string).group(1).replace('.', '')\n\n except(AttributeError):\n\n try:\n\n trigger_number = re.search(\"GBM *(\\d{9}|\\d{6}\\.\\d{3}), *trigger *\\d*\", string).group(1).replace('.', '')\n\n except(AttributeError):\n\n try:\n\n trigger_number = re.search(\"trigger *\\d* *, *trigcat *(\\d{9}|\\d{6}\\.\\d{3})\", string).group(\n 1).replace('.', '')\n\n except(AttributeError):\n\n try:\n\n trigger_number = re.search(\"trigger *.* */ *\\D{0,3}(\\d{9}|\\d{6}\\.\\d{3})\", string).group(\n 1).replace('.', '')\n\n except(AttributeError):\n\n try:\n\n trigger_number = re.search(\"Trigger number*.* */ *GRB *(\\d{9}|\\d{6}\\.\\d{3})\", string).group(\n 1).replace('.', '')\n\n except(AttributeError):\n\n trigger_number = None\n\n return trigger_number\n\n\n def get_other_observation_information(self):\n \"\"\"\n returns a structured pandas table containing the other observing instruments, their GCNs and if obtainable,\n their trigger numbers/ data identifiers. Currently, the trigger number is only obtained for Fermi-LAT-GBM.\n\n :return:\n \"\"\"\n\n assert self._last_query_results is not None, \"You have to run a query before getting observing information\"\n\n # Loop over the table and build a source for each entry\n sources = {}\n\n for name, row in self._last_query_results.T.iteritems():\n\n # First we want to get the the detectors used in the SCAT file\n\n obs_instrument = {}\n\n for obs in ['xrt', 'uvot', 'bat', 'opt', 'radio']:\n\n obs_detection = \"%s_detection\" % obs\n\n if obs in ['xrt', 'uvot', 'bat']:\n\n obs_ref = \"%s_pos_ref\" % obs\n\n else:\n\n obs_ref = \"%s_ref\" % obs\n\n detect = row[obs_detection]\n\n if detect == 'Y': # or detect== 'U':\n\n observed = True\n\n\n else:\n\n observed = False\n\n if observed:\n\n reference = self._parse_redshift_reference(row[obs_ref])\n\n\n #gcn = \"https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3\" % gcn_number\n\n info = {'reference': reference, 'observed': detect}\n\n\n else:\n\n info = {'GCN': None, 'observed': detect}\n\n obs_instrument[obs] = info\n\n sources[name] = obs_instrument\n\n sources = pd.concat(map(pd.DataFrame, sources.values()), keys=sources.keys())\n\n return sources\n\n def get_other_instrument_information(self):\n \"\"\"\n Return the detectors used for spectral analysis as well as their background\n intervals. 
Peak flux and fluence intervals are also returned as well as best fit models\n\n :return: observing information dataframe indexed by source\n \"\"\"\n\n assert self._last_query_results is not None, \"You have to run a query before getting observing information\"\n\n sources = {}\n\n for name, row in self._last_query_results.T.iteritems():\n\n obs_instrument = {}\n\n # loop over the observation indices\n for obs in range(1, 5):\n\n if obs == 1:\n\n obs_base = \"other_obs\"\n\n else:\n\n obs_base = \"other_obs%d\" % obs\n\n obs_ref = \"%s_ref\" % obs_base\n\n obs = row[obs_base]\n\n # this means that nothing in this column saw the grb\n if obs == '':\n\n observed = False\n\n\n else:\n\n observed = True\n\n if observed:\n\n # if we saw it then lets get the GCN\n gcn_number = _gcn_match.search(row[obs_ref]).group(1)\n # gcn_number = filter(lambda x: x != '', row[obs_ref].split('.'))[1]\n\n # make the URL\n gcn = \"https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3\" % gcn_number\n\n # just for Fermi GBM, lets get the trigger number\n\n # TODO: add more instruments\n if obs == 'Fermi-GBM':\n\n\n info = {'GCN': gcn, 'trigger number': self._get_fermiGBM_trigger_number_from_gcn(str(gcn))}\n\n else:\n\n info = {'GCN': gcn, 'trigger number': None}\n\n obs_instrument[obs] = info\n\n sources[name] = obs_instrument\n\n # build the data frame\n sources = pd.concat(map(pd.DataFrame, sources.values()), keys=sources.keys())\n\n display(sources)\n\n return sources\n\n def get_redshift(self):\n \"\"\"\n Get the redshift and redshift type from the searched sources\n\n\n :return:\n \"\"\"\n\n assert self._last_query_results is not None, \"You have to run a query before getting observing information\"\n\n redshift_df = (self._last_query_results.loc[:,['redshift','redshift_err','redshift_type','redshift_ref']]).copy(deep=True)\n\n redshift_df = redshift_df.rename(columns={\"redshift\": \"z\", \"redshift_err\": \"z err\",'redshift_type': 'z type','redshift_ref':'reference'})\n\n redshift_df['reference'] = redshift_df['reference'].apply(self._parse_redshift_reference)\n\n return redshift_df\n\n\n @staticmethod\n def _parse_redshift_reference(reference):\n\n if reference == '':\n\n url = None\n\n elif 'GCN' in reference:\n gcn_number = _gcn_match.search(reference).group(1)\n\n url = \"https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3\" % gcn_number\n\n else:\n\n url = \"http://adsabs.harvard.edu/abs/%s\" % reference\n\n\n return url\n\n\n\n\n\n\n"
] |
[
[
"numpy.loadtxt",
"numpy.zeros_like",
"numpy.linspace"
],
[
"numpy.asarray",
"pandas.concat",
"numpy.concatenate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kschweig/HopfieldOfflineRL
|
[
"dcb1d475c2406cd0f64ed37d187d0b4f0890f02f"
] |
[
"source/process_tensorboard_logs/convert_to_csv.py"
] |
[
"import tensorflow as tf\nimport glob\nimport os\nimport re\n\n##########################\n# Settings #\n##########################\n\n# Which experiment to extract\nimport tensorflow.python.framework.errors_impl\n\nex = \"ex4\"\n# Which tag should be extracted\n#'eval/Reward (SMA)' 'eval/Entropy'\ntags = ['eval/Reward (SMA)', 'eval/Action-Value deviation (mean) (SMA)']\n# \"reward\" \"entropy\"\nmarks = [\"return\", \"avd\"]\n\n\nfor t, tag in enumerate(tags):\n os.chdir(os.path.join(\"..\", \"..\", \"runs\", ex))\n files=[]\n outpath = os.path.join(\"..\", \"..\", \"results\", \"csv\", marks[t])\n os.makedirs(outpath, exist_ok=True)\n\n\n for file in glob.glob(os.path.join(\"**\", \"*.tfevents.*\"), recursive=True):\n files.append(file)\n files.sort()\n\n\n data = []\n for file in files:\n\n run = int(re.findall(\"[0-9]+\", file.split(\"/\")[3])[0])\n\n if run == 1 and data != []:\n with open(os.path.join(outpath, f\"{env}_{mode}_{algo}.csv\"), \"w\") as w:\n minlen = len(data[0])\n last_full = 0\n for i, line in enumerate(data):\n if len(line) < minlen:\n # seldom, there occurs the phenomenon that the last reward in the tffiles cannot be read.\n # Then replace all with the values read before. Only a minor difference on 2k iterations.\n print(\"Datapoint at iteration\", i, \"replaced.\")\n line = data[last_full - 1]\n else:\n last_full = i\n w.write(\";\".join([str(l) for l in line]) + \"\\n\")\n data = []\n\n env = file.split(\"/\")[0]\n mode = file.split(\"/\")[1]\n algo = file.split(\"/\")[2]\n\n try:\n i = 0\n for e in tf.compat.v1.train.summary_iterator(file):\n for v in e.summary.value:\n iteration = 0\n if v.tag == tag:\n if len(data) <= i:\n data.append([v.simple_value])\n else:\n data[i].append(v.simple_value)\n i += 1\n except:\n print(f\"Error in obtaining summary from {env}/{mode}/{algo}/{run}, may not contain complete data\")\n\n # write data collected in the last run\n with open(os.path.join(outpath, f\"{env}_{mode}_{algo}.csv\"), \"w\") as w:\n minlen = len(data[0])\n for i, line in enumerate(data):\n if len(line) < minlen:\n # seldom, there occurs the phenomenon that the last reward in the tffiles cannot be read.\n # Then replace all with the values read before. Only a minor difference on 2k iterations.\n print(\"Datapoint at iteration\", i, \"replaced.\")\n line = data[i-1]\n w.write(\";\".join([str(l) for l in line])+\"\\n\")\n"
] |
[
[
"tensorflow.compat.v1.train.summary_iterator"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jon3sjns/open-source-contribution
|
[
"b1dc4a90ce3bd5575b74bf2961a9f6867d16aef4"
] |
[
"PYTHON/pre-emptive shortest job first.py"
] |
[
"import numpy as np\nimport tabulate\nimport matplotlib.pyplot as plt\n\n\ndef display(data):\n print(\"ProcessID\\tArrival Time\\tBurst Time\")\n for i in range(len(data)):\n print(\"{}\\t\\t{}\\t\\t{}\".format(\n data[i][0], data[i][1], data[i][2]))\n print(\"\\n\")\n\n\ndef change(processInOrder, arrived):\n for i in range(len(processInOrder)):\n if processInOrder[i][0] == arrived[0][0]:\n processInOrder[i][2] = processInOrder[i][2] - 1\n break\n\n\ndef printGanttChart(RunningQueue):\n finalTimeLine = list(list())\n j = 0\n for i in range(len(RunningQueue)):\n if RunningQueue[j][0] != RunningQueue[i][0] or i == len(RunningQueue) - 1:\n if i == len(RunningQueue) - 1:\n finalTimeLine.append(RunningQueue[i])\n else:\n finalTimeLine.append(RunningQueue[i - 1])\n j = i\n return finalTimeLine\n\n\ndef IndividualValues(l, heading):\n print(f\"\\n{heading}\")\n for i in range(len(l)):\n print(f\"{l[i][0]} : {l[i][1]}\")\n print(\"\\n\")\n\n\nn = 4\nprocessInOrder = [['P1', 3, 1], ['P2', 0, 1], ['P3', 2, 5], ['P4', 3, 2]]\n\ncopyProcessInOrder = list(list())\nfor j in range(len(processInOrder)):\n copyProcessInOrder.append(processInOrder[j].copy())\n\nprocessName = list()\nfor i in range(len(processInOrder)):\n processName.append(processInOrder[i][0])\nprint(tabulate.tabulate(processInOrder, headers=[\n \"ProcessID\", \"Arrival Time\", \"Burst Time\"], tablefmt=\"fancy_grid\"))\nprint()\n\nreadyQueue = list(list())\nRunningQueue = list(list())\nprocessInOrder.sort(key=lambda x: x[1])\nif processInOrder[0][1] == 0:\n processInOrder[0][2] -= 1\n time = 1\n RunningQueue.append([processInOrder[0][0], time])\nelse:\n time = processInOrder[0][1]\n RunningQueue.append([processInOrder[0][0], time])\n# processInOrder[0][2] -= 1\n\nflag = True\nwhile len(processInOrder) > 0:\n readyQueue = list(list())\n arrived = list(list())\n for j in range(len(processInOrder)):\n if processInOrder[j][1] <= time:\n arrived.append(processInOrder[j])\n readyQueue.append(processInOrder[j].copy())\n flag = False\n arrived.sort(key=lambda x: x[2])\n if len(arrived) > 0:\n if arrived[0][2] > 0:\n time += 1\n change(processInOrder, arrived)\n RunningQueue.append([arrived[0][0], time])\n else:\n processInOrder.remove(arrived[0])\n else:\n time += 1\n check = True\n for t in range(len(readyQueue)):\n if readyQueue[t][2] == 0:\n check = False\n break\n if check:\n print(f\"Time {time - 1} : {readyQueue}\")\n flag = True\n\ntable = tabulate.tabulate(RunningQueue, headers=[\n \"Process\", \"Time\"], tablefmt=\"fancy_grid\")\nprint(table)\n\ncopyRunningQueue = RunningQueue.copy()\ncopyRunningQueue.reverse()\n\ncompletionTime = list(list())\nfor i in range(len(processName)):\n for j in range(len(copyRunningQueue)):\n if processName[i] == copyRunningQueue[j][0]:\n completionTime.append([processName[i], copyRunningQueue[j][1]])\n break\n\nturnAroundTime = list(list())\nfor i in range(len(copyProcessInOrder)):\n turnAroundTime.append(\n [copyProcessInOrder[i][0], completionTime[i][1] - copyProcessInOrder[i][1]])\n\n\nwaitingTime = list(list())\nfor i in range(len(copyProcessInOrder)):\n waitingTime.append(\n [copyProcessInOrder[i][0], turnAroundTime[i][1] - copyProcessInOrder[i][2]])\n\nfinalTime = list(list())\nfor i in range(len(copyProcessInOrder)):\n finalTime.append(\n [copyProcessInOrder[i][0], turnAroundTime[i][1], waitingTime[i][1], completionTime[i][1]])\n\nprint(tabulate.tabulate(finalTime, headers=[\n \"ProcessID\", \"TurnAround Time\", \"Waiting Time\", \"Completion Time\"], tablefmt=\"fancy_grid\"))\nprint(\"Average 
Waiting Time : \" +\n str(sum(list(map(lambda x: x[1], waitingTime))) / len(waitingTime)))\n\nprint(\"Average Turn around Time : \" +\n str(sum(list(map(lambda x: x[1], turnAroundTime))) / len(turnAroundTime)))\n\nprint(\"Average Completion Time : \" +\n str(sum(list(map(lambda x: x[1], completionTime))) / len(completionTime)))\n\n\nfig, gnt = plt.subplots()\ngnt.set_ylim(0, n*10)\ngnt.set_xlim(0, len(RunningQueue), 1)\ngnt.set_xlabel('Time in Seconds')\ngnt.set_yticks([10, 20, 30, 40])\ngnt.set_ylabel('Process')\ngnt.set_yticklabels(processName)\ngnt.grid(True)\nfinalTimeLine = printGanttChart(RunningQueue)\nyAxisMeasurements = list(list())\nfor i, j in zip(range(n), range(n)):\n yAxisMeasurements.append([processName[i], j*10])\nfor i in range(len(finalTimeLine)):\n for j in range(len(yAxisMeasurements)):\n if yAxisMeasurements[j][0] == finalTimeLine[i][0]:\n if i != 0:\n gnt.broken_barh([(finalTimeLine[i - 1][1], finalTimeLine[i]\n [1] - finalTimeLine[i - 1][1])], (yAxisMeasurements[j][1], 10), facecolors=('tab:orange'))\n else:\n gnt.broken_barh([(0, finalTimeLine[i][1])],\n (yAxisMeasurements[j][1], 10), facecolors=('tab:orange'))\nprint(finalTimeLine)\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexfikl/pytato
|
[
"0bf3fdfc35aec5911ca8aabd394c1d7207562edb"
] |
[
"pytato/loopy.py"
] |
[
"from __future__ import annotations\n\n__copyright__ = \"\"\"\nCopyright (C) 2021 Kaushik Kulkarni\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\n\nimport numpy as np\nimport loopy as lp\nimport pymbolic.primitives as prim\nfrom typing import (Dict, Optional, Any, Iterator, FrozenSet, Union, Sequence,\n Tuple, Iterable, Mapping)\nfrom numbers import Number\nfrom pytato.array import (AbstractResultWithNamedArrays, Array, ShapeType,\n NamedArray, ArrayOrScalar, SizeParam)\nfrom pytato.scalar_expr import SubstitutionMapper, ScalarExpression, EvaluationMapper\nfrom pytools import memoize_method\nfrom pytools.tag import TagsType\nfrom pyrsistent import PMap, pmap\nimport islpy as isl\n\n__doc__ = \"\"\"\n.. currentmodule:: pytato.loopy\n\n.. autoclass:: LoopyCall\n\n.. autoclass:: LoopyCallResult\n\n.. 
autofunction:: call_loopy\n\"\"\"\n\n\nclass LoopyCall(AbstractResultWithNamedArrays):\n \"\"\"\n An array expression node representing a call to an entrypoint in a\n :mod:`loopy` translation unit.\n \"\"\"\n _mapper_method = \"map_loopy_call\"\n\n def __init__(self,\n translation_unit: \"lp.TranslationUnit\",\n bindings: Dict[str, ArrayOrScalar],\n entrypoint: str):\n entry_kernel = translation_unit[entrypoint]\n super().__init__()\n self._result_names = frozenset({name\n for name, lp_arg in entry_kernel.arg_dict.items()\n if lp_arg.is_output})\n\n self.translation_unit = translation_unit\n self.bindings = bindings\n self.entrypoint = entrypoint\n\n @memoize_method\n def _to_pytato(self, expr: ScalarExpression) -> ScalarExpression:\n from pymbolic.mapper.substitutor import make_subst_func\n return SubstitutionMapper(make_subst_func(self.bindings))(expr)\n\n @property\n def _entry_kernel(self) -> lp.LoopKernel:\n return self.translation_unit[self.entrypoint]\n\n def __hash__(self) -> int:\n return hash((self.translation_unit, tuple(self.bindings.items()),\n self.entrypoint))\n\n def __contains__(self, name: object) -> bool:\n return name in self._result_names\n\n @memoize_method\n def __getitem__(self, name: str) -> LoopyCallResult:\n if name not in self._result_names:\n raise KeyError(name)\n return LoopyCallResult(self, name)\n\n def __len__(self) -> int:\n return len(self._result_names)\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._result_names)\n\n def __eq__(self, other: Any) -> bool:\n if self is other:\n return True\n\n if not isinstance(other, LoopyCall):\n return False\n\n if ((self.entrypoint == other.entrypoint)\n and (self.bindings == other.bindings)\n and (self.translation_unit == other.translation_unit)):\n return True\n return False\n\n\nclass LoopyCallResult(NamedArray):\n \"\"\"\n Named array for :class:`LoopyCall`'s result.\n Inherits from :class:`~pytato.array.NamedArray`.\n \"\"\"\n def __init__(self,\n loopy_call: LoopyCall,\n name: str,\n tags: TagsType = frozenset()) -> None:\n super().__init__(loopy_call, name, tags=tags)\n\n # type-ignore reason: `copy` signature incompatible with super-class\n def copy(self, *, # type: ignore[override]\n loopy_call: Optional[AbstractResultWithNamedArrays] = None,\n name: Optional[str] = None,\n tags: Optional[TagsType] = None) -> LoopyCallResult:\n loopy_call = self._container if loopy_call is None else loopy_call\n name = self.name if name is None else name\n tags = self.tags if tags is None else tags\n assert isinstance(loopy_call, LoopyCall)\n return LoopyCallResult(loopy_call=loopy_call,\n name=name,\n tags=tags)\n\n def expr(self) -> Array:\n raise ValueError(\"Expressions for results of loopy functions aren't defined\")\n\n @property\n def shape(self) -> ShapeType:\n loopy_arg = self._container._entry_kernel.arg_dict[ # type:ignore\n self.name]\n shape: ShapeType = self._container._to_pytato( # type:ignore\n loopy_arg.shape)\n return shape\n\n @property\n def dtype(self) -> np.dtype[Any]:\n loopy_arg = self._container._entry_kernel.arg_dict[ # type:ignore\n self.name]\n return np.dtype(loopy_arg.dtype.numpy_dtype)\n\n\ndef call_loopy(translation_unit: \"lp.TranslationUnit\",\n bindings: Dict[str, ArrayOrScalar],\n entrypoint: Optional[str] = None) -> LoopyCall:\n \"\"\"\n Invokes an entry point of a :class:`loopy.TranslationUnit` on the array inputs as\n specified by *bindings*.\n\n Restrictions on the structure of ``translation_unit[entrypoint]``:\n\n * array arguments of ``translation_unit[entrypoint]`` must 
either be either\n input-only or output-only.\n * all input-only arguments of ``translation_unit[entrypoint]`` must appear in\n *bindings*.\n * all output-only arguments of ``translation_unit[entrypoint]`` must appear\n in *bindings*.\n * if *translation_unit* has been declared with multiple entrypoints,\n *entrypoint* can not be *None*.\n\n :arg translation_unit: the translation unit to call.\n :arg bindings: mapping from argument names of ``translation_unit[entrypoint]``\n to :class:`pytato.array.Array`.\n :arg entrypoint: the entrypoint of the ``translation_unit`` parameter.\n \"\"\"\n if entrypoint is None:\n if len(translation_unit.entrypoints) != 1:\n raise ValueError(\"cannot infer entrypoint\")\n\n entrypoint, = translation_unit.entrypoints\n\n translation_unit = translation_unit.with_entrypoints(entrypoint)\n\n # {{{ sanity checks\n\n if any([arg.is_input and arg.is_output\n for arg in translation_unit[entrypoint].args]):\n # Pytato DAG cannot have stateful nodes.\n raise ValueError(\"Cannot call a kernel with side-effects.\")\n\n for name in bindings:\n if name not in translation_unit[entrypoint].arg_dict:\n raise ValueError(f\"Kernel '{entrypoint}' got an unexpected input: \"\n f\"'{name}'.\")\n if translation_unit[entrypoint].arg_dict[name].is_output:\n raise ValueError(f\"Kernel '{entrypoint}' got an output arg '{name}' \"\n f\"as input.\")\n\n # {{{ perform shape inference here\n\n bindings = extend_bindings_with_shape_inference(translation_unit[entrypoint],\n pmap(bindings))\n\n # }}}\n\n for arg in translation_unit[entrypoint].args:\n if arg.is_input:\n if arg.name not in bindings:\n raise ValueError(f\"Kernel '{entrypoint}' expects an input\"\n f\" '{arg.name}'\")\n\n arg_binding = bindings[arg.name]\n\n if isinstance(arg, (lp.ArrayArg, lp.ConstantArg)):\n if not isinstance(arg_binding, Array):\n raise ValueError(f\"Argument '{arg.name}' expected to be a \"\n f\"pytato.Array, got {type(arg_binding)}.\")\n else:\n assert isinstance(arg, lp.ValueArg)\n if not (isinstance(arg_binding, Number)\n or (isinstance(arg_binding, Array)\n and arg_binding.shape == ())):\n raise ValueError(f\"Argument '{arg.name}' expected to be a \"\n \" number or a scalar expression, got \"\n f\"{type(arg_binding)}.\")\n\n # }}}\n\n # {{{ infer types of the translation_unit\n\n for name, ary in bindings.items():\n if translation_unit[entrypoint].arg_dict[name].dtype not in [lp.auto,\n None]:\n continue\n\n if isinstance(ary, Array):\n translation_unit = lp.add_dtypes(translation_unit, {name: ary.dtype})\n else:\n assert isinstance(ary, Number)\n translation_unit = lp.add_dtypes(translation_unit, {name: type(ary)})\n\n translation_unit = lp.infer_unknown_types(translation_unit)\n\n # }}}\n\n # {{{ infer shapes of the translation_unit\n\n translation_unit = lp.infer_arg_descr(translation_unit)\n\n # }}}\n\n translation_unit = translation_unit.with_entrypoints(frozenset())\n\n return LoopyCall(translation_unit, bindings, entrypoint)\n\n\n# {{{ shape inference\n\nclass ShapeInferenceFailure(RuntimeError):\n pass\n\n\ndef _get_val_in_bset(bset: isl.BasicSet, idim: int) -> ScalarExpression:\n \"\"\"\n Gets the value of *bset*'s *idim*-th set-dim in terms of it's param-dims.\n\n .. 
note::\n\n Assumes all constraints in *bset* are equality constraints.\n \"\"\"\n from loopy.symbolic import aff_to_expr\n\n max_val = bset.dim_max(idim)\n\n assert max_val.is_equal(bset.dim_min(idim))\n\n if max_val.n_piece() != 1:\n raise NotImplementedError(\"Shape inference resulted in a piecewise\"\n \" result.\")\n\n (_, aff), = max_val.get_pieces()\n\n return aff_to_expr(aff)\n\n\ndef solve_constraints(variables: Iterable[str],\n parameters: Iterable[str],\n constraints: Sequence[Tuple[ScalarExpression,\n ScalarExpression]],\n\n ) -> Mapping[str, ScalarExpression]:\n \"\"\"\n :arg variables: Names of the variables to solve for\n :arg parameters: Names of the parameters that to express that are allowed\n to be a part of the solution expressions.\n :arg constraints: A :class:`list` of constraints. Each constraint is\n represented as a tuple ``(lhs, rhs)``, that corresponds to the\n constraint ``lhs = rhs``. ``lhs`` and ``rhs`` are quasi-affine\n expressions in *variables* and *constraints*.\n :returns: A mapping from variable name in *variables* to\n :class:`ScalarExpression` obtained after solving for them.\n \"\"\"\n from loopy.symbolic import aff_from_expr\n\n space = isl.Space.create_from_names(isl.DEFAULT_CONTEXT,\n set=variables,\n params=parameters)\n\n shape_inference_bset = isl.BasicSet.universe(space)\n\n for lhs, rhs in constraints:\n # type-ignored reason: no \"(-)\" support for Number\n aff = aff_from_expr(space, lhs-rhs) # type: ignore\n\n shape_inference_bset = (shape_inference_bset\n .add_constraint(isl.Constraint\n .equality_from_aff(aff)))\n\n if shape_inference_bset.is_empty():\n raise ShapeInferenceFailure\n\n solution = {}\n\n # {{{ get the value of each unknown variable\n\n for idim in range(shape_inference_bset.dim(isl.dim_type.set)):\n arg_name = shape_inference_bset.get_dim_name(isl.dim_type.set, idim)\n solved_val = _get_val_in_bset(shape_inference_bset, idim)\n solution[arg_name] = solved_val\n\n # }}}\n\n return solution\n\n\n# {{{ shape inference helpers\n\ndef _lp_var_to_global_namespace(name: str) -> str:\n return f\"_lp_{name}\"\n\n\ndef _lp_var_from_global_namespace(name: str) -> str:\n assert name[:4] == \"_lp_\"\n return name[4:]\n\n\ndef _pt_var_to_global_namespace(name: Optional[str]) -> str:\n assert name is not None # size params are always named\n return f\"_pt_{name}\"\n\n\ndef _get_pt_dim_expr(dim: Union[int, Array]) -> ScalarExpression:\n from pytato.utils import dim_to_index_lambda_components\n from pymbolic.mapper.substitutor import substitute\n dim_expr, dim_bnds = dim_to_index_lambda_components(dim)\n assert all(isinstance(dim_bnd, SizeParam)\n for dim_bnd in dim_bnds.values())\n\n return substitute(dim_expr,\n {k: prim.Variable(v.name)\n for k, v in dim_bnds.items()})\n\n# }}}\n\n\ndef extend_bindings_with_shape_inference(knl: lp.LoopKernel,\n bindings: PMap[str, ArrayOrScalar]\n ) -> Dict[str, ArrayOrScalar]:\n from functools import reduce\n from loopy.symbolic import get_dependencies as lpy_get_deps\n from loopy.kernel.array import ArrayBase\n from pymbolic.mapper.substitutor import make_subst_func\n from pytato.transform import SizeParamGatherer\n\n get_size_param_deps = SizeParamGatherer()\n\n lp_size_params: FrozenSet[str] = reduce(frozenset.union,\n (lpy_get_deps(arg.shape)\n for arg in knl.args\n if isinstance(arg, ArrayBase)),\n frozenset())\n\n pt_size_params: FrozenSet[SizeParam] = reduce(frozenset.union,\n (get_size_param_deps(bnd)\n for bnd in bindings.values()\n if isinstance(bnd, Array)),\n frozenset())\n\n # {{{ mappers to 
map expressions to a global namespace\n\n pt_subst_map = SubstitutionMapper(\n make_subst_func({\n arg.name: prim.Variable(_pt_var_to_global_namespace(arg\n .name\n ))\n for arg in pt_size_params}))\n\n lp_subst_map = SubstitutionMapper(\n make_subst_func({\n arg: prim.Variable(_lp_var_to_global_namespace(arg))\n for arg in lp_size_params}))\n\n # }}}\n\n constraints = []\n\n # {{{ collect constraints from passed arguments\n\n for lp_arg_name, lp_arg in knl.arg_dict.items():\n if lp_arg_name not in bindings:\n # value not passed => don't add any constraints\n continue\n\n pt_arg = bindings[lp_arg_name]\n\n if isinstance(lp_arg, ArrayBase):\n\n # {{{ sanity checks\n\n if lp_arg.shape is None:\n # no constraints to add here\n continue\n\n if lp_arg.shape is lp.auto:\n # TODO: Can lp.auto as shape really appear here?\n raise NotImplementedError(\"'loopy.auto' as shape dim.\")\n\n assert isinstance(lp_arg.shape, tuple)\n\n if not isinstance(pt_arg, Array):\n raise ValueError(f\"'{knl.name}' got scalar value for '{lp_arg_name}'\"\n \", expected an array.\")\n\n if len(lp_arg.shape) != len(pt_arg.shape):\n raise ValueError(f\"ndim mismatch for argument '{lp_arg_name}'\"\n f\"of '{knl.name}'\")\n\n # }}}\n\n for lp_dim, pt_dim in zip(lp_arg.shape, pt_arg.shape):\n pt_dim_expr = pt_subst_map(_get_pt_dim_expr(pt_dim))\n lp_dim_expr = lp_subst_map(lp_dim)\n constraints.append((pt_dim_expr, lp_dim_expr))\n\n else:\n if lp_arg_name not in lp_size_params:\n continue\n\n assert isinstance(lp_arg, lp.ValueArg)\n assert isinstance(pt_arg, (int, Array))\n pt_arg_expr = pt_subst_map(_get_pt_dim_expr(pt_arg))\n lp_arg_expr = lp_subst_map(prim.Variable(lp_arg.name))\n constraints.append((pt_arg_expr, lp_arg_expr))\n\n # }}}\n\n solutions = solve_constraints(variables={_lp_var_to_global_namespace(var)\n for var in lp_size_params},\n parameters={_pt_var_to_global_namespace(var.name)\n for var in pt_size_params},\n constraints=constraints)\n\n as_pt_size_param = EvaluationMapper({_pt_var_to_global_namespace(arg.name): arg\n for arg in pt_size_params})\n\n for var, val in solutions.items():\n # map the pymbolic expression back into an expression in terms of\n # pt.SizeParams\n var = _lp_var_from_global_namespace(var)\n val = as_pt_size_param(val)\n\n # {{{ respect callee's scalar dtype preference if there exists one\n\n # TODO: remove this once\n # https://github.com/inducer/loopy/issues/442 is resolved.\n if (isinstance(val, Number)\n and knl.arg_dict[var].dtype not in [lp.auto, None]):\n val = knl.arg_dict[var].dtype.numpy_dtype.type(val)\n\n # }}}\n\n bindings = bindings.set(var, val)\n\n return dict(bindings)\n\n# }}}\n\n\n# vim: fdm=marker\n"
] |
[
[
"numpy.dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nju-websoft/HyperKA
|
[
"cadaf824a739b55211997e73d9948ddbfbe7ce83"
] |
[
"src/hyperka/ea_funcs/test_funcs.py"
] |
[
"import gc\nimport numpy as np\nimport time\nimport ray\n\nfrom hyperka.ea_funcs.utils import div_list\nfrom hyperka.hyperbolic.metric import compute_hyperbolic_distances, normalization\n\ng = 1000000000\n\n\[email protected](num_cpus=1)\ndef cal_rank(task, sim, top_k):\n mean = 0\n mrr = 0\n num = [0 for k in top_k]\n for i in range(len(task)):\n ref = task[i]\n rank = (-sim[i, :]).argsort()\n assert ref in rank\n rank_index = np.where(rank == ref)[0][0]\n mean += (rank_index + 1)\n mrr += 1 / (rank_index + 1)\n for j in range(len(top_k)):\n if rank_index < top_k[j]:\n num[j] += 1\n return mean, mrr, num\n\n\ndef eval_alignment_mul(sim_mat, top_k, nums_threads, mess=\"\"):\n t = time.time()\n ref_num = sim_mat.shape[0]\n t_num = [0 for k in top_k]\n t_mean = 0\n t_mrr = 0\n\n tasks = div_list(np.array(range(ref_num)), nums_threads)\n results = list()\n for task in tasks:\n res = cal_rank.remote(task, sim_mat[task, :], top_k)\n results.append(res)\n for res in ray.get(results):\n mean, mrr, num = res\n t_mean += mean\n t_mrr += mrr\n t_num += np.array(num)\n\n acc = np.array(t_num) / ref_num\n for i in range(len(acc)):\n acc[i] = round(acc[i], 4)\n t_mean /= ref_num\n t_mrr /= ref_num\n print(\"{}, hits@{} = {}, mr = {:.3f}, mrr = {:.3f}, time = {:.3f} s \".format(mess, top_k, acc, t_mean, t_mrr,\n time.time() - t))\n return acc[0]\n\n\ndef cal_rank_multi_embed(frags, dic, sub_embed, embed, top_k):\n mean = 0\n mrr = 0\n num = np.array([0 for k in top_k])\n mean1 = 0\n mrr1 = 0\n num1 = np.array([0 for k in top_k])\n sim_mat = np.matmul(sub_embed, embed.T) # ndarray\n # print(\"matmul sim mat type:\", type(sim_mat))\n prec_set = set()\n aligned_e = None\n for i in range(len(frags)):\n ref = frags[i]\n rank = (-sim_mat[i, :]).argsort()\n aligned_e = rank[0]\n assert ref in rank\n rank_index = np.where(rank == ref)[0][0]\n mean += (rank_index + 1)\n mrr += 1 / (rank_index + 1)\n for j in range(len(top_k)):\n if rank_index < top_k[j]:\n num[j] += 1\n # del rank\n\n if dic is not None and dic.get(ref, -1) > -1:\n e2 = dic.get(ref)\n sim_mat[i, e2] += 1.0\n rank = (-sim_mat[i, :]).argsort()\n aligned_e = rank[0]\n assert ref in rank\n rank_index = np.where(rank == ref)[0][0]\n mean1 += (rank_index + 1)\n mrr1 += 1 / (rank_index + 1)\n for j in range(len(top_k)):\n if rank_index < top_k[j]:\n num1[j] += 1\n # del rank\n else:\n mean1 += (rank_index + 1)\n mrr1 += 1 / (rank_index + 1)\n for j in range(len(top_k)):\n if rank_index < top_k[j]:\n num1[j] += 1\n\n prec_set.add((ref, aligned_e))\n\n del sim_mat\n gc.collect()\n return mean, mrr, num, mean1, mrr1, num1, prec_set\n\n\[email protected](num_cpus=1)\ndef cal_rank_multi_embed_hyperbolic(frags, sub_embed, embed, top_k):\n mr = 0\n mrr = 0\n hits = np.array([0 for k in top_k])\n sim_mat = compute_hyperbolic_similarity_single(sub_embed, embed)\n results = set()\n for i in range(len(frags)):\n ref = frags[i]\n rank = (-sim_mat[i, :]).argsort()\n aligned_e = rank[0]\n assert ref in rank\n rank_index = np.where(rank == ref)[0][0]\n mr += (rank_index + 1)\n mrr += 1 / (rank_index + 1)\n for j in range(len(top_k)):\n if rank_index < top_k[j]:\n hits[j] += 1\n results.add((ref, aligned_e))\n del sim_mat\n return mr, mrr, hits, results\n\n\ndef eval_alignment_hyperbolic_multi(embed1, embed2, top_k, nums_threads, mess=\"\"):\n t = time.time()\n ref_num = embed1.shape[0]\n hits = np.array([0 for k in top_k])\n mr = 0\n mrr = 0\n total_alignment = set()\n\n frags = div_list(np.array(range(ref_num)), nums_threads)\n results = list()\n for frag in 
frags:\n res = cal_rank_multi_embed_hyperbolic.remote(frag, embed1[frag, :], embed2, top_k)\n results.append(res)\n\n for res in ray.get(results):\n mr1, mrr1, hits1, alignment = res\n mr += mr1\n mrr += mrr1\n hits += hits1\n total_alignment |= alignment\n\n assert len(total_alignment) == ref_num\n\n hits = hits / ref_num\n for i in range(len(hits)):\n hits[i] = round(hits[i], 4)\n mr /= ref_num\n mrr /= ref_num\n print(\"{}, hits@{} = {}, mr = {:.3f}, mrr = {:.3f}, time = {:.3f} s \".format(mess, top_k, hits, mr, mrr,\n time.time() - t))\n gc.collect()\n return hits[0]\n\n\[email protected](num_cpus=1)\ndef cal_csls_neighbor_sim(sim_mat, k):\n sorted_mat = -np.partition(-sim_mat, k + 1, axis=1) # -np.sort(-sim_mat1)\n nearest_k = sorted_mat[:, 0:k]\n sim_values = np.mean(nearest_k, axis=1)\n return sim_values\n\n\ndef csls_neighbor_sim(sim_mat, k, nums_threads):\n tasks = div_list(np.array(range(sim_mat.shape[0])), nums_threads)\n results = list()\n for task in tasks:\n res = cal_csls_neighbor_sim.remote(sim_mat[task, :], k)\n results.append(res)\n sim_values = None\n for res in ray.get(results):\n val = res\n if sim_values is None:\n sim_values = val\n else:\n sim_values = np.append(sim_values, val)\n assert sim_values.shape[0] == sim_mat.shape[0]\n return sim_values\n\n\ndef sim_handler_hyperbolic(embed1, embed2, k, nums_threads):\n tasks = div_list(np.array(range(embed1.shape[0])), nums_threads)\n results = list()\n for task in tasks:\n res = compute_hyperbolic_similarity.remote(embed1[task, :], embed2)\n results.append(res)\n sim_lists = list()\n for res in ray.get(results):\n sim_lists.append(res)\n sim_mat = np.concatenate(sim_lists, axis=0)\n if k == 0:\n return sim_mat\n csls1 = csls_neighbor_sim(sim_mat, k, nums_threads)\n csls2 = csls_neighbor_sim(sim_mat.T, k, nums_threads)\n csls_sim_mat = 2 * sim_mat.T - csls1\n csls_sim_mat = csls_sim_mat.T - csls2\n del sim_mat\n gc.collect()\n return csls_sim_mat\n\n\[email protected](num_cpus=1)\ndef compute_hyperbolic_similarity(embeds1, embeds2):\n x1, y1 = embeds1.shape # <class 'numpy.ndarray'>\n x2, y2 = embeds2.shape\n assert y1 == y2\n dist_vec_list = list()\n for i in range(x1):\n embed1 = embeds1[i, ] # <class 'numpy.ndarray'> (y1,)\n embed1 = np.reshape(embed1, (1, y1)) # (1, y1)\n embed1 = np.repeat(embed1, x2, axis=0) # (x2, y1)\n dist_vec = compute_hyperbolic_distances(embed1, embeds2)\n dist_vec_list.append(dist_vec)\n dis_mat = np.row_stack(dist_vec_list) # (x1, x2)\n return normalization(-dis_mat)\n\n\ndef compute_hyperbolic_similarity_single(embeds1, embeds2):\n x1, y1 = embeds1.shape # <class 'numpy.ndarray'>\n x2, y2 = embeds2.shape\n assert y1 == y2\n dist_vec_list = list()\n for i in range(x1):\n embed1 = embeds1[i, ] # <class 'numpy.ndarray'> (y1,)\n embed1 = np.reshape(embed1, (1, y1)) # (1, y1)\n embed1 = np.repeat(embed1, x2, axis=0) # (x2, y1)\n dist_vec = compute_hyperbolic_distances(embed1, embeds2)\n dist_vec_list.append(dist_vec)\n dis_mat = np.row_stack(dist_vec_list) # (x1, x2)\n return normalization(-dis_mat)"
] |
[
[
"numpy.partition",
"numpy.reshape",
"numpy.matmul",
"numpy.concatenate",
"numpy.append",
"numpy.row_stack",
"numpy.mean",
"numpy.repeat",
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AmpelProject/Ampel-contrib-sample
|
[
"99677e434f700fd25dc90b1733ec1944b6cc84fd"
] |
[
"ampel/contrib/sample/t2/T2SNcosmoComp.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : ampel/contrib/sample/t2/T2SNcosmoComp.py\n# License : BSD-3-Clause\n# Author : [email protected]\n# Date : 03.04.2021\n# Last Modified Date: 03.04.2021\n# Last Modified By : [email protected]\n\nfrom typing import Dict, List, Optional, Sequence, Any, Tuple\nimport numpy as np\nimport sncosmo\nfrom ampel.type import T2UnitResult\nfrom ampel.view.LightCurve import LightCurve\nfrom astropy.table import Table\nfrom ampel.abstract.AbsLightCurveT2Unit import AbsLightCurveT2Unit\n\n\n\n\nclass T2SNcosmoComp(AbsLightCurveT2Unit):\n \"\"\"\n\n This unit tests whether a \"target\" SNcosmo source provides a better match to a LightCurve\n compared with a \"base\" source. \n A fit quality test is also done.\n\n This class is still only rudimentary. In particular:\n - MW or host galaxy reddening not accounted for!\n - All models assumed to be part of the standard SNcosmo registry.\n - Assumes input lightcurve contains AB magnitudes.\n - Model fit boundaries not propagated to fit.\n - ...\n \n The run method, applied to a LightCurve, will return a dict (T2UnitResult).\n In this \n 'target_match':True\n for lightcurves fulfilling the target criteria. \n (For this sample case we also include the fitted model).\n \n \"\"\"\n\n # Parameters that can, and in some cases need to be configured by the user\n # Name of model for the target search. If not available in the registery, SNcosmo will try to download\n target_model_name: str\n # Name of base comparison model\n base_model_name: str\n # Chi^2 / dof cut for acceptance as potential model\n chi2dof_cut: float = 3.\n # The target model chi^2/dof has to be better, after scaling with this factor \n chicomp_scaling: float = 1.\n # Redshift bound for template fit\n zbound: Tuple[float, float] = (0,0.2)\n\n\n def post_init(self)-> None:\n \"\"\"\n Retrieve models.\n \"\"\"\n self.target_model = sncosmo.Model(source=self.target_model_name)\n self.base_model = sncosmo.Model(source=self.base_model_name)\n \n \n def run(self, light_curve: LightCurve) -> T2UnitResult:\n \"\"\"\n Parameters\n -----------\n light_curve: \"ampel.view.LightCurve\" instance.\n See the LightCurve docstring for more info.\n\n Returns\n -------\n dict\n \"\"\"\n\n self.logger.info('Fitting %s'%(light_curve.stock_id) )\n\n # Create SNCosmo input table\n phot = np.asarray( light_curve.get_ntuples(('jd','magpsf','sigmapsf','fid')) )\n phot_tab = Table(phot,names=('jd','magpsf','sigmapsf','fid'))\n phot_tab['band'] = 'ztfband'\n for fid, fname in zip( [1,2,3], ['ztfg','ztfr','ztfi']):\n phot_tab['band'][phot_tab['fid']==fid] = fname\n phot_tab['flux'] = 10 ** (-(phot_tab['magpsf'] - 25) / 2.5)\n phot_tab['fluxerr'] = np.abs(phot_tab['flux'] * (-phot_tab['sigmapsf'] / 2.5 * np.log(10)))\n phot_tab['zp'] = 25\n phot_tab['zpsys'] = 'ab'\n \n # Fit base match\n try:\n result, fitted_model = sncosmo.fit_lc(\n phot_tab, self.base_model, self.base_model.param_names, bounds={'z':self.zbound}) \n chidof_base = result.chisq / result.ndof\n except RuntimeError:\n # We interpret a poor fit a a weird lightcurve, and exit\n self.logger.info(\"Base fit fails\",extra={\"stock_id\":light_curve.stock_id})\n return {'chidof_base':-1,'chidof_target':0, 'model_match': False, 'info': 'basefit fails'}\n\n \n # Fit target source\n try:\n result, fitted_model = sncosmo.fit_lc(\n phot_tab, self.target_model, self.target_model.param_names, bounds={'z':self.zbound} ) \n chidof_target = result.chisq / result.ndof\n except RuntimeError:\n # We interpret a poor fit a a 
weird lightcurve, and exit\n self.logger.info(\"Target fit fails\",extra={\"stock_id\":light_curve.stock_id})\n return {'chidof_base':chidof_base,'chidof_target':-1, 'model_match': False, 'info': 'targetfit fails'}\n \n \n # Gather information to propagate / log\n fit_info = {'chidof_base':chidof_base,'chidof_target':chidof_target,\n 'base_model':self.base_model_name, 'target_model':self.target_model_name}\n\n # Crude decision made\n if chidof_target>self.chi2dof_cut:\n fit_info['target_match'] = False\n fit_info['info'] = 'Poor lc fit'\n elif chidof_base < ( chidof_target * self.chicomp_scaling ):\n fit_info['target_match'] = False\n fit_info['info'] = 'Better base fit'\n else:\n fit_info['target_match'] = True\n fit_info['info'] = 'Good match'\n \n \n return fit_info\n"
] |
[
[
"numpy.log"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
econwang/EconML
|
[
"4554706bd9f803985e34399b7fc65035598ce195",
"4554706bd9f803985e34399b7fc65035598ce195",
"4554706bd9f803985e34399b7fc65035598ce195"
] |
[
"econml/tests/test_inference.py",
"econml/ortho_forest.py",
"econml/two_stage_least_squares.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport unittest\nfrom sklearn.base import clone\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom econml.dml import LinearDML\nfrom econml.drlearner import LinearDRLearner\nfrom econml.inference import (BootstrapInference, NormalInferenceResults,\n EmpiricalInferenceResults, PopulationSummaryResults)\n\n\nclass TestInference(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n np.random.seed(123)\n # DGP constants\n cls.n = 1000\n cls.d_w = 3\n cls.d_x = 3\n # Generate data\n cls.X = np.random.uniform(0, 1, size=(cls.n, cls.d_x))\n cls.W = np.random.normal(0, 1, size=(cls.n, cls.d_w))\n cls.T = np.random.binomial(1, .5, size=(cls.n,))\n cls.Y = np.random.normal(0, 1, size=(cls.n, ))\n\n def test_summary(self):\n \"\"\"Tests the inference results summary for continuous treatment estimators.\"\"\"\n # Test inference results when `cate_feature_names` doesn not exist\n\n for inference in [BootstrapInference(n_bootstrap_samples=5), 'auto']:\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(),\n featurizer=PolynomialFeatures(degree=2,\n include_bias=False)\n )\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n summary_results = cate_est.summary()\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n fnames = PolynomialFeatures(degree=2, include_bias=False).fit(TestInference.X).get_feature_names()\n np.testing.assert_array_equal(coef_rows, fnames)\n intercept_rows = np.asarray(summary_results.tables[1].data)[1:, 0]\n np.testing.assert_array_equal(intercept_rows, ['cate_intercept'])\n\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(),\n featurizer=PolynomialFeatures(degree=2,\n include_bias=False)\n )\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = cate_est.summary(feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n fnames = PolynomialFeatures(degree=2, include_bias=False).fit(\n TestInference.X).get_feature_names(input_features=fnames)\n np.testing.assert_array_equal(coef_rows, fnames)\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n summary_results = cate_est.summary()\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])\n\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = cate_est.summary(feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, fnames)\n\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)\n wrapped_est = self._NoFeatNamesEst(cate_est)\n wrapped_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n 
inference=inference\n )\n summary_results = wrapped_est.summary()\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])\n\n cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)\n wrapped_est = self._NoFeatNamesEst(cate_est)\n wrapped_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = wrapped_est.summary(feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, fnames)\n\n def test_summary_discrete(self):\n \"\"\"Tests the inference results summary for discrete treatment estimators.\"\"\"\n # Test inference results when `cate_feature_names` doesn not exist\n\n for inference in [BootstrapInference(n_bootstrap_samples=5), 'auto']:\n cate_est = LinearDRLearner(model_regression=LinearRegression(), model_propensity=LogisticRegression(),\n featurizer=PolynomialFeatures(degree=2,\n include_bias=False)\n )\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n summary_results = cate_est.summary(T=1)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n fnames = PolynomialFeatures(degree=2, include_bias=False).fit(TestInference.X).get_feature_names()\n np.testing.assert_array_equal(coef_rows, fnames)\n intercept_rows = np.asarray(summary_results.tables[1].data)[1:, 0]\n np.testing.assert_array_equal(intercept_rows, ['cate_intercept'])\n\n cate_est = LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(),\n featurizer=PolynomialFeatures(degree=2,\n include_bias=False)\n )\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = cate_est.summary(T=1, feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n fnames = PolynomialFeatures(degree=2, include_bias=False).fit(\n TestInference.X).get_feature_names(input_features=fnames)\n np.testing.assert_array_equal(coef_rows, fnames)\n cate_est = LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(), featurizer=None)\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n summary_results = cate_est.summary(T=1)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])\n\n cate_est = LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(), featurizer=None)\n cate_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = cate_est.summary(T=1, feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, fnames)\n\n cate_est = LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(), featurizer=None)\n wrapped_est = self._NoFeatNamesEst(cate_est)\n wrapped_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n 
summary_results = wrapped_est.summary(T=1)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])\n\n cate_est = LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(), featurizer=None)\n wrapped_est = self._NoFeatNamesEst(cate_est)\n wrapped_est.fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W,\n inference=inference\n )\n fnames = ['Q' + str(i) for i in range(TestInference.d_x)]\n summary_results = wrapped_est.summary(T=1, feat_name=fnames)\n coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]\n np.testing.assert_array_equal(coef_rows, fnames)\n\n def test_degenerate_cases(self):\n \"\"\"Test that we return the correct values when our distribution doesn't vary\"\"\"\n predictions = np.array([[1, 0], [1, 1]]) # first component is always 1\n for inf in [EmpiricalInferenceResults(d_t=1, d_y=2,\n pred=np.mean(predictions, axis=0), pred_dist=predictions,\n inf_type='coefficient'),\n NormalInferenceResults(d_t=1, d_y=2,\n pred=np.mean(predictions, axis=0), pred_stderr=np.std(predictions, axis=0),\n inf_type='coefficient')]:\n zs = inf.zstat()\n pv = inf.pvalue()\n # test value 0 is less than estimate of 1 and variance is 0, so z score should be inf\n assert np.isposinf(zs[0])\n # predictions in column 1 have nonzero variance, so the zstat should always be some finite value\n assert np.isfinite(zs[1])\n assert pv[0] == 0 # pvalue should be zero when test value is greater or less than all samples\n\n test_point = np.array([1, 0.5])\n zs = inf.zstat(test_point)\n pv = inf.pvalue(test_point)\n # test value 1 is equal to the estimate of 1 and variance is 0, so z score should be nan\n assert np.isnan(zs[0])\n # predictions in column 1 have nonzero variance, so the zstat should always be some finite value\n assert np.isfinite(zs[1])\n # pvalue is also nan when variance is 0 and the point tested is equal to the point tested\n assert np.isnan(pv[0])\n # pvalue for second column should be greater than zero since some points are on either side\n # of the tested value\n assert 0 < pv[1] <= 1\n\n test_point = np.array([2, 1])\n zs = inf.zstat(test_point)\n pv = inf.pvalue(test_point)\n # test value 2 is greater than estimate of 1 and variance is 0, so z score should be -inf\n assert np.isneginf(zs[0])\n # predictions in column 1 have nonzero variance, so the zstat should always be some finite value\n assert np.isfinite(zs[1])\n # pvalue is also nan when variance is 0 and the point tested is equal to the point tested\n assert pv[0] == 0 # pvalue should be zero when test value is greater or less than all samples\n\n pop = PopulationSummaryResults(np.mean(predictions, axis=0).reshape(1, 2), np.std(\n predictions, axis=0).reshape(1, 2), d_t=1, d_y=2, alpha=0.05, value=0, decimals=3, tol=0.001)\n pop.print() # verify that we can access all attributes even in degenerate case\n\n def test_can_summarize(self):\n LinearDML(model_t=LinearRegression(), model_y=LinearRegression()).fit(\n TestInference.Y,\n TestInference.T,\n TestInference.X,\n TestInference.W\n ).summary()\n\n LinearDRLearner(model_regression=LinearRegression(),\n model_propensity=LogisticRegression(), fit_cate_intercept=False).fit(\n TestInference.Y,\n TestInference.T > 0,\n TestInference.X,\n TestInference.W,\n inference=BootstrapInference(5)\n ).summary(1)\n\n class _NoFeatNamesEst:\n def __init__(self, cate_est):\n self.cate_est = clone(cate_est, safe=False)\n\n def 
__getattr__(self, name):\n if name != 'cate_feature_names':\n return getattr(self.cate_est, name)\n else:\n return self.__getattribute__(name)\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Orthogonal Random Forest.\n\nOrthogonal Random Forest (ORF) is an algorithm for heterogenous treatment effect\nestimation. Orthogonal Random Forest combines orthogonalization,\na technique that effectively removes the confounding effect in two-stage estimation,\nwith generalized random forests, a flexible method for estimating treatment\neffect heterogeneity.\n\nThis file consists of classes that implement the following variants of the ORF method:\n\n- The :class:`DMLOrthoForest`, a two-forest approach for learning continuous or discrete treatment effects\n using kernel two stage estimation.\n\n- The :class:`DROrthoForest`, a two-forest approach for learning discrete treatment effects\n using kernel two stage estimation.\n\nFor more details on these methods, see our paper [Oprescu2019]_.\n\"\"\"\n\nimport abc\nimport inspect\nimport numpy as np\nimport warnings\nfrom joblib import Parallel, delayed\nfrom sklearn import clone\nfrom scipy.stats import norm\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.linear_model import LassoCV, Lasso, LinearRegression, LogisticRegression, \\\n LogisticRegressionCV, ElasticNet\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder, PolynomialFeatures, FunctionTransformer\nfrom sklearn.utils import check_random_state, check_array, column_or_1d\nfrom .sklearn_extensions.linear_model import WeightedLassoCVWrapper\nfrom .cate_estimator import BaseCateEstimator, LinearCateEstimator, TreatmentExpansionMixin\nfrom .causal_tree import CausalTree\nfrom .inference import Inference, NormalInferenceResults\nfrom .utilities import (reshape, reshape_Y_T, MAX_RAND_SEED, check_inputs, _deprecate_positional,\n cross_product, inverse_onehot, _EncoderWrapper, check_input_arrays,\n _RegressionWrapper, deprecated)\nfrom sklearn.model_selection import check_cv\n# TODO: consider working around relying on sklearn implementation details\nfrom .sklearn_extensions.model_selection import _cross_val_predict\n\n\ndef _build_tree_in_parallel(Y, T, X, W,\n nuisance_estimator,\n parameter_estimator,\n moment_and_mean_gradient_estimator,\n min_leaf_size, max_depth, random_state):\n tree = CausalTree(nuisance_estimator=nuisance_estimator,\n parameter_estimator=parameter_estimator,\n moment_and_mean_gradient_estimator=moment_and_mean_gradient_estimator,\n min_leaf_size=min_leaf_size,\n max_depth=max_depth,\n random_state=random_state)\n # Create splits of causal tree\n tree.create_splits(Y, T, X, W)\n return tree\n\n\ndef _fit_weighted_pipeline(model_instance, X, y, sample_weight):\n weights_error_msg = (\n \"Estimators of type {} do not accept weights. 
\"\n \"Consider using the class WeightedModelWrapper from econml.utilities to build a weighted model.\"\n )\n expected_error_msg = \"fit() got an unexpected keyword argument 'sample_weight'\"\n if not isinstance(model_instance, Pipeline):\n try:\n model_instance.fit(X, y, sample_weight=sample_weight)\n except TypeError as e:\n if expected_error_msg in str(e):\n # Make sure the correct exception is being rethrown\n raise TypeError(weights_error_msg.format(model_instance.__class__.__name__))\n else:\n raise e\n else:\n try:\n last_step_name = model_instance.steps[-1][0]\n model_instance.fit(X, y, **{\"{0}__sample_weight\".format(last_step_name): sample_weight})\n except TypeError as e:\n if expected_error_msg in str(e):\n raise TypeError(weights_error_msg.format(model_instance.steps[-1][1].__class__.__name__))\n else:\n raise e\n\n\ndef _cross_fit(model_instance, X, y, split_indices, sample_weight=None, predict_func_name='predict'):\n model_instance1 = clone(model_instance, safe=False)\n model_instance2 = clone(model_instance, safe=False)\n split_1, split_2 = split_indices\n predict_func1 = getattr(model_instance1, predict_func_name)\n predict_func2 = getattr(model_instance2, predict_func_name)\n if sample_weight is None:\n model_instance2.fit(X[split_2], y[split_2])\n pred_1 = predict_func2(X[split_1])\n model_instance1.fit(X[split_1], y[split_1])\n pred_2 = predict_func1(X[split_2])\n else:\n _fit_weighted_pipeline(model_instance2, X[split_2], y[split_2], sample_weight[split_2])\n pred_1 = predict_func2(X[split_1])\n _fit_weighted_pipeline(model_instance1, X[split_1], y[split_1], sample_weight[split_1])\n pred_2 = predict_func1(X[split_2])\n # Must make sure indices are merged correctly\n sorted_split_indices = np.argsort(np.concatenate(split_indices), kind='mergesort')\n return np.concatenate((pred_1, pred_2))[sorted_split_indices]\n\n\ndef _group_predict(X, n_groups, predict_func):\n \"\"\" Helper function that predicts using the predict function\n for every input argument that looks like [X; i] for i in range(n_groups). 
Used in\n DR moments, where we want to predict for each [X; t], for any value of the treatment t.\n Returns an (X.shape[0], n_groups) matrix of predictions for each row of X and each t in range(n_groups).\n\n Parameters\n ----------\n X : (n, m) array\n n_groups : int\n predict_func : fn\n\n Returns\n -------\n pred : (n, n_groups) array\n \"\"\"\n group_pred = np.zeros((X.shape[0], n_groups))\n zero_t = np.zeros((X.shape[0], n_groups))\n for i in range(n_groups):\n zero_t[:, i] = 1\n group_pred[:, i] = predict_func(np.concatenate((X, zero_t), axis=1))\n zero_t[:, i] = 0\n # Convert rows to columns\n return group_pred\n\n\ndef _group_cross_fit(model_instance, X, y, t, split_indices, sample_weight=None, predict_func_name='predict'):\n # Require group assignment t to be one-hot-encoded\n model_instance1 = clone(model_instance, safe=False)\n model_instance2 = clone(model_instance, safe=False)\n split_1, split_2 = split_indices\n n_groups = t.shape[1]\n predict_func1 = getattr(model_instance1, predict_func_name)\n predict_func2 = getattr(model_instance2, predict_func_name)\n Xt = np.concatenate((X, t), axis=1)\n # Get predictions for the 2 splits\n if sample_weight is None:\n model_instance2.fit(Xt[split_2], y[split_2])\n pred_1 = _group_predict(X[split_1], n_groups, predict_func2)\n model_instance1.fit(Xt[split_1], y[split_1])\n pred_2 = _group_predict(X[split_2], n_groups, predict_func1)\n else:\n _fit_weighted_pipeline(model_instance2, Xt[split_2], y[split_2], sample_weight[split_2])\n pred_1 = _group_predict(X[split_1], n_groups, predict_func2)\n _fit_weighted_pipeline(model_instance1, Xt[split_1], y[split_1], sample_weight[split_1])\n pred_2 = _group_predict(X[split_2], n_groups, predict_func1)\n # Must make sure indices are merged correctly\n sorted_split_indices = np.argsort(np.concatenate(split_indices), kind='mergesort')\n return np.concatenate((pred_1, pred_2))[sorted_split_indices]\n\n\ndef _pointwise_effect(X_single, Y, T, X, W, w_nonzero, split_inds, slice_weights_list,\n second_stage_nuisance_estimator, second_stage_parameter_estimator,\n moment_and_mean_gradient_estimator, slice_len, n_slices, n_trees,\n stderr=False):\n \"\"\"Calculate the effect for a one data point with features X_single.\n\n Parameters\n ----------\n X_single : array-like, shape (d_x, )\n Feature vector that captures heterogeneity for one sample.\n\n stderr : boolean (default=False)\n Whether to calculate the covariance matrix via bootstrap-of-little-bags.\n \"\"\"\n # Crossfitting\n # Compute weighted nuisance estimates\n nuisance_estimates = second_stage_nuisance_estimator(Y, T, X, W, w_nonzero, split_indices=split_inds)\n parameter_estimate = second_stage_parameter_estimator(Y, T, X, nuisance_estimates, w_nonzero, X_single)\n # -------------------------------------------------------------------------------\n # Calculate the covariance matrix corresponding to the BLB inference\n #\n # 1. Calculate the moments and gradient of the training data w.r.t the test point\n # 2. Calculate the weighted moments for each tree slice to create a matrix\n # U = (n_slices, n_T). The V = (U x grad^{-1}) matrix represents the deviation\n # in that slice from the overall parameter estimate.\n # 3. 
Calculate the covariance matrix (V.T x V) / n_slices\n # -------------------------------------------------------------------------------\n if stderr:\n moments, mean_grad = moment_and_mean_gradient_estimator(Y, T, X, W, nuisance_estimates,\n parameter_estimate)\n # Calclulate covariance matrix through BLB\n slice_weighted_moment_one = []\n slice_weighted_moment_two = []\n for slice_weights_one, slice_weights_two in slice_weights_list:\n slice_weighted_moment_one.append(\n np.average(moments[:len(split_inds[0])], axis=0, weights=slice_weights_one)\n )\n slice_weighted_moment_two.append(\n np.average(moments[len(split_inds[0]):], axis=0, weights=slice_weights_two)\n )\n U = np.vstack(slice_weighted_moment_one + slice_weighted_moment_two)\n inverse_grad = np.linalg.inv(mean_grad)\n cov_mat = inverse_grad.T @ U.T @ U @ inverse_grad / (2 * n_slices)\n return parameter_estimate, cov_mat\n return parameter_estimate\n\n\nclass BaseOrthoForest(TreatmentExpansionMixin, LinearCateEstimator):\n \"\"\"Base class for the :class:`DMLOrthoForest` and :class:`DROrthoForest`.\"\"\"\n\n def __init__(self,\n nuisance_estimator,\n second_stage_nuisance_estimator,\n parameter_estimator,\n second_stage_parameter_estimator,\n moment_and_mean_gradient_estimator,\n discrete_treatment=False,\n categories='auto',\n n_trees=500,\n min_leaf_size=10, max_depth=10,\n subsample_ratio=0.25,\n bootstrap=False,\n n_jobs=-1,\n random_state=None):\n # Estimators\n self.nuisance_estimator = nuisance_estimator\n self.second_stage_nuisance_estimator = second_stage_nuisance_estimator\n self.parameter_estimator = parameter_estimator\n self.second_stage_parameter_estimator = second_stage_parameter_estimator\n self.moment_and_mean_gradient_estimator = moment_and_mean_gradient_estimator\n # OrthoForest parameters\n self.n_trees = n_trees\n self.min_leaf_size = min_leaf_size\n self.max_depth = max_depth\n self.bootstrap = bootstrap\n self.subsample_ratio = subsample_ratio\n self.n_jobs = n_jobs\n self.random_state = check_random_state(random_state)\n # Sub-forests\n self.forest_one_trees = None\n self.forest_two_trees = None\n self.forest_one_subsample_ind = None\n self.forest_two_subsample_ind = None\n # Auxiliary attributes\n self.n_slices = int(np.ceil((self.n_trees)**(1 / 2)))\n self.slice_len = int(np.ceil(self.n_trees / self.n_slices))\n # Fit check\n self.model_is_fitted = False\n self.discrete_treatment = discrete_treatment\n super().__init__()\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n @BaseCateEstimator._wrap_fit\n def fit(self, Y, T, X, W=None, *, inference='auto'):\n \"\"\"Build an orthogonal random forest from a training set (Y, T, X, W).\n\n Parameters\n ----------\n Y : array-like, shape (n, )\n Outcome for the treatment policy.\n\n T : array-like, shape (n, d_t)\n Treatment policy.\n\n X : array-like, shape (n, d_x)\n Feature vector that captures heterogeneity.\n\n W : array-like, shape (n, d_w) or None (default=None)\n High-dimensional controls.\n\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. 
This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`) and 'blb' (or an instance of :class:`BLBInference`)\n\n Returns\n -------\n self: an instance of self.\n \"\"\"\n Y, T, X, W = check_inputs(Y, T, X, W, multi_output_Y=False)\n if Y.ndim > 1 and Y.shape[1] > 1:\n raise ValueError(\n \"The outcome matrix must be of shape ({0}, ) or ({0}, 1), instead got {1}.\".format(len(X), Y.shape))\n\n shuffled_inidces = self.random_state.permutation(X.shape[0])\n n = X.shape[0] // 2\n self.Y_one = Y[shuffled_inidces[:n]]\n self.Y_two = Y[shuffled_inidces[n:]]\n self.T_one = T[shuffled_inidces[:n]]\n self.T_two = T[shuffled_inidces[n:]]\n self.X_one = X[shuffled_inidces[:n]]\n self.X_two = X[shuffled_inidces[n:]]\n if W is not None:\n self.W_one = W[shuffled_inidces[:n]]\n self.W_two = W[shuffled_inidces[n:]]\n else:\n self.W_one = None\n self.W_two = None\n self.forest_one_subsample_ind, self.forest_one_trees = self._fit_forest(Y=self.Y_one,\n T=self.T_one,\n X=self.X_one,\n W=self.W_one)\n self.forest_two_subsample_ind, self.forest_two_trees = self._fit_forest(Y=self.Y_two,\n T=self.T_two,\n X=self.X_two,\n W=self.W_two)\n self.model_is_fitted = True\n return self\n\n def const_marginal_effect(self, X):\n \"\"\"Calculate the constant marginal CATE θ(·) conditional on a vector of features X.\n\n Parameters\n ----------\n X : array-like, shape (n, d_x)\n Feature vector that captures heterogeneity.\n\n Returns\n -------\n Theta : matrix , shape (n, d_t)\n Constant marginal CATE of each treatment for each sample.\n \"\"\"\n # TODO: Check performance\n return np.asarray(self._predict(X))\n\n def _predict(self, X, stderr=False):\n if not self.model_is_fitted:\n raise NotFittedError('This {0} instance is not fitted yet.'.format(self.__class__.__name__))\n X = check_array(X)\n results = Parallel(n_jobs=self.n_jobs, verbose=3)(\n delayed(_pointwise_effect)(X_single, *self._pw_effect_inputs(X_single, stderr=stderr),\n self.second_stage_nuisance_estimator, self.second_stage_parameter_estimator,\n self.moment_and_mean_gradient_estimator, self.slice_len, self.n_slices,\n self.n_trees,\n stderr=stderr) for X_single in X)\n return results\n\n def _pw_effect_inputs(self, X_single, stderr=False):\n w1, w2 = self._get_weights(X_single)\n mask_w1 = (w1 != 0)\n mask_w2 = (w2 != 0)\n w1_nonzero = w1[mask_w1]\n w2_nonzero = w2[mask_w2]\n # Must normalize weights\n w_nonzero = np.concatenate((w1_nonzero, w2_nonzero))\n split_inds = (np.arange(len(w1_nonzero)), np.arange(len(w1_nonzero), len(w_nonzero)))\n slice_weights_list = []\n if stderr:\n slices = [\n (it * self.slice_len, min((it + 1) * self.slice_len, self.n_trees)) for it in range(self.n_slices)\n ]\n for slice_it in slices:\n slice_weights_one, slice_weights_two = self._get_weights(X_single, tree_slice=slice_it)\n slice_weights_list.append((slice_weights_one[mask_w1], slice_weights_two[mask_w2]))\n W_none = self.W_one is None\n return np.concatenate((self.Y_one[mask_w1], self.Y_two[mask_w2])),\\\n np.concatenate((self.T_one[mask_w1], self.T_two[mask_w2])),\\\n np.concatenate((self.X_one[mask_w1], self.X_two[mask_w2])),\\\n np.concatenate((self.W_one[mask_w1], self.W_two[mask_w2])\n ) if not W_none else None,\\\n w_nonzero,\\\n split_inds, slice_weights_list\n\n def _get_inference_options(self):\n # Override the CATE inference options\n # Add blb inference to parent's options\n options = super()._get_inference_options()\n options.update(blb=BLBInference)\n options.update(auto=BLBInference)\n return options\n\n def _fit_forest(self, 
Y, T, X, W=None):\n # Generate subsample indices\n subsample_ind = self._get_blb_indices(X)\n # Build trees in parallel\n return subsample_ind, Parallel(n_jobs=self.n_jobs, verbose=3, max_nbytes=None)(\n delayed(_build_tree_in_parallel)(\n Y[s], T[s], X[s], W[s] if W is not None else None,\n self.nuisance_estimator,\n self.parameter_estimator,\n self.moment_and_mean_gradient_estimator,\n self.min_leaf_size, self.max_depth,\n self.random_state.randint(MAX_RAND_SEED)) for s in subsample_ind)\n\n def _get_weights(self, X_single, tree_slice=None):\n \"\"\"Calculate weights for a single input feature vector over a subset of trees.\n\n The subset of trees is defined by the `tree_slice` tuple (start, end).\n The (start, end) tuple includes all trees from `start` to `end`-1.\n \"\"\"\n w1 = np.zeros(self.Y_one.shape[0])\n w2 = np.zeros(self.Y_two.shape[0])\n if tree_slice is None:\n tree_range = range(self.n_trees)\n else:\n tree_range = range(*tree_slice)\n for t in tree_range:\n leaf = self.forest_one_trees[t].find_split(X_single)\n weight_indexes = self.forest_one_subsample_ind[t][leaf.est_sample_inds]\n leaf_weight = 1 / len(leaf.est_sample_inds)\n if self.bootstrap:\n # Bootstraping has repetitions in tree sample\n unique, counts = np.unique(weight_indexes, return_counts=True)\n w1[unique] += leaf_weight * counts\n else:\n w1[weight_indexes] += leaf_weight\n for t in tree_range:\n leaf = self.forest_two_trees[t].find_split(X_single)\n # Similar for `a` weights\n weight_indexes = self.forest_two_subsample_ind[t][leaf.est_sample_inds]\n leaf_weight = 1 / len(leaf.est_sample_inds)\n if self.bootstrap:\n # Bootstraping has repetitions in tree sample\n unique, counts = np.unique(weight_indexes, return_counts=True)\n w2[unique] += leaf_weight * counts\n else:\n w2[weight_indexes] += leaf_weight\n return (w1 / len(tree_range), w2 / len(tree_range))\n\n def _get_blb_indices(self, X):\n \"\"\"Get data indices for every tree under the little bags split.\"\"\"\n # Define subsample size\n subsample_size = X.shape[0] // 2\n if not self.bootstrap:\n if self.subsample_ratio > 1.0:\n # Safety check\n warnings.warn(\"The argument 'subsample_ratio' must be between 0.0 and 1.0, \" +\n \"however a value of {} was provided. 
The 'subsample_ratio' will be changed to 1.0.\")\n self.subsample_ratio = 1.0\n subsample_size = int(self.subsample_ratio * subsample_size)\n subsample_ind = []\n # Draw points to create little bags\n for it in range(self.n_slices):\n half_sample_inds = self.random_state.choice(\n X.shape[0], X.shape[0] // 2, replace=False)\n for _ in np.arange(it * self.slice_len, min((it + 1) * self.slice_len, self.n_trees)):\n subsample_ind.append(half_sample_inds[self.random_state.choice(\n X.shape[0] // 2, subsample_size, replace=self.bootstrap)])\n return np.asarray(subsample_ind)\n\n\nclass DMLOrthoForest(BaseOrthoForest):\n \"\"\"OrthoForest for continuous or discrete treatments using the DML residual on residual moment function.\n\n A two-forest approach for learning heterogeneous treatment effects using\n kernel two stage estimation.\n\n Parameters\n ----------\n n_trees : integer, optional (default=500)\n Number of causal estimators in the forest.\n\n min_leaf_size : integer, optional (default=10)\n The minimum number of samples in a leaf.\n\n max_depth : integer, optional (default=10)\n The maximum number of splits to be performed when expanding the tree.\n\n subsample_ratio : float, optional (default=0.7)\n The ratio of the total sample to be used when training a causal tree.\n Values greater than 1.0 will be considered equal to 1.0.\n Parameter is ignored when bootstrap=True.\n\n bootstrap : boolean, optional (default=False)\n Whether to use bootstrap subsampling.\n\n lambda_reg : float, optional (default=0.01)\n The regularization coefficient in the ell_2 penalty imposed on the\n locally linear part of the second stage fit. This is not applied to\n the local intercept, only to the coefficient of the linear component.\n\n model_T : estimator, optional (default=sklearn.linear_model.LassoCV(cv=3))\n The estimator for residualizing the continuous treatment at each leaf.\n Must implement `fit` and `predict` methods.\n\n model_Y : estimator, optional (default=sklearn.linear_model.LassoCV(cv=3)\n The estimator for residualizing the outcome at each leaf. Must implement\n `fit` and `predict` methods.\n\n model_T_final : estimator, optional (default=None)\n The estimator for residualizing the treatment at prediction time. Must implement\n `fit` and `predict` methods. If parameter is set to ``None``, it defaults to the\n value of `model_T` parameter.\n\n model_Y_final : estimator, optional (default=None)\n The estimator for residualizing the outcome at prediction time. Must implement\n `fit` and `predict` methods. If parameter is set to ``None``, it defaults to the\n value of `model_Y` parameter.\n\n global_residualization : bool, optional (default=False)\n Whether to perform a prior residualization of Y and T using the model_Y_final and model_T_final\n estimators, or whether to perform locally weighted residualization at each target point.\n Global residualization is computationally less intensive, but could lose some statistical\n power, especially when W is not None.\n\n global_res_cv : int, cross-validation generator or an iterable, optional (default=2)\n The specification of the cv splitter to be used for cross-fitting, when constructing\n the global residuals of Y and T.\n\n discrete_treatment : bool, optional (default=False)\n Whether the treatment should be treated as categorical. 
If True, then the treatment T is\n one-hot-encoded and the model_T is treated as a classifier that must have a predict_proba\n method.\n\n categories : array like or 'auto', optional (default='auto')\n A list of pre-specified treatment categories. If 'auto' then categories are automatically\n recognized at fit time.\n\n n_jobs : int, optional (default=-1)\n The number of jobs to run in parallel for both :meth:`fit` and :meth:`effect`.\n ``-1`` means using all processors. Since OrthoForest methods are\n computationally heavy, it is recommended to set `n_jobs` to -1.\n\n random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n \"\"\"\n\n def __init__(self,\n n_trees=500,\n min_leaf_size=10, max_depth=10,\n subsample_ratio=0.7,\n bootstrap=False,\n lambda_reg=0.01,\n model_T='auto',\n model_Y=WeightedLassoCVWrapper(cv=3),\n model_T_final=None,\n model_Y_final=None,\n global_residualization=False,\n global_res_cv=2,\n discrete_treatment=False,\n categories='auto',\n n_jobs=-1,\n random_state=None):\n # Copy and/or define models\n self.lambda_reg = lambda_reg\n if model_T == 'auto':\n if discrete_treatment:\n model_T = LogisticRegressionCV(cv=3)\n else:\n model_T = WeightedLassoCVWrapper(cv=3)\n self.model_T = model_T\n self.model_Y = model_Y\n self.model_T_final = model_T_final\n self.model_Y_final = model_Y_final\n if self.model_T_final is None:\n self.model_T_final = clone(self.model_T, safe=False)\n if self.model_Y_final is None:\n self.model_Y_final = clone(self.model_Y, safe=False)\n if discrete_treatment:\n self.model_T = _RegressionWrapper(self.model_T)\n self.model_T_final = _RegressionWrapper(self.model_T_final)\n self.random_state = check_random_state(random_state)\n self.global_residualization = global_residualization\n self.global_res_cv = global_res_cv\n # Define nuisance estimators\n nuisance_estimator = DMLOrthoForest.nuisance_estimator_generator(\n self.model_T, self.model_Y, self.random_state, second_stage=False,\n global_residualization=self.global_residualization, discrete_treatment=discrete_treatment)\n second_stage_nuisance_estimator = DMLOrthoForest.nuisance_estimator_generator(\n self.model_T_final, self.model_Y_final, self.random_state, second_stage=True,\n global_residualization=self.global_residualization, discrete_treatment=discrete_treatment)\n # Define parameter estimators\n parameter_estimator = DMLOrthoForest.parameter_estimator_func\n second_stage_parameter_estimator = DMLOrthoForest.second_stage_parameter_estimator_gen(\n self.lambda_reg)\n # Define\n moment_and_mean_gradient_estimator = DMLOrthoForest.moment_and_mean_gradient_estimator_func\n if discrete_treatment:\n if categories != 'auto':\n categories = [categories] # OneHotEncoder expects a 2D array with features per column\n self._one_hot_encoder = OneHotEncoder(categories=categories, sparse=False, drop='first')\n super().__init__(\n nuisance_estimator,\n second_stage_nuisance_estimator,\n parameter_estimator,\n second_stage_parameter_estimator,\n moment_and_mean_gradient_estimator,\n n_trees=n_trees,\n min_leaf_size=min_leaf_size,\n max_depth=max_depth,\n subsample_ratio=subsample_ratio,\n bootstrap=bootstrap,\n n_jobs=n_jobs,\n 
discrete_treatment=discrete_treatment,\n categories=categories,\n random_state=self.random_state)\n\n def _combine(self, X, W):\n if X is None:\n return W\n if W is None:\n return X\n return np.hstack([X, W])\n\n # Need to redefine fit here for auto inference to work due to a quirk in how\n # wrap_fit is defined\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X, W=None, *, inference='auto'):\n \"\"\"Build an orthogonal random forest from a training set (Y, T, X, W).\n\n Parameters\n ----------\n Y : array-like, shape (n, )\n Outcome for the treatment policy.\n\n T : array-like, shape (n, d_t)\n Treatment policy.\n\n X : array-like, shape (n, d_x)\n Feature vector that captures heterogeneity.\n\n W : array-like, shape (n, d_w) or None (default=None)\n High-dimensional controls.\n\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`) and 'blb' (or an instance of :class:`BLBInference`)\n\n Returns\n -------\n self: an instance of self.\n \"\"\"\n Y, T, X, W = check_inputs(Y, T, X, W)\n\n if self.discrete_treatment:\n d_t_in = T.shape[1:]\n T = self._one_hot_encoder.fit_transform(T.reshape(-1, 1))\n self._d_t = T.shape[1:]\n self.transformer = FunctionTransformer(\n func=_EncoderWrapper(self._one_hot_encoder).encode,\n validate=False)\n\n if self.global_residualization:\n cv = check_cv(self.global_res_cv, y=T, classifier=self.discrete_treatment)\n cv = list(cv.split(X=X, y=T))\n Y = Y - _cross_val_predict(self.model_Y_final, self._combine(X, W), Y, cv=cv, safe=False).reshape(Y.shape)\n T = T - _cross_val_predict(self.model_T_final, self._combine(X, W), T, cv=cv, safe=False).reshape(T.shape)\n\n super().fit(Y, T, X=X, W=W, inference=inference)\n\n # weirdness of wrap_fit. We need to store d_t_in. 
But because wrap_fit decorates the parent\n # fit, we need to set explicitly d_t_in here after super fit is called.\n if self.discrete_treatment:\n self._d_t_in = d_t_in\n return self\n\n def const_marginal_effect(self, X):\n X = check_array(X)\n # Override to flatten output if T is flat\n effects = super().const_marginal_effect(X=X)\n return effects.reshape((-1,) + self._d_y + self._d_t)\n const_marginal_effect.__doc__ = BaseOrthoForest.const_marginal_effect.__doc__\n\n @staticmethod\n def nuisance_estimator_generator(model_T, model_Y, random_state=None, second_stage=True,\n global_residualization=False, discrete_treatment=False):\n \"\"\"Generate nuissance estimator given model inputs from the class.\"\"\"\n def nuisance_estimator(Y, T, X, W, sample_weight=None, split_indices=None):\n if global_residualization:\n return 0\n if discrete_treatment:\n # Check that all discrete treatments are represented\n if len(np.unique(T @ np.arange(1, T.shape[1] + 1))) < T.shape[1] + 1:\n return None\n # Nuissance estimates evaluated with cross-fitting\n this_random_state = check_random_state(random_state)\n if (split_indices is None) and second_stage:\n if discrete_treatment:\n # Define 2-fold iterator\n kfold_it = StratifiedKFold(n_splits=2, shuffle=True, random_state=this_random_state).split(X, T)\n # Check if there is only one example of some class\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n split_indices = list(kfold_it)[0]\n except Warning as warn:\n msg = str(warn)\n if \"The least populated class in y has only 1 members\" in msg:\n return None\n else:\n # Define 2-fold iterator\n kfold_it = KFold(n_splits=2, shuffle=True, random_state=this_random_state).split(X)\n split_indices = list(kfold_it)[0]\n if W is not None:\n X_tilde = np.concatenate((X, W), axis=1)\n else:\n X_tilde = X\n\n try:\n if second_stage:\n T_hat = _cross_fit(model_T, X_tilde, T, split_indices, sample_weight=sample_weight)\n Y_hat = _cross_fit(model_Y, X_tilde, Y, split_indices, sample_weight=sample_weight)\n else:\n # need safe=False when cloning for WeightedModelWrapper\n T_hat = clone(model_T, safe=False).fit(X_tilde, T).predict(X_tilde)\n Y_hat = clone(model_Y, safe=False).fit(X_tilde, Y).predict(X_tilde)\n except ValueError as exc:\n raise ValueError(\"The original error: {0}\".format(str(exc)) +\n \" This might be caused by too few sample in the tree leafs.\" +\n \" Try increasing the min_leaf_size.\")\n return Y_hat, T_hat\n\n return nuisance_estimator\n\n @staticmethod\n def parameter_estimator_func(Y, T, X,\n nuisance_estimates,\n sample_weight=None):\n \"\"\"Calculate the parameter of interest for points given by (Y, T) and corresponding nuisance estimates.\"\"\"\n # Compute residuals\n Y_res, T_res = DMLOrthoForest._get_conforming_residuals(Y, T, nuisance_estimates)\n # Compute coefficient by OLS on residuals\n param_estimate = LinearRegression(fit_intercept=False).fit(\n T_res, Y_res, sample_weight=sample_weight\n ).coef_\n # Parameter returned by LinearRegression is (d_T, )\n return param_estimate\n\n @staticmethod\n def second_stage_parameter_estimator_gen(lambda_reg):\n \"\"\"\n For the second stage parameter estimation we add a local linear correction. So\n we fit a local linear function as opposed to a local constant function. 
We also penalize\n the linear part to reduce variance.\n \"\"\"\n def parameter_estimator_func(Y, T, X,\n nuisance_estimates,\n sample_weight,\n X_single):\n \"\"\"Calculate the parameter of interest for points given by (Y, T) and corresponding nuisance estimates.\n\n The parameter is calculated around the feature vector given by `X_single`. `X_single` can be used to do\n local corrections on a preliminary parameter estimate.\n \"\"\"\n # Compute residuals\n Y_res, T_res = DMLOrthoForest._get_conforming_residuals(Y, T, nuisance_estimates)\n X_aug = np.hstack([np.ones((X.shape[0], 1)), X])\n XT_res = cross_product(T_res, X_aug)\n # Compute coefficient by OLS on residuals\n if sample_weight is not None:\n weighted_XT_res = sample_weight.reshape(-1, 1) * XT_res\n else:\n weighted_XT_res = XT_res / XT_res.shape[0]\n # ell_2 regularization\n diagonal = np.ones(XT_res.shape[1])\n diagonal[:T_res.shape[1]] = 0\n reg = lambda_reg * np.diag(diagonal)\n # Ridge regression estimate\n linear_coef_estimate = np.linalg.lstsq(np.matmul(weighted_XT_res.T, XT_res) + reg,\n np.matmul(weighted_XT_res.T, Y_res.reshape(-1, 1)),\n rcond=None)[0].flatten()\n X_aug = np.append([1], X_single)\n linear_coef_estimate = linear_coef_estimate.reshape((X_aug.shape[0], -1)).T\n # Parameter returned is of shape (d_T, )\n return np.dot(linear_coef_estimate, X_aug)\n\n return parameter_estimator_func\n\n @staticmethod\n def moment_and_mean_gradient_estimator_func(Y, T, X, W,\n nuisance_estimates,\n parameter_estimate):\n \"\"\"Calculate the moments and mean gradient at points given by (Y, T, X, W).\"\"\"\n # Return moments and gradients\n # Compute residuals\n Y_res, T_res = DMLOrthoForest._get_conforming_residuals(Y, T, nuisance_estimates)\n # Compute moments\n # Moments shape is (n, d_T)\n moments = (Y_res - np.matmul(T_res, parameter_estimate)).reshape(-1, 1) * T_res\n # Compute moment gradients\n mean_gradient = - np.matmul(T_res.T, T_res) / T_res.shape[0]\n return moments, mean_gradient\n\n @staticmethod\n def _get_conforming_residuals(Y, T, nuisance_estimates):\n if nuisance_estimates == 0:\n return reshape_Y_T(Y, T)\n # returns shape-conforming residuals\n Y_hat, T_hat = reshape_Y_T(*nuisance_estimates)\n Y, T = reshape_Y_T(Y, T)\n Y_res, T_res = Y - Y_hat, T - T_hat\n return Y_res, T_res\n\n\nclass DROrthoForest(BaseOrthoForest):\n \"\"\"\n OrthoForest for discrete treatments using the doubly robust moment function.\n\n A two-forest approach for learning heterogeneous treatment effects using\n kernel two stage estimation.\n\n Parameters\n ----------\n n_trees : integer, optional (default=500)\n Number of causal estimators in the forest.\n\n min_leaf_size : integer, optional (default=10)\n The minimum number of samples in a leaf.\n\n max_depth : integer, optional (default=10)\n The maximum number of splits to be performed when expanding the tree.\n\n subsample_ratio : float, optional (default=0.7)\n The ratio of the total sample to be used when training a causal tree.\n Values greater than 1.0 will be considered equal to 1.0.\n Parameter is ignored when bootstrap=True.\n\n bootstrap : boolean, optional (default=False)\n Whether to use bootstrap subsampling.\n\n lambda_reg : float, optional (default=0.01)\n The regularization coefficient in the ell_2 penalty imposed on the\n locally linear part of the second stage fit. 
This is not applied to\n the local intercept, only to the coefficient of the linear component.\n\n propensity_model : estimator, optional (default=sklearn.linear_model.LogisticRegression(penalty='l1',\\\n solver='saga',\\\n multi_class='auto'))\n Model for estimating propensity of treatment at each leaf.\n Will be trained on features and controls (concatenated). Must implement `fit` and `predict_proba` methods.\n\n model_Y : estimator, optional (default=sklearn.linear_model.LassoCV(cv=3))\n Estimator for learning potential outcomes at each leaf.\n Will be trained on features, controls and one hot encoded treatments (concatenated).\n If different models per treatment arm are desired, see the :class:`.MultiModelWrapper`\n helper class. The model(s) must implement `fit` and `predict` methods.\n\n propensity_model_final : estimator, optional (default=None)\n Model for estimating propensity of treatment at at prediction time.\n Will be trained on features and controls (concatenated). Must implement `fit` and `predict_proba` methods.\n If parameter is set to ``None``, it defaults to the value of `propensity_model` parameter.\n\n model_Y_final : estimator, optional (default=None)\n Estimator for learning potential outcomes at prediction time.\n Will be trained on features, controls and one hot encoded treatments (concatenated).\n If different models per treatment arm are desired, see the :class:`.MultiModelWrapper`\n helper class. The model(s) must implement `fit` and `predict` methods.\n If parameter is set to ``None``, it defaults to the value of `model_Y` parameter.\n\n categories: 'auto' or list\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n n_jobs : int, optional (default=-1)\n The number of jobs to run in parallel for both :meth:`fit` and :meth:`effect`.\n ``-1`` means using all processors. 
Since OrthoForest methods are\n computationally heavy, it is recommended to set `n_jobs` to -1.\n\n random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n\n \"\"\"\n\n def __init__(self,\n n_trees=500,\n min_leaf_size=10, max_depth=10,\n subsample_ratio=0.7,\n bootstrap=False,\n lambda_reg=0.01,\n propensity_model=LogisticRegression(penalty='l1', solver='saga',\n multi_class='auto'), # saga solver supports l1\n model_Y=WeightedLassoCVWrapper(cv=3),\n propensity_model_final=None,\n model_Y_final=None,\n categories='auto',\n n_jobs=-1,\n random_state=None):\n # Copy and/or define models\n self.propensity_model = clone(propensity_model, safe=False)\n self.model_Y = clone(model_Y, safe=False)\n self.propensity_model_final = clone(propensity_model_final, safe=False)\n self.model_Y_final = clone(model_Y_final, safe=False)\n if self.propensity_model_final is None:\n self.propensity_model_final = clone(self.propensity_model, safe=False)\n if self.model_Y_final is None:\n self.model_Y_final = clone(self.model_Y, safe=False)\n self.random_state = check_random_state(random_state)\n\n nuisance_estimator = DROrthoForest.nuisance_estimator_generator(\n self.propensity_model, self.model_Y, self.random_state, second_stage=False)\n second_stage_nuisance_estimator = DROrthoForest.nuisance_estimator_generator(\n self.propensity_model_final, self.model_Y_final, self.random_state, second_stage=True)\n # Define parameter estimators\n parameter_estimator = DROrthoForest.parameter_estimator_func\n second_stage_parameter_estimator = DROrthoForest.second_stage_parameter_estimator_gen(\n lambda_reg)\n # Define moment and mean gradient estimator\n moment_and_mean_gradient_estimator = DROrthoForest.moment_and_mean_gradient_estimator_func\n if categories != 'auto':\n categories = [categories] # OneHotEncoder expects a 2D array with features per column\n self._one_hot_encoder = OneHotEncoder(categories=categories, sparse=False, drop='first')\n\n super().__init__(\n nuisance_estimator,\n second_stage_nuisance_estimator,\n parameter_estimator,\n second_stage_parameter_estimator,\n moment_and_mean_gradient_estimator,\n discrete_treatment=True,\n categories=categories,\n n_trees=n_trees,\n min_leaf_size=min_leaf_size,\n max_depth=max_depth,\n subsample_ratio=subsample_ratio,\n bootstrap=bootstrap,\n n_jobs=n_jobs,\n random_state=self.random_state)\n\n @_deprecate_positional(\"X and W should be passed by keyword only. In a future release \"\n \"we will disallow passing X and W by position.\", ['X', 'W'])\n def fit(self, Y, T, X, W=None, *, inference='auto'):\n \"\"\"Build an orthogonal random forest from a training set (Y, T, X, W).\n\n Parameters\n ----------\n Y : array-like, shape (n, )\n Outcome for the treatment policy. Must be a vector.\n\n T : array-like, shape (n, )\n Discrete treatment policy vector. The treatment policy should be a set of consecutive integers\n starting with `0`, where `0` denotes the control group. 
Otherwise, the treatment policies\n will be ordered lexicographically, with the smallest value being considered the control group.\n\n X : array-like, shape (n, d_x)\n Feature vector that captures heterogeneity.\n\n W : array-like, shape (n, d_w) or None (default=None)\n High-dimensional controls.\n\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`) and 'blb' (or an instance of :class:`BLBInference`)\n\n Returns\n -------\n self: an instance of self.\n \"\"\"\n # Check that T is shape (n, )\n # Check T is numeric\n T = self._check_treatment(T)\n d_t_in = T.shape[1:]\n # Train label encoder\n T = self._one_hot_encoder.fit_transform(T.reshape(-1, 1))\n self._d_t = T.shape[1:]\n self.transformer = FunctionTransformer(\n func=_EncoderWrapper(self._one_hot_encoder).encode,\n validate=False)\n\n # Call `fit` from parent class\n super().fit(Y, T, X=X, W=W, inference=inference)\n\n # weirdness of wrap_fit. We need to store d_t_in. But because wrap_fit decorates the parent\n # fit, we need to set explicitly d_t_in here after super fit is called.\n self._d_t_in = d_t_in\n\n return self\n\n def const_marginal_effect(self, X):\n X = check_array(X)\n # Override to flatten output if T is flat\n effects = super().const_marginal_effect(X=X)\n return effects.reshape((-1,) + self._d_y + self._d_t)\n const_marginal_effect.__doc__ = BaseOrthoForest.const_marginal_effect.__doc__\n\n @staticmethod\n def nuisance_estimator_generator(propensity_model, model_Y, random_state=None, second_stage=False):\n \"\"\"Generate nuissance estimator given model inputs from the class.\"\"\"\n def nuisance_estimator(Y, T, X, W, sample_weight=None, split_indices=None):\n # Expand one-hot encoding to include the zero treatment\n ohe_T = np.hstack([np.all(1 - T, axis=1, keepdims=True), T])\n # Test that T contains all treatments. 
If not, return None\n T = ohe_T @ np.arange(ohe_T.shape[1])\n if len(np.unique(T)) < ohe_T.shape[1]:\n return None\n # Nuissance estimates evaluated with cross-fitting\n this_random_state = check_random_state(random_state)\n if (split_indices is None) and second_stage:\n # Define 2-fold iterator\n kfold_it = StratifiedKFold(n_splits=2, shuffle=True, random_state=this_random_state).split(X, T)\n # Check if there is only one example of some class\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n split_indices = list(kfold_it)[0]\n except Warning as warn:\n msg = str(warn)\n if \"The least populated class in y has only 1 members\" in msg:\n return None\n if W is not None:\n X_tilde = np.concatenate((X, W), axis=1)\n else:\n X_tilde = X\n try:\n if not second_stage:\n # No need to crossfit for internal nodes\n propensity_model_clone = clone(propensity_model, safe=False)\n propensity_model_clone.fit(X_tilde, T)\n propensities = propensity_model_clone.predict_proba(X_tilde)\n Y_hat = _group_predict(X_tilde, ohe_T.shape[1],\n clone(model_Y, safe=False).fit(np.hstack([X_tilde, ohe_T]), Y).predict)\n else:\n propensities = _cross_fit(propensity_model, X_tilde, T, split_indices,\n sample_weight=sample_weight, predict_func_name='predict_proba')\n Y_hat = _group_cross_fit(model_Y, X_tilde, Y, ohe_T, split_indices, sample_weight=sample_weight)\n except ValueError as exc:\n raise ValueError(\"The original error: {0}\".format(str(exc)) +\n \" This might be caused by too few sample in the tree leafs.\" +\n \" Try increasing the min_leaf_size.\")\n return Y_hat, propensities\n return nuisance_estimator\n\n @staticmethod\n def parameter_estimator_func(Y, T, X,\n nuisance_estimates,\n sample_weight=None):\n \"\"\"Calculate the parameter of interest for points given by (Y, T) and corresponding nuisance estimates.\"\"\"\n # Compute partial moments\n pointwise_params = DROrthoForest._partial_moments(Y, T, nuisance_estimates)\n param_estimate = np.average(pointwise_params, weights=sample_weight, axis=0)\n # If any of the values in the parameter estimate is nan, return None\n return param_estimate\n\n @staticmethod\n def second_stage_parameter_estimator_gen(lambda_reg):\n \"\"\"\n For the second stage parameter estimation we add a local linear correction. So\n we fit a local linear function as opposed to a local constant function. We also penalize\n the linear part to reduce variance.\n \"\"\"\n def parameter_estimator_func(Y, T, X,\n nuisance_estimates,\n sample_weight,\n X_single):\n \"\"\"Calculate the parameter of interest for points given by (Y, T) and corresponding nuisance estimates.\n\n The parameter is calculated around the feature vector given by `X_single`. 
`X_single` can be used to do\n local corrections on a preliminary parameter estimate.\n \"\"\"\n # Compute partial moments\n pointwise_params = DROrthoForest._partial_moments(Y, T, nuisance_estimates)\n X_aug = np.hstack([np.ones((X.shape[0], 1)), X])\n # Compute coefficient by OLS on residuals\n if sample_weight is not None:\n weighted_X_aug = sample_weight.reshape(-1, 1) * X_aug\n else:\n weighted_X_aug = X_aug / X_aug.shape[0]\n # ell_2 regularization\n diagonal = np.ones(X_aug.shape[1])\n diagonal[0] = 0\n reg = lambda_reg * np.diag(diagonal)\n # Ridge regression estimate\n linear_coef_estimate = np.linalg.lstsq(np.matmul(weighted_X_aug.T, X_aug) + reg,\n np.matmul(weighted_X_aug.T, pointwise_params),\n rcond=None)[0].flatten()\n X_aug = np.append([1], X_single)\n linear_coef_estimate = linear_coef_estimate.reshape((X_aug.shape[0], -1)).T\n # Parameter returned is of shape (d_T, )\n return np.dot(linear_coef_estimate, X_aug)\n\n return parameter_estimator_func\n\n @staticmethod\n def moment_and_mean_gradient_estimator_func(Y, T, X, W,\n nuisance_estimates,\n parameter_estimate):\n \"\"\"Calculate the moments and mean gradient at points given by (Y, T, X, W).\"\"\"\n # Return moments and gradients\n # Compute partial moments\n partial_moments = DROrthoForest._partial_moments(Y, T, nuisance_estimates)\n # Compute moments\n # Moments shape is (n, d_T-1)\n moments = partial_moments - parameter_estimate\n # Compute moment gradients\n n_T = nuisance_estimates[0].shape[1] - 1\n mean_gradient = np.diag(np.ones(n_T) * (-1))\n return moments, mean_gradient\n\n @staticmethod\n def _partial_moments(Y, T, nuisance_estimates):\n Y_hat, propensities = nuisance_estimates\n partial_moments = np.zeros((len(Y), Y_hat.shape[1] - 1))\n T = T @ np.arange(1, T.shape[1] + 1)\n mask_0 = (T == 0)\n for i in range(0, Y_hat.shape[1] - 1):\n # Need to calculate this in an elegant way for when propensity is 0\n partial_moments[:, i] = Y_hat[:, i + 1] - Y_hat[:, 0]\n mask_i = (T == (i + 1))\n partial_moments[:, i][mask_i] += (Y - Y_hat[:, i + 1])[mask_i] / propensities[:, i + 1][mask_i]\n partial_moments[:, i][mask_0] -= (Y - Y_hat[:, 0])[mask_0] / propensities[:, 0][mask_0]\n return partial_moments\n\n def _check_treatment(self, T):\n try:\n # This will flatten T\n T = column_or_1d(T)\n except Exception as exc:\n raise ValueError(\"Expected array of shape ({n}, ), but got {T_shape}\".format(n=len(T), T_shape=T.shape))\n # Check that T is numeric\n try:\n T.astype(float)\n except Exception as exc:\n raise ValueError(\"Expected numeric array but got non-numeric types.\")\n return T\n\n\nclass BLBInference(Inference):\n \"\"\"\n Bootstrap-of-Little-Bags inference implementation for the OrthoForest classes.\n\n This class can only be used for inference with any estimator derived from :class:`BaseOrthoForest`.\n\n Parameters\n ----------\n estimator : :class:`BaseOrthoForest`\n Estimator to perform inference on. 
Must be a child class of :class:`BaseOrthoForest`.\n \"\"\"\n\n def fit(self, estimator, *args, **kwargs):\n \"\"\"\n Fits the inference model.\n\n This is called after the estimator's fit.\n \"\"\"\n self._estimator = estimator\n # Test whether the input estimator is supported\n if not hasattr(self._estimator, \"_predict\"):\n raise TypeError(\"Unsupported estimator of type {}.\".format(self._estimator.__class__.__name__) +\n \" Estimators must implement the '_predict' method with the correct signature.\")\n return self\n\n def const_marginal_effect_interval(self, X=None, *, alpha=0.1):\n \"\"\" Confidence intervals for the quantities :math:`\\\\theta(X)` produced\n by the model. Available only when ``inference`` is ``blb`` or ``auto``, when\n calling the fit method.\n\n Parameters\n ----------\n X: optional (m, d_x) matrix or None (Default=None)\n Features for each sample\n\n alpha: optional float in [0, 1] (Default=0.1)\n The overall level of confidence of the reported interval.\n The alpha/2, 1-alpha/2 confidence interval is reported.\n\n Returns\n -------\n lower, upper : tuple(type of :meth:`const_marginal_effect(X)<const_marginal_effect>` ,\\\n type of :meth:`const_marginal_effect(X)<const_marginal_effect>` )\n The lower and the upper bounds of the confidence interval for each quantity.\n \"\"\"\n X = check_array(X)\n params_and_cov = self._predict_wrapper(X)\n # Calculate confidence intervals for the parameter (marginal effect)\n lower = alpha / 2\n upper = 1 - alpha / 2\n param_lower = [param + np.apply_along_axis(lambda s: norm.ppf(lower, scale=s), 0, np.sqrt(np.diag(cov_mat)))\n for (param, cov_mat) in params_and_cov]\n param_upper = [param + np.apply_along_axis(lambda s: norm.ppf(upper, scale=s), 0, np.sqrt(np.diag(cov_mat)))\n for (param, cov_mat) in params_and_cov]\n param_lower, param_upper = np.asarray(param_lower), np.asarray(param_upper)\n return param_lower.reshape((-1,) + self._estimator._d_y + self._estimator._d_t),\\\n param_upper.reshape((-1,) + self._estimator._d_y + self._estimator._d_t)\n\n def const_marginal_effect_inference(self, X=None):\n \"\"\" Inference results for the quantities :math:`\\\\theta(X)` produced\n by the model. Available only when ``inference`` is ``blb`` or ``auto``, when\n calling the fit method.\n\n Parameters\n ----------\n X: optional (m, d_x) matrix or None (Default=None)\n Features for each sample\n\n Returns\n -------\n InferenceResults: instance of :class:`~econml.inference.NormalInferenceResults`\n The inference results instance contains prediction and prediction standard error and\n can on demand calculate confidence interval, z statistic and p value. 
It can also output\n a dataframe summary of these inference results.\n \"\"\"\n X = check_array(X)\n params, cov = zip(*(self._predict_wrapper(X)))\n params = np.array(params).reshape((-1,) + self._estimator._d_y + self._estimator._d_t)\n stderr = np.sqrt(np.diagonal(np.array(cov), axis1=1, axis2=2))\n stderr = stderr.reshape((-1,) + self._estimator._d_y + self._estimator._d_t)\n return NormalInferenceResults(d_t=self._estimator._d_t[0] if self._estimator._d_t else 1,\n d_y=self._estimator._d_y[0] if self._estimator._d_y else 1,\n pred=params, pred_stderr=stderr, inf_type='effect')\n\n def _effect_inference_helper(self, X, T0, T1):\n X, T0, T1 = self._estimator._expand_treatments(*check_input_arrays(X, T0, T1))\n dT = (T1 - T0) if T0.ndim == 2 else (T1 - T0).reshape(-1, 1)\n params_and_cov = self._predict_wrapper(X)\n # Calculate confidence intervals for the effect\n # Calculate the effects\n eff = np.asarray([np.dot(params_and_cov[i][0], dT[i]) for i in range(X.shape[0])])\n # Calculate the standard deviations for the effects\n scales = np.asarray([np.sqrt(dT[i] @ params_and_cov[i][1] @ dT[i]) for i in range(X.shape[0])])\n return eff.reshape((-1,) + self._estimator._d_y), scales.reshape((-1,) + self._estimator._d_y)\n\n def effect_interval(self, X=None, *, T0=0, T1=1, alpha=0.1):\n \"\"\" Confidence intervals for the quantities :math:`\\\\tau(X, T0, T1)` produced\n by the model. Available only when ``inference`` is ``blb`` or ``auto``, when\n calling the fit method.\n\n Parameters\n ----------\n X: optional (m, d_x) matrix\n Features for each sample\n T0: optional (m, d_t) matrix or vector of length m (Default=0)\n Base treatments for each sample\n T1: optional (m, d_t) matrix or vector of length m (Default=1)\n Target treatments for each sample\n alpha: optional float in [0, 1] (Default=0.1)\n The overall level of confidence of the reported interval.\n The alpha/2, 1-alpha/2 confidence interval is reported.\n\n Returns\n -------\n lower, upper : tuple(type of :meth:`effect(X, T0, T1)<effect>`, type of :meth:`effect(X, T0, T1))<effect>` )\n The lower and the upper bounds of the confidence interval for each quantity.\n \"\"\"\n eff, scales = self._effect_inference_helper(X, T0, T1)\n lower = alpha / 2\n upper = 1 - alpha / 2\n effect_lower = eff + np.apply_along_axis(lambda s: norm.ppf(lower, scale=s), 0, scales)\n effect_upper = eff + np.apply_along_axis(lambda s: norm.ppf(upper, scale=s), 0, scales)\n return effect_lower, effect_upper\n\n def effect_inference(self, X=None, *, T0=0, T1=1):\n \"\"\" Inference results for the quantities :math:`\\\\tau(X, T0, T1)` produced\n by the model. Available only when ``inference`` is ``blb`` or ``auto``, when\n calling the fit method.\n\n Parameters\n ----------\n X: optional (m, d_x) matrix\n Features for each sample\n T0: optional (m, d_t) matrix or vector of length m (Default=0)\n Base treatments for each sample\n T1: optional (m, d_t) matrix or vector of length m (Default=1)\n Target treatments for each sample\n\n Returns\n -------\n InferenceResults: instance of :class:`~econml.inference.NormalInferenceResults`\n The inference results instance contains prediction and prediction standard error and\n can on demand calculate confidence interval, z statistic and p value. 
It can also output\n a dataframe summary of these inference results.\n \"\"\"\n eff, scales = self._effect_inference_helper(X, T0, T1)\n return NormalInferenceResults(d_t=1, d_y=self._estimator._d_y[0] if self._estimator._d_y else 1,\n pred=eff, pred_stderr=scales, inf_type='effect')\n\n def _predict_wrapper(self, X=None):\n return self._estimator._predict(X, stderr=True)\n\n\n@deprecated(\"The ContinuousTreatmentOrthoForest class has been renamed to DMLOrthoForest; \"\n \"an upcoming release will remove support for the old name\")\nclass ContinuousTreatmentOrthoForest(DMLOrthoForest):\n pass\n\n\n@deprecated(\"The DiscreteTreatmentOrthoForest class has been renamed to DROrthoForest; \"\n \"an upcoming release will remove support for the old name\")\nclass DiscreteTreatmentOrthoForest(DROrthoForest):\n pass\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Provides a non-parametric two-stage least squares instrumental variable estimator.\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\nfrom sklearn import clone\nfrom sklearn.linear_model import LinearRegression\nfrom .utilities import shape, transpose, reshape, cross_product, ndim, size, _deprecate_positional\nfrom .cate_estimator import BaseCateEstimator, LinearCateEstimator\nfrom numpy.polynomial.hermite_e import hermeval\nfrom sklearn.base import TransformerMixin\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom itertools import product\n\n\nclass HermiteFeatures(TransformerMixin):\n \"\"\"\n Featurizer that returns(unscaled) Hermite function evaluations.\n\n The evaluated functions are of degrees 0..`degree`, differentiated `shift` times.\n\n If the input has shape(n, x) and `joint` is False, the output will have shape(n, (`degree`+ 1)×x) if `shift` is 0.\n If the input has shape(n, x) and `joint` is True, the output will have shape(n, (`degree`+ 1) ^ x) if `shift` is 0.\n In either case, if `shift` is nonzero there will be `shift` additional dimensions of size x\n between the first and last.\n \"\"\"\n\n def __init__(self, degree, shift=0, joint=False):\n self._degree = degree\n self._shift = shift\n self._joint = joint\n\n def _column_feats(self, X, shift):\n \"\"\"\n Apply Hermite function evaluations of degrees 0..`degree` differentiated `shift` times.\n\n When applied to the column `X` of shape(n,), the resulting array has shape(n, (degree + 1)).\n \"\"\"\n assert ndim(X) == 1\n # this will have dimension (d,) + shape(X)\n coeffs = np.identity(self._degree + shift + 1)[:, shift:]\n feats = ((-1) ** shift) * hermeval(X, coeffs) * np.exp(-X * X / 2)\n # send the first dimension to the end\n return transpose(feats)\n\n def fit(self, X):\n \"\"\"Fits the data(a NOP for this class) and returns self.\"\"\"\n return self\n\n def transform(self, X):\n \"\"\"\n Transform the data by applying the appropriate Hermite functions.\n\n Parameters\n ----------\n X: array_like\n 2-dimensional array of input features\n\n Returns\n -------\n The transformed data\n \"\"\"\n assert ndim(X) == 2\n n = shape(X)[0]\n ncols = shape(X)[1]\n columns = []\n for indices in product(*[range(ncols) for i in range(self._shift)]):\n if self._joint:\n columns.append(cross_product(*[self._column_feats(X[:, i], indices.count(i))\n for i in range(shape(X)[1])]))\n else:\n indices = set(indices)\n if self._shift == 0: # return features for all columns:\n columns.append(np.hstack([self._column_feats(X[:, i], self._shift) for i in range(shape(X)[1])]))\n # columns are featurized independently; partial derivatives are only non-zero\n # when taken with respect to the same column each time\n elif len(indices) == 1:\n index = list(indices)[0]\n feats = self._column_feats(X[:, index], self._shift)\n columns.append(np.hstack([feats if i == index else np.zeros(shape(feats))\n for i in range(shape(X)[1])]))\n else:\n columns.append(np.zeros((n, (self._degree + 1) * ncols)))\n return reshape(np.hstack(columns), (n,) + (ncols,) * self._shift + (-1,))\n\n\nclass DPolynomialFeatures(TransformerMixin):\n \"\"\"\n Featurizer that returns the derivatives of :class:`~sklearn.preprocessing.PolynomialFeatures` features in\n a way that's compativle with the expectations of :class:`.NonparametricTwoStageLeastSquares`'s\n `dt_featurizer` parameter.\n\n If the input has shape `(n, x)` and\n 
:meth:`PolynomialFeatures.transform<sklearn.preprocessing.PolynomialFeatures.transform>` returns an output\n of shape `(n, f)`, then :meth:`.transform` will return an array of shape `(n, x, f)`.\n\n Parameters\n ----------\n degree: integer, default = 2\n The degree of the polynomial features.\n\n interaction_only: boolean, default = False\n If true, only derivatives of interaction features are produced: features that are products of at most degree\n distinct input features (so not `x[1] ** 2`, `x[0] * x[2] ** 3`, etc.).\n\n include_bias: boolean, default = True\n If True (default), then include the derivative of a bias column, the feature in which all polynomial powers\n are zero.\n \"\"\"\n\n def __init__(self, degree=2, interaction_only=False, include_bias=True):\n self.F = PolynomialFeatures(degree=degree, interaction_only=interaction_only, include_bias=include_bias)\n\n def fit(self, X, y=None):\n \"\"\"\n Compute number of output features.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The data.\n y : array, optional\n Not used\n\n Returns\n -------\n self : instance\n \"\"\"\n return self\n\n def transform(self, X):\n \"\"\"\n Transform data to derivatives of polynomial features\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The data to transform, row by row.\n\n Returns\n -------\n XP: array-like, shape (n_samples, n_features, n_output_features)\n The matrix of features, where `n_output_features` is the number of features that\n would be returned from :class:`~sklearn.preprocessing.PolynomialFeatures`.\n \"\"\"\n self.F.fit(X)\n powers = self.F.powers_\n result = np.zeros(X.shape + (self.F.n_output_features_,))\n for i in range(X.shape[1]):\n p = powers.copy()\n c = powers[:, i]\n p[:, i] -= 1\n M = np.float_power(X[:, np.newaxis, :], p[np.newaxis, :, :])\n result[:, i, :] = c[np.newaxis, :] * np.prod(M, axis=-1)\n return result\n\n\ndef _add_ones(arr):\n \"\"\"Add a column of ones to the front of an array.\"\"\"\n return np.hstack([np.ones((shape(arr)[0], 1)), arr])\n\n\ndef _add_zeros(arr):\n \"\"\"Add a column of zeros to the front of an array.\"\"\"\n return np.hstack([np.zeros((shape(arr)[0], 1)), arr])\n\n\nclass NonparametricTwoStageLeastSquares(BaseCateEstimator):\n \"\"\"\n Non-parametric instrumental variables estimator.\n\n Supports the use of arbitrary featurizers for the features, treatments, and instruments.\n\n Parameters\n ----------\n t_featurizer: transformer\n Featurizer used to transform the treatments\n\n x_featurizer: transformer\n Featurizer used to transform the raw features\n\n z_featurizer: transformer\n Featurizer used to transform the instruments\n\n dt_featurizer: transformer\n Featurizer used to transform the treatments for the computation of the marginal effect.\n This should produce a 3-dimensional array, containing the per-treatment derivative of\n each transformed treatment. 
That is, given a treatment array of shape(n, dₜ),\n the output should have shape(n, dₜ, fₜ), where fₜ is the number of columns produced by `t_featurizer`.\n\n \"\"\"\n\n def __init__(self, t_featurizer, x_featurizer, z_featurizer, dt_featurizer):\n self._t_featurizer = clone(t_featurizer, safe=False)\n self._x_featurizer = clone(x_featurizer, safe=False)\n self._z_featurizer = clone(z_featurizer, safe=False)\n self._dt_featurizer = clone(dt_featurizer, safe=False)\n # don't fit intercept; manually add column of ones to the data instead;\n # this allows us to ignore the intercept when computing marginal effects\n self._model_T = LinearRegression(fit_intercept=False)\n self._model_Y = LinearRegression(fit_intercept=False)\n super().__init__()\n\n @_deprecate_positional(\"X, W, and Z should be passed by keyword only. In a future release \"\n \"we will disallow passing X, W, and Z by position.\", ['X', 'W', 'Z'])\n @BaseCateEstimator._wrap_fit\n def fit(self, Y, T, X, W, Z, *, inference=None):\n \"\"\"\n Estimate the counterfactual model from data, i.e. estimates functions τ(·, ·, ·), ∂τ(·, ·).\n\n Parameters\n ----------\n Y: (n × d_y) matrix\n Outcomes for each sample\n T: (n × dₜ) matrix\n Treatments for each sample\n X: optional(n × dₓ) matrix\n Features for each sample\n W: optional(n × d_w) matrix\n Controls for each sample\n Z: optional(n × d_z) matrix\n Instruments for each sample\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`)\n\n Returns\n -------\n self\n\n \"\"\"\n if X is None:\n X = np.empty((shape(Y)[0], 0))\n if W is None:\n W = np.empty((shape(Y)[0], 0))\n assert shape(Y)[0] == shape(T)[0] == shape(X)[0] == shape(W)[0] == shape(Z)[0]\n\n # make T 2D if if was a vector\n if ndim(T) == 1:\n T = reshape(T, (-1, 1))\n\n # store number of columns of W so that we can create correctly shaped zero array in effect and marginal effect\n self._d_w = shape(W)[1]\n\n # two stage approximation\n # first, get basis expansions of T, X, and Z\n ft_X = self._x_featurizer.fit_transform(X)\n ft_Z = self._z_featurizer.fit_transform(Z)\n ft_T = self._t_featurizer.fit_transform(T)\n # TODO: is it right that the effective number of intruments is the\n # product of ft_X and ft_Z, not just ft_Z?\n assert shape(ft_T)[1] <= shape(ft_X)[1] * shape(ft_Z)[1], (\"There can be no more T features than the product \"\n \"of the number of X and Z features; otherwise \"\n \"there is not enough information to identify their \"\n \"structure\")\n\n # regress T expansion on X,Z expansions concatenated with W\n features = _add_ones(np.hstack([W, cross_product(ft_X, ft_Z)]))\n self._model_T.fit(features, ft_T)\n # predict ft_T from interacted ft_X, ft_Z\n ft_T_hat = self._model_T.predict(features)\n self._model_Y.fit(_add_ones(np.hstack([W, cross_product(ft_T_hat, ft_X)])), Y)\n\n def effect(self, X=None, T0=0, T1=1):\n \"\"\"\n Calculate the heterogeneous treatment effect τ(·,·,·).\n\n The effect is calculated between the two treatment points\n conditional on a vector of features on a set of m test samples {T0ᵢ, T1ᵢ, Xᵢ}.\n\n Parameters\n ----------\n T0: (m × dₜ) matrix or vector of length m\n Base treatments for each sample\n T1: (m × dₜ) matrix or vector of length m\n Target treatments for each sample\n X: optional (m × dₓ) matrix\n Features for each sample\n\n Returns\n -------\n τ: (m × d_y) matrix\n Heterogeneous treatment effects on each outcome for each sample\n Note that when Y 
is a vector rather than a 2-dimensional array, the corresponding\n singleton dimension will be collapsed (so this method will return a vector)\n\n \"\"\"\n if ndim(T0) == 0:\n T0 = np.full((1 if X is None else shape(X)[0],) + self._d_t, T0)\n if ndim(T1) == 0:\n T1 = np.full((1 if X is None else shape(X)[0],) + self._d_t, T1)\n if ndim(T0) == 1:\n T0 = reshape(T0, (-1, 1))\n if ndim(T1) == 1:\n T1 = reshape(T1, (-1, 1))\n if X is None:\n X = np.empty((shape(T0)[0], 0))\n assert shape(T0) == shape(T1)\n assert shape(T0)[0] == shape(X)[0]\n\n W = np.zeros((shape(T0)[0], self._d_w)) # can set arbitrarily since values will cancel\n ft_X = self._x_featurizer.transform(X)\n ft_T0 = self._t_featurizer.transform(T0)\n ft_T1 = self._t_featurizer.transform(T1)\n Y0 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T0, ft_X)])))\n Y1 = self._model_Y.predict(_add_ones(np.hstack([W, cross_product(ft_T1, ft_X)])))\n return Y1 - Y0\n\n def marginal_effect(self, T, X=None):\n \"\"\"\n Calculate the heterogeneous marginal effect ∂τ(·, ·).\n\n The marginal effect is calculated around a base treatment\n point conditional on a vector of features on a set of m test samples {Tᵢ, Xᵢ}.\n\n Parameters\n ----------\n T: (m × dₜ) matrix\n Base treatments for each sample\n X: optional(m × dₓ) matrix\n Features for each sample\n\n Returns\n -------\n grad_tau: (m × d_y × dₜ) array\n Heterogeneous marginal effects on each outcome for each sample\n Note that when Y or T is a vector rather than a 2-dimensional array,\n the corresponding singleton dimensions in the output will be collapsed\n (e.g. if both are vectors, then the output of this method will also be a vector)\n \"\"\"\n if X is None:\n X = np.empty((shape(T)[0], 0))\n assert shape(T)[0] == shape(X)[0]\n\n ft_X = self._x_featurizer.transform(X)\n n = shape(T)[0]\n dT = self._dt_featurizer.transform(T if ndim(T) == 2 else reshape(T, (-1, 1)))\n W = np.zeros((size(T), self._d_w))\n # dT should be an n×dₜ×fₜ array (but if T was a vector, or if there is only one feature,\n # dT may be only 2-dimensional)\n # promote dT to 3D if necessary (e.g. if T was a vector)\n if ndim(dT) < 3:\n dT = reshape(dT, (n, 1, shape(dT)[1]))\n\n # reshape ft_X and dT to allow cross product (result has shape n×dₜ×fₜ×f_x)\n features = reshape(ft_X, (n, 1, 1, -1)) * reshape(dT, shape(dT) + (1,))\n features = transpose(features, [0, 1, 3, 2]) # swap last two dims to match cross_product\n features = reshape(features, (size(T), -1))\n output = self._model_Y.predict(_add_zeros(np.hstack([W, features])))\n output = reshape(output, shape(T) + shape(output)[1:])\n if ndim(output) == 3:\n return transpose(output, (0, 2, 1)) # transpose trailing T and Y dims\n else:\n return output\n"
] |
[
[
"sklearn.linear_model.LogisticRegression",
"numpy.random.seed",
"numpy.isfinite",
"numpy.isnan",
"numpy.asarray",
"numpy.isposinf",
"sklearn.preprocessing.PolynomialFeatures",
"numpy.testing.assert_array_equal",
"numpy.isneginf",
"numpy.random.normal",
"sklearn.base.clone",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"numpy.std",
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.diag",
"numpy.dot",
"scipy.stats.norm.ppf",
"numpy.sqrt",
"numpy.asarray",
"sklearn.model_selection.KFold",
"numpy.concatenate",
"sklearn.clone",
"numpy.all",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.matmul",
"sklearn.model_selection.StratifiedKFold",
"numpy.ceil",
"sklearn.utils.column_or_1d",
"numpy.zeros",
"numpy.linalg.inv",
"sklearn.model_selection.check_cv",
"numpy.append",
"numpy.array",
"sklearn.linear_model.LogisticRegression",
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.utils.check_array",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"sklearn.linear_model.LinearRegression",
"numpy.average",
"sklearn.utils.check_random_state",
"numpy.vstack"
],
[
"numpy.hstack",
"numpy.polynomial.hermite_e.hermeval",
"numpy.float_power",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.clone",
"numpy.identity",
"sklearn.linear_model.LinearRegression",
"numpy.prod",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
satyajitghana/thetensorclan-backend-heroku
|
[
"893d4f1f8461934d31b8d6f63b7daaff0f3799d4"
] |
[
"helper_repositories/human_pose_estimation_pytorch/pose_lib/core/evaluate.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom pose_lib.core.inference import get_max_preds\n\n\ndef calc_dists(preds, target, normalize):\n preds = preds.astype(np.float32)\n target = target.astype(np.float32)\n dists = np.zeros((preds.shape[1], preds.shape[0]))\n for n in range(preds.shape[0]):\n for c in range(preds.shape[1]):\n if target[n, c, 0] > 1 and target[n, c, 1] > 1:\n normed_preds = preds[n, c, :] / normalize[n]\n normed_targets = target[n, c, :] / normalize[n]\n dists[c, n] = np.linalg.norm(normed_preds - normed_targets)\n else:\n dists[c, n] = -1\n return dists\n\n\ndef dist_acc(dists, thr=0.5):\n \"\"\" Return percentage below threshold while ignoring values with a -1 \"\"\"\n dist_cal = np.not_equal(dists, -1)\n num_dist_cal = dist_cal.sum()\n if num_dist_cal > 0:\n return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal\n else:\n return -1\n\n\ndef accuracy(output, target, hm_type=\"gaussian\", thr=0.5):\n \"\"\"\n Calculate accuracy according to PCK,\n but uses ground truth heatmap rather than x,y locations\n First value to be returned is average accuracy across 'idxs',\n followed by individual accuracies\n \"\"\"\n idx = list(range(output.shape[1]))\n norm = 1.0\n if hm_type == \"gaussian\":\n pred, _ = get_max_preds(output)\n target, _ = get_max_preds(target)\n h = output.shape[2]\n w = output.shape[3]\n norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10\n dists = calc_dists(pred, target, norm)\n\n acc = np.zeros((len(idx) + 1))\n avg_acc = 0\n cnt = 0\n\n for i in range(len(idx)):\n acc[i + 1] = dist_acc(dists[idx[i]])\n if acc[i + 1] >= 0:\n avg_acc = avg_acc + acc[i + 1]\n cnt += 1\n\n avg_acc = avg_acc / cnt if cnt != 0 else 0\n if cnt != 0:\n acc[0] = avg_acc\n return acc, avg_acc, cnt, pred\n"
] |
[
[
"numpy.less",
"numpy.linalg.norm",
"numpy.ones",
"numpy.not_equal",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ViDA-NYU/aws_taxi
|
[
"e08f2999987aca3b956096f95fa56759e357a7d4"
] |
[
"neighborhoods/plot_results.py"
] |
[
"import matplotlib, sys\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as m_plot\n\nif __name__=='__main__':\n counts = []\n with open(sys.argv[1], 'r') as f:\n for line in f:\n name, value = line.strip().split('\\t')\n counts.append((int(value), name))\n counts.sort(reverse=True)\n counts = counts[:20]\n values, names = zip(*counts)\n yticks = map(lambda x: len(counts)-x-0.5,range(len(names)))\n fig = m_plot.figure(figsize=(7, 8))\n fig.suptitle('Trips per Neighborhood', fontsize=20)\n ax = m_plot.Axes(fig, [.3,.1,.6,.8])\n fig.add_axes(ax) \n ax.barh(yticks, values, align='center')\n m_plot.yticks(yticks, names)\n fig.savefig(sys.argv[2])\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DuMoH112/art-photo
|
[
"fee4e84ee4f0c7eaa6b17d17aceba1cee45c4010"
] |
[
"backend/api/correct_photo.py"
] |
[
"import os\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import itemfreq\n\n\ndef correct_photo(path, filename):\n image = cv2.imread(os.path.join(path, filename))\n\n pallete, image = change_colors_on_photo(image)\n\n image = remove_noise(image)\n\n img_pallet = create_pallete(pallete)\n\n return write_files(path, filename, img_pallet, image)\n\n\ndef remove_noise(image):\n return image\n\n\ndef change_colors_on_photo(image):\n arr = np.float32(image)\n pixels = arr.reshape((-1, 3))\n\n n_colors = 12\n max_iter = 20 # Остановка алгоритма после n колличества прохождений\n epsilon = 0.5 # Точность алгоритма\n # Флаг для указания количества раз, когда алгоритм выполняется с использованием различных начальных меток\n n_iteration = 20\n criteria = (cv2.TERM_CRITERIA_EPS +\n cv2.TERM_CRITERIA_MAX_ITER, max_iter, epsilon)\n flags = cv2.KMEANS_RANDOM_CENTERS\n ret, labels, centroids = cv2.kmeans(\n pixels, n_colors, None, criteria, n_iteration, flags)\n\n palette = np.uint8(centroids)\n quantized = palette[labels.flatten()]\n quantized = quantized.reshape(image.shape)\n\n return palette, quantized\n\n\ndef create_pallete(palette):\n # Формирование картинки с палитрой изменённой фотографии\n n_colors = len(palette)\n img_pallet = np.array([[[0, 0, 0] for i in range(\n n_colors * 50)] for j in range(n_colors * 10)])\n for i in range(n_colors * 10):\n for j in range(n_colors * 50):\n img_pallet[i][j] = palette[j // 50]\n\n return img_pallet\n\n\ndef write_files(path, filename, img_pallet, quantized):\n # Запись палитры\n filename_pallet = str(filename.split('.')[0] +\n \"_pallet.\" + filename.split('.')[1])\n cv2.imwrite(os.path.join(path, filename_pallet), img_pallet)\n\n # Запись изменённой фотографии\n filename = str(filename.split('.')[0] +\n \"_change.\" + filename.split('.')[1])\n cv2.imwrite(os.path.join(path, filename), quantized)\n\n return [filename, filename_pallet]\n"
] |
[
[
"numpy.uint8",
"numpy.float32"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AJB363/PartIA-Flood-Warning-System
|
[
"713cbb64f272d4a598942cf89292a39a1a05c30a"
] |
[
"floodsystem/plot.py"
] |
[
"import matplotlib.pyplot as plt\nimport matplotlib.dates as d\nfrom numpy import linspace\nfrom .analysis import polyfit\n\n\ndef plot_water_levels(station, dates, levels):\n \"\"\" Plots the station water level history against time \"\"\"\n\n # Return early if data is invalid\n if len(dates) != len(levels):\n print(\"floodsystem.plot.py plot_water_levels: len(dates) != len(levels)\")\n return\n\n plt.plot(dates, levels)\n\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45)\n plt.title('Station: {}'.format(station.name))\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_water_level_with_fit(station, dates, levels, p):\n \"\"\" Plots the station water level history against time, with a polynomial line of best fit of order p. \"\"\"\n\n # Return early if data is invalid\n if len(dates) != len(levels):\n print(\"floodsystem.plot.py plot_water_levels: len(dates) != len(levels)\")\n return\n\n poly, d0 = polyfit(dates, levels, p)\n x = linspace(0, d.date2num(dates[-1]) - d.date2num(d0), len(dates))\n\n plt.plot(dates, levels)\n plt.plot(dates, poly(x))\n\n plt.xlabel('date')\n plt.ylabel('water level (m)')\n plt.xticks(rotation=45)\n plt.title('Station: {}'.format(station.name))\n\n plt.tight_layout()\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"matplotlib.dates.date2num",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
capogluuu/ivy
|
[
"fc3159a127412f039c7024649b23c89b493b20d2"
] |
[
"ivy/functional/backends/mxnet/manipulation.py"
] |
[
"# global\nimport mxnet as mx\nimport math\nimport numpy as np\nfrom typing import Union, Tuple, Optional, List\nfrom ivy.functional.backends.mxnet import _flat_array_to_1_dim_array, _handle_flat_arrays_in_out, _handle_flat_arrays_in\n\ndef flip(x: mx.ndarray.ndarray.NDArray,\n axis: Optional[Union[int, Tuple[int], List[int]]] = None)\\\n -> mx.ndarray.ndarray.NDArray:\n num_dims = len(x.shape)\n if not num_dims:\n return x\n if axis is None:\n new_axis = list(range(num_dims))\n else:\n new_axis = axis\n if type(new_axis) is int:\n new_axis = [new_axis]\n else:\n new_axis = new_axis\n new_axis = [item + num_dims if item < 0 else item for item in new_axis]\n return mx.nd.flip(x, new_axis)\n\n\ndef expand_dims(x: mx.ndarray.ndarray.NDArray,\n axis: Optional[Union[int, Tuple[int], List[int]]] = None) \\\n -> mx.ndarray.ndarray.NDArray:\n if x.shape == ():\n return _flat_array_to_1_dim_array(x)\n return mx.nd.expand_dims(x, axis)\n\n\n\ndef stack(xs, axis=0):\n if xs[0].shape == ():\n return mx.nd.reshape(mx.nd.stack(*[_flat_array_to_1_dim_array(x) for x in xs], axis=axis), -1)\n return mx.nd.stack(*xs, axis=axis)\n\n\n# Extra #\n# ------#\n\n\ndef split(x, num_or_size_splits=None, axis=0, with_remainder=False):\n if x.shape == ():\n if num_or_size_splits is not None and num_or_size_splits != 1:\n raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))\n return [x]\n if num_or_size_splits == 1:\n return [x]\n elif with_remainder and isinstance(num_or_size_splits, int):\n num_or_size_splits = x.shape[axis] if not num_or_size_splits else num_or_size_splits\n num_chunks = x.shape[axis] / num_or_size_splits\n num_chunks_int = math.floor(num_chunks)\n remainder_size = int((num_chunks - num_chunks_int) * num_or_size_splits)\n num_or_size_splits = [num_or_size_splits]*num_chunks_int + [remainder_size]\n if isinstance(num_or_size_splits, (list, tuple)):\n csum = [0] + np.cumsum(num_or_size_splits).tolist()\n starts = csum[:-1]\n ends = csum[1:]\n if axis < 0:\n slices = [tuple([Ellipsis, slice(s, e, 1)] + [slice(None, None, None)]*int(abs(axis)-1))\n for s, e in zip(starts, ends)]\n else:\n slices = [tuple([slice(None, None, None)]*axis + [slice(s, e, 1)])\n for s, e in zip(starts, ends)]\n return [x[so] for so in slices]\n return mx.nd.split(x, x.shape[axis] if not num_or_size_splits else num_or_size_splits, axis)\n\n\n@_handle_flat_arrays_in_out\ndef repeat(x, repeats, axis=None):\n return mx.nd.repeat(x, repeats, axis)\n\n\ndef tile(x, reps):\n if isinstance(reps, mx.nd.ndarray.NDArray):\n reps = reps.asnumpy().tolist()\n return mx.nd.tile(_flat_array_to_1_dim_array(x), reps)\n\n\n@_handle_flat_arrays_in\ndef constant_pad(x, pad_width, value=0):\n if isinstance(pad_width, mx.ndarray.ndarray.NDArray):\n pad_width = pad_width.asnumpy().tolist()\n x_shape = list(x.shape)\n num_dims = len(x_shape)\n if num_dims > 3:\n raise Exception('Invalid inputs. 
Pad for mxnet only supports inputs with 3 dimensions or smaller.')\n num_dims_to_add = 4 - num_dims\n new_shape = tuple([1] * num_dims_to_add + x_shape)\n mat_expanded_dims = mx.nd.reshape(x, new_shape)\n pad_width_flat = [0]*num_dims_to_add*2 + [item for sublist in pad_width for item in sublist]\n pad_expanded_dims = mx.nd.pad(mat_expanded_dims, mode=\"constant\", pad_width=tuple(pad_width_flat),\n constant_value=value)\n new_shape = [orig_dim + pad_width_item[0] + pad_width_item[1] for orig_dim, pad_width_item in zip(x_shape, pad_width)]\n res = mx.nd.reshape(pad_expanded_dims, tuple(new_shape))\n return res\n\n\ndef zero_pad(x, pad_width):\n return constant_pad(x, pad_width, 0)\n\n\n@_handle_flat_arrays_in_out\ndef clip(x, x_min, x_max):\n return mx.nd.clip(mx.nd.array(x), x_min, x_max)\n\n\nswapaxes = mx.nd.swapaxes"
] |
[
[
"numpy.cumsum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MartinKliemank/lettuce
|
[
"ee1b4dbfdbcf1bd87ac6b867b091a923d033403e",
"ee1b4dbfdbcf1bd87ac6b867b091a923d033403e"
] |
[
"lettuce/flows/couette.py",
"tests/test_simulation.py"
] |
[
"\"\"\"\nCouette Flow\n\"\"\"\n\nimport numpy as np\n\nfrom lettuce.unit import UnitConversion\nfrom lettuce.boundary import BounceBackBoundary, EquilibriumBoundaryPU\n\n\nclass CouetteFlow2D(object):\n def __init__(self, resolution, reynolds_number, mach_number, lattice):\n self.resolution = resolution\n self.units = UnitConversion(\n lattice,\n reynolds_number=reynolds_number, mach_number=mach_number,\n characteristic_length_lu=resolution, characteristic_length_pu=1,\n characteristic_velocity_pu=1\n )\n\n def analytic_solution(self, x, t=0):\n # TODO\n raise NotImplementedError\n\n def initial_solution(self, x):\n return np.array([0*x[0]], dtype=float), np.array([0*x[0],0*x[1]], dtype=float)\n\n @property\n def grid(self):\n x = np.linspace(0, 1, num=self.resolution, endpoint=False)\n y = np.linspace(0, 1, num=self.resolution, endpoint=False)\n return np.meshgrid(x, y, indexing='ij')\n\n @property\n def boundaries(self):\n x, y = self.grid\n return [EquilibriumBoundaryPU(np.abs(y-1) < 1e-6, self.units.lattice, self.units, np.array([1.0, 0.0])),\n BounceBackBoundary(np.abs(y) < 1e-6, self.units.lattice)]\n\n\n\n",
"\"\"\"Tests for simulation\"\"\"\n\nimport pytest\nimport numpy as np\nfrom lettuce import (\n Simulation, TaylorGreenVortex2D, TaylorGreenVortex3D, Lattice,\n D2Q9, D3Q27, BGKCollision, StandardStreaming, ErrorReporter,\n DecayingTurbulence\n)\nimport torch\n\n\n# Note: Simulation is also implicitly tested in test_flows\n\n\ndef test_save_and_load(dtype_device, tmpdir):\n dtype, device = dtype_device\n lattice = Lattice(D2Q9, device, dtype)\n flow = TaylorGreenVortex2D(resolution=16, reynolds_number=10, mach_number=0.05, lattice=lattice)\n collision = BGKCollision(lattice, tau=flow.units.relaxation_parameter_lu)\n streaming = StandardStreaming(lattice)\n simulation = Simulation(flow=flow, lattice=lattice, collision=collision, streaming=streaming)\n simulation.step(10)\n simulation.save_checkpoint(tmpdir/\"checkpoint.pic\")\n simulation2 = Simulation(flow=flow, lattice=lattice, collision=collision, streaming=streaming)\n simulation2.load_checkpoint(tmpdir/\"checkpoint.pic\")\n assert lattice.convert_to_numpy(simulation2.f) == pytest.approx(lattice.convert_to_numpy(simulation.f))\n\n\[email protected](\"use_jacobi\", [True, False])\ndef test_initialization(dtype_device, use_jacobi):\n dtype, device = dtype_device\n lattice = Lattice(D2Q9, device, dtype)\n flow = TaylorGreenVortex2D(resolution=24, reynolds_number=10, mach_number=0.05, lattice=lattice)\n collision = BGKCollision(lattice, tau=flow.units.relaxation_parameter_lu)\n streaming = StandardStreaming(lattice)\n simulation = Simulation(flow=flow, lattice=lattice, collision=collision, streaming=streaming)\n # set initial pressure to 0 everywhere\n p, u = flow.initial_solution(flow.grid)\n u0 = lattice.convert_to_tensor(flow.units.convert_velocity_to_lu(u))\n rho0 = lattice.convert_to_tensor(np.ones_like(u0[0,...].cpu()))\n simulation.f = lattice.equilibrium(rho0, u0)\n if use_jacobi:\n simulation.initialize_pressure(1000, 1e-6)\n num_iterations = 0\n else:\n num_iterations = simulation.initialize(500, 1e-3)\n piter = lattice.convert_to_numpy(flow.units.convert_density_lu_to_pressure_pu(lattice.rho(simulation.f)))\n # assert that pressure is converged up to 0.05 (max p\n assert piter == pytest.approx(p, rel=0.0, abs=5e-2)\n assert num_iterations < 500\n\n\[email protected](\"Case\", [TaylorGreenVortex2D, TaylorGreenVortex3D, DecayingTurbulence])\ndef test_initialize_fneq(Case, dtype_device):\n dtype, device = dtype_device\n lattice = Lattice(D2Q9, device, dtype)\n if \"3D\" in Case.__name__:\n lattice = Lattice(D3Q27, dtype=dtype, device=device)\n flow = Case(resolution=40, reynolds_number=1000, mach_number=0.1, lattice=lattice)\n collision = BGKCollision(lattice, tau=flow.units.relaxation_parameter_lu)\n streaming = StandardStreaming(lattice)\n simulation_neq = Simulation(flow=flow, lattice=lattice, collision=collision, streaming=streaming)\n\n pre_rho = lattice.rho(simulation_neq.f)\n pre_u = lattice.u(simulation_neq.f)\n pre_ke = lattice.incompressible_energy(simulation_neq.f)\n\n simulation_neq.initialize_f_neq()\n\n post_rho = lattice.rho(simulation_neq.f)\n post_u = lattice.u(simulation_neq.f)\n post_ke = lattice.incompressible_energy(simulation_neq.f)\n tol = 1e-6\n assert torch.allclose(pre_rho, post_rho, rtol=0.0, atol=tol)\n assert torch.allclose(pre_u, post_u, rtol=0.0, atol=tol)\n assert torch.allclose(pre_ke, post_ke, rtol=0.0, atol=tol)\n\n if Case is TaylorGreenVortex2D:\n error_reporter_neq = ErrorReporter(lattice, flow, interval=1, out=None)\n error_reporter_eq = ErrorReporter(lattice, flow, interval=1, out=None)\n 
simulation_eq = Simulation(flow=flow, lattice=lattice, collision=collision, streaming=streaming)\n simulation_neq.reporters.append(error_reporter_neq)\n simulation_eq.reporters.append(error_reporter_eq)\n\n simulation_neq.step(10)\n simulation_eq.step(10)\n error_u, error_p = np.mean(np.abs(error_reporter_neq.out), axis=0).tolist()\n error_u_eq, error_p_eq = np.mean(np.abs(error_reporter_eq.out), axis=0).tolist()\n\n assert(error_u < error_u_eq)\n"
] |
[
[
"numpy.array",
"numpy.meshgrid",
"numpy.abs",
"numpy.linspace"
],
[
"torch.allclose",
"numpy.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jijyisme/ner-cp-cu
|
[
"6d13247de54e55cbf3ec6856ff3b8bebf7a9d8f2"
] |
[
"ner.py"
] |
[
"import csv\nimport gc\nimport glob\nimport json\nimport os\nimport shutil\nimport sys\nimport warnings\nfrom collections import Counter\n\nfrom multiprocessing import Process, Queue\nfrom pprint import pprint\n\n# Prevent Keras info message; \"Using TensorFlow backend.\"\nSTDERR = sys.stderr\nsys.stderr = open(os.devnull, \"w\")\nsys.stderr = STDERR\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.exceptions import UndefinedMetricWarning\nimport sklearn.metrics\n\nfrom NER import constant\nfrom NER.model import load_model, save_model, Model\nfrom NER.metric import custom_metric\nfrom NER.callback import CustomCallback\n\nfrom keras_contrib.layers import CRF\nfrom types import SimpleNamespace\n\nclass NamedEntityRecognizer(object):\n\n def __init__(self, model_path=None, new_model=False):\n\n self.new_model = new_model\n self.model_path = model_path\n self.model_architecture = Model()\n self.model = None\n\n if not self.new_model:\n if model_path is not None:\n self.model = load_model(model_path)\n else:\n self.model = load_model(constant.DEFAULT_MODEL_PATH)\n\n def evaluate(self, x_true, y_true):\n if self.new_model:\n print(\"model is not trained\")\n return\n\n model = self.model\n\n pred = model.predict(x_true)\n print('pred shape is ', pred.shape)\n amax_pred = np.argmax(pred, axis=2)\n amax_true = np.argmax(y_true, axis=2)\n pred_flat = amax_pred.flatten()\n true_flat = amax_true.flatten()\n\n scores = custom_metric(true_flat, pred_flat)\n\n for score in scores:\n print(score,\": \",scores[score])\n return scores\n\n\n def train(self, x_true, y_true, train_name, model_path=None, num_step=60, valid_split=0.1,\n initial_epoch=None, epochs=100, batch_size=32, learning_rate=0.001,\n shuffle=False, model= None):\n \"\"\"Train model\"\"\"\n if(train_name==''):\n train_name = model.model_name\n\n # Create new model or load model\n if self.new_model:\n if model == None:\n initial_epoch = 0\n model = self.model_architecture.model\n else:\n if not model_path:\n raise Exception(\"Model path is not defined.\")\n\n if initial_epoch is None:\n raise Exception(\"Initial epoch is not defined.\")\n\n model = load_model(model_path)\n\n # Display model summary before train\n model.summary()\n\n callbacks = CustomCallback(train_name).callbacks\n self.model = model\n\n # Train model\n model.fit(x_true, y_true, validation_split=valid_split,\n initial_epoch=initial_epoch, epochs=epochs,\n batch_size=batch_size, shuffle=shuffle ,callbacks=callbacks)\n\n self.new_model = False\n\n def save(self, path, name):\n # Save model architecture to file\n with open(os.path.join(path, name+\".json\"), \"w\") as file:\n file.write(self.model.to_json())\n\n # Save model config to file\n with open(os.path.join(path, name+\"_config.txt\"), \"w\") as file:\n pprint(self.model.get_config(), stream=file)\n\n self.model.save(os.path.join(path,name+'.hdf5'))\n\n def predict(self, x_vector):\n if self.new_model:\n print(\"model is not trained\")\n return\n model = self.model\n print('make prediction')\n per = model.predict(x_vector)\n print('flatten data')\n amax = np.argmax(per, axis=2)\n predict_y = amax.flatten()\n x_flat = x_vector.flatten()\n print('return')\n return dict({\n 'x': x_flat,\n 'ner_tag': predict_y\n })"
] |
[
[
"numpy.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bagxi/cocoapi
|
[
"ef301155c4e8951792ec23bd356d9657feb1a292"
] |
[
"pycocotools/cocoeval.py"
] |
[
"from collections import defaultdict, OrderedDict\nimport copy\nimport datetime\nimport logging\nimport time\n\nimport numpy as np\n\nfrom . import mask as maskUtils\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n logger.info('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = {} # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n logger.info('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n logger.warning('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n logger.info('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n logger.info('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = p.kpt_oks_sigmas\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt 
= [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n logger.info('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n logger.warning('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in 
enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n logger.info('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _get_metric_key(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n template = '{metric}@{thr}/{area}(max_dets={dets})'\n str_ = template.format(\n metric=('mAP' if ap == 1 else 'mAR'),\n thr=(\n '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1])\n if iouThr is None\n else '{:0.2f}'.format(iouThr)\n ),\n area=areaRng,\n dets=maxDets,\n )\n return str_\n def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n 
logger.info(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n key = _get_metric_key(ap, iouThr, areaRng, maxDets)\n return key, mean_s\n def _summarizeDets():\n params = [\n dict(ap=1),\n dict(ap=1, iouThr=0.5, maxDets=self.params.maxDets[2]),\n dict(ap=1, iouThr=0.75, maxDets=self.params.maxDets[2]),\n dict(ap=1, areaRng='small', maxDets=self.params.maxDets[2]),\n dict(ap=1, areaRng='medium', maxDets=self.params.maxDets[2]),\n dict(ap=1, areaRng='large', maxDets=self.params.maxDets[2]),\n dict(ap=0, maxDets=self.params.maxDets[0]),\n dict(ap=0, maxDets=self.params.maxDets[1]),\n dict(ap=0, maxDets=self.params.maxDets[2]),\n dict(ap=0, areaRng='small', maxDets=self.params.maxDets[2]),\n dict(ap=0, areaRng='medium', maxDets=self.params.maxDets[2]),\n dict(ap=0, areaRng='large', maxDets=self.params.maxDets[2]),\n ]\n\n stats = OrderedDict()\n for kwargs in params:\n key, value = _summarize(**kwargs)\n stats[key] = value\n\n return stats\n def _summarizeKps():\n params = [\n dict(ap=1, maxDets=20),\n dict(ap=1, maxDets=20, iouThr=0.5),\n dict(ap=1, maxDets=20, iouThr=0.75),\n dict(ap=1, maxDets=20, areaRng='medium'),\n dict(ap=1, maxDets=20, areaRng='large'),\n dict(ap=0, maxDets=20),\n dict(ap=0, maxDets=20, iouThr=.5),\n dict(ap=0, maxDets=20, iouThr=.75),\n dict(ap=0, maxDets=20, areaRng='medium'),\n dict(ap=0, maxDets=20, areaRng='large'),\n ]\n\n stats = OrderedDict()\n for kwargs in params:\n key, value = _summarize(**kwargs)\n stats[key] = value\n\n return stats\n if not self.eval:\n raise NotImplementedError('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n return self.stats\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n"
] |
[
[
"numpy.logical_not",
"numpy.spacing",
"numpy.unique",
"numpy.cumsum",
"numpy.ones",
"numpy.concatenate",
"numpy.round",
"numpy.max",
"numpy.mean",
"numpy.count_nonzero",
"numpy.searchsorted",
"numpy.exp",
"numpy.argsort",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dhairyashah1/Eklavya20-CatchPracticeBot
|
[
"f0e625768aa49cd43df9fec379c8d7919be784b9"
] |
[
"PycharmProjects/OpenCV/Dhairya_OpenCV/6_VideoFilters.py"
] |
[
"import numpy as np\nimport cv2\n#red detection\nvid = cv2.VideoCapture(0) #Webcam=0\nwhile True:\n _, frame = vid.read() #_ is used for those returned things whch are of no use\n # HSV = HUE, SATURATION, VALUE\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n lower_red = np.array([0,70,50]) #provides a range\n upper_red = np.array([50,255,255]) # whatever pixels are in this range diaplay their color e\n #else they turn black\n mask = cv2.inRange(hsv,lower_red,upper_red) #creating a range mask is in black n white\n #white in mask AND pixels in frame ==pixels in frame ....rest are black\n result = cv2.bitwise_and(frame,frame,mask = mask)\n cv2.imshow('frame',frame)\n cv2.imshow('hsv',hsv)\n cv2.imshow('maskrange',mask)\n cv2.imshow('result', result)\n if cv2.waitKey(5) & 0xFF == 27:\n break\nvid.release()\ncv2.destroyAllWindows()"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mussard/QCElemental
|
[
"4ae662b02cd099a5bbadf7e2c03ef2a39b8eee1a"
] |
[
"qcelemental/tests/test_molparse_from_schema.py"
] |
[
"import copy\n\nimport numpy as np\nimport pytest\nimport qcelemental as qcel\nfrom qcelemental.testing import compare_molrecs\n\n_schema_prov_stamp = {'creator': 'QCElemental', 'version': '1.0', 'routine': 'qcelemental.molparse.from_schema'}\n\n\[email protected](\"inp,expected\", [\n ({\n 'frag_pattern': [[0], [1]],\n 'geom': [0., 0., 0., 1., 0., 0.],\n 'elbl': ['O', 'H']\n }, {\n 'fragment_separators': np.array([1]),\n 'geom': np.array([0., 0., 0., 1., 0., 0.]),\n 'elbl': np.array(['O', 'H'])\n }),\n ({\n 'frag_pattern': [[2, 0], [1]],\n 'geom': np.array([[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]),\n 'elem': np.array(['Li', 'H', 'He'])\n }, {\n 'fragment_separators': np.array([2]),\n 'geom': np.array([0., 0., 0., 0., 0., 1., 0., 0., 2.]),\n 'elem': np.array(['He', 'Li', 'H'])\n }),\n ({\n 'frag_pattern': [[2, 0], [1]],\n 'elez': [3, 1, 2]\n }, {\n 'fragment_separators': np.array([2]),\n 'elez': np.array([2, 3, 1])\n }),\n])\ndef test_contiguize_from_fragment_pattern(inp, expected):\n ans = qcel.molparse.contiguize_from_fragment_pattern(**inp)\n\n # compare_molrecs instead of compare_dicts handles some fragment_separators types issues\n assert compare_molrecs(expected, ans, atol=1.e-6)\n\n\[email protected](\"inp,expected\", [\n ({\n 'frag_pattern': [[2, 0], [1, 3]],\n 'geom': np.array([[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]),\n 'elem': np.array(['Li', 'H', 'He'])\n }, 'dropped atoms'),\n ({\n 'frag_pattern': [[2, 0], [1, 4]]\n }, 'Fragmentation pattern skips atoms'),\n ({\n 'frag_pattern': [[2, 0], [1, 3]],\n 'elem': np.array(['U', 'Li', 'H', 'He']),\n 'elbl': np.array(['Li', 'H', 'He'])\n }, 'wrong number of atoms in array'),\n ({\n 'frag_pattern': [[2, 0], [1]],\n 'elez': [3, 1, 2],\n 'throw_reorder': True\n }, 'reorder atoms to accommodate non-contiguous fragments'),\n])\ndef test_contiguize_from_fragment_pattern_error(inp, expected):\n with pytest.raises(qcel.ValidationError) as e:\n qcel.molparse.contiguize_from_fragment_pattern(**inp)\n\n assert expected in str(e)\n\n\nschema14_1 = {\n \"geometry\": [0.0, 0.0, -5.0, 0.0, 0.0, 5.0],\n \"symbols\": [\"He\", \"He\"],\n 'fragments': [[0], [1]],\n 'fragment_charges': [0.0, 0.0],\n 'fragment_multiplicities': [3, 1],\n 'masses': [4.00260325413, 4.00260325413],\n 'name': 'He2',\n 'fix_com': False,\n 'fix_orientation': False,\n 'molecular_charge': 0.0,\n \"molecular_multiplicity\": 3,\n \"real\": [True, False]\n}\n\nschema14_psi4_np = {\n \"geom\": np.array([0.0, 0.0, -5.0, 0.0, 0.0, 5.0]),\n \"elem\": np.array([\"He\", \"He\"]),\n 'elea': np.array([4, 4]),\n 'elez': np.array([2, 2]),\n 'fragment_charges': [0.0, 0.0],\n 'fragment_multiplicities': [3, 1],\n 'mass': np.array([4.00260325413, 4.00260325413]),\n 'name': 'He2',\n 'fix_com': False,\n 'fix_orientation': False,\n 'molecular_charge': 0.0,\n \"molecular_multiplicity\": 3,\n 'units': 'Bohr',\n 'fragment_separators': [1],\n 'elbl': np.array(['', '']),\n \"real\": np.array([True, False]),\n \"provenance\": _schema_prov_stamp,\n}\n\n\ndef test_from_schema_1_14e():\n schema = {\"schema_name\": \"qc_schema\", \"schema_version\": 1, \"molecule\": copy.deepcopy(schema14_1)}\n\n ans = qcel.molparse.from_schema(schema)\n assert compare_molrecs(schema14_psi4_np, ans, 4)\n\n\ndef test_from_schema_1p5_14e():\n # this is the oddball case where passing code has internally a dtype=2 molecule\n # but it's still passing the outer data structure\n schema = {\"schema_name\": \"qc_schema\", \"schema_version\": 1, \"molecule\": copy.deepcopy(schema14_1)}\n schema['molecule'].update({\"schema_name\": 
\"qcschema_molecule\", \"schema_version\": 2})\n\n ans = qcel.molparse.from_schema(schema)\n assert compare_molrecs(schema14_psi4_np, ans, 4)\n\n\ndef test_from_schema_2_14e():\n schema = copy.deepcopy(schema14_1)\n schema.update({\"schema_name\": \"qcschema_molecule\", \"schema_version\": 2})\n\n ans = qcel.molparse.from_schema(schema)\n assert compare_molrecs(schema14_psi4_np, ans, 4)\n\n\ndef test_from_schema_error_f():\n schema = {\"schema_name\": \"private_schema\", \"schema_version\": 1, \"molecule\": copy.deepcopy(schema14_1)}\n\n with pytest.raises(qcel.ValidationError) as e:\n qcel.molparse.from_schema(schema)\n\n assert 'Schema not recognized' in str(e)\n\n\ndef test_from_schema_1_nfr_error_14g():\n schema = {\"schema_name\": \"qc_schema\", \"schema_version\": 1, \"molecule\": copy.deepcopy(schema14_1)}\n schema['molecule'].pop('fragments')\n\n with pytest.raises(qcel.ValidationError) as e:\n ans = qcel.molparse.from_schema(schema)\n\n assert 'Dimension mismatch among fragment quantities: sep + 1 (1), chg (2), and mult(2)' in str(e)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1155107756/pgdrive
|
[
"708375c8cb6c5ed1950c20d91bd0aec5fd1b1e62",
"708375c8cb6c5ed1950c20d91bd0aec5fd1b1e62",
"708375c8cb6c5ed1950c20d91bd0aec5fd1b1e62"
] |
[
"pgdrive/utils/pg_space.py",
"pgdrive/utils/scene_utils.py",
"pgdrive/tests/test_env/test_change_friction_density_envs.py"
] |
[
"import logging\nimport typing as tp\nfrom collections import namedtuple, OrderedDict\n\nimport numpy as np\n\nfrom pgdrive.utils import get_np_random\n\nPGBoxSpace = namedtuple(\"PGBoxSpace\", \"max min\")\nPGDiscreteSpace = namedtuple(\"PGDiscreteSpace\", \"number\")\nPGConstantSpace = namedtuple(\"PGConstantSpace\", \"value\")\n\"\"\"\nThis filed is mostly copied from gym==0.17.2\nWe use the gym.spaces as helpers, but it may cause problem if user using some old version of gym.\n\"\"\"\n\n\nclass Space:\n \"\"\"\n Copied from gym: gym/spaces/space.py\n\n Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n self.np_random = None\n self.seed()\n\n def sample(self):\n \"\"\"Randomly sample an element of this space. Can be\n uniform or non-uniform sampling based on boundedness of space.\"\"\"\n raise NotImplementedError\n\n def seed(self, seed=None):\n \"\"\"Seed the PRNG of this space. \"\"\"\n self.np_random, seed = get_np_random(seed, return_seed=True)\n return [seed]\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n\nclass Dict(Space):\n \"\"\"\n Copied from gym: gym/spaces/dcit.py\n\n A dictionary of simpler spaces.\n\n Example usage:\n self.observation_space = spaces.Dict({\"position\": spaces.Discrete(2), \"velocity\": spaces.Discrete(3)})\n\n Example usage [nested]:\n self.nested_observation_space = spaces.Dict({\n 'sensors': spaces.Dict({\n 'position': spaces.Box(low=-100, high=100, shape=(3,)),\n 'velocity': spaces.Box(low=-1, high=1, shape=(3,)),\n 'front_cam': spaces.Tuple((\n spaces.Box(low=0, high=1, shape=(10, 10, 3)),\n spaces.Box(low=0, high=1, shape=(10, 10, 3))\n )),\n 'rear_cam': spaces.Box(low=0, high=1, shape=(10, 10, 3)),\n }),\n 'ext_controller': spaces.MultiDiscrete((5, 2, 2)),\n 'inner_state':spaces.Dict({\n 'charge': spaces.Discrete(100),\n 'system_checks': spaces.MultiBinary(10),\n 'job_status': spaces.Dict({\n 'task': spaces.Discrete(5),\n 'progress': spaces.Box(low=0, high=100, shape=()),\n })\n })\n })\n \"\"\"\n def __init__(self, spaces=None, **spaces_kwargs):\n assert (spaces is None) or (not spaces_kwargs), 'Use either Dict(spaces=dict(...)) or Dict(foo=x, bar=z)'\n if spaces is None:\n spaces = spaces_kwargs\n if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):\n spaces = OrderedDict(sorted(list(spaces.items())))\n if isinstance(spaces, list):\n spaces = OrderedDict(spaces)\n self.spaces = spaces\n for space in spaces.values():\n assert isinstance(space, Space), 'Values of the dict should be instances of gym.Space'\n super(Dict, self).__init__(None, None) # None for shape and dtype, since it'll require special handling\n\n def seed(self, seed=None):\n [space.seed(seed) for space in 
self.spaces.values()]\n\n def sample(self):\n return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])\n\n def contains(self, x):\n if not isinstance(x, dict) or len(x) != len(self.spaces):\n return False\n for k, space in self.spaces.items():\n if k not in x:\n return False\n if not space.contains(x[k]):\n return False\n return True\n\n def __getitem__(self, key):\n return self.spaces[key]\n\n def __repr__(self):\n return \"Dict(\" + \", \".join([str(k) + \":\" + str(s) for k, s in self.spaces.items()]) + \")\"\n\n def to_jsonable(self, sample_n):\n # serialize as dict-repr of vectors\n return {key: space.to_jsonable([sample[key] for sample in sample_n]) \\\n for key, space in self.spaces.items()}\n\n def from_jsonable(self, sample_n):\n dict_of_list = {}\n for key, space in self.spaces.items():\n dict_of_list[key] = space.from_jsonable(sample_n[key])\n ret = []\n for i, _ in enumerate(dict_of_list[key]):\n entry = {}\n for key, value in dict_of_list.items():\n entry[key] = value[i]\n ret.append(entry)\n return ret\n\n def __eq__(self, other):\n return isinstance(other, Dict) and self.spaces == other.spaces\n\n\nclass PGSpace(Dict):\n \"\"\"\n length = PGSpace(name=\"length\",max=50.0,min=10.0)\n Usage:\n PGSpace({\"lane_length\":length})\n \"\"\"\n def __init__(self, our_config: tp.Dict[str, tp.Union[PGBoxSpace, PGDiscreteSpace, PGConstantSpace]]):\n super(PGSpace, self).__init__(PGSpace.wrap2gym_space(our_config))\n self.parameters = set(our_config.keys())\n\n @staticmethod\n def wrap2gym_space(our_config):\n ret = dict()\n for key, value in our_config.items():\n if isinstance(value, PGBoxSpace):\n ret[key] = Box(low=value.min, high=value.max, shape=(1, ))\n elif isinstance(value, PGDiscreteSpace):\n ret[key] = Discrete(value.number)\n elif isinstance(value, PGConstantSpace):\n ret[key] = Box(low=value.value, high=value.value, shape=(1, ))\n else:\n raise ValueError(\"{} can not be wrapped in gym space\".format(key))\n return ret\n\n\nclass Parameter:\n \"\"\"\n Block parameters and vehicle parameters\n \"\"\"\n # block\n length = \"length\"\n radius = \"radius\"\n angle = \"angle\"\n goal = \"goal\"\n dir = \"dir\"\n radius_inner = \"inner_radius\" # only for roundabout use\n radius_exit = \"exit_radius\"\n t_intersection_type = \"t_type\"\n lane_num = \"lane_num\"\n change_lane_num = \"change_lane_num\"\n decrease_increase = \"decrease_increase\"\n\n # vehicle\n vehicle_length = \"v_len\"\n vehicle_width = \"v_width\"\n vehicle_height = \"v_height\"\n front_tire_longitude = \"f_tire_long\"\n rear_tire_longitude = \"r_tire_long\"\n tire_lateral = \"tire_lateral\"\n tire_axis_height = \"tire_axis_height\"\n tire_radius = \"tire_radius\"\n mass = \"mass\"\n chassis_height = \"chassis_height\"\n heading = \"heading\"\n steering_max = \"steering_max\"\n engine_force_max = \"e_f_max\"\n brake_force_max = \"b_f_max\"\n speed_max = \"s_max\"\n\n # vehicle visualization\n vehicle_vis_z = \"vis_z\"\n vehicle_vis_y = \"vis_y\"\n vehicle_vis_h = \"vis_h\"\n vehicle_vis_scale = \"vis_scale\"\n\n\nclass VehicleParameterSpace:\n BASE_VEHICLE = {\n # Now the parameter sample is not available and thus the value space is incorrect\n Parameter.vehicle_length: PGConstantSpace(4.0),\n Parameter.vehicle_width: PGConstantSpace(1.5),\n Parameter.vehicle_height: PGConstantSpace(1),\n Parameter.chassis_height: PGConstantSpace(0.3),\n Parameter.front_tire_longitude: PGConstantSpace(1.05),\n Parameter.rear_tire_longitude: PGConstantSpace(1.17),\n Parameter.tire_lateral: 
PGConstantSpace(0.8),\n Parameter.tire_radius: PGConstantSpace(0.25),\n Parameter.mass: PGConstantSpace(800.0),\n Parameter.heading: PGConstantSpace(0.0),\n\n # visualization\n Parameter.vehicle_vis_h: PGConstantSpace(180),\n Parameter.vehicle_vis_y: PGConstantSpace(0.1),\n Parameter.vehicle_vis_z: PGConstantSpace(-0.31),\n Parameter.vehicle_vis_scale: PGConstantSpace(0.013),\n\n # TODO the following parameters will be opened soon using PGBoxSPace\n Parameter.steering_max: PGConstantSpace(40.0),\n Parameter.engine_force_max: PGConstantSpace(500.0),\n Parameter.brake_force_max: PGConstantSpace(40.0),\n Parameter.speed_max: PGConstantSpace(120),\n }\n\n\nclass BlockParameterSpace:\n \"\"\"\n Make sure the range of curve parameters covers the parameter space of other blocks,\n otherwise, an error may happen in navigation info normalization\n \"\"\"\n STRAIGHT = {Parameter.length: PGBoxSpace(min=40.0, max=80.0)}\n CURVE = {\n Parameter.length: PGBoxSpace(min=40.0, max=80.0),\n Parameter.radius: PGBoxSpace(min=25.0, max=60.0),\n Parameter.angle: PGBoxSpace(min=45, max=135),\n Parameter.dir: PGDiscreteSpace(2)\n }\n INTERSECTION = {\n Parameter.radius: PGConstantSpace(10),\n Parameter.change_lane_num: PGDiscreteSpace(number=2), # 0, 1\n Parameter.decrease_increase: PGDiscreteSpace(number=2) # 0, decrease, 1 increase\n }\n ROUNDABOUT = {\n Parameter.radius_exit: PGBoxSpace(min=5, max=15), # TODO Should we reduce this?\n Parameter.radius_inner: PGBoxSpace(min=15, max=45), # TODO Should we reduce this?\n Parameter.angle: PGConstantSpace(60)\n }\n T_INTERSECTION = {\n Parameter.radius: PGConstantSpace(10),\n Parameter.t_intersection_type: PGDiscreteSpace(number=3), # 3 different t type for previous socket\n Parameter.change_lane_num: PGDiscreteSpace(2), # 0,1\n Parameter.decrease_increase: PGDiscreteSpace(2) # 0, decrease, 1 increase\n }\n RAMP_PARAMETER = {\n Parameter.length: PGBoxSpace(min=20, max=40) # accelerate/decelerate part length\n }\n FORK_PARAMETER = {\n Parameter.length: PGBoxSpace(min=20, max=40), # accelerate/decelerate part length\n Parameter.lane_num: PGDiscreteSpace(2)\n }\n BOTTLENECK_PARAMETER = {\n Parameter.length: PGBoxSpace(min=20, max=50), # the length of straigh part\n Parameter.lane_num: PGDiscreteSpace(2), # the lane num increased or descreased now fix to 1\n }\n\n\nclass Discrete(Space):\n r\"\"\"\n Copied from gym: gym/spaces/discrete.py\n\n A discrete space in :math:`\\{ 0, 1, \\\\dots, n-1 \\}`.\n\n Example::\n\n >>> Discrete(2)\n\n \"\"\"\n def __init__(self, n):\n assert n >= 0\n self.n = n\n super(Discrete, self).__init__((), np.int64)\n\n def sample(self):\n return self.np_random.randint(self.n)\n\n def contains(self, x):\n if isinstance(x, int):\n as_int = x\n elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.char in np.typecodes['AllInteger'] and x.shape == ()):\n as_int = int(x)\n else:\n return False\n return as_int >= 0 and as_int < self.n\n\n def __repr__(self):\n return \"Discrete(%d)\" % self.n\n\n def __eq__(self, other):\n return isinstance(other, Discrete) and self.n == other.n\n\n\nclass Box(Space):\n \"\"\"\n Copied from gym: gym/spaces/box.py\n\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. 
Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n # determine shape if it isn't provided directly\n if shape is not None:\n shape = tuple(shape)\n assert np.isscalar(low) or low.shape == shape, \"low.shape doesn't match provided shape\"\n assert np.isscalar(high) or high.shape == shape, \"high.shape doesn't match provided shape\"\n elif not np.isscalar(low):\n shape = low.shape\n assert np.isscalar(high) or high.shape == shape, \"high.shape doesn't match low.shape\"\n elif not np.isscalar(high):\n shape = high.shape\n assert np.isscalar(low) or low.shape == shape, \"low.shape doesn't match high.shape\"\n else:\n raise ValueError(\"shape must be provided or inferred from the shapes of low or high\")\n\n if np.isscalar(low):\n low = np.full(shape, low, dtype=dtype)\n\n if np.isscalar(high):\n high = np.full(shape, high, dtype=dtype)\n\n self.shape = shape\n self.low = low\n self.high = high\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logging.warning(\"Box bound precision lowered by casting to {}\".format(self.dtype))\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n\n sample[upp_bounded] = 
-self.np_random.exponential(size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n\n sample[bounded] = self.np_random.uniform(low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and \\\n (self.shape == other.shape) and \\\n np.allclose(self.low, other.low) and \\\n np.allclose(self.high, other.high)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Test\n \"\"\"\n config = {\n \"length\": PGBoxSpace(min=10.0, max=80.0),\n \"angle\": PGBoxSpace(min=50.0, max=360.0),\n \"goal\": PGDiscreteSpace(number=3)\n }\n config = PGSpace(config)\n print(config.sample())\n config.seed(1)\n print(config.sample())\n print(config.sample())\n config.seed(1)\n print(*config.sample()[\"length\"])\n",
"import math\nfrom typing import List, TYPE_CHECKING, Tuple\n\nimport numpy as np\n\nfrom pgdrive.constants import Decoration, BodyName\nfrom pgdrive.scene_creator.lane.abs_lane import AbstractLane\nfrom pgdrive.scene_creator.lane.circular_lane import CircularLane\nfrom pgdrive.utils.coordinates_shift import panda_position\nfrom pgdrive.utils.math_utils import get_points_bounding_box\nfrom pgdrive.world.pg_world import PGWorld\n\nif TYPE_CHECKING:\n from pgdrive.scene_creator.blocks.block import BlockSocket\n from pgdrive.scene_creator.road.road import Road\n from pgdrive.scene_creator.road.road_network import RoadNetwork\n\n\ndef get_lanes_on_road(road: \"Road\", roadnet: \"RoadNetwork\") -> List[\"AbstractLane\"]:\n return roadnet.graph[road.start_node][road.end_node]\n\n\ndef block_socket_merge(\n socket_1: \"BlockSocket\", socket_2: \"BlockSocket\", global_network: \"RoadNetwork\", positive_merge: False\n):\n global_network.graph[socket_1.positive_road.start_node][socket_2.negative_road.start_node] = \\\n global_network.graph[socket_1.positive_road.start_node].pop(socket_1.positive_road.end_node)\n\n global_network.graph[socket_2.positive_road.start_node][socket_1.negative_road.start_node] = \\\n global_network.graph[socket_2.positive_road.start_node].pop(socket_2.positive_road.end_node)\n\n\ndef check_lane_on_road(road_network: \"RoadNetwork\", lane, positive: float = 0, ignored=None) -> bool:\n \"\"\"\n Calculate if the new lane intersects with other lanes in current road network\n The return Value is True when cross\n Note: the decoration road will be ignored in default\n \"\"\"\n graph = road_network.graph\n for _from, to_dict in graph.items():\n for _to, lanes in to_dict.items():\n if ignored and (_from, _to) == ignored:\n continue\n if (_from, _to) == (Decoration.start, Decoration.end):\n continue\n if len(lanes) == 0:\n continue\n x_max_1, x_min_1, y_max_1, y_min_1 = get_road_bounding_box(lanes)\n x_max_2, x_min_2, y_max_2, y_min_2 = get_road_bounding_box([lane])\n if x_min_1 > x_max_2 or x_min_2 > x_max_1 or y_min_1 > y_max_2 or y_min_2 > y_max_1:\n continue\n for _id, l in enumerate(lanes):\n for i in range(1, int(lane.length), 1):\n sample_point = lane.position(i, positive * lane.width_at(i) / 2.0)\n longitudinal, lateral = l.local_coordinates(sample_point)\n is_on = math.fabs(lateral) <= l.width_at(longitudinal) / 2.0 and 0 <= longitudinal <= l.length\n if is_on:\n return True\n return False\n\n\ndef get_road_bounding_box(lanes, extra_lateral=3) -> Tuple:\n \"\"\"\n Return (x_max, x_min, y_max, y_min) as bounding box of this road\n :param lanes: Lanes in this road\n :param extra_lateral: extra width in lateral direction, usually sidewalk width\n :return: x_max, x_min, y_max, y_min\n \"\"\"\n line_points = get_curve_contour(lanes, extra_lateral) if isinstance(lanes[0], CircularLane) \\\n else get_straight_contour(lanes, extra_lateral)\n return get_points_bounding_box(line_points)\n\n\ndef get_straight_contour(lanes, extra_lateral) -> List:\n \"\"\"\n Get several points as bounding box of this road\n :param lanes: lanes contained in road\n :param extra_lateral: extra length in lateral direction, usually sidewalk\n :return: points\n :param lanes:\n :return:\n \"\"\"\n ret = []\n for lane, dir in [(lanes[0], -1), (lanes[-1], 1)]:\n ret.append(lane.position(0.1, dir * (lane.width / 2.0 + extra_lateral)))\n ret.append(lane.position(lane.length - 0.1, dir * (lane.width / 2.0 + extra_lateral)))\n return ret\n\n\ndef get_curve_contour(lanes, extra_lateral) -> List:\n \"\"\"\n Get 
several points as bounding box of this road\n :param lanes: lanes contained in road\n :param extra_lateral: extra length in lateral direction, usually sidewalk\n :return: points\n \"\"\"\n points = []\n for lane, lateral_dir in [(lanes[0], -1), (lanes[-1], 1)]:\n pi_2 = (np.pi / 2.0)\n points += [\n lane.position(0.1, lateral_dir * (lane.width / 2.0 + extra_lateral)),\n lane.position(lane.length - 0.1, lateral_dir * (lane.width / 2.0 + extra_lateral))\n ]\n start_phase = (lane.start_phase // pi_2) * pi_2\n start_phase += pi_2 if lane.direction == 1 else 0\n for phi_index in range(4):\n phi = start_phase + phi_index * pi_2 * lane.direction\n if lane.direction * phi > lane.direction * lane.end_phase:\n break\n point = lane.center + (lane.radius - lateral_dir * (lane.width / 2.0 + extra_lateral) *\n lane.direction) * np.array([math.cos(phi), math.sin(phi)])\n points.append(point)\n return points\n\n\ndef get_all_lanes(roadnet: \"RoadNetwork\"):\n graph = roadnet.graph\n res = []\n for from_, to_dict in graph.items():\n for _to, lanes in to_dict.items():\n for l in lanes:\n res.append(l)\n return res\n\n\ndef ray_localization(position: np.ndarray, pg_world: PGWorld) -> Tuple:\n \"\"\"\n Get the index of the lane closest to a physx_world position.\n Only used when smoething is on lane ! Otherwise fall back to use get_closest_lane()\n :param position: a physx_world position [m].\n :param pg_world: PGWorld class\n :return: the index of the closest lane.\n \"\"\"\n results = pg_world.physics_world.static_world.rayTestAll(\n panda_position(position, 1.0), panda_position(position, -1.0)\n )\n lane_index_dist = []\n if results.hasHits():\n for res in results.getHits():\n if res.getNode().getName() == BodyName.Lane:\n lane = res.getNode().getPythonTag(BodyName.Lane)\n lane_index_dist.append((lane.info, lane.index, lane.info.distance(position)))\n if len(lane_index_dist) > 0:\n ret_index = np.argmin([d for _, _, d in lane_index_dist])\n lane, index, dist = lane_index_dist[ret_index]\n else:\n lane, index, dist = None, None, None\n return lane, index\n",
"import numpy as np\nimport pytest\n\nfrom pgdrive.envs.generation_envs.change_density_env import ChangeDensityEnv\nfrom pgdrive.envs.generation_envs.change_friction_env import ChangeFrictionEnv\n\n\ndef _run(env):\n try:\n env.seed(100000)\n for _ in range(5):\n obs = env.reset()\n for s in range(100):\n action = np.array([0.0, 1.0])\n o, r, d, i = env.step(action)\n if d:\n env.reset()\n finally:\n env.close()\n\n\ndef test_change_friction():\n _run(ChangeFrictionEnv(config={\"environment_num\": 100, \"start_seed\": 1000, \"change_friction\": True}))\n _run(ChangeFrictionEnv(config={\"environment_num\": 100, \"start_seed\": 1000, \"change_friction\": False}))\n\n\n# SidePassEnv is tested in test_object_collision.py!\n# def test_side_pass_env():\n# _run(SidePassEnv({\"target_vehicle_configs\": {\"default_agent\": {\"show_navi_mark\": False}}}))\n\n\ndef test_change_density_env():\n _run(ChangeDensityEnv(config={\"change_density\": False}))\n _run(ChangeDensityEnv(config={\"change_density\": True}))\n\n\nif __name__ == '__main__':\n pytest.main([\"-sv\", \"test_change_friction_density_envs.py\"])\n # test_side_pass_env()\n # test_change_friction()\n"
] |
[
[
"numpy.allclose",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.full",
"numpy.all",
"numpy.finfo",
"numpy.isscalar",
"numpy.floor",
"numpy.array",
"numpy.empty"
],
[
"numpy.argmin"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
willfrey/vision
|
[
"56fb0bf5796ac374d4e353032e418236cd73c554",
"56fb0bf5796ac374d4e353032e418236cd73c554",
"56fb0bf5796ac374d4e353032e418236cd73c554"
] |
[
"test/test_prototype_builtin_datasets.py",
"torchvision/models/quantization/inception.py",
"torchvision/ops/stochastic_depth.py"
] |
[
"import functools\nimport io\nimport pickle\nfrom pathlib import Path\n\nimport pytest\nimport torch\nfrom builtin_dataset_mocks import parametrize_dataset_mocks, DATASET_MOCKS\nfrom torch.testing._comparison import assert_equal, TensorLikePair, ObjectPair\nfrom torch.utils.data.datapipes.iter.grouping import ShardingFilterIterDataPipe as ShardingFilter\nfrom torch.utils.data.graph import traverse\nfrom torchdata.datapipes.iter import IterDataPipe, Shuffler\nfrom torchvision._utils import sequence_to_str\nfrom torchvision.prototype import transforms, datasets\n\n\nassert_samples_equal = functools.partial(\n assert_equal, pair_types=(TensorLikePair, ObjectPair), rtol=0, atol=0, equal_nan=True\n)\n\n\[email protected]\ndef test_home(mocker, tmp_path):\n mocker.patch(\"torchvision.prototype.datasets._api.home\", return_value=str(tmp_path))\n yield tmp_path\n\n\ndef test_coverage():\n untested_datasets = set(datasets.list_datasets()) - DATASET_MOCKS.keys()\n if untested_datasets:\n raise AssertionError(\n f\"The dataset(s) {sequence_to_str(sorted(untested_datasets), separate_last='and ')} \"\n f\"are exposed through `torchvision.prototype.datasets.load()`, but are not tested. \"\n f\"Please add mock data to `test/builtin_dataset_mocks.py`.\"\n )\n\n\nclass TestCommon:\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_smoke(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n if not isinstance(dataset, IterDataPipe):\n raise AssertionError(f\"Loading the dataset should return an IterDataPipe, but got {type(dataset)} instead.\")\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_sample(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n try:\n sample = next(iter(dataset))\n except StopIteration:\n raise AssertionError(\"Unable to draw any sample.\") from None\n except Exception as error:\n raise AssertionError(\"Drawing a sample raised the error above.\") from error\n\n if not isinstance(sample, dict):\n raise AssertionError(f\"Samples should be dictionaries, but got {type(sample)} instead.\")\n\n if not sample:\n raise AssertionError(\"Sample dictionary is empty.\")\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_num_samples(self, test_home, dataset_mock, config):\n mock_info = dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n num_samples = 0\n for _ in dataset:\n num_samples += 1\n\n assert num_samples == mock_info[\"num_samples\"]\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_decoding(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n undecoded_features = {key for key, value in next(iter(dataset)).items() if isinstance(value, io.IOBase)}\n if undecoded_features:\n raise AssertionError(\n f\"The values of key(s) \"\n f\"{sequence_to_str(sorted(undecoded_features), separate_last='and ')} were not decoded.\"\n )\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_no_vanilla_tensors(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n vanilla_tensors = {key for key, value in next(iter(dataset)).items() if type(value) is torch.Tensor}\n if vanilla_tensors:\n raise AssertionError(\n f\"The values of key(s) \"\n f\"{sequence_to_str(sorted(vanilla_tensors), 
separate_last='and ')} contained vanilla tensors.\"\n )\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_transformable(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n next(iter(dataset.map(transforms.Identity())))\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_serializable(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n pickle.dumps(dataset)\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n @pytest.mark.parametrize(\"annotation_dp_type\", (Shuffler, ShardingFilter))\n def test_has_annotations(self, test_home, dataset_mock, config, annotation_dp_type):\n def scan(graph):\n for node, sub_graph in graph.items():\n yield node\n yield from scan(sub_graph)\n\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n if not any(type(dp) is annotation_dp_type for dp in scan(traverse(dataset))):\n raise AssertionError(f\"The dataset doesn't contain a {annotation_dp_type.__name__}() datapipe.\")\n\n @parametrize_dataset_mocks(DATASET_MOCKS)\n def test_save_load(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n dataset = datasets.load(dataset_mock.name, **config)\n sample = next(iter(dataset))\n\n with io.BytesIO() as buffer:\n torch.save(sample, buffer)\n buffer.seek(0)\n assert_samples_equal(torch.load(buffer), sample)\n\n\n@parametrize_dataset_mocks(DATASET_MOCKS[\"qmnist\"])\nclass TestQMNIST:\n def test_extra_label(self, test_home, dataset_mock, config):\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n sample = next(iter(dataset))\n for key, type in (\n (\"nist_hsf_series\", int),\n (\"nist_writer_id\", int),\n (\"digit_index\", int),\n (\"nist_label\", int),\n (\"global_digit_index\", int),\n (\"duplicate\", bool),\n (\"unused\", bool),\n ):\n assert key in sample and isinstance(sample[key], type)\n\n\n@parametrize_dataset_mocks(DATASET_MOCKS[\"gtsrb\"])\nclass TestGTSRB:\n def test_label_matches_path(self, test_home, dataset_mock, config):\n # We read the labels from the csv files instead. But for the trainset, the labels are also part of the path.\n # This test makes sure that they're both the same\n if config.split != \"train\":\n return\n\n dataset_mock.prepare(test_home, config)\n\n dataset = datasets.load(dataset_mock.name, **config)\n\n for sample in dataset:\n label_from_path = int(Path(sample[\"path\"]).parent.name)\n assert sample[\"label\"] == label_from_path\n",
"import warnings\nfrom functools import partial\nfrom typing import Any, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torchvision.models import inception as inception_module\nfrom torchvision.models.inception import InceptionOutputs, Inception_V3_Weights\n\nfrom ...transforms._presets import ImageClassification, InterpolationMode\nfrom .._api import WeightsEnum, Weights\nfrom .._meta import _IMAGENET_CATEGORIES\nfrom .._utils import handle_legacy_interface, _ovewrite_named_param\nfrom .utils import _fuse_modules, _replace_relu, quantize_model\n\n\n__all__ = [\n \"QuantizableInception3\",\n \"Inception_V3_QuantizedWeights\",\n \"inception_v3\",\n]\n\n\nclass QuantizableBasicConv2d(inception_module.BasicConv2d):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.relu = nn.ReLU()\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n def fuse_model(self, is_qat: Optional[bool] = None) -> None:\n _fuse_modules(self, [\"conv\", \"bn\", \"relu\"], is_qat, inplace=True)\n\n\nclass QuantizableInceptionA(inception_module.InceptionA):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n self.myop = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return self.myop.cat(outputs, 1)\n\n\nclass QuantizableInceptionB(inception_module.InceptionB):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n self.myop = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return self.myop.cat(outputs, 1)\n\n\nclass QuantizableInceptionC(inception_module.InceptionC):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n self.myop = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return self.myop.cat(outputs, 1)\n\n\nclass QuantizableInceptionD(inception_module.InceptionD):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n self.myop = nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return self.myop.cat(outputs, 1)\n\n\nclass QuantizableInceptionE(inception_module.InceptionE):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n self.myop1 = nn.quantized.FloatFunctional()\n self.myop2 = nn.quantized.FloatFunctional()\n self.myop3 = nn.quantized.FloatFunctional()\n\n def _forward(self, x: Tensor) -> List[Tensor]:\n branch1x1 = self.branch1x1(x)\n\n branch3x3 = self.branch3x3_1(x)\n branch3x3 = [self.branch3x3_2a(branch3x3), 
self.branch3x3_2b(branch3x3)]\n branch3x3 = self.myop1.cat(branch3x3, 1)\n\n branch3x3dbl = self.branch3x3dbl_1(x)\n branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)\n branch3x3dbl = [\n self.branch3x3dbl_3a(branch3x3dbl),\n self.branch3x3dbl_3b(branch3x3dbl),\n ]\n branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)\n\n branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)\n branch_pool = self.branch_pool(branch_pool)\n\n outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]\n return outputs\n\n def forward(self, x: Tensor) -> Tensor:\n outputs = self._forward(x)\n return self.myop3.cat(outputs, 1)\n\n\nclass QuantizableInceptionAux(inception_module.InceptionAux):\n # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) # type: ignore[misc]\n\n\nclass QuantizableInception3(inception_module.Inception3):\n def __init__(\n self,\n num_classes: int = 1000,\n aux_logits: bool = True,\n transform_input: bool = False,\n ) -> None:\n super().__init__(\n num_classes=num_classes,\n aux_logits=aux_logits,\n transform_input=transform_input,\n inception_blocks=[\n QuantizableBasicConv2d,\n QuantizableInceptionA,\n QuantizableInceptionB,\n QuantizableInceptionC,\n QuantizableInceptionD,\n QuantizableInceptionE,\n QuantizableInceptionAux,\n ],\n )\n self.quant = torch.ao.quantization.QuantStub()\n self.dequant = torch.ao.quantization.DeQuantStub()\n\n def forward(self, x: Tensor) -> InceptionOutputs:\n x = self._transform_input(x)\n x = self.quant(x)\n x, aux = self._forward(x)\n x = self.dequant(x)\n aux_defined = self.training and self.aux_logits\n if torch.jit.is_scripting():\n if not aux_defined:\n warnings.warn(\"Scripted QuantizableInception3 always returns QuantizableInception3 Tuple\")\n return InceptionOutputs(x, aux)\n else:\n return self.eager_outputs(x, aux)\n\n def fuse_model(self, is_qat: Optional[bool] = None) -> None:\n r\"\"\"Fuse conv/bn/relu modules in inception model\n\n Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.\n Model is modified in place. 
Note that this operation does not change numerics\n and the model after modification is in floating point\n \"\"\"\n\n for m in self.modules():\n if type(m) is QuantizableBasicConv2d:\n m.fuse_model(is_qat)\n\n\nclass Inception_V3_QuantizedWeights(WeightsEnum):\n IMAGENET1K_FBGEMM_V1 = Weights(\n url=\"https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth\",\n transforms=partial(ImageClassification, crop_size=299, resize_size=342),\n meta={\n \"task\": \"image_classification\",\n \"architecture\": \"InceptionV3\",\n \"publication_year\": 2015,\n \"num_params\": 27161264,\n \"size\": (299, 299),\n \"min_size\": (75, 75),\n \"categories\": _IMAGENET_CATEGORIES,\n \"interpolation\": InterpolationMode.BILINEAR,\n \"backend\": \"fbgemm\",\n \"quantization\": \"Post Training Quantization\",\n \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models\",\n \"unquantized\": Inception_V3_Weights.IMAGENET1K_V1,\n \"acc@1\": 77.176,\n \"acc@5\": 93.354,\n },\n )\n DEFAULT = IMAGENET1K_FBGEMM_V1\n\n\n@handle_legacy_interface(\n weights=(\n \"pretrained\",\n lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1\n if kwargs.get(\"quantize\", False)\n else Inception_V3_Weights.IMAGENET1K_V1,\n )\n)\ndef inception_v3(\n *,\n weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableInception3:\n r\"\"\"Inception v3 model architecture from\n `\"Rethinking the Inception Architecture for Computer Vision\" <http://arxiv.org/abs/1512.00567>`_.\n\n .. note::\n **Important**: In contrast to the other models the inception_v3 expects tensors with a size of\n N x 3 x 299 x 299, so ensure your images are sized accordingly.\n\n Note that quantize = True returns a quantized model with 8 bit\n weights. Quantized models only support inference and run on CPUs.\n GPU inference is not yet supported\n\n Args:\n weights (Inception_V3_QuantizedWeights or Inception_V3_Weights, optional): The pretrained\n weights for the model\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, return a quantized version of the model\n \"\"\"\n weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)\n\n original_aux_logits = kwargs.get(\"aux_logits\", False)\n if weights is not None:\n if \"transform_input\" not in kwargs:\n _ovewrite_named_param(kwargs, \"transform_input\", True)\n _ovewrite_named_param(kwargs, \"aux_logits\", True)\n _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"]))\n if \"backend\" in weights.meta:\n _ovewrite_named_param(kwargs, \"backend\", weights.meta[\"backend\"])\n backend = kwargs.pop(\"backend\", \"fbgemm\")\n\n model = QuantizableInception3(**kwargs)\n _replace_relu(model)\n if quantize:\n quantize_model(model, backend)\n\n if weights is not None:\n if quantize and not original_aux_logits:\n model.aux_logits = False\n model.AuxLogits = None\n model.load_state_dict(weights.get_state_dict(progress=progress))\n if not quantize and not original_aux_logits:\n model.aux_logits = False\n model.AuxLogits = None\n\n return model\n",
"import torch\nimport torch.fx\nfrom torch import nn, Tensor\n\nfrom ..utils import _log_api_usage_once\n\n\ndef stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor:\n \"\"\"\n Implements the Stochastic Depth from `\"Deep Networks with Stochastic Depth\"\n <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual\n branches of residual architectures.\n\n Args:\n input (Tensor[N, ...]): The input tensor or arbitrary dimensions with the first one\n being its batch i.e. a batch with ``N`` rows.\n p (float): probability of the input to be zeroed.\n mode (str): ``\"batch\"`` or ``\"row\"``.\n ``\"batch\"`` randomly zeroes the entire input, ``\"row\"`` zeroes\n randomly selected rows from the batch.\n training: apply stochastic depth if is ``True``. Default: ``True``\n\n Returns:\n Tensor[N, ...]: The randomly zeroed tensor.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(stochastic_depth)\n if p < 0.0 or p > 1.0:\n raise ValueError(f\"drop probability has to be between 0 and 1, but got {p}\")\n if mode not in [\"batch\", \"row\"]:\n raise ValueError(f\"mode has to be either 'batch' or 'row', but got {mode}\")\n if not training or p == 0.0:\n return input\n\n survival_rate = 1.0 - p\n if mode == \"row\":\n size = [input.shape[0]] + [1] * (input.ndim - 1)\n else:\n size = [1] * input.ndim\n noise = torch.empty(size, dtype=input.dtype, device=input.device)\n noise = noise.bernoulli_(survival_rate)\n if survival_rate > 0.0:\n noise.div_(survival_rate)\n return input * noise\n\n\ntorch.fx.wrap(\"stochastic_depth\")\n\n\nclass StochasticDepth(nn.Module):\n \"\"\"\n See :func:`stochastic_depth`.\n \"\"\"\n\n def __init__(self, p: float, mode: str) -> None:\n super().__init__()\n _log_api_usage_once(self)\n self.p = p\n self.mode = mode\n\n def forward(self, input: Tensor) -> Tensor:\n return stochastic_depth(input, self.p, self.mode, self.training)\n\n def __repr__(self) -> str:\n s = f\"{self.__class__.__name__}(p={self.p}, mode={self.mode})\"\n return s\n"
] |
[
[
"torch.utils.data.graph.traverse",
"torch.load",
"torch.save"
],
[
"torch.nn.functional.avg_pool2d",
"torch.ao.quantization.DeQuantStub",
"torch.ao.quantization.QuantStub",
"torch.jit.is_scripting",
"torch.nn.ReLU",
"torch.nn.quantized.FloatFunctional"
],
[
"torch.jit.is_tracing",
"torch.jit.is_scripting",
"torch.empty",
"torch.fx.wrap"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dbstein/field_ops
|
[
"70359a1be37d911b879746e5c61d6d10da1c0f3b"
] |
[
"examples/test22.py"
] |
[
"import numpy as np\nimport time\nfrom field_ops import Engine2 as Engine\n\nn = 100\nv = np.linspace(0, 1, n)\nx, y, z = np.meshgrid(v, v, v, indexing='ij')\n\n# setup simulation engine\nsim = Engine()\n\n# allocate variables\nc = sim.zeros([], [n,n,n], float)\nc_hat = sim.zeros([], [n,n,n], complex)\nu = sim.zeros([3], [n,n,n], float)\nu_hat = sim.zeros([3], [n,n,n], complex)\nD = sim.zeros([3,3], [n,n,n], float)\nD_hat = sim.zeros([3,3], [n,n,n], complex)\nR = sim.zeros([3,3], [n,n,n], float)\nM = sim.zeros([3,3], [n,n,n], float)\nv = sim.zeros([3], [n,n,n], float)\nV = sim.zeros([3,3], [n,n,n], float)\neye = sim.zeros([3,3], [n,n,n], float)\n\n# set c to something\nD[:] = np.random.rand(*D.shape)\n\n# make eye actually be an identity\nfor i in range(3):\n\teye[i,i] += 1.0\n\n# execute an einsum prod on D\nprint('\\n--- Testing einsum MAT MAT ---')\ninstr = 'ij...,jk...->ik...'\nsim.einsum(instr, [D, D], R)\nst = time.time(); truth = np.einsum(instr,D.data,D.data); numpy_time = time.time()-st\nst = time.time(); sim.einsum(instr, [D, D], R); sim_time = time.time()-st\nprint('All close? ', np.allclose(truth, R.data))\nprint('... Einsum time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... Sim time (ms): {:0.1f}'.format(sim_time*1000))\n\n# now make R be nicely conditioned\nR[:] = 0.1*R + eye\n\n# instantiate processor pool\npool = sim.initialize_pool()\n\n# compute the eigendecomposition of R\nprint('\\n--- Testing eigendecomposition ---')\nS = np.transpose(R.data, [2,3,4,0,1])\nst = time.time(); truth = np.linalg.eigh(S); numpy_time = time.time()-st\nst = time.time(); sim.eigh(R, v, V, pool); sim_time = time.time()-st\ntv = np.transpose(truth[0], [3,0,1,2])\ntV = np.transpose(truth[1], [3,4,0,1,2])\nprint('... All close, values? ', np.allclose(tv, v.data))\nprint('... All close, vectors? ', np.allclose(tV, V.data))\nprint('... Eigh time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... Sim time (ms): {:0.1f}'.format(sim_time*1000))\n\n# test matrix matrix multiply, with transpose on first mat\nprint('\\n--- Testing matrix matrix multiply, transpose on first mat ---')\n# run once to compile\nsim.mat_mat_tA(D, R, M);\nst = time.time(); NR = np.einsum('ji...,jk...->ik...', D.data, R.data); numpy_time = time.time()-st\nst = time.time(); sim.mat_mat_tA(D, R, M); sim_time = time.time()-st\nprint('... All close? ', np.allclose(NR, M.data))\nprint('... numpy time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... Sim time (ms): {:0.1f}'.format(sim_time*1000))\n\n# test FFTs\nprint('\\n--- Testing FFT (as compared to FFTPACK) ---')\n# run once to be sure the FFT is planned\n_ = np.fft.fftn(D.data)\nst = time.time(); NR = np.fft.fftpack.fftn(D.data, axes=(-3,-2,-1)); numpy_time = time.time()-st\nst = time.time(); sim.fft(D, D_hat); sim_time1 = time.time()-st\nst = time.time(); sim.fft(D, D_hat); sim_time2 = time.time()-st\nprint('... All close? ', np.allclose(NR, D_hat.data))\nprint('... numpy time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... Sim time 1 (ms): {:0.1f}'.format(sim_time1*1000))\nprint('... Sim time 2 (ms): {:0.1f}'.format(sim_time2*1000))\n\nprint('\\n--- Testing IFFT (as compared to FFTPACK) ---')\n# run once to be sure the FFT is planned\n_ = np.fft.ifftn(NR).real\nst = time.time(); NR = np.fft.fftpack.ifftn(D_hat.data, axes=(-3,-2,-1)); numpy_time = time.time()-st\nst = time.time(); sim.ifft(D_hat, D); sim_time = time.time()-st\nprint('... All close? ', np.allclose(NR, D.data))\nprint('... numpy time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... 
Sim time (ms): {:0.1f}'.format(sim_time*1000))\n\nprint('\\n--- Testing Symmetrize Operation ---')\nM = sim.zeros([3,3], [n,n,n], float)\nE = sim.zeros([3,3], [n,n,n], float)\nM[:] = np.random.rand(*M.shape)\nMM = M.data\nNR = np.empty(M.shape)\nst = time.time()\nNR[0,0] = MM[0,0]\nNR[1,1] = MM[1,1]\nNR[2,2] = MM[2,2]\nNR[0,1] = (MM[0,1] + MM[1,0])/2.0\nNR[0,2] = (MM[0,2] + MM[2,0])/2.0\nNR[1,2] = (MM[1,2] + MM[2,1])/2.0\nNR[1,0] = NR[0,1]\nNR[2,0] = NR[0,2]\nNR[2,1] = NR[1,2]\nnumpy_time = time.time()-st\n# run once to be sure the FFT is planned\nsim.symmetrize(M, E)\nst = time.time(); sim.symmetrize(M, E); sim_time = time.time()-st\nprint('... All close? ', np.allclose(NR, E.data))\nprint('... numpy time (ms): {:0.1f}'.format(numpy_time*1000))\nprint('... Sim time (ms): {:0.1f}'.format(sim_time*1000))\n\nprint('\\n--- Test common einsums ---')\ndef test_common(instr):\n\t# parse instr\n\tprint('...Testing einsum:', instr)\n\tl1, l2 = instr.split(',')\n\tl2, l3 = l2.split('->')\n\tl1 = len(l1.replace('...',''))\n\tl2 = len(l2.replace('...',''))\n\tl3 = len(l3.replace('...',''))\n\t# get shapes\n\tsh1 = [3,]*l1 + [n,n,n]\n\tsh2 = [3,]*l2 + [n,n,n]\n\tsh3 = [3,]*l3 + [n,n,n]\n\t# allocate memory\n\tM1 = sim.zeros(sh1[:l1], sh1[l1:], float)\n\tM2 = sim.zeros(sh2[:l2], sh2[l2:], float)\n\tM3 = sim.zeros(sh3[:l3], sh3[l3:], float)\n\tM1N = np.random.rand(*sh1)\n\tM2N = np.random.rand(*sh2)\n\tM1[:] = M1N\n\tM2[:] = M2N\n\t# test numpy\n\tst = time.time(); M3N = np.einsum(instr, M1N, M2N); numpy_time=time.time()-st\n\t# test sim\n\tsim.einsum(instr, [M1, M2], M3)\n\tst = time.time(); sim.einsum(instr, [M1, M2], M3); sim_time=time.time()-st\n\tprint('... All close? ', np.allclose(M3N, M3.data))\n\tprint('... numpy time (ms): {:0.1f}'.format(numpy_time*1000))\n\tprint('... Sim time (ms): {:0.1f}'.format(sim_time*1000))\nfor instr in sim.list_common_einsum():\n\ttest_common(instr)\n"
] |
[
[
"numpy.fft.fftpack.fftn",
"numpy.allclose",
"numpy.linspace",
"numpy.einsum",
"numpy.fft.fftn",
"numpy.fft.ifftn",
"numpy.linalg.eigh",
"numpy.random.rand",
"numpy.transpose",
"numpy.fft.fftpack.ifftn",
"numpy.meshgrid",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
helloric/pydial3
|
[
"34988f4592c4e28388b2818de8768d841696efbb",
"34988f4592c4e28388b2818de8768d841696efbb",
"34988f4592c4e28388b2818de8768d841696efbb"
] |
[
"semo/RNNLG/generator/knn.py",
"semi/CNetTrain/Classifier.py",
"policy/SummaryUtils.py"
] |
[
"######################################################################\n######################################################################\n# Copyright Tsung-Hsien Wen, Cambridge Dialogue Systems Group, 2016 #\n######################################################################\n######################################################################\nimport numpy as np\nimport os\nimport operator\nfrom math import sqrt\nimport random\nfrom ast import literal_eval\nfrom copy import deepcopy\nimport sys\nsys.path.insert(0, '.')\n\nfrom semo.RNNLG.loader.DataReader import *\nfrom semo.RNNLG.loader.GentScorer import *\n\nfrom configparser import SafeConfigParser\n\nclass KNN(object):\n\n def __init__(self,config=None,opts=None):\n # not enough info to execute\n if config==None and opts==None:\n print(\"Please specify command option or config file ...\")\n return\n # config parser\n parser = SafeConfigParser()\n parser.read(config)\n\n self.debug = parser.getboolean('knn','debug')\n self.seed = parser.getint('knn','random_seed')\n self.obj = 'dt'\n self.trainfile = parser.get('knn','train')\n self.validfile = parser.get('knn','valid') \n self.testfile = parser.get('knn','test')\n self.vocabfile = parser.get('knn','vocab')\n self.domain = parser.get('knn','domain')\n self.percentage = float(parser.getfloat('knn','percentage'))/100.0\n # Setting generation specific parameters\n self.topk = parser.getint('knn','topk')\n self.detectpairs= parser.get('knn','detectpairs')\n self.verbose = parser.getint('knn','verbose')\n # set random seed\n np.random.seed(self.seed)\n random.seed(self.seed)\n np.set_printoptions(precision=4)\n # setting data reader, processors, and lexicon\n self.setupSideOperators()\n \n def testKNN(self):\n\n ######## train KNN generator by grouping ########\n da2sents = {}\n templates = self.reader.readall(mode='train')+\\\n self.reader.readall(mode='valid')\n for a,sv,s,v,sents,dact,base in templates:\n key = (tuple(a),tuple(sv))\n if key in da2sents:\n da2sents[key].extend(sents)\n da2sents[key] = list(set(da2sents[key]))\n else:\n da2sents[key] = sents\n\n ######## test KNN generator on test set ######### \n if self.debug:\n print('start KNN generation ...')\n \n # container\n parallel_corpus, hdc_corpus = [], []\n # slot error counts\n gencnts, refcnts = [0.0,0.0,0.0],[0.0,0.0,0.0]\n\n while True:\n # read data point\n data = self.reader.read(mode='test',batch=1)\n if data==None:\n break\n a,sv,s,v,sents,dact,bases,cutoff_b,cutoff_f = data\n # remove batch dimension\n a,sv,s,v = a[0],sv[0],s[0],v[0]\n sents,dact,bases = sents[0],dact[0],bases[0]\n # score DA similarity between testing example and train+valid set\n template_ranks = []\n for da_t,sents_t in da2sents.items():\n a_t,sv_t = [set(x) for x in da_t]\n score =float(len(a_t.intersection(set(a)))+\\\n len(sv_t.intersection(set(sv))))/\\\n sqrt(len(a_t)+len(sv_t))/sqrt(len(a)+len(sv))\n template_ranks.append([score,sents_t])\n # rank templates\n template_ranks = sorted(template_ranks,key=operator.itemgetter(0))\n gens = deepcopy(template_ranks[-1][1])\n score= template_ranks[-1][0]\n random.shuffle(gens)\n gens = gens[:self.topk] if len(gens)>self.topk else gens\n # for slot error rate scoring\n felements = [self.reader.cardinality[x+self.reader.dfs[1]]\\\n for x in sv]\n # print results\n print(dact)\n print('Sim\\tTSER\\tASER\\tGen')\n for i in range(len(gens)):\n # score slot error rate\n cnt, total, caty = self.gentscorer.scoreERR(a,felements,\n self.reader.delexicalise(gens[i],dact))\n gens[i] = 
self.reader.lexicalise(gens[i],dact)\n # accumulate slot error cnts\n gencnts[0] += cnt\n gencnts[1] += total\n gencnts[2] += caty\n print('%.4f\\t%d\\t%d\\t%s' % (score,total,caty,gens[i]))\n print('\\n')\n \n # compute gold standard slot error rate\n for sent in sents:\n # score slot error rate\n cnt, total, caty = self.gentscorer.scoreERR(a,felements,\n self.reader.delexicalise(sent,dact))\n # accumulate slot error cnts\n refcnts[0] += cnt\n refcnts[1] += total\n refcnts[2] += caty\n\n # accumulate score for bleu score computation \n parallel_corpus.append([[g for g in gens],sents])\n hdc_corpus.append([bases[:1],sents])\n\n bleuModel = self.gentscorer.scoreBLEU(parallel_corpus)\n bleuHDC = self.gentscorer.scoreBLEU(hdc_corpus)\n print('##############################################')\n print('BLEU SCORE & SLOT ERROR on GENERATED SENTENCES')\n print('##############################################')\n print('Metric :\\tBLEU\\tT.ERR\\tA.ERR')\n print('HDC :\\t%.4f\\t%2.2f%%\\t%2.2f%%'% (bleuHDC,0.0,0.0))\n print('Ref :\\t%.4f\\t%2.2f%%\\t%2.2f%%'% (1.0,\n 100*refcnts[1]/refcnts[0],100*refcnts[2]/refcnts[0]))\n print('----------------------------------------------')\n print('This Model :\\t%.4f\\t%2.2f%%\\t%2.2f%%'% (bleuModel,\n 100*gencnts[1]/gencnts[0],100*gencnts[2]/gencnts[0]))\n\n def setupSideOperators(self):\n # initialise data reader\n self.reader = DataReader(self.seed, self.domain, self.obj,\n self.vocabfile, self.trainfile, self.validfile, self.testfile,\n self.percentage, self.verbose, lexCutoff=4)\n # setting generation scorer\n self.gentscorer = GentScorer(self.detectpairs)\n \n\n\n\n",
"from semi.CNetTrain import sutils, Tuples\nimport json, sys, pickle, time, math, os\nfrom collections import defaultdict\nfrom semi.CNetTrain.Features import cnet as cnet_extractor\nfrom scipy.sparse import lil_matrix\nimport numpy\nfrom sklearn.linear_model import SGDClassifier\n\n\nnames_to_classes = {}\n\nclass classifier(object):\n def __init__(self, config):\n # classifier type\n self.type = \"svm\"\n if config.has_option(\"classifier\", \"type\") :\n self.type = config.get(\"classifier\", \"type\")\n \n # min_examples\n self.min_examples = 10\n if config.has_option(\"classifier\", \"min_examples\") :\n self.min_examples = int(config.get(\"classifier\",\"min_examples\"))\n \n # features\n self.features = [\"cnet\"]\n if config.has_option(\"classifier\", \"features\") :\n self.features = json.loads(config.get(\"classifier\", \"features\"))\n self.feature_extractors = []\n for feature in self.features:\n self.feature_extractors.append(\n sutils.import_class(\"Features.\" + feature)(config)\n )\n \n self.tuples = Tuples.tuples(config)\n self.config = config\n self.cnet_extractor = cnet_extractor(config)\n \n # store data:\n self.X = {}\n self.y = {}\n self.baseXs = []\n self.baseX_pointers = {}\n self.fnames = {} \n \n def extractFeatures(self, dw, log_input_key=\"batch\"):\n # given a dataset walker,\n # adds examples to self.X and self.y\n total_calls = len(dw.session_list)\n self.keys = set([])\n for call_num, call in enumerate(dw) :\n print(\"[%i/%i] %s\"%(call_num+1, total_calls, call.log[\"session-id\"]))\n for log_turn, label_turn in call:\n \n if label_turn != None:\n uacts = label_turn['semantics']['json']\n these_tuples = self.tuples.uactsToTuples(uacts)\n # check there aren't any tuples we were not expecting:\n for this_tuple in these_tuples:\n if this_tuple not in self.tuples.all_tuples :\n print(\"Warning: unexpected tuple\", this_tuple)\n # convert tuples to specific tuples:\n these_tuples = [Tuples.generic_to_specific(tup) for tup in these_tuples]\n \n # which tuples would be considered (active) for this turn?\n active_tuples = self.tuples.activeTuples(log_turn)\n \n # calculate base features that are independent of the tuple\n baseX = defaultdict(float)\n for feature_extractor in self.feature_extractors:\n feature_name = feature_extractor.__class__.__name__\n new_feats = feature_extractor.calculate(log_turn, log_input_key=log_input_key)\n for key in new_feats:\n baseX[(feature_name, key)] += new_feats[key]\n self.keys.add((feature_name, key))\n self.baseXs.append(baseX)\n \n for this_tuple in active_tuples:\n if label_turn != None :\n y = (Tuples.generic_to_specific(this_tuple) in these_tuples)\n \n X = defaultdict(float)\n for feature_extractor in self.feature_extractors:\n feature_name = feature_extractor.__class__.__name__\n new_feats = feature_extractor.tuple_calculate(this_tuple, log_turn, log_input_key=log_input_key)\n for key in new_feats:\n X[(feature_name, key)] += new_feats[key]\n self.keys.add((feature_name, key))\n \n if this_tuple not in self.X :\n self.X[this_tuple] = []\n if this_tuple not in self.y :\n self.y[this_tuple] = []\n if this_tuple not in self.baseX_pointers :\n self.baseX_pointers[this_tuple] = []\n if this_tuple not in self.fnames :\n self.fnames[this_tuple] = []\n \n self.X[this_tuple].append(X)\n if label_turn != None :\n self.y[this_tuple].append(y)\n \n self.baseX_pointers[this_tuple].append(len(self.baseXs) - 1)\n \n self.fnames[this_tuple].append(log_turn[\"input\"][\"audio-file\"])\n\n\n def extractFeatures(self, sentinfo, 
log_input_key=\"batch\"):\n # given a dataset walker,\n # adds examples to self.X and self.y\n total_calls = 1\n self.keys = set([])\n\n\n # calculate base features that are independent of the tuple\n baseX = defaultdict(float)\n for feature_extractor in self.feature_extractors:\n feature_name = feature_extractor.__class__.__name__\n new_feats = feature_extractor.calculate_sent(sentinfo, log_input_key=log_input_key)\n for key in new_feats:\n baseX[(feature_name, key)] += new_feats[key]\n self.keys.add((feature_name, key))\n self.baseXs.append(baseX)\n\n for this_tuple in self.classifiers:\n X = defaultdict(float)\n for feature_extractor in self.feature_extractors:\n feature_name = feature_extractor.__class__.__name__\n new_feats = feature_extractor.tuple_calculate(this_tuple, sentinfo, log_input_key=log_input_key)\n for key in new_feats:\n X[(feature_name, key)] += new_feats[key]\n self.keys.add((feature_name, key))\n\n if this_tuple not in self.X :\n self.X[this_tuple] = []\n if this_tuple not in self.y :\n self.y[this_tuple] = []\n if this_tuple not in self.baseX_pointers :\n self.baseX_pointers[this_tuple] = []\n if this_tuple not in self.fnames :\n self.fnames[this_tuple] = []\n\n self.X[this_tuple].append(X)\n\n self.baseX_pointers[this_tuple].append(len(self.baseXs) - 1)\n\n\n def createDictionary(self):\n self.dictionary = {}\n for i, key in enumerate(self.keys):\n self.dictionary[key] = i\n \n \n def train(self, dw, config=None):\n if config == None :\n config = self.config\n log_input_key = \"batch\"\n if config.has_option(\"train\",\"log_input_key\") :\n log_input_key = config.get(\"train\",\"log_input_key\")\n print(\"extracting features from turns\")\n self.extractFeatures(dw, log_input_key=log_input_key)\n print(\"finished extracting features\")\n print(\"creating feature dictionary\")\n self.createDictionary()\n print(\"finished creating dictionary (of size\",len(self.dictionary),\")\")\n self.classifiers = {}\n for this_tuple in self.tuples.all_tuples:\n print(\"training\", this_tuple)\n if this_tuple not in self.X :\n print(\"Warning: not enough examples of\", this_tuple)\n self.classifiers[this_tuple] = None\n continue\n baseXs = [self.baseXs[index] for index in self.baseX_pointers[this_tuple]]\n y = list(map(int, self.y[this_tuple]))\n if sum(y) < self.min_examples :\n print(\"Warning: not enough examples of\", this_tuple)\n self.classifiers[this_tuple] = None\n continue\n X = toSparse(baseXs, self.X[this_tuple], self.dictionary)\n \n \n # pick the right classifier class\n self.classifiers[this_tuple] = names_to_classes[self.type](self.config)\n \n self.classifiers[this_tuple].train(X, y)\n \n \n no_models = [this_tuple for this_tuple in self.classifiers if self.classifiers[this_tuple] == None]\n \n if no_models:\n print(\"Not able to learn about: \")\n print(\", \".join(map(str, no_models)))\n \n def decode(self):\n # run the classifiers on self.X, return results\n results = {}\n for this_tuple in self.classifiers:\n if this_tuple not in self.X :\n print(\"warning: Did not collect features for \", this_tuple)\n continue\n n = len(self.X[this_tuple])\n if self.classifiers[this_tuple] == None :\n results[this_tuple] = numpy.zeros((n,))\n continue\n baseXs = [self.baseXs[index] for index in self.baseX_pointers[this_tuple]]\n X = toSparse(baseXs, self.X[this_tuple], self.dictionary)\n results[this_tuple] = self.classifiers[this_tuple].predict(X)\n return results\n \n \n def decodeToFile(self, dw, output_fname, config=None):\n if config == None :\n config = self.config\n t0 = 
time.time()\n results = {\n \"wall-time\":0.0, # add later\n \"dataset\": dw.datasets,\n \"sessions\": []\n }\n log_input_key = \"batch\"\n if config.has_option(\"decode\",\"log_input_key\") :\n log_input_key = config.get(\"decode\",\"log_input_key\")\n \n self.extractFeatures(dw,log_input_key=log_input_key)\n decode_results = self.decode()\n counter = defaultdict(int)\n for call_num, call in enumerate(dw):\n session = {\"session-id\" : call.log[\"session-id\"], \"turns\":[]}\n for log_turn, _ in call:\n active_tuples = self.tuples.activeTuples(log_turn)\n tuple_distribution = {}\n for this_tuple in active_tuples:\n index = counter[this_tuple]\n p = decode_results[this_tuple][index]\n tuple_distribution[Tuples.generic_to_specific(this_tuple)] = p\n # check we are decoding the right utterance\n assert self.fnames[this_tuple][index] == log_turn[\"input\"][\"audio-file\"]\n counter[this_tuple] += 1\n slu_hyps = self.tuples.distributionToNbest(tuple_distribution)\n session[\"turns\"].append({\n \"slu-hyps\":slu_hyps\n })\n results[\"sessions\"].append(session)\n \n \n results[\"wall-time\"] =time.time() - t0\n output_file = open(output_fname, \"wb\")\n json.dump(results, output_file, indent=4)\n output_file.close()\n\n \n def decode_sent(self, sentinfo, output_fname, config=None):\n if config == None :\n config = self.config\n t0 = time.time()\n self.X = {}\n self.y = {}\n self.baseXs = []\n self.baseX_pointers = {}\n self.fnames = {}\n log_input_key = \"batch\"\n if config.has_option(\"decode\",\"log_input_key\") :\n log_input_key = config.get(\"decode\",\"log_input_key\")\n\n self.extractFeatures(sentinfo,log_input_key=log_input_key)\n decode_results = self.decode()\n counter = defaultdict(int)\n\n active_tuples = self.tuples.activeTuples_sent(sentinfo)\n tuple_distribution = {}\n for this_tuple in active_tuples:\n index = counter[this_tuple]\n p = decode_results[this_tuple][index]\n tuple_distribution[Tuples.generic_to_specific(this_tuple)] = p\n # check we are decoding the right utterance\n counter[this_tuple] += 1\n slu_hyps = self.tuples.distributionToNbest(tuple_distribution)\n\n return slu_hyps\n\n\n\n\n def save(self, save_fname):\n classifier_params = {}\n for this_tuple in self.classifiers:\n if self.classifiers[this_tuple] == None :\n classifier_params[this_tuple] = None\n else :\n classifier_params[this_tuple] = self.classifiers[this_tuple].params()\n \n obj = {\n \"classifier_params\":classifier_params,\n \"dictionary\":self.dictionary\n }\n save_file = open(save_fname, \"wb\")\n pickle.dump(obj, save_file)\n save_file.close()\n \n \n def load(self, fname):\n rootpath=os.getcwd()\n if \"semi\"not in rootpath:\n fname=rootpath+\"/semi/CNetTrain/\"+fname\n else:\n fname=rootpath+\"/CNetTrain/\"+fname\n print(\"loading saved Classifier\")\n print(fname)\n obj = pickle.load(open(fname, 'rb'), encoding='ISO-8859-1')\n print(\"loaded.\")\n classifier_params = obj[\"classifier_params\"]\n self.classifiers = {}\n for this_tuple in classifier_params:\n if classifier_params[this_tuple] == None :\n self.classifiers[this_tuple] = None\n else :\n self.classifiers[this_tuple] = names_to_classes[self.type](self.config)\n self.classifiers[this_tuple].load(classifier_params[this_tuple])\n \n self.dictionary = obj[\"dictionary\"]\n \n def export(self, models_fname, dictionary_fname, config_fname):\n print(\"exporting Classifier for Caesar to read\")\n print(\"models to be saved in\", models_fname)\n print(\"dictionary to be saved in\", dictionary_fname)\n print(\"config to be saved in\", 
config_fname)\n \n if self.type != \"svm\" :\n print(\"Only know how to export SVMs\")\n return\n lines = []\n for this_tuple in self.classifiers:\n if self.classifiers[this_tuple] != None:\n t = this_tuple\n if Tuples.is_generic(this_tuple[-1]) :\n t = this_tuple[:-1] + (\"<generic_value>\",)\n lines += ['('+','.join(t)+')']\n lines += utils.svm_to_libsvm(self.classifiers[this_tuple].model)\n lines += [\".\",\"\"]\n models_savefile = open(models_fname, \"wb\")\n for line in lines:\n models_savefile.write(line+\"\\n\")\n models_savefile.close()\n \n # save dictionary\n json_dictionary = []\n dictionary_items = list(self.dictionary.items())\n dictionary_items.sort(key = lambda x:x[1])\n assert [x[1] for x in dictionary_items] == list(range(len(self.dictionary)))\n keys = [list(x[0]) for x in dictionary_items]\n \n json.dump( keys, open(dictionary_fname, \"w\"))\n \n \n # save config\n config_savefile = open(config_fname, \"w\")\n config_savefile.write(\"# Automatically generated by CNetTrain scripts\\n\")\n options = {\n \"FEATURES\":json.dumps(self.features),\n \"MAX_ACTIVE_TUPLES\":str(self.tuples.max_active),\n \"TAIL_CUTOFF\":str(self.tuples.tail_cutoff),\n \"MODELS\":os.path.join(os.getcwd(), models_fname),\n \"DICTIONARY\":os.path.join(os.getcwd(), dictionary_fname),\n \n }\n if \"cnet\" in self.features :\n index = self.features.index(\"cnet\")\n cnf = self.feature_extractors[index]\n options[\"MAX_NGRAM_LENGTH\"] = str(cnf.max_length)\n options[\"MAX_NGRAMS\"] = str(cnf.max_ngrams)\n for key in options:\n this_line = \"CNET : %s\"% key\n this_line = this_line.ljust(30)\n this_line += \"= \"+options[key]\n config_savefile.write(\"\\t\"+this_line+\"\\n\")\n config_savefile.close()\n print(\"exported Classifier.\")\n \n \n \ndef toSparse(baseX, X, dictionary):\n # convert baseX & X (a list of dictionaries), to a sparse matrix, using dictionary to map to indices\n out = lil_matrix((len(X),len(dictionary)))\n for i, (basex, x) in enumerate(zip(baseX, X)) :\n for key in basex :\n if key not in dictionary :\n continue\n out[i,dictionary[key]] = basex[key] \n for key in x :\n if key not in dictionary :\n continue\n out[i,dictionary[key]] = x[key]\n \n out = out.tocsr()\n return out\n\n \n# classifiers define :\n# train(X,y)\n# predict(X)\n# params()\n# load(params)\n# X is a sparse matrix, y is a vector of class labels (ints)\nfrom sklearn import svm\nclass SVM():\n def __init__(self, config):\n self.C = 1\n \n def pickC(self, X, y):\n Cs = [1, 0.1, 5, 10, 50] # 1 goes first as it should be preferred\n scores = []\n n = X.shape[0]\n dev_index = max([int(n*0.8), 1+y.index(1)])\n max_score = 0.0\n self.C = Cs[0]\n print(\"Warning, not picking C from validation\")\n return\n for i, C in enumerate(Cs) :\n this_model = svm.sparse.SVC(C=C, kernel='linear')\n this_model.probability = False\n this_model.class_weight = 'auto'\n \n this_model.fit(X[:dev_index,:],y[:dev_index])\n pred = this_model.predict(X)\n train_correct = 0.0\n dev_correct = 0.0\n for j, y_j in enumerate(y):\n if j < dev_index :\n train_correct += int(y_j == pred[j])\n else :\n dev_correct += int(y_j == pred[j])\n train_acc = train_correct/dev_index\n dev_acc = dev_correct/(n-dev_index)\n score = (0.1*train_acc + 0.9*dev_acc)\n print(\"\\tfor C=%.2f;\\n\\t\\t train_acc=%.4f, dev_acc=%.4f, score=%.4f\" % (C, train_acc, dev_acc, score))\n if score > max_score :\n max_score = score\n self.C = C\n if score == 1.0 :\n break\n print(\"Selected C=%.2f\"%self.C)\n \n \n def train(self, X, y):\n self.pickC(X, y)\n #model = 
svm.sparse.SVC(kernel='linear', C=self.C)\n model = svm.SVC(kernel='linear', C=self.C)\n model.probability=True\n model.class_weight = 'auto'\n model.fit(X,y)\n self.model = model\n \n def predict(self, X):\n y = self.model.predict_proba(X)\n return y[:,1]\n \n def params(self, ):\n return self.model\n \n def load(self, params):\n self.model = params\n \nnames_to_classes[\"svm\"] = SVM\n\n\nclass SGD():\n def __init__(self, config):\n pass\n \n def train(self, X, y):\n model = SGDClassifier(loss=\"log\", penalty=\"l2\")\n model.probability = True\n model.fit(X, y)\n self.model = model\n \n def predict(self, X):\n y = self.model.predict_proba(X)\n return y[:, 1]\n \n def params(self, ):\n return self.model\n \n def load(self, params):\n self.model = params\n\n\nnames_to_classes[\"sgd\"] = SGD\n",
"###############################################################################\n# PyDial: Multi-domain Statistical Spoken Dialogue System Software\n###############################################################################\n#\n# Copyright 2015 - 2019\n# Cambridge University Engineering Department Dialogue Systems Group\n#\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###############################################################################\n\n'''\nSummaryUtils.py - summarises dialog events for mapping from master to summary belief \n======================================================================================\n\nCopyright CUED Dialogue Systems Group 2015 - 2017\n\n**Basic Usage**: \n >>> import SummaryUtils\n \n.. Note::\n No classes; collection of utility methods\n\nLocal module variables::\n\n global_summary_features: (list) global actions/methods\n REQUESTING_THRESHOLD: (float) 0.5 min value to consider a slot requested\n\n.. seealso:: CUED Imports/Dependencies: \n\n import :mod:`ontology.Ontology` |.|\n import :mod:`utils.Settings` |.|\n import :mod:`utils.ContextLogger` |.|\n\n************************\n\n'''\n\n__author__ = \"cued_dialogue_systems_group\"\n\nimport copy\nfrom scipy.stats import entropy\nfrom ontology import Ontology\nfrom utils import ContextLogger, Settings\nlogger = ContextLogger.getLogger('')\n\n\nglobal_summary_features = ['GLOBAL_BYCONSTRAINTS',\n 'GLOBAL_BYALTERNATIVES',\n 'GLOBAL_BYNAME',\n 'GLOBAL_FINISHED',\n 'GLOBAL_REPEAT',\n 'GLOBAL_REQMORE',\n 'GLOBAL_THANKYOU',\n 'GLOBAL_ACK',\n 'GLOBAL_RESTART',\n 'GLOBAL_COUNTACCEPTED',\n 'GLOBAL_NAMENONE',\n 'GLOBAL_OFFERHAPPENED']\n\nREQUESTING_THRESHOLD = 0.5\n\n\n'''\n#####Belief state related methods.####\n'''\n\ndef globalSummary(belief, domainString):\n '''\n summary of global actions such as offer happened etc.\n\n :param belief: dict representing the full belief state\n :param domainString: string representing the domain\n :return: (dict) summary. 
dict keys are given by :meth:`global_summary_features`\n '''\n method_prob_mass = sum(belief['beliefs']['method'].values())\n belief['beliefs']['method']['none'] = 1 - method_prob_mass # hack to fix the bug not assigning 'none' any prob mass\n topMethod, topMethodBelief = getTopBelief(belief['beliefs']['method'])\n topDiscourseAct, topDiscourseActBelief = getTopBelief(belief['beliefs']['discourseAct'])\n\n summaryArray = dict.fromkeys(global_summary_features, False)\n summaryArray['GLOBAL_COUNTACCEPTED'] = len(getTopBeliefs(belief, domainString=domainString))\n summaryArray['GLOBAL_NAMENONE'] = belief['features']['lastActionInformNone']\n summaryArray['GLOBAL_OFFERHAPPENED'] = belief['features']['offerHappened']\n\n if topMethod == 'byalternatives':\n summaryArray['GLOBAL_BYALTERNATIVES'] = True\n elif topMethod == 'byname':\n summaryArray['GLOBAL_BYNAME'] = True\n elif topMethod == 'finished' and topMethodBelief > 0.5:\n summaryArray['GLOBAL_FINISHED'] = True\n elif topMethod == 'restart' and topMethodBelief > 0.5:\n summaryArray['GLOBAL_RESTART'] = True\n else:\n summaryArray['GLOBAL_BYCONSTRAINTS'] = True\n\n if topDiscourseAct == 'repeat' and topDiscourseActBelief > 0.5:\n summaryArray['GLOBAL_REPEAT'] = True\n elif topDiscourseAct == 'reqmore' and topDiscourseActBelief > 0.5:\n summaryArray['GLOBAL_REQMORE'] = True\n elif topDiscourseAct == 'thankyou' and topDiscourseActBelief > 0.5:\n summaryArray['GLOBAL_THANKYOU'] = True\n elif topDiscourseAct == 'ack' and topDiscourseActBelief > 0.5:\n summaryArray['GLOBAL_ACK'] = True\n\n return summaryArray\n\n\ndef arraySlotSummary(belief, domainString):\n '''\n Gets the summary vector for goal slots, including the top probabilities, entropy, etc.\n\n :param belief: dict representing the full belief state\n :param domainString: string representing the domain\n :return: (dict) of slot goal summaries\n '''\n summary = {}\n slots = Ontology.global_ontology.get_sorted_system_requestable_slots(domainString)\n \n for slot in slots:\n summary[slot] = {}\n slot_belief = belief['beliefs'][slot]\n summary[slot]['TOPHYPS'], summary[slot]['ISTOPNONE'] = getTopBeliefsExcludingNone(belief['beliefs'][slot])\n belief_dist = list(slot_belief.values())\n summary[slot]['ENTROPY'] = entropy(belief_dist)\n summary[slot]['ISREQUESTTOP'] = belief['beliefs']['requested'][slot] > 0.5\n\n return summary\n\n\ndef getRequestedSlots(belief):\n '''\n Iterate get the list of mentioned requested slots\n\n :param belief: dict representing the full belief state\n :return: (list) of slot names with prob retrieved from belief > REQUESTING_THRESHOLD (an internal global)\n '''\n requested_slots = []\n for slot in belief['beliefs']['requested']:\n requestprob = belief['beliefs']['requested'][slot]\n if requestprob > REQUESTING_THRESHOLD:\n requested_slots.append(slot)\n return requested_slots\n\n\ndef getTopBelief(slot_belief):\n '''\n Return slot value with the largest belief\n\n :param slot_belief: dict of value-prob pairs for slot distribution\n :return: top_value (str), top_belief (float)\n '''\n\n top_value = max(slot_belief, key=slot_belief.get)\n return top_value, slot_belief[top_value]\n\n\ndef getTopBeliefs(belief, threshold='auto', domainString=None):\n '''\n Get slot values with belief larger than threshold\n\n :param belief: dict representing the full belief state\n :param threshold: threshold on slot value probabilities. 
Default value is 'auto', only allowable string\n :param domainString: string representing the domain\n :return: (dict) as {slot: (topvalue, topbelief), ...}\n '''\n top_beliefs = {}\n for slot in Ontology.global_ontology.get_system_requestable_slots(domainString):\n if threshold == 'auto':\n numvalues = Ontology.global_ontology.get_len_informable_slot(domainString, slot)\n thres = 1. / (float(numvalues) - 0.1)\n else:\n thres = threshold\n\n topvalue, topbelief = getTopBelief(belief['beliefs'][slot])\n\n if topvalue != '**NONE**' and topbelief > thres:\n top_beliefs[slot] = (topvalue, topbelief)\n\n return top_beliefs\n\n\ndef getTopBeliefsExcludingNone(slot_belief):\n '''\n get the ordered list of (value,belief) in slot\n\n :param slot_belief: dict of value-prob pairs for slot distribution\n :return: (list) of ordered value-beliefs, (bool) telling if the top value is **NONE**\n '''\n slot_belief_copy = copy.deepcopy(slot_belief)\n top_hyps = []\n is_top_none = False\n while len(slot_belief_copy) > 0:\n topvalue, topbelief = getTopBelief(slot_belief_copy)\n if len(top_hyps) == 0 and topvalue == '**NONE**':\n is_top_none = True\n if topvalue != '**NONE**':\n top_hyps.append((topvalue, topbelief))\n del slot_belief_copy[topvalue]\n\n return top_hyps, is_top_none\n\n'''\n####Methods for inform related actions.####\n'''\n\ndef acceptanceListCanBeDiscriminated(accepted_values, domainString, num_accepted=None):\n '''\n Checks if the given acceptance list with the given number of values accepted\n returns a list of values which can be discriminated between -\n i.e. there is a question which we could ask which would give differences between\n the values.\n Note that only slots from the full acceptanceList (i.e. not just below\n maxAcceptedSlots are used for discrimination to exclude things like phone, addr, etc)\n\n :param accepted_values: dict of slot-value-beliefs whose beliefs are above **NONE**\n :param domainString: string representing the domain\n :return: (bool) answering discrimination question\n '''\n\n if num_accepted == None:\n num_accepted = len(accepted_values)\n\n ordered_accepted_values = []\n for slot, value in accepted_values.items():\n ordered_accepted_values.append((slot, value[0], value[1]))\n ordered_accepted_values = sorted(ordered_accepted_values, key=lambda x: x[2], reverse=True)[:num_accepted]\n\n return Ontology.global_ontology.constraintsCanBeDiscriminated(domainString, constraints=ordered_accepted_values)\n\n\ndef getInformNoneVenue(constraints):\n '''\n creates inform(name=none,...) act\n\n :param constraints: dict of accepted slot-values\n :return: (str) inform(name=none,...) 
act\n '''\n feats = {}\n for slot in constraints:\n if constraints[slot] != 'dontcare':\n feats[slot] = constraints[slot]\n return 'inform(name=none, {})'.format(convertFeatsToStr(feats))\n\n\ndef getInformByConstraints(constraints, domainString, lastInformedVenue):\n '''\n Looks for a database match with constraints and converts this entity into a dialogue act\n\n :param constraints: dict of slot:values whose beliefs are above **NONE**\n :param domainString: string representing the domain\n :return: string representing the inform dialogue act\n '''\n entities = Ontology.global_ontology.entity_by_features(domainString, constraints)\n if len(entities) == 0:\n return getInformNoneVenue(constraints)\n else:\n ret_ent = entities[0]\n for ent in entities:\n if ent['name'] == lastInformedVenue:\n ret_ent = ent\n break\n return getInformEntity(constraints, ret_ent)\n\n\ndef getInformEntity(accepted_values, ent):\n '''\n Converts a database entity into a dialogue act\n\n :param accepted_values: dict of slot-values whose beliefs are above **NONE**\n :param ent: database entity to be converted to dialogue act\n :return: string representing the inform dialogue act\n '''\n feats = {'name': ent['name']}\n numFeats = len(accepted_values)\n acceptance_keys = list(accepted_values.keys())\n\n maxNumFeats = 5\n if Settings.config.has_option(\"summaryacts\", \"maxinformslots\"):\n maxNumFeats = int(Settings.config.get('summaryacts', 'maxinformslots'))\n\n if numFeats > maxNumFeats:\n Settings.random.shuffle(acceptance_keys)\n acceptance_keys = acceptance_keys[:maxNumFeats]\n\n for slot in acceptance_keys:\n if slot != 'name':\n value = accepted_values[slot]\n if value == 'dontcare' and slot in ent and ent[slot] != \"not available\":\n feats[slot] = ent[slot]\n else:\n if slot in ent:\n feats[slot] = ent[slot]\n else:\n logger.warning('Slot {} is not found in data for entity {}'.format(slot, ent['name']))\n\n return 'inform({})'.format(convertFeatsToStr(feats))\n\n\ndef getInformRequestedSlots(requested_slots, name, domainString):\n '''\n Informs about the requested slots from the last informed venue of form the venue informed by name\n\n :param requested_slots: list of requested slots\n :param name: name of the last informed venue\n :param domainString: string representing the domain\n :return: string representing the inform dialogue act\n '''\n result = Ontology.global_ontology.entity_by_features(domainString, {'name': name})\n\n if len(result) > 0:\n ent = result[0]\n return _getInformRequestedSlotsForEntity(requested_slots, ent, domainString)\n else:\n if not name:\n # Return a random venue\n result = []\n while len(result) == 0:\n rand_name = Ontology.global_ontology.getRandomValueForSlot(domainString, 'name', nodontcare=True)\n result = Ontology.global_ontology.entity_by_features(domainString, {'name': rand_name})\n ent = result[0]\n return _getInformRequestedSlotsForEntity(requested_slots, ent, domainString)\n\n else:\n logger.warning('Couldn\\'t find the provided name: ' + name)\n return getInformNoneVenue({'name': name})\n\n\ndef _getInformRequestedSlotsForEntity(requested_slots, ent, domainString):\n '''\n Converts the list of requested slots and the entity into a inform_requested dialogue act\n\n :param requested_slots: list of requested slots (obtained in getRequestedSlots())\n :param ent: dictionary with information about a database entity\n :return: string representing the dialogue act\n '''\n\n slotvaluepair = ['name=\"{}\"'.format(ent['name'])]\n if len(requested_slots) == 0:\n if 'type' in 
ent:\n slotvaluepair.append('type=\"{}\"'.format(ent['type']))\n else:\n # type is not part of some ontologies. in this case just add a random slot-value\n slots = Ontology.global_ontology.get_requestable_slots(domainString)\n if 'name' in slots:\n slots.remove('name')\n if Settings.config.has_option('summaryacts', 'DSTC2requestables'):\n if Settings.config.getboolean('summaryacts', 'DSTC2requestables'):\n if 'description' in slots:\n slots.remove('description')\n if 'signature' in slots:\n slots.remove('signature')\n slot = slots[Settings.random.randint(len(slots))]\n slotvaluepair.append('{}=\"{}\"'.format(slot, ent[slot]))\n\n else:\n max_num_feats = 5\n if Settings.config.has_option(\"summaryacts\", \"maxinformslots\"):\n max_num_feats = int(Settings.config.get('summaryacts', 'maxinformslots'))\n\n if len(requested_slots) > max_num_feats:\n Settings.random.shuffle(requested_slots)\n requested_slots = requested_slots[:max_num_feats]\n\n for slot in requested_slots:\n if slot != 'name' and slot != 'location':\n if slot in ent:\n slotvaluepair.append('{}=\"{}\"'.format(slot, ent[slot]))\n else:\n slotvaluepair.append('{}=none'.format(slot))\n\n return 'inform({})'.format(','.join(slotvaluepair))\n\n\ndef getInformAlternativeEntities(accepted_values, prohibited_list, domainString):\n ''' returns an inform dialogue act informing about an entity that has not been informed before\n\n :param accepted_values: dict of slot-value-beliefs whose beliefs are above **NONE**\n :param prohibited_list: list of already mentioned entities\n :param domainString: string representing the domain\n :return: the dialogue act representing either\n 1) there is not matching venue: inform(name=none, slot=value, ...)\n 2) it offers a venue which is not on the prohibited list\n 3) if all matching venues are on the prohibited list then it says\n there is no venue except x,y,z,... 
with such features:\n inform(name=none, name!=x, name!=y, name!=z, ..., slot=value, ...)\n '''\n\n constraints = get_constraints(accepted_values)\n result = Ontology.global_ontology.entity_by_features(domainString, constraints)\n if len(result) == 0:\n return getInformNoneVenue(constraints)\n else:\n for ent in result:\n name = ent['name']\n if name not in prohibited_list:\n return getInformEntity(accepted_values, ent)\n\n return getInformNoMoreVenues(accepted_values, result)\n\n\ndef getInformNoMoreVenues(accepted_values, entities):\n '''\n returns inform(name=none, other than x and y, with constraints w and z) act\n\n :param accepted_values: dict of slot-value-beliefs whose beliefs are above **NONE**\n :param entities: list of database entity dicts\n :return: (str) inform() action\n '''\n\n maxNumFeats = 5\n if Settings.config.has_option(\"summaryacts\", \"maxinformslots\"):\n maxNumFeats = int(Settings.config.get('summaryacts', 'maxinformslots'))\n\n feats = {}\n for slot in accepted_values:\n value = accepted_values[slot][0]\n if slot != 'name' or value != 'dontcare':\n feats[slot] = value\n\n if len(feats) > maxNumFeats:\n feats_keys = list(feats.keys())\n truncated_feats = {}\n Settings.random.shuffle(feats_keys)\n for key in feats_keys[:maxNumFeats]:\n truncated_feats[key] = feats[key]\n feats = truncated_feats\n\n prohibited_list = ''\n for ent in entities:\n prohibited_list += 'name!=\"{}\",'.format(ent['name'])\n\n return 'inform(name=none,{}{})'.format(prohibited_list, convertFeatsToStr(feats))\n\n\ndef get_constraints(accepted_values):\n constraints = {}\n for slot in accepted_values:\n constraints[slot] = accepted_values[slot][0]\n return constraints\n\n\ndef convertFeatsToStr(feats):\n result = []\n for slot in feats:\n value = feats[slot]\n if value is not None and value.lower() != 'not available' and value != '':\n result.append('{}=\"{}\"'.format(slot, value))\n\n return ','.join(result)\n\n\ndef actionSpecificInformSummary(belief, numAccepted, domainString):\n '''count: # of entities matching with numAccepted slots in acceptance list.\n\n :param belief: full belief state\n :type belief: dict\n :param numAccepted: None\n :type numAccepted: int\n :param informable_slots: None\n :type informable_slots: list\n :returns: summary_array [count==0, count==1, 2<=count<=4, count>4, discriminatable] \\ \n discriminatable: matching entities can be further discriminated\n '''\n acceptanceList = getTopBeliefs(belief, domainString=domainString)\n count = _countEntitiesForAcceptanceListPart(acceptanceList, numAccepted, domainString)\n discriminatable = acceptanceListCanBeDiscriminated(acceptanceList, domainString, numAccepted)\n summary_array = [count == 0, count == 1, 2 <= count <= 4, count > 4, discriminatable]\n return summary_array\n\ndef _countEntitiesForAcceptanceListPart(accepted_values, num_accepted, domainString):\n '''\n Returns the number of entities matching the first self.maxAcceptedSlots (default=10)\n values in the acceptance list. 
Includes values with dontcare in the count\n\n :param acceptanceList: {slot: (topvalue, topbelief), ...}\n :param numAccepted: None\n :type numAccepted: int\n :returns: (int) number of entities\n '''\n\n ordered_accepted_values = []\n for slot, value in accepted_values.items():\n ordered_accepted_values.append((slot, value[0], value[1]))\n ordered_accepted_values = sorted(ordered_accepted_values, key=lambda x: x[2], reverse=True)[:num_accepted]\n\n constraints = {}\n for slot, value, _ in ordered_accepted_values: # slot, value, belief\n if value != 'dontcare':\n constraints[slot] = value\n\n# return len(Ontology.global_ontology.entity_by_features(domainString, constraints=constraints))\n return Ontology.global_ontology.get_length_entity_by_features(domainString, constraints=constraints)\n\n\n\n\n#END OF FILE\n"
] |
[
[
"numpy.set_printoptions",
"numpy.random.seed"
],
[
"sklearn.linear_model.SGDClassifier",
"numpy.zeros",
"sklearn.svm.sparse.SVC",
"sklearn.svm.SVC"
],
[
"scipy.stats.entropy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
mattgibbs/melbourne2018workshop
|
[
"c79a2e6dcef9ecfe92fdaac5149e87908ee75a17"
] |
[
"iocs/thermo.py"
] |
[
"#!/usr/bin/env python3\nfrom caproto.server import pvproperty, PVGroup, ioc_arg_parser, run\nfrom caproto import ChannelType\nimport numpy as np\nimport time\nfrom textwrap import dedent\n\n\nclass Thermo(PVGroup):\n \"\"\"\n Simulates (poorly) an oscillating temperature controller.\n\n Follows :math:`T_{output} = T_{var} exp^{-(t - t_0)/K} sin(ω t) + T_{setpoint}`\n\n The default prefix is `thermo:`\n\n Readonly PVs\n ------------\n\n I -> the current value\n\n Control PVs\n -----------\n\n SP -> where to go\n K -> the decay constant\n omega -> the oscillation frequency\n Tvar -> the scale of the oscillations\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._T0 = time.monotonic()\n self._entered_band = -1\n\n readback = pvproperty(value=0, dtype=float, read_only=True,\n name='I',\n mock_record='ai', units='K')\n\n setpoint = pvproperty(value=100, dtype=float, name='SP',\n mock_record='ai', units='K')\n K = pvproperty(value=10, dtype=float, units='1/K')\n omega = pvproperty(value=np.pi, dtype=float, units='Hz')\n Tvar = pvproperty(value=10, dtype=float, units='K')\n\n state = pvproperty(value=0, dtype=ChannelType.ENUM,\n enum_strings=['stable', 'settling'],\n read_only=True)\n deadband = pvproperty(value=2, dtype=float)\n settle_time = pvproperty(value=2, dtype=float, units='s')\n\n @readback.scan(period=.1, use_scan_field=True)\n async def readback(self, instance, async_lib):\n\n def t_rbv(T0, setpoint, K, omega, Tvar,):\n t = time.monotonic()\n return ((Tvar *\n np.exp(-(t - T0) / K) *\n np.sin(omega * t)) +\n setpoint)\n\n T = t_rbv(T0=self._T0,\n **{k: getattr(self, k).value\n for k in ['setpoint', 'K', 'omega', 'Tvar']})\n\n setpoint = self.setpoint.value\n if np.abs(setpoint - T) > self.deadband.value:\n self._entered_band = -1\n elif self._entered_band < 0:\n self._entered_band = time.monotonic()\n\n if self._entered_band > 0:\n if time.monotonic() > self._entered_band + self.settle_time.value:\n if self.state.value != 'stable':\n await self.state.write('stable')\n else:\n if self.state.value != 'settling':\n await self.state.write('settling')\n\n await instance.write(value=T)\n\n @setpoint.putter\n async def setpoint(self, instance, value):\n self._T0 = time.monotonic()\n # ensure it flashes low\n await self.state.write('stable')\n await self.state.write('settling')\n return value\n\n\nif __name__ == '__main__':\n ioc_options, run_options = ioc_arg_parser(\n default_prefix='thermo:',\n desc=dedent(Thermo.__doc__))\n ioc = Thermo(**ioc_options)\n run(ioc.pvdb, **run_options)\n"
] |
[
[
"numpy.exp",
"numpy.abs",
"numpy.sin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |