repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
Ignas-S/retinanet-simple
|
[
"81b17f65fa5278e6b9a4918e6a20b77949a7e87d"
] |
[
"retinanet/val.py"
] |
[
"from __future__ import print_function\n\nimport numpy as np\nimport json\nimport os\nimport matplotlib.pyplot as plt\nimport torch\n\n\n\ndef compute_overlap(a, b):\n \"\"\"\n Parameters\n ----------\n a: (N, 4) ndarray of float\n b: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])\n ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])\n\n iw = np.maximum(iw, 0)\n ih = np.maximum(ih, 0)\n\n ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih\n\n ua = np.maximum(ua, np.finfo(float).eps)\n\n intersection = iw * ih\n\n return intersection / ua\n\n\ndef _compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Code originally from https://github.com/rbgirshick/py-faster-rcnn.\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], recall, [1.]))\n mpre = np.concatenate(([0.], precision, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef _get_detections(dataset, retinanet, score_threshold=0.05, max_detections=100, save_path=None):\n \"\"\" Get the detections from the retinanet using the generator.\n The result is a list of lists such that the size is:\n all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]\n # Arguments\n dataset : The generator used to run images through the retinanet.\n retinanet : The retinanet to run on the images.\n score_threshold : The score confidence threshold to use.\n max_detections : The maximum number of detections to use per image.\n save_path : The path to save the images with visualized detections to.\n # Returns\n A list of lists containing the detections for each image in the generator.\n \"\"\"\n all_detections = [[None for i in range(dataset.num_classes())] for j in range(len(dataset))]\n\n retinanet.eval()\n \n with torch.no_grad():\n\n for index in range(len(dataset)):\n data = dataset[index]\n scale = data['scale']\n\n # run network\n if torch.cuda.is_available():\n scores, labels, boxes = retinanet(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))\n else:\n scores, labels, boxes = retinanet(data['img'].permute(2, 0, 1).float().unsqueeze(dim=0))\n scores = scores.cpu().numpy()\n labels = labels.cpu().numpy()\n boxes = boxes.cpu().numpy()\n\n # correct boxes for image scale\n boxes /= scale\n\n # select indices which have a score above the threshold\n indices = np.where(scores > score_threshold)[0]\n if indices.shape[0] > 0:\n # select those scores\n scores = scores[indices]\n\n # find the order with which to sort the scores\n scores_sort = np.argsort(-scores)[:max_detections]\n\n # select detections\n image_boxes = boxes[indices[scores_sort], :]\n image_scores = scores[scores_sort]\n image_labels = 
labels[indices[scores_sort]]\n image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)\n\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]\n else:\n # copy detections to all_detections\n for label in range(dataset.num_classes()):\n all_detections[index][label] = np.zeros((0, 5))\n\n print('{}/{}'.format(index + 1, len(dataset)), end='\\r')\n\n return all_detections\n\n\ndef _get_annotations(generator):\n \"\"\" Get the ground truth annotations from the generator.\n The result is a list of lists such that the size is:\n all_detections[num_images][num_classes] = annotations[num_detections, 5]\n # Arguments\n generator : The generator used to retrieve ground truth annotations.\n # Returns\n A list of lists containing the annotations for each image in the generator.\n \"\"\"\n all_annotations = [[None for i in range(generator.num_classes())] for j in range(len(generator))]\n\n for i in range(len(generator)):\n # load the annotations\n annotations = generator.load_annotations(i)\n\n # copy detections to all_annotations\n for label in range(generator.num_classes()):\n all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()\n\n print('{}/{}'.format(i + 1, len(generator)), end='\\r')\n\n return all_annotations\n\n\ndef evaluate(\n generator,\n retinanet,\n iou_threshold=0.5,\n score_threshold=0.05,\n max_detections=100,\n save_path=None\n):\n \"\"\" Evaluate a given dataset using a given retinanet.\n # Arguments\n generator : The generator that represents the dataset to evaluate.\n retinanet : The retinanet to evaluate.\n iou_threshold : The threshold used to consider when a detection is positive or negative.\n score_threshold : The score confidence threshold to use for detections.\n max_detections : The maximum number of detections to use per image.\n save_path : The path to save precision recall curve of each label.\n # Returns\n A dict mapping class names to mAP scores.\n \"\"\"\n\n\n\n # gather all detections and annotations\n\n all_detections = _get_detections(generator, retinanet, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)\n all_annotations = _get_annotations(generator)\n\n average_precisions = {}\n p_r = {}\n\n for label in range(generator.num_classes()):\n false_positives = np.zeros((0,))\n true_positives = np.zeros((0,))\n scores = np.zeros((0,))\n num_annotations = 0.0\n\n for i in range(len(generator)):\n detections = all_detections[i][label]\n annotations = all_annotations[i][label]\n num_annotations += annotations.shape[0]\n detected_annotations = []\n\n for d in detections:\n scores = np.append(scores, d[4])\n\n if annotations.shape[0] == 0:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n continue\n\n overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)\n assigned_annotation = np.argmax(overlaps, axis=1)\n max_overlap = overlaps[0, assigned_annotation]\n\n if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n false_positives = np.append(false_positives, 0)\n true_positives = np.append(true_positives, 1)\n detected_annotations.append(assigned_annotation)\n else:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n\n # no annotations -> AP for this class is 0 (is this correct?)\n if 
num_annotations == 0:\n average_precisions[label] = 0, 0\n p_r[label] = [], []\n continue\n\n # sort by score\n indices = np.argsort(-scores)\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n\n # compute false positives and true positives\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n\n # compute recall and precision\n recall = true_positives / num_annotations\n precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n\n # compute average precision\n average_precision = _compute_ap(recall, precision)\n average_precisions[label] = average_precision, num_annotations\n p_r[label] = precision, recall\n\n\n print('\\nmAP:')\n for label in range(generator.num_classes()):\n label_name = generator.label_to_name(label)\n print('{}: {}'.format(label_name, average_precisions[label][0]))\n precision, recall = p_r[label]\n print(\"Precision: \",precision[-1] if len(precision) > 0 else 0)\n print(\"Recall: \",recall[-1] if len(recall) > 0 else 0)\n \n if save_path!=None:\n plt.plot(recall,precision)\n # naming the x axis \n plt.xlabel('Recall') \n # naming the y axis \n plt.ylabel('Precision') \n\n # giving a title to my graph \n plt.title('Precision Recall curve') \n\n # function to show the plot\n plt.savefig(save_path+'/'+label_name+'_precision_recall.jpg')\n\n\n\n return average_precisions"
] |
[
[
"numpy.concatenate",
"numpy.append",
"numpy.zeros",
"numpy.sum",
"torch.no_grad",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.where",
"numpy.finfo",
"torch.cuda.is_available",
"numpy.argmax",
"numpy.argsort",
"numpy.cumsum",
"matplotlib.pyplot.ylabel",
"numpy.expand_dims",
"numpy.maximum"
]
] |
Utkarsh87/Capsule-Networks
|
[
"a3a533d40c3ac32590067782c8a41f450ec0b28a"
] |
[
"src/digitcaps.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nGPU_AVAILABLE = torch.cuda.is_available()\r\n\r\n# if(GPU_AVAILABLE):\r\n# print('Training on GPU!')\r\n# else:\r\n# print('Only CPU available')\r\n\r\ndef softmax(input_tensor, dim=1):\r\n transposed_input = input_tensor.transpose(dim, len(input_tensor.size()) - 1)\r\n softmaxed_output = F.softmax(transposed_input.contiguous().view(-1, transposed_input.size(-1)), dim=-1)\r\n # un-transpose result\r\n return softmaxed_output.view(*transposed_input.size()).transpose(dim, len(input_tensor.size()) - 1)\r\n\r\n# dynamic routing\r\ndef dynamic_routing(b_ij, u_hat, squash, routing_iterations=3):\r\n '''\r\n Performs dynamic routing between two capsule layers.\r\n\r\n param b_ij: initial log probabilities that capsule i should be coupled to capsule j\r\n param u_hat: input, weighted capsule vectors, W u\r\n param squash: given, normalizing squash function\r\n param routing_iterations: number of times to update coupling coefficients\r\n return: v_j, output capsule vectors\r\n ''' \r\n # update b_ij, c_ij for number of routing iterations\r\n for iteration in range(routing_iterations):\r\n # softmax calculation of coupling coefficients, c_ij\r\n c_ij = softmax(b_ij, dim=2)\r\n\r\n # calculating total capsule inputs, s_j = sum(c_ij*u_hat)\r\n s_j = (c_ij * u_hat).sum(dim=2, keepdim=True)\r\n\r\n # squashing to get a normalized vector output, v_j\r\n v_j = squash(s_j)\r\n\r\n # if not on the last iteration, calculate agreement and new b_ij\r\n if iteration < routing_iterations - 1:\r\n # agreement\r\n a_ij = (u_hat * v_j).sum(dim=-1, keepdim=True)\r\n \r\n # new b_ij\r\n b_ij = b_ij + a_ij\r\n \r\n return v_j # return latest v_j\r\n\r\nclass DigitCaps(nn.Module):\r\n \r\n def __init__(self, num_capsules=10, previous_layer_nodes=32*6*6, \r\n in_channels=8, out_channels=16):\r\n '''\r\n Constructs an initial weight matrix, W, and sets class variables.\r\n \r\n param num_capsules: number of capsules to create\r\n param previous_layer_nodes: dimension of input capsule vector, default value = 1152\r\n param in_channels: number of capsules in previous layer, default value = 8\r\n param out_channels: dimensions of output capsule vector, default value = 16\r\n '''\r\n super(DigitCaps, self).__init__()\r\n\r\n self.num_capsules = num_capsules\r\n self.previous_layer_nodes = previous_layer_nodes # vector input (dim=1152)\r\n self.in_channels = in_channels # previous layer's number of capsules\r\n\r\n # starting out with a randomly initialized weight matrix, W\r\n # these will be the weights connecting the PrimaryCaps and DigitCaps layers\r\n self.W = nn.Parameter(torch.randn(num_capsules, previous_layer_nodes, \r\n in_channels, out_channels))\r\n\r\n def forward(self, u):\r\n '''\r\n\t\tDefines the feedforward behavior.\r\n \r\n param u: the input; vectors from the previous PrimaryCaps layer\r\n return: a set of normalized, capsule output vectors\r\n '''\r\n # adding batch_size dims and stacking all u vectors\r\n u = u[None, :, :, None, :]\r\n # 4D weight matrix\r\n W = self.W[:, None, :, :, :]\r\n \r\n # calculating u_hat = W*u\r\n u_hat = torch.matmul(u, W)\r\n\r\n # getting the correct size of b_ij\r\n # setting them all to 0, initially\r\n b_ij = torch.zeros(*u_hat.size())\r\n \r\n # moving b_ij to GPU, if available\r\n if GPU_AVAILABLE:\r\n b_ij = b_ij.cuda()\r\n\r\n # update coupling coefficients and calculate v_j\r\n v_j = dynamic_routing(b_ij, u_hat, self.squash, routing_iterations=3)\r\n\r\n return v_j # return final 
vector outputs\r\n \r\n \r\n def squash(self, input_tensor):\r\n '''\r\n Squashes an input Tensor so it has a magnitude between 0-1.\r\n \r\n param input_tensor: a stack of capsule inputs, s_j\r\n return: a stack of normalized, capsule output vectors, v_j\r\n '''\r\n # same squash function as in PrimaryCaps\r\n squared_norm = (input_tensor ** 2).sum(dim=-1, keepdim=True)\r\n scale = squared_norm / (1 + squared_norm) # normalization coeff\r\n output_tensor = scale * input_tensor / torch.sqrt(squared_norm) \r\n return output_tensor\r\n"
] |
[
[
"torch.cuda.is_available",
"torch.matmul",
"torch.randn",
"torch.sqrt"
]
] |
thivagar-manickam/data-science-notebooks
|
[
"afd7ecd68a140d98fafc528c983ed1f67c2c6f34"
] |
[
"House Sales Prediction in King County, USA/app/model.py"
] |
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom housePricePredictor import HousePricePrediction as HousePricePredictor\nimport joblib\n\nif __name__ == \"__main__\":\n url = \"house_price.csv\"\n house_df = pd.read_csv(url)\n target_column = \"price\"\n input_col = ['bedrooms', 'bathrooms', 'sqft_living', 'view', 'grade', 'sqft_above', 'sqft_living15']\n train_x, test_x, train_y, test_y = train_test_split(\n house_df[input_col], house_df[target_column], test_size=0.2, random_state=1\n )\n model_obj = HousePricePredictor(train_x, train_y).fit()\n test_x_scaled = model_obj.scale(test_x)\n test_y_pred = model_obj.model.predict(test_x_scaled.values)\n joblib.dump(model_obj, \"model.joblib\")"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
sylviemonet/strawberryfields
|
[
"e1a90cbb73f84b17d85d993cdf878eb60d41e28e"
] |
[
"tests/integration/test_ops_integration.py"
] |
[
"# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Integration tests for frontend operations applied to the backend\"\"\"\nimport pytest\n\nimport numpy as np\nimport tensorflow as tf\nimport strawberryfields as sf\nfrom strawberryfields import ops\n\nfrom strawberryfields.backends import BaseGaussian\nfrom strawberryfields.backends.states import BaseFockState, BaseGaussianState\n\nfrom thewalrus.quantum import is_valid_cov\nfrom thewalrus.random import random_symplectic\n\nfrom scipy.stats import unitary_group\n\n# make test deterministic\nnp.random.seed(42)\nA = 0.1234\nB = -0.543\n\n\[email protected](\"gate\", ops.gates)\nclass TestGateApplication:\n \"\"\"tests that involve gate application\"\"\"\n\n @pytest.fixture\n def G(self, gate):\n \"\"\"Initialize each gate\"\"\"\n if gate in ops.zero_args_gates:\n return gate()\n\n if gate in ops.one_args_gates:\n return gate(A)\n\n if gate in ops.two_args_gates:\n return gate(A, B)\n\n def test_gate_dagger_vacuum(self, G, setup_eng, tol):\n \"\"\"Test applying gate inverses after the gate cancels out\"\"\"\n eng, prog = setup_eng(2)\n\n if isinstance(G, (ops.Vgate, ops.Kgate, ops.CKgate)) and isinstance(\n eng.backend, BaseGaussian\n ):\n pytest.skip(\"Non-Gaussian gates cannot be applied to the Gaussian backend\")\n\n with prog.context as q:\n if G.ns == 1:\n G | q[0]\n G.H | q[0]\n elif G.ns == 2:\n G | (q[0], q[1])\n G.H | (q[0], q[1])\n\n eng.run(prog)\n\n # we must end up back in vacuum since G and G.H cancel each other\n assert np.all(eng.backend.is_vacuum(tol))\n\n\nclass TestChannelApplication:\n \"\"\"tests that involve channel application\"\"\"\n\n def test_loss_channel(self, setup_eng, tol):\n \"\"\"Test loss channel with no transmission produces vacuum\"\"\"\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Dgate(np.abs(A), np.angle(A)) | q[0]\n ops.LossChannel(0) | q[0]\n\n eng.run(prog)\n assert np.all(eng.backend.is_vacuum(tol))\n\n @pytest.mark.backends(\"gaussian\", \"bosonic\")\n def test_thermal_loss_channel(self, setup_eng, tol):\n \"\"\"Test thermal loss channel with no transmission produces thermal state\"\"\"\n eng, prog = setup_eng(1)\n nbar = 0.43\n\n with prog.context as q:\n ops.Dgate(np.abs(A), np.angle(A)) | q[0]\n ops.ThermalLossChannel(0, nbar) | q[0]\n\n state = eng.run(prog).state\n mean_photon, var = state.mean_photon(0)\n assert np.allclose(mean_photon, nbar, atol=tol, rtol=0)\n assert np.allclose(var, nbar ** 2 + nbar, atol=tol, rtol=0)\n\n @pytest.mark.backends(\"gaussian\")\n @pytest.mark.parametrize(\"M\", range(1, 5))\n def test_passive_channel_vacuum(self, M, setup_eng, tol):\n \"\"\"test that you get vacuum on all modes if you apply a channel with all zero\"\"\"\n eng, prog = setup_eng(M)\n\n with prog.context as q:\n for i in range(M):\n ops.Dgate(abs(A), np.angle(A)) | q[i]\n ops.PassiveChannel(np.zeros((M, M))) | q\n\n eng.run(prog)\n assert np.all(eng.backend.is_vacuum(tol))\n\n @pytest.mark.backends(\"gaussian\")\n 
@pytest.mark.parametrize(\"M\", range(2, 7))\n def test_passive_channel(self, M, setup_eng, tol):\n \"\"\"check that passive channel is consistent with unitary methods\"\"\"\n U = unitary_group.rvs(M)\n\n loss_in = np.random.random(M)\n loss_out = np.random.random(M)\n\n T = (np.sqrt(loss_in) * U) * np.sqrt(loss_out[np.newaxis].T)\n\n eng, prog = setup_eng(M)\n with prog.context as q:\n for i in range(M):\n ops.Sgate(1) | q[i]\n ops.Dgate(A) | q[i]\n ops.PassiveChannel(T) | q\n\n state = eng.run(prog).state\n cov1 = state.cov()\n mu1 = state.means()\n\n eng, prog = setup_eng(M)\n with prog.context as q:\n for i in range(M):\n ops.Sgate(1) | q[i]\n ops.Dgate(A) | q[i]\n ops.LossChannel(loss_in[i]) | q[i]\n ops.Interferometer(U) | q\n for i in range(M):\n ops.LossChannel(loss_out[i]) | q[i]\n\n state = eng.run(prog).state\n cov2 = state.cov()\n mu2 = state.means()\n\n assert np.allclose(cov1, cov2, atol=tol, rtol=0)\n assert np.allclose(mu1, mu2, atol=tol, rtol=0)\n\n u, s, v = np.linalg.svd(T)\n\n eng, prog = setup_eng(M)\n with prog.context as q:\n for i in range(M):\n ops.Sgate(1) | q[i]\n ops.Dgate(A) | q[i]\n ops.Interferometer(v) | q\n for i in range(M):\n ops.LossChannel(s[i] ** 2) | q[i]\n ops.Interferometer(u) | q\n\n state = eng.run(prog).state\n cov3 = state.cov()\n mu3 = state.means()\n\n assert np.allclose(cov1, cov3, atol=tol, rtol=0)\n assert np.allclose(mu1, mu3, atol=tol, rtol=0)\n\n T1 = u * s\n eng, prog = setup_eng(M)\n with prog.context as q:\n for i in range(M):\n ops.Sgate(1) | q[i]\n ops.Dgate(A) | q[i]\n ops.PassiveChannel(v) | q\n ops.PassiveChannel(T1) | q\n\n state = eng.run(prog).state\n cov4 = state.cov()\n mu4 = state.means()\n\n assert np.allclose(cov1, cov4, atol=tol, rtol=0)\n assert np.allclose(mu1, mu4, atol=tol, rtol=0)\n\n\nclass TestPreparationApplication:\n \"\"\"Tests that involve state preparation application\"\"\"\n\n @pytest.mark.backends(\"tf\", \"fock\")\n def test_ket_state_object(self, setup_eng, pure):\n \"\"\"Test loading a ket from a prior state object\"\"\"\n if not pure:\n pytest.skip(\"Test only runs on pure states\")\n\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Dgate(0.2, 0.0) | q[0]\n\n state1 = eng.run(prog).state\n\n # create new engine\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Ket(state1) | q[0]\n\n state2 = eng.run(prog).state\n\n # verify it is the same state\n assert state1 == state2\n\n @pytest.mark.backends(\"tf\", \"fock\")\n def test_ket_gaussian_state_object(self, setup_eng):\n \"\"\"Test exception if loading a ket from a Gaussian state object\"\"\"\n eng = sf.Engine(\"gaussian\")\n prog = sf.Program(1)\n state = eng.run(prog).state\n\n # create new engine\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n with pytest.raises(ValueError, match=\"Gaussian states are not supported\"):\n ops.Ket(state) | q[0]\n\n @pytest.mark.backends(\"tf\", \"fock\")\n def test_ket_mixed_state_object(self, setup_eng, pure):\n \"\"\"Test exception if loading a ket from a prior mixed state object\"\"\"\n if pure:\n pytest.skip(\"Test only runs on mixed states\")\n\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Dgate(0.2, 0.0) | q[0]\n\n state1 = eng.run(prog).state\n\n # create new engine\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n with pytest.raises(ValueError, match=\"Fock state is not pure\"):\n ops.Ket(state1) | q[0]\n\n @pytest.mark.backends(\"tf\", \"fock\")\n def test_dm_state_object(self, setup_eng, tol):\n \"\"\"Test loading a density matrix from a prior state 
object\"\"\"\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Dgate(0.2, 0.0) | q[0]\n\n state1 = eng.run(prog).state\n\n # create new engine\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.DensityMatrix(state1) | q[0]\n\n state2 = eng.run(prog).state\n\n # verify it is the same state\n assert np.allclose(state1.dm(), state2.dm(), atol=tol, rtol=0)\n\n @pytest.mark.backends(\"tf\", \"fock\")\n def test_dm_gaussian_state_object(self, setup_eng):\n \"\"\"Test exception if loading a ket from a Gaussian state object\"\"\"\n eng = sf.Engine(\"gaussian\")\n prog = sf.Program(1)\n state = eng.run(prog).state\n\n # create new engine\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n with pytest.raises(ValueError, match=\"Gaussian states are not supported\"):\n ops.DensityMatrix(state) | q[0]\n\n\[email protected](\"fock\", \"tf\")\nclass TestKetDensityMatrixIntegration:\n \"\"\"Tests for the frontend Fock multi-mode state preparations\"\"\"\n\n def test_ket_input_validation(self, setup_eng, hbar, cutoff):\n \"\"\"Test exceptions\"\"\"\n mu = np.array([0.0, 0.0])\n cov = np.identity(2)\n state1 = BaseGaussianState((mu, cov), 1)\n state2 = BaseFockState(np.zeros(cutoff), 1, False, cutoff)\n\n eng, prog = setup_eng(2)\n\n with prog.context as q:\n with pytest.raises(ValueError, match=\"Gaussian states are not supported\"):\n ops.Ket(state1) | q[0]\n with pytest.raises(ValueError, match=\"not pure\"):\n ops.Ket(state2) | q[0]\n\n def test_ket_one_mode(self, setup_eng, hbar, cutoff, tol):\n \"\"\"Tests single mode ket preparation\"\"\"\n eng, prog = setup_eng(2)\n ket0 = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n ket0 = ket0 / np.linalg.norm(ket0)\n with prog.context as q:\n ops.Ket(ket0) | q[0]\n state = eng.run(prog, **{\"modes\": [0]}).state\n assert np.allclose(state.dm(), np.outer(ket0, ket0.conj()), atol=tol, rtol=0)\n\n eng.reset()\n\n prog = sf.Program(2)\n state1 = BaseFockState(ket0, 1, True, cutoff)\n with prog.context as q:\n ops.Ket(state1) | q[0]\n state2 = eng.run(prog, **{\"modes\": [0]}).state\n assert np.allclose(state1.dm(), state2.dm(), atol=tol, rtol=0)\n\n def test_ket_two_mode(self, setup_eng, hbar, cutoff, tol):\n \"\"\"Tests multimode ket preparation\"\"\"\n eng, prog = setup_eng(2)\n ket0 = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n ket0 = ket0 / np.linalg.norm(ket0)\n ket1 = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n ket1 = ket1 / np.linalg.norm(ket1)\n\n ket = np.outer(ket0, ket1)\n with prog.context as q:\n ops.Ket(ket) | q\n state = eng.run(prog).state\n assert np.allclose(state.dm(), np.einsum(\"ij,kl->ikjl\", ket, ket.conj()), atol=tol, rtol=0)\n\n eng.reset()\n\n prog = sf.Program(2)\n state1 = BaseFockState(ket, 2, True, cutoff)\n with prog.context as q:\n ops.Ket(state1) | q\n state2 = eng.run(prog).state\n assert np.allclose(state1.dm(), state2.dm(), atol=tol, rtol=0)\n\n def test_dm_input_validation(self, setup_eng, hbar, cutoff, tol):\n \"\"\"Test exceptions\"\"\"\n mu = np.array([0.0, 0.0])\n cov = np.identity(2)\n state = BaseGaussianState((mu, cov), 1)\n\n eng, prog = setup_eng(2)\n\n with prog.context as q:\n with pytest.raises(ValueError, match=\"Gaussian states are not supported\"):\n ops.DensityMatrix(state) | q[0]\n\n def test_dm_one_mode(self, setup_eng, hbar, cutoff, tol):\n \"\"\"Tests single mode DM preparation\"\"\"\n eng, prog = setup_eng(2)\n\n ket = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n 
ket = ket / np.linalg.norm(ket)\n rho = np.outer(ket, ket.conj())\n with prog.context as q:\n ops.DensityMatrix(rho) | q[0]\n state = eng.run(prog, **{\"modes\": [0]}).state\n assert np.allclose(state.dm(), rho, atol=tol, rtol=0)\n\n eng.reset()\n\n prog = sf.Program(2)\n state1 = BaseFockState(rho, 1, False, cutoff)\n with prog.context as q:\n ops.DensityMatrix(state1) | q[0]\n state2 = eng.run(prog, **{\"modes\": [0]}).state\n assert np.allclose(state1.dm(), state2.dm(), atol=tol, rtol=0)\n\n def test_dm_two_mode(self, setup_eng, hbar, cutoff, tol):\n \"\"\"Tests multimode dm preparation\"\"\"\n eng, prog = setup_eng(2)\n\n ket0 = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n ket0 = ket0 / np.linalg.norm(ket0)\n ket1 = np.random.uniform(-1, 1, cutoff) + 1j * np.random.uniform(-1, 1, cutoff)\n ket1 = ket1 / np.linalg.norm(ket1)\n\n ket = np.outer(ket0, ket1)\n rho = np.einsum(\"ij,kl->ikjl\", ket, ket.conj())\n with prog.context as q:\n ops.DensityMatrix(rho) | q\n state = eng.run(prog).state\n assert np.allclose(state.dm(), rho, atol=tol, rtol=0)\n\n eng.reset()\n\n prog = sf.Program(2)\n state1 = BaseFockState(rho, 2, False, cutoff)\n with prog.context as q:\n ops.DensityMatrix(state1) | q\n state2 = eng.run(prog).state\n assert np.allclose(state1.dm(), state2.dm(), atol=tol, rtol=0)\n\n\[email protected](\"tf\", \"fock\")\nclass TestGaussianGateApplication:\n def test_multimode_gaussian_gate(self, setup_backend, pure):\n \"\"\"Test applying gaussian gate on multiple modes\"\"\"\n num_mode = 1\n eng = sf.Engine(\"tf\", backend_options={\"cutoff_dim\": 5})\n prog = sf.Program(num_mode)\n S = tf.Variable(random_symplectic(num_mode), dtype=tf.complex128)\n d = tf.Variable(np.random.random(2 * num_mode), dtype=tf.complex128)\n with prog.context as q:\n ops.Ggate(S, d) | q\n # tests that no exceptions are raised\n eng.run(prog).state.ket()\n\n def test_gradient_gaussian_gate(self, setup_backend, pure):\n if not pure:\n pytest.skip(\"Test only runs on pure states\")\n num_mode = 2\n eng = sf.Engine(\"tf\", backend_options={\"cutoff_dim\": 5})\n prog = sf.Program(num_mode)\n S = tf.Variable(random_symplectic(num_mode), dtype=tf.complex128)\n d = tf.Variable(np.random.random(2 * num_mode), dtype=tf.complex128)\n with prog.context as q:\n sf.ops.Ggate(S, d) | q\n with tf.GradientTape() as tape:\n if pure:\n state = eng.run(prog).state.ket()\n else:\n state = eng.run(prog).state.dm()\n # tests that no exceptions are raised\n tape.gradient(state, [S, d])\n\n def test_Ggate_optimization(self, setup_backend, pure):\n if not pure:\n pytest.skip(\"Test only runs on pure states\")\n num_mode = 2\n eng = sf.Engine(\"tf\", backend_options={\"cutoff_dim\": 5})\n prog = sf.Program(num_mode)\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n S = tf.Variable(random_symplectic(num_mode), dtype=tf.complex128)\n d = tf.Variable(np.random.random(2 * num_mode), dtype=tf.complex128)\n\n prog = sf.Program(num_mode)\n with prog.context as q:\n ops.Ggate(S, d) | q\n\n loss_vals = []\n for _ in range(11):\n with tf.GradientTape() as tape:\n state_out = eng.run(prog).state.ket()\n loss_val = tf.abs(state_out[1, 1] - 0.25) ** 2\n eng.reset()\n grad_S, gradients_d = tape.gradient(loss_val, [S, d])\n optimizer.apply_gradients([(gradients_d, d)])\n sf.backends.tfbackend.ops.update_symplectic(S, grad_S, lr=0.05)\n loss_vals.append(loss_val)\n print(loss_val)\n assert all([bool(l1 > l2) for l1, l2 in zip(loss_vals, loss_vals[1:])])\n"
] |
[
[
"tensorflow.abs",
"numpy.array",
"tensorflow.keras.optimizers.SGD",
"numpy.linalg.norm",
"tensorflow.GradientTape",
"numpy.zeros",
"numpy.angle",
"numpy.random.seed",
"numpy.identity",
"numpy.allclose",
"numpy.random.uniform",
"scipy.stats.unitary_group.rvs",
"numpy.linalg.svd",
"numpy.sqrt",
"numpy.outer",
"numpy.random.random",
"numpy.abs"
]
] |
XiaoSanGit/wda_tracker
|
[
"b68ec0edb9daa6cc495815ba9ca549b36eec0369",
"b68ec0edb9daa6cc495815ba9ca549b36eec0369"
] |
[
"clustering/clustering_utils.py",
"feature_extractors/ABD_Net/torchreid/datasets/viper.py"
] |
[
"import pandas as pd\nfrom collections import defaultdict\nimport os\nfrom tqdm import tqdm\nfrom clustering import heapq\nimport sys\nimport numpy as np\nfrom utilities.helper import constrain_bbox_to_img_dims\n\n\ndef valid_heap_node(heap_node, old_clusters):\n pair_data = heap_node[1]\n for old_cluster in old_clusters:\n if old_cluster in pair_data:\n return False\n return True\n\n\ndef assign_track_nos_to_track_dict(tracks):\n for track_cluster_no, track_cluster in enumerate(tracks):\n for track_dict in track_cluster:\n track_dict[\"track_no\"] = track_cluster_no\n\n\ndef print_sorted_distances(heap):\n print(\"Printing sorted heap distances: \")\n sorted_heap_distances = sorted([heap_entry[0] for heap_entry in heap])\n sorted_heap_distances = sorted_heap_distances[:10]\n for dist in sorted_heap_distances:\n print(dist)\n\n\ndef map_clusters_track_indices_to_tracks(clusters,all_tracks):\n result = []\n for cluster in clusters:\n result.append(map_idx_to_tracks(cluster,all_tracks))\n\n return result\n\n\n\ndef map_idx_to_tracks(track_indices, all_tracks):\n result = []\n for track_idx in track_indices:\n result.append(all_tracks[track_idx])\n return result\n\ndef flatten_list(l):\n flat_list = []\n for sublist in l:\n for item in sublist:\n flat_list.append(item)\n\n return flat_list\n\ndef add_up_lists(l):\n result = []\n for sublist in l:\n combined_lists = []\n for item in sublist:\n combined_lists += item\n result.append(combined_lists)\n return result\n\ndef get_distances_and_indices(dataset,calculate_track_distances):\n print(\"Calculating pairwise distances\")\n result = []\n dataset_size = len(dataset)\n for i in tqdm(range(dataset_size - 1)): # ignore last i\n for j in range(i + 1, dataset_size): # ignore duplication\n\n distances = calculate_track_distances([i], [j], dataset)\n\n # duplicate dist, need to be remove, and there is no difference to use tuple only\n # leave second dist here is to take up a position for tie selection\n\n result.append([distances, [i, j]])\n\n return result\n\ndef get_distances_and_indices_server(dataset,single_cam_constraints, item_num_per_cam, calculate_track_distances):\n def judge_num_location(num, Alist):\n # TODO [final] refine this shit code\n cnt = 0\n for item in Alist:\n if num>=item:\n cnt+=item\n else:\n return cnt\n\n print(\"Calculating pairwise distances\")\n result = []\n dataset_size = len(dataset)\n cnt = 0\n for i in tqdm(range(dataset_size - 1)): # ignore last i\n for j in range(i + 1, dataset_size): # ignore duplication\n # [server2] try to solve multi-file input\n # If the (i,j) locate in an area from single cam. 
Then can directly query otherwise give 0\n # Need to get the length first\n distance_read = 0\n # TODO [urgent] Test the speed.\n # TODO [final] refine this shit code\n if judge_num_location(i,item_num_per_cam) == judge_num_location(j,item_num_per_cam):\n shift = 0\n for idx,item in enumerate(item_num_per_cam):\n if i >= item:\n shift += item\n else:\n distance_read = single_cam_constraints[idx][i+shift,j+shift]\n break\n distances = calculate_track_distances([i], [j], dataset, distance_read)\n # duplicate dist, need to be remove, and there is no difference to use tuple only\n # leave second dist here is to take up a position for tie selection\n\n result.append([distances, [i, j]])\n # TODO [final] improve this.\n cnt += 1\n\n return result\n\ndef compute_pairwise_distance_normalized(distances_and_indices, dist_name_to_distance_weights):\n\n def get_dist_name_to_dist_list(distances_and_indices):\n dist_name_to_dist_list = defaultdict(list)\n for distances_pair in distances_and_indices:\n distances = distances_pair[0]\n for dist_name, dist in distances.items():\n dist_name_to_dist_list[dist_name].append(dist)\n return dist_name_to_dist_list\n\n\n def get_dist_name_to_median_dist(dist_name_to_dist_list):\n dist_name_to_median_dist = {}\n for dist_name, dist_list in dist_name_to_dist_list.items():\n dist_list_no_inf_zero = [dist for dist in dist_list if dist != np.Inf and dist > 0]\n\n if len(dist_list_no_inf_zero) == 0:\n dist_name_to_median_dist[dist_name] = 1\n else:\n dist_name_to_median_dist[dist_name] = np.median(dist_list_no_inf_zero)\n\n return dist_name_to_median_dist\n\n\n\n def get_dist_name_to_max_dist(dist_name_to_dist_list):\n dist_name_to_max_dist = {}\n for dist_name, dist_list in dist_name_to_dist_list.items():\n dist_list_no_inf_zero = [dist for dist in dist_list if dist != np.Inf and dist > 0]\n\n if len(dist_list_no_inf_zero) == 0:\n dist_name_to_max_dist[dist_name] = 1\n else:\n dist_name_to_max_dist[dist_name] = max(dist_list_no_inf_zero)\n\n return dist_name_to_max_dist\n\n def get_dist_name_to_dist_list_div_val(dist_name_to_dist_list, dist_name_to_val):\n dist_name_to_dist_list_div_max = {}\n for dist_name, dist_list in dist_name_to_dist_list.items():\n val = dist_name_to_val[dist_name]\n if val == 0:\n dist_name_to_dist_list_div_max[dist_name] = np.array(dist_list)\n else:\n dist_name_to_dist_list_div_max[dist_name] = np.divide(dist_list, val)\n\n return dist_name_to_dist_list_div_max\n\n def get_dist_name_to_weighted_dist_list(dist_name_to_dist_list, dist_name_to_distance_weights):\n dist_name_to_weighted_dist_list = {}\n for dist_name, distance_weight in dist_name_to_distance_weights.items():\n\n dist_list = dist_name_to_dist_list[dist_name]\n\n #Workaround for the problem that np.Inf*0 results in nan\n if distance_weight == 0:\n dist_name_to_weighted_dist_list[dist_name] = np.full(len(dist_list),0)\n continue\n\n weighted_dist_list = np.multiply(dist_list, distance_weight)\n\n dist_name_to_weighted_dist_list[dist_name] = weighted_dist_list\n\n return dist_name_to_weighted_dist_list\n\n def get_sum_of_distances(dist_name_to_dist_list):\n dist_lists = []\n for dist_name, dist_list in dist_name_to_dist_list.items():\n dist_lists.append(dist_list)\n\n dist_arr = np.array(dist_lists)\n dist_sums = np.sum(dist_arr, axis=0)\n\n return dist_sums\n\n def get_distances_with_track_indices(dist_sums, distances_and_indices):\n distances_with_indices = []\n for dist, dist_and_indices in zip(dist_sums, distances_and_indices):\n indices = dist_and_indices[1]\n i = indices[0]\n j = 
indices[1]\n distances_with_indices.append((dist, [dist, [[i], [j]]]))\n\n return distances_with_indices\n\n dist_name_to_dist_list = get_dist_name_to_dist_list(distances_and_indices)\n\n dist_name_to_weighted_dist_list = get_dist_name_to_weighted_dist_list(dist_name_to_dist_list\n , dist_name_to_distance_weights)\n\n dist_sums = get_sum_of_distances(dist_name_to_weighted_dist_list)\n\n distances_with_track_indices = get_distances_with_track_indices(dist_sums, distances_and_indices)\n\n return distances_with_track_indices\n\n\ndef get_cluster_tracks_as_list(track_cluster):\n '''\n This will append all track position elements to one list.\n\n :param track_cluster:\n :return:\n '''\n\n def add_cam_id_to_track_elements(cam_id, track):\n for track_pos in track:\n track_pos[\"cam_id\"] = cam_id\n return track\n # TODO [final] this seems to be tedious\n result = []\n for track_dict in track_cluster:\n result += add_cam_id_to_track_elements(track_dict[\"cam_id\"], track_dict[\"track\"])\n return result\n\ndef compute_pairwise_distance(dataset,calculate_track_distance):\n print(\"Calculating pairwise distances\")\n result = []\n dataset_size = len(dataset)\n for i in tqdm(range(dataset_size-1)): # ignore last i\n for j in range(i+1, dataset_size): # ignore duplication\n\n\n dist = calculate_track_distance([i], [j],dataset)\n\n # duplicate dist, need to be remove, and there is no difference to use tuple only\n # leave second dist here is to take up a position for tie selection\n\n result.append( (dist, [dist, [[i], [j]]]) )\n\n return result\n\n\ndef build_priority_queue(distance_list):\n heapq.heapify(distance_list)\n return distance_list\n\ndef get_track_pair_to_dist(pairwise_distances):\n pair_to_dist = {}\n for pairwise_distance in pairwise_distances:\n #pairwise_distance = (dist, [dist, [[i], [j]]])\n idx1 = pairwise_distance[1][1][0][0]\n idx2 = pairwise_distance[1][1][1][0]\n track_idx_pair = frozenset([idx1,idx2])\n pair_to_dist[track_idx_pair] = pairwise_distance[0]\n\n return pair_to_dist\n\ndef get_person_id_to_track(track_results):\n\n if isinstance(track_results,str):\n print(\"Loading track results.\")\n print(track_results)\n track_results = pd.read_csv(track_results)\n\n person_id_to_track = defaultdict(list)\n frame_numbers = track_results.groupby(\"frame_no_cam\",as_index=False).mean()\n frame_numbers = frame_numbers[\"frame_no_cam\"].tolist()\n frame_numbers = list(map(int,frame_numbers))\n\n frame_numbers = sorted(frame_numbers)\n for frame_no in frame_numbers:\n one_frame = track_results[track_results[\"frame_no_cam\"] == frame_no]\n\n for index,track_row in one_frame.iterrows():\n person_id = int(track_row[\"person_id\"])\n bbox = (track_row[\"xtl\"],track_row[\"ytl\"],track_row[\"xbr\"],track_row[\"ybr\"])\n person_id_to_track[person_id].append({\"frame_no_cam\" : int(track_row[\"frame_no_cam\"])\n ,\"bbox\" : bbox})\n\n\n\n return person_id_to_track\n\ndef get_groundtruth_person_id_to_track(track_results,person_identifier=\"ped_id\"):\n\n if isinstance(track_results,str):\n track_results = pd.read_csv(track_results)\n\n person_id_to_track = defaultdict(list)\n frame_numbers = track_results.groupby(\"frame_no_cam\",as_index=False).mean()\n frame_numbers = frame_numbers[\"frame_no_cam\"].tolist()\n frame_numbers = list(map(int,frame_numbers))\n\n frame_numbers = sorted(frame_numbers)\n for frame_no in frame_numbers:\n one_frame = track_results[track_results[\"frame_no_cam\"] == frame_no]\n\n for index,track_row in one_frame.iterrows():\n person_id = 
int(track_row[person_identifier])\n\n\n bbox = (track_row[\"x_top_left_BB\"],track_row[\"y_top_left_BB\"],track_row[\"x_bottom_right_BB\"],track_row[\"y_bottom_right_BB\"])\n\n bbox = constrain_bbox_to_img_dims(bbox)\n\n person_id_to_track[person_id].append({\"frame_no_cam\" : int(track_row[\"frame_no_cam\"])\n ,\"bbox\" : bbox})\n\n\n\n return person_id_to_track\n\n\ndef save_combined_tracks(tracks,cam_id,output_result_path):\n print(\"Starting save procedure of combined tracks.\")\n combined_tracks:pd.DataFrame = pd.DataFrame({ \"frame_no_cam\" : []\n ,\"cam_id\" : []\n ,\"person_id\" : []\n ,\"xtl\" : []\n ,\"ytl\" : []\n ,\"xbr\" : []\n ,\"ybr\" : [] })\n for track_no, track in enumerate(tracks):\n for track_pos in track:\n combined_tracks = combined_tracks.append({ \"frame_no_cam\" : track_pos[\"frame_no_cam\"]\n ,\"cam_id\" : cam_id\n ,\"person_id\" : track_no\n ,\"xtl\" : track_pos[\"bbox\"][0]\n ,\"ytl\" : track_pos[\"bbox\"][1]\n ,\"xbr\" : track_pos[\"bbox\"][2]\n ,\"ybr\" : track_pos[\"bbox\"][3] },ignore_index=True)\n\n combined_tracks = combined_tracks.astype({ \"frame_no_cam\" : int\n ,\"cam_id\" : int\n ,\"person_id\" : int\n ,\"xtl\" : float\n ,\"ytl\" : float\n ,\"xbr\" : float\n ,\"ybr\" : float })\n combined_tracks = combined_tracks.sort_values(by=[\"frame_no_cam\"]) # removed sorting by frame_no_cam and person_id to increase speed\n\n combined_tracks.to_csv(output_result_path,index=False)\n print(\"Saved combined track results to: {}\".format(output_result_path))\n\n return output_result_path",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport glob\nimport re\nimport sys\nimport urllib\nimport tarfile\nimport zipfile\nimport os.path as osp\nfrom scipy.io import loadmat\nimport numpy as np\nimport h5py\n\nfrom torchreid.utils.iotools import mkdir_if_missing, write_json, read_json\nfrom .bases import BaseImageDataset\n\n\nclass VIPeR(BaseImageDataset):\n \"\"\"\n VIPeR\n\n Reference:\n Gray et al. Evaluating appearance models for recognition, reacquisition, and tracking. PETS 2007.\n\n URL: https://vision.soe.ucsc.edu/node/178\n\n Dataset statistics:\n # identities: 632\n # images: 632 x 2 = 1264\n # cameras: 2\n \"\"\"\n dataset_dir = 'viper'\n\n def __init__(self, root='data', split_id=0, verbose=True, **kwargs):\n super(VIPeR, self).__init__()\n self.dataset_dir = osp.join(root, self.dataset_dir)\n self.dataset_url = 'http://users.soe.ucsc.edu/~manduchi/VIPeR.v1.0.zip'\n self.cam_a_path = osp.join(self.dataset_dir, 'VIPeR', 'cam_a')\n self.cam_b_path = osp.join(self.dataset_dir, 'VIPeR', 'cam_b')\n self.split_path = osp.join(self.dataset_dir, 'splits.json')\n\n self._download_data()\n self._check_before_run()\n\n self._prepare_split()\n splits = read_json(self.split_path)\n if split_id >= len(splits):\n raise ValueError(\"split_id exceeds range, received {}, but expected between 0 and {}\".format(split_id, len(splits)-1))\n split = splits[split_id]\n\n train = split['train']\n query = split['query'] # query and gallery share the same images\n gallery = split['gallery']\n\n train = [tuple(item) for item in train]\n query = [tuple(item) for item in query]\n gallery = [tuple(item) for item in gallery]\n\n if verbose:\n print(\"=> VIPeR loaded\")\n self.print_dataset_statistics(train, query, gallery)\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)\n self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)\n self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)\n\n def _download_data(self):\n if osp.exists(self.dataset_dir):\n print(\"This dataset has been downloaded.\")\n return\n\n print(\"Creating directory {}\".format(self.dataset_dir))\n mkdir_if_missing(self.dataset_dir)\n fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))\n\n print(\"Downloading VIPeR dataset\")\n urllib.request.urlretrieve(self.dataset_url, fpath)\n\n print(\"Extracting files\")\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(self.dataset_dir)\n zip_ref.close()\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.cam_a_path):\n raise RuntimeError(\"'{}' is not available\".format(self.cam_a_path))\n if not osp.exists(self.cam_b_path):\n raise RuntimeError(\"'{}' is not available\".format(self.cam_b_path))\n\n def _prepare_split(self):\n if not osp.exists(self.split_path):\n print(\"Creating 10 random splits of train ids and test ids\")\n\n cam_a_imgs = sorted(glob.glob(osp.join(self.cam_a_path, '*.bmp')))\n cam_b_imgs = sorted(glob.glob(osp.join(self.cam_b_path, '*.bmp')))\n assert len(cam_a_imgs) == len(cam_b_imgs)\n num_pids = len(cam_a_imgs)\n print(\"Number of identities: {}\".format(num_pids))\n 
num_train_pids = num_pids // 2\n\n \"\"\"\n In total, there will be 20 splits because each random split creates two\n sub-splits, one using cameraA as query and cameraB as gallery\n while the other using cameraB as query and cameraA as gallery.\n Therefore, results should be averaged over 20 splits (split_id=0~19).\n\n In practice, a model trained on split_id=0 can be applied to split_id=0&1\n as split_id=0&1 share the same training data (so on and so forth).\n \"\"\"\n splits = []\n for _ in range(10):\n order = np.arange(num_pids)\n np.random.shuffle(order)\n train_idxs = order[:num_train_pids]\n test_idxs = order[num_train_pids:]\n assert not bool(set(train_idxs) & set(test_idxs)), \"Error: train and test overlap\"\n\n train = []\n for pid, idx in enumerate(train_idxs):\n cam_a_img = cam_a_imgs[idx]\n cam_b_img = cam_b_imgs[idx]\n train.append((cam_a_img, pid, 0))\n train.append((cam_b_img, pid, 1))\n\n test_a = []\n test_b = []\n for pid, idx in enumerate(test_idxs):\n cam_a_img = cam_a_imgs[idx]\n cam_b_img = cam_b_imgs[idx]\n test_a.append((cam_a_img, pid, 0))\n test_b.append((cam_b_img, pid, 1))\n\n # use cameraA as query and cameraB as gallery\n split = {'train': train, 'query': test_a, 'gallery': test_b,\n 'num_train_pids': num_train_pids,\n 'num_query_pids': num_pids - num_train_pids,\n 'num_gallery_pids': num_pids - num_train_pids\n }\n splits.append(split)\n\n # use cameraB as query and cameraA as gallery\n split = {'train': train, 'query': test_b, 'gallery': test_a,\n 'num_train_pids': num_train_pids,\n 'num_query_pids': num_pids - num_train_pids,\n 'num_gallery_pids': num_pids - num_train_pids\n }\n splits.append(split)\n\n print(\"Totally {} splits are created\".format(len(splits)))\n write_json(splits, self.split_path)\n print(\"Split file saved to {}\".format(self.split_path))\n\n print(\"Splits created\")\n"
] |
[
[
"numpy.divide",
"numpy.array",
"numpy.median",
"pandas.DataFrame",
"numpy.sum",
"numpy.multiply",
"pandas.read_csv"
],
[
"numpy.arange",
"numpy.random.shuffle"
]
] |
kngwyu/rlpy
|
[
"329166de28d311d8f87358a62c38f40a7318fe07"
] |
[
"tests/representations/test_local_bases.py"
] |
[
"from rlpy.representations import NonparametricLocalBases, RandomLocalBases\nfrom rlpy.domains import infinite_track_cartpole as inf_cp\nimport numpy as np\n\ntry:\n from rlpy.representations.kernels import gaussian_kernel\nexcept ImportError:\n from rlpy.representations.slow_kernels import gaussian_kernel\n\n\ndef test_parametric_rep():\n \"\"\"\n For fixed representations: test successful kernel function use, using\n varying number of features.\n Ensure get expected result. Test normalization, ensure expected result.\n \"\"\"\n for normalization in [False, True]: # verify everything with/out norm\n\n kernel = gaussian_kernel\n domain = inf_cp.InfTrackCartPole() # 2 continuous dims\n discretization = 20 # not used\n num = 1 # number of basis functions to use IN EACH DIMENSION\n resolution_min = 1\n resolution_max = 5\n rep = RandomLocalBases(\n domain,\n kernel,\n num,\n resolution_min,\n resolution_max,\n seed=1,\n normalization=normalization,\n discretization=discretization,\n )\n assert rep.features_num == num # in reality, theres one in each dim.\n\n # Center lies within statespace limits\n assert np.all(domain.statespace_limits[:, 0] <= rep.centers[0])\n assert np.all(rep.centers[0] <= domain.statespace_limits[:, 1])\n\n # width lies within resolution bounds\n statespace_range = (\n domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]\n )\n # widths[0] has `state_space_dims` cols\n assert np.all(statespace_range / resolution_max <= rep.widths[0])\n assert np.all(rep.widths[0] <= statespace_range / resolution_min)\n\n phiVecOrigin = rep.phi(np.array([0, 0], dtype=np.float), terminal=False)\n assert np.all(phiVecOrigin >= 0) # nonnegative feat func values\n\n # feature func only dependent on own dimension\n phiVec2 = rep.phi(np.array([0, 1], dtype=np.float), terminal=False)\n\n if normalization:\n assert sum(phiVecOrigin) == 1\n assert sum(phiVec2) == 1\n\n\ndef test_visual():\n \"\"\" Test 2-D basis func visualization. 
\"\"\"\n kernel = gaussian_kernel\n normalization = False\n domain = inf_cp.InfTrackCartPole() # 2 continuous dims\n discretization = 20 # not used\n num = 1 # number of basis functions to use\n resolution_min = 1\n resolution_max = 5\n rep = RandomLocalBases(\n domain,\n kernel,\n num,\n resolution_min,\n resolution_max,\n seed=1,\n normalization=normalization,\n discretization=discretization,\n )\n rep.plot_2d_feature_centers()\n\n\ndef test_nonparametric_rep():\n \"\"\"\n For nonparametric representations: test successful kernel function use,\n ensure get expected result.\n \"\"\"\n for normalization in [False, True]: # verify everything with/out norm\n\n kernel = gaussian_kernel\n normalization = False\n domain = inf_cp.InfTrackCartPole() # 2 continuous dims\n discretization = 20 # not used\n resolution = 2\n # Start by making it impossible to add feats:\n max_similarity = 0\n rep = NonparametricLocalBases(\n domain,\n kernel,\n max_similarity,\n resolution,\n normalization=normalization,\n discretization=discretization,\n )\n assert rep.features_num == 0 # ``num`` feats in each dimension\n origS = np.array([0, 0], dtype=np.float)\n s2 = np.array([0, 1], dtype=np.float)\n terminal = False # nonterminal states\n a = 1 # arbitrary\n rep.pre_discover(origS, terminal, a, s2, terminal)\n\n # in the first call, automaticlaly add 1 feature since empty phi_s\n # is always < rep.max_similarity.\n # In ALL OTHER cases though, since max_similarity = 0, can never add\n # any more.\n assert rep.features_num == 1\n\n # Now make it really easy to add feats:\n max_similarity = np.inf\n rep = NonparametricLocalBases(\n domain,\n kernel,\n max_similarity,\n resolution,\n normalization=normalization,\n discretization=discretization,\n )\n assert rep.features_num == 0 # ``num`` feats in each dimension\n origS = np.array([0, 0], dtype=np.float)\n s2 = np.array([0, 1], dtype=np.float)\n terminal = False # nonterminal states\n a = 1 # arbitrary\n rep.pre_discover(origS, terminal, a, s2, terminal)\n\n # max_similarity == inf means we definitely shouldve added feat for\n # BOTH s and ns:\n assert rep.features_num == 2\n assert np.all(rep.centers[0, :] == origS)\n assert np.all(rep.centers[1, :] == s2)\n statespace_range = (\n domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]\n )\n assert np.all(rep.widths == statespace_range / resolution)\n\n phiVecOrigin = rep.phi(np.array([0, 0], dtype=np.float), terminal=False)\n assert np.all(phiVecOrigin >= 0) # nonnegative feat func values\n\n # feature func only dependent on own dimension\n phiVec2 = rep.phi(np.array([0, 1], dtype=np.float), terminal=False)\n np.all(phiVec2 >= 0)\n\n if normalization:\n assert sum(phiVecOrigin) == 1\n assert sum(phiVec2) == 1\n\n\ndef test_phi_post_expansion():\n \"\"\"\n Ensure correct feature is activated for corresponding state, even after\n expansion. Also tests if weight vector remains aligned with feat vec.\n\n \"\"\"\n # TODO - could check to make sure weight vector remains aligned with\n # feat vec, even after expansion\n"
] |
[
[
"numpy.all",
"numpy.array"
]
] |
justincely/classwork
|
[
"2d2b1882f9141bc5776977a5c7c6a4788ea7bc4f"
] |
[
"UMD/AST615/HW6_2/plot_particles.py"
] |
[
"import pylab\nimport string\nimport glob\nimport numpy\nfrom mpl_toolkits.mplot3d import Axes3D\nN_particles=1000\npylab.ion()\nfig = pylab.figure()\nax=Axes3D(fig)\ntxt_list=glob.glob('t_*.txt')\ntxt_list.sort()\nfor txt in txt_list:\n infile=open(txt,'r')\n\n m=numpy.zeros(N_particles)\n r=numpy.zeros((3,N_particles))\n v=numpy.zeros((3,N_particles))\n a=numpy.zeros((3,N_particles))\n for i,line in enumerate(infile.readlines()):\n line=string.split(line)\n m[i]=(float(line[0]))\n r[0][i]=(float(line[1]))\n r[1][i]=(float(line[2]))\n r[2][i]=(float(line[3]))\n v[0][i]=(float(line[4]))\n v[1][i]=(float(line[5]))\n r[2][i]=(float(line[6]))\n r=r.clip(min=-500,max=500)\n ax.scatter(r[0], r[1], r[2],s=1)\n #ax.set_xlim3d(-5000,5000)\n #ax.set_ylim3d(-5000,5000)\n #ax.set_zlim3d(-5000,5000)\n #pylab.savefig(txt[:-4]+'.pdf')\n raw_input()\n print (txt[:-4]+'.pdf')\n pylab.cla()\n"
] |
[
[
"numpy.zeros"
]
] |
danieljfeller/medline-multilabel
|
[
"acacc6e9bd4d09776c3aa46600676eed562231ba"
] |
[
"src/data/parse_ohsuMed.py"
] |
[
"import os\nfrom os.path import isfile, join\nfrom os import walk\nimport re\nimport pandas as pd\n\n\"\"\"\nthis file takes the raw OHSU corpus in data/external/ and creates a pandas dataframe, \nputting it in data/processed/ohsu_med.csv\n\"\"\"\n\nbasepath=os.path.dirname(__file__)\n\nlabel_list = []\nfor (dirpath, dirnames, filenames) in walk(os.path.join(basepath,\"../../data/external/ohsumed-first-20000-docs/training\")):\n label_list.extend(dirnames)\n\n\n#Create dict of train labels with file number as key\ntrain_label_dict={}\ntrain_text_dict={}\nfor label in label_list:\n for (dirpath, dirnames, filenames) in walk(os.path.join(basepath,\"../../data/external/ohsumed-first-20000-docs/training/\",label)):\n for files in filenames:\n whole_text=\"\"\n if files in train_label_dict:\n train_label_dict[files].append(label)\n else:\n train_label_dict[files] = [label]\n\n f = open(os.path.join(basepath,\"../../data/external/ohsumed-first-20000-docs/training/\",label, files))\n\n for i, line in enumerate(f):\n if i == 0:\n line = line.rstrip(\"\\n\")[:-1] + \",\"\n\n else:\n line = line.replace(\",\", \".\") #comma maybe have special meaning in fasttest file format?\n\n whole_text= whole_text+line.rstrip(\"\\n\")\n\n train_text_dict[files] = whole_text\n\n#Create dict of test labels with file number\ntest_label_dict={}\ntest_text_dict={}\nfor label in label_list:\n for (dirpath, dirnames, filenames) in walk(os.path.join(basepath,\"../../data/external/ohsumed-first-20000-docs/test/\",label)):\n for files in filenames:\n whole_text = \"\"\n if files in test_label_dict:\n test_label_dict[files].append(label)\n\n else:\n test_label_dict[files] = [label]\n f1 = open(os.path.join(basepath, \"../../data/external/ohsumed-first-20000-docs/test/\", label, files))\n\n for i, line in enumerate(f1):\n if i == 0:\n line = line.rstrip(\"\\n\")[:-1]+\",\"\n\n else:\n line = line.replace(\",\", \".\") # comma maybe have special meaning in fasttest file format?\n\n whole_text = whole_text + line.rstrip(\"\\n\")\n\n test_text_dict[files] = whole_text\n\ntraining_file = open(os.path.join(basepath,\"../../data/processed/ohsumed.train\"), 'w')\nfor abstract_id, labels in train_label_dict.items():\n formatted_label=\"\"\n for l in train_label_dict[abstract_id]:\n #formatted_label = formatted_label+\"__label__\"+l+\" , \"\n l = l.replace(\"C\", \"\")\n if l[0]==\"0\":\n l=l[1:]\n formatted_label = l + \", \"\n\n training_file.write(formatted_label+train_text_dict[abstract_id]+\"\\n\")\n\ntest_file = open(os.path.join(basepath,\"../../data/processed/ohsumed.test\"), 'w')\nfor abstract_id, labels in test_label_dict.items():\n formatted_label=\"\"\n for l in test_label_dict[abstract_id]:\n #formatted_label = formatted_label+\"__label__\"+l+\" , \"\n l = l.replace(\"C\", \"\")\n if l[0]==\"0\":\n l=l[1:]\n\n formatted_label = l + \", \"\n\n test_file.write(formatted_label+test_text_dict[abstract_id]+\"\\n\")\n\n\nlabels, docs, splits = [], [], []\n\nfor line in open(\"../../data/processed/ohsumed.train\").read().splitlines():\n labels.append(re.search('^(.+?),', line).group(1))\n docs.append(re.search(',(.*)', line).group(1))\n splits.append('train')\n\nfor line in open(\"../../data/processed/ohsumed.test\").read().splitlines():\n labels.append(re.search('^(.+?),', line).group(1))\n docs.append(re.search(',(.*)', line).group(1))\n splits.append('test')\n\ndf = pd.DataFrame({'label': labels, 'doc': docs, 'split': splits})\ndf.to_csv(\"../../data/processed/ohsumed_abstracts.csv\")"
] |
[
[
"pandas.DataFrame"
]
] |
hyyc116/TopicSelectionBehavior
|
[
"edb415f1f30a65f9f9cf06104163c38f5801d2fc"
] |
[
"src/gini.py"
] |
[
"import numpy as np\n'''\n source repository: https://github.com/oliviaguest/gini.git\n'''\ndef gini(array):\n \"\"\"Calculate the Gini coefficient of a numpy array.\"\"\"\n # based on bottom eq:\n # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg\n # from:\n # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n # All values are treated equally, arrays must be 1d:\n array = np.array([float(i) for i in array])\n # array = array.flatten()\n if np.amin(array) < 0:\n # Values cannot be negative:\n array -= np.amin(array)\n # Values cannot be 0:\n array += 0.0000001\n # Values must be sorted:\n array = np.sort(array)\n # Index per array element:\n index = np.arange(1,array.shape[0]+1)\n # Number of array elements:\n n = array.shape[0]\n # Gini coefficient:\n return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))\n\n\nif __name__ == '__main__':\n print(gini(np.array([1.0,0,0])) )\n print(gini(np.array([1.0,1.0,1.0])) )"
] |
[
[
"numpy.array",
"numpy.sum",
"numpy.arange",
"numpy.sort",
"numpy.amin"
]
] |
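A quick usage sketch for the `gini` function above; the import assumes `src/gini.py` is on the Python path. A perfectly even distribution scores near 0, full concentration near 1:

```python
import numpy as np
from gini import gini  # assumes src/gini.py is importable

equal = np.ones(100)           # everyone holds the same amount
concentrated = np.zeros(100)
concentrated[0] = 1.0          # one holder owns everything

print(gini(equal))             # ~0.0
print(gini(concentrated))      # ~0.99, i.e. (n - 1) / n
```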
louis-richard/irfu-python
|
[
"38960c069b3d8600fa6cb8e0fbd7f0a2d83ca967"
] |
[
"pyrfu/pyrf/struct_func.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 3rd party imports\nimport numpy as np\nimport xarray as xr\n\n__author__ = \"Louis Richard\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2020-2021\"\n__license__ = \"MIT\"\n__version__ = \"2.3.7\"\n__status__ = \"Prototype\"\n\n\ndef struct_func(inp, scales, order):\n r\"\"\"Returns the structure function of a time series\n\n .. math::\n\n y= \\\\frac{1}{N-s}\\\\sum_{i=1}^{N-s}(x_i - x_{i+s})^o\n\n where :math:`s` is the scale, and :math:`o` is the order.\n\n Parameters\n ----------\n inp : xarray.DataArray\n Input time series.\n scales : array_like\n A list or an array containing the scales to calculate.\n order : int\n Order of the exponential of the structure function.\n\n Returns\n -------\n values : xarray.DataArray\n An xarray containing the structure functions, one per product in\n the original time series. The index coordinate contains the scale\n value, and the attribute 'order' keeps a record on the order used\n for its calculation.\n\n \"\"\"\n\n if scales is None:\n scales = [1]\n\n data = inp.data\n\n if len(data.shape) == 1:\n data = data[:, np.newaxis]\n else:\n pass\n\n result = []\n for scale in scales:\n result.append(\n np.mean(np.abs((data[scale:, :] - data[:-scale, :]) ** order),\n axis=0))\n\n result = np.array(result)\n\n cols = inp.coords[inp.dims[1]].data\n\n result = xr.DataArray(result, coords=[scales, cols],\n dims=[\"scale\", inp.dims[1]], attrs=inp.attrs)\n\n result.attrs['order'] = order\n\n return result\n"
] |
[
[
"numpy.array",
"numpy.abs"
]
] |
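A hypothetical call to `struct_func` above; the coordinate and dimension names here are illustrative, not taken from pyrfu:

```python
import numpy as np
import xarray as xr
from pyrfu.pyrf.struct_func import struct_func  # assumes pyrfu is installed

rng = np.random.default_rng(0)
inp = xr.DataArray(rng.standard_normal((1000, 3)),
                   coords=[np.arange(1000), ["x", "y", "z"]],
                   dims=["time", "comp"])

# Second-order structure function at a few lags.
sf = struct_func(inp, scales=[1, 2, 4, 8], order=2)
print(sf.dims, sf.shape, sf.attrs["order"])  # ('scale', 'comp') (4, 3) 2
```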
lee-man/goturn-pytorch
|
[
"3d655eaf63b955fd211234c9a6c9246a4073ee84",
"3d655eaf63b955fd211234c9a6c9246a4073ee84"
] |
[
"src/scripts/train.py",
"src/goturn/helper/image_io.py"
] |
[
"\"\"\"\nFile: train.py\nAuthor: Nrupatunga\nEmail: [email protected]\nGithub: https://github.com/nrupatunga\nDescription: Training scripts for goturn\n\"\"\"\n\nimport argparse\nimport random\nimport sys\nfrom collections import OrderedDict\nfrom multiprocessing import Manager\nimport copy\n\nimport cv2\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch.utils.data import DataLoader\nfrom torch_lr_finder import LRFinder\n\nfrom loguru import logger\n\n# try:\nfrom goturn.dataloaders.goturndataloader import GoturnDataloader\nfrom goturn.helper.vis_utils import Visualizer\nfrom goturn.network.network import GoturnNetwork\nfrom goturn.helper.BoundingBox import BoundingBox\nfrom goturn.helper.draw_util import draw\nfrom goturn.optimizer.caffeSGD import CaffeSGD\n# except ImportError:\n# logger.error('Please run $source settings.sh from root directory')\n# sys.exit(1)\n\n\nclass GoturnTrain(LightningModule):\n\n \"\"\"Docstring for GoturnTrain. \"\"\"\n\n def __init__(self, hparams, dbg=False):\n '''\n Pytorch lightning module for training goturn tracker.\n\n @hparams: all the argparse arguments for training\n @dbg: boolean for switching on visualizer\n '''\n logger.info('=' * 15)\n logger.info('GOTURN TRACKER')\n logger.info('=' * 15)\n\n super(GoturnTrain, self).__init__()\n\n self.__set_seed(hparams.seed)\n self.hparams = hparams\n logger.info('Setting up the network...')\n\n # network with pretrained model\n if not self.hparams.finetune:\n self._model = GoturnNetwork(self.hparams.pretrained_model)\n else:\n self._model = GoturnNetwork()\n checkpoint = torch.load(self.hparams.pretrained_model)['state_dict']\n checkpoint = self.load_model_param(checkpoint)\n self._model.load_state_dict(checkpoint, strict=False)\n self._dbg = dbg\n if dbg:\n self._viz = Visualizer(port=8097)\n \n def load_model_param(self, state_dict):\n '''\n The customized funtion to load the parameter weights into GoturnTrain's model. 
(self._model)\n '''\n state_dict_modified = copy.deepcopy(state_dict)\n for key in state_dict:\n if '_model' in key:\n pre, post = key.split('.', 1)\n state_dict_modified[post] = state_dict_modified.pop(key)\n return state_dict_modified\n \n\n def __freeze(self):\n \"\"\"Freeze the model features layer\n \"\"\"\n features_layer = self._model._net\n for param in features_layer.parameters():\n param.requires_grad = False\n\n def _set_conv_layer(self, conv_layers, param_dict):\n for layer in conv_layers.modules():\n if type(layer) == torch.nn.modules.conv.Conv2d:\n param_dict.append({'params': layer.weight,\n 'lr': 0,\n 'weight_decay': self.hparams.wd})\n param_dict.append({'params': layer.bias,\n 'lr': 0,\n 'weight_decay': 0})\n return param_dict\n\n def __set_lr(self):\n '''set learning rate for classifier layer'''\n param_dict = []\n if 1:\n conv_layers = self._model._net_1\n param_dict = self._set_conv_layer(conv_layers, param_dict)\n conv_layers = self._model._net_2\n param_dict = self._set_conv_layer(conv_layers, param_dict)\n\n regression_layer = self._model._classifier\n for layer in regression_layer.modules():\n if type(layer) == torch.nn.modules.linear.Linear:\n param_dict.append({'params': layer.weight,\n 'lr': 10 * self.hparams.lr,\n 'weight_decay': self.hparams.wd})\n param_dict.append({'params': layer.bias,\n 'lr': 20 * self.hparams.lr,\n 'weight_decay': 0})\n return param_dict\n\n def find_lr(self):\n \"\"\"finding suitable learning rate \"\"\"\n model = self._model\n params = self.__set_lr()\n\n criterion = torch.nn.L1Loss(size_average=False)\n optimizer = CaffeSGD(params,\n lr=1e-8,\n momentum=self.hparams.momentum,\n weight_decay=self.hparams.wd)\n\n lr_finder = LRFinder(model, optimizer, criterion, device=\"cuda\")\n trainloader = self.train_dataloader()\n lr_finder.range_test(trainloader, start_lr=1e-7, end_lr=1,\n num_iter=500)\n lr_finder.plot()\n\n def __set_seed(self, SEED):\n ''' set all the seeds for reproducibility '''\n logger.info('Settings seed = {}'.format(SEED))\n torch.manual_seed(SEED)\n np.random.seed(SEED)\n random.seed(SEED)\n cudnn.deterministic = True\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n ''' These are specific parameters for the sample generator '''\n ap = argparse.ArgumentParser(parents=[parent_parser])\n\n ap.add_argument('--min_scale', type=float,\n default=-0.4,\n help='min scale')\n ap.add_argument('--max_scale', type=float,\n default=0.4,\n help='max scale')\n ap.add_argument('--lamda_shift', type=float, default=5)\n ap.add_argument('--lamda_scale', type=int, default=15)\n return ap\n\n def configure_optimizers(self):\n \"\"\"Configure optimizers\"\"\"\n logger.info('Configuring optimizer: SGD with lr = {}, momentum = {}'.format(self.hparams.lr, self.hparams.momentum))\n params = self.__set_lr()\n optimizer = CaffeSGD(params,\n lr=self.hparams.lr,\n momentum=self.hparams.momentum,\n weight_decay=self.hparams.wd)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n step_size=self.hparams.lr_step,\n gamma=self.hparams.gamma)\n return [optimizer], [scheduler]\n\n @pl.data_loader\n def train_dataloader(self):\n \"\"\"train dataloader\"\"\"\n logger.info('===' * 20)\n logger.info('Loading dataset for training, please wait...')\n logger.info('===' * 20)\n\n imagenet_path = self.hparams.imagenet_path\n alov_path = self.hparams.alov_path\n mean_file = None\n manager = Manager()\n objGoturn = GoturnDataloader(imagenet_path, alov_path,\n mean_file=mean_file,\n images_p=manager.list(),\n targets_p=manager.list(),\n 
bboxes_p=manager.list(),\n val_ratio=0.005,\n isTrain=True, dbg=False)\n train_loader = DataLoader(objGoturn,\n batch_size=self.hparams.batch_size, shuffle=True,\n num_workers=6,\n collate_fn=objGoturn.collate)\n\n return train_loader\n\n @pl.data_loader\n def val_dataloader(self):\n \"\"\"validation dataloader\"\"\"\n logger.info('===' * 20)\n logger.info('Loading dataset for Validation, please wait...')\n logger.info('===' * 20)\n\n imagenet_path = self.hparams.imagenet_path\n alov_path = self.hparams.alov_path\n mean_file = None\n\n manager = Manager()\n objGoturn = GoturnDataloader(imagenet_path, alov_path,\n mean_file=mean_file,\n images_p=manager.list(),\n targets_p=manager.list(),\n bboxes_p=manager.list(),\n val_ratio=0.005,\n isTrain=False, dbg=False)\n val_loader = DataLoader(objGoturn,\n batch_size=self.hparams.batch_size, shuffle=True,\n num_workers=6,\n collate_fn=objGoturn.collate)\n return val_loader\n\n def forward(self, prev, curr):\n \"\"\"forward function\n \"\"\"\n pred_bb, confidence = self._model(prev.float(), curr.float())\n return pred_bb, confidence\n\n def vis_images(self, prev, curr, gt_bb, pred_bb, prefix='train'):\n\n def unnormalize(image, mean):\n image = np.transpose(image, (1, 2, 0)) + mean\n image = image.astype(np.float32)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image\n\n for i in range(0, prev.shape[0]):\n # _mean = np.load(self.hparams.mean_file)\n _mean = np.array([104, 117, 123])\n prev_img = prev[i].cpu().detach().numpy()\n curr_img = curr[i].cpu().detach().numpy()\n\n prev_img = unnormalize(prev_img, _mean)\n curr_img = unnormalize(curr_img, _mean)\n\n gt_bb_i = BoundingBox(*gt_bb[i].cpu().detach().numpy().tolist())\n gt_bb_i.unscale(curr_img)\n curr_img = draw.bbox(curr_img, gt_bb_i, color=(255, 0, 255))\n\n pred_bb_i = BoundingBox(*pred_bb[i].cpu().detach().numpy().tolist())\n pred_bb_i.unscale(curr_img)\n curr_img = draw.bbox(curr_img, pred_bb_i)\n\n out = np.concatenate((prev_img[np.newaxis, ...], curr_img[np.newaxis, ...]), axis=0)\n out = np.transpose(out, [0, 3, 1, 2])\n\n self._viz.plot_images_np(out, title='sample_{}'.format(i),\n env='goturn_{}'.format(prefix))\n\n def training_step(self, batch, batch_idx):\n \"\"\"Training step\n @batch: current batch data\n @batch_idx: current batch index\n \"\"\"\n curr, prev, gt_bb = batch\n pred_bb, conf_digit = self.forward(prev, curr)\n confidence = torch.sigmoid(conf_digit)\n regression_loss = (torch.nn.L1Loss(reduction='none')(pred_bb.float() * confidence + (1 - confidence) * gt_bb.float(), gt_bb.float())).sum()\n confidence_loss = torch.mean(-torch.log(confidence))\n loss = regression_loss + 0.1 * confidence_loss\n\n if self.trainer.use_dp:\n loss = loss.unsqueeze(0)\n\n if self._dbg:\n if batch_idx % 1000 == 0:\n d = {'loss': loss.item()}\n iters = (self.trainer.num_training_batches - 1) * self.current_epoch + batch_idx\n self._viz.plot_curves(d, iters, title='Train', ylabel='train_loss')\n if batch_idx % 1000 == 0:\n self.vis_images(prev, curr, gt_bb, pred_bb)\n\n tqdm_dict = {'batch_loss': loss, 'regression_loss': regression_loss, 'confidence_loss': confidence_loss}\n output = OrderedDict({'loss': loss,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict})\n return output\n\n def validation_step(self, batch, batch_idx):\n \"\"\"validation step\n @batch: current batch data\n @batch_idx: current batch index\n \"\"\"\n curr, prev, gt_bb = batch\n pred_bb, _ = self.forward(prev, curr)\n loss = torch.nn.L1Loss(reduction='sum')(pred_bb, gt_bb.float())\n\n if self.trainer.use_dp:\n 
loss = loss.unsqueeze(0)\n\n        if self._dbg:\n            if batch_idx % 100 == 0:\n                d = {'loss': loss.item()}\n                iters = (self.trainer.num_val_batches - 1) * self.current_epoch + batch_idx\n                self._viz.plot_curves(d, iters, title='Validation', ylabel='val_loss')\n\n            if batch_idx % 1000 == 0:\n                self.vis_images(prev, curr, gt_bb, pred_bb, prefix='val')\n\n        tqdm_dict = {'val_loss': loss}\n        output = OrderedDict({'val_loss': loss,\n                              'progress_bar': tqdm_dict,\n                              'log': tqdm_dict})\n        return output\n\n    def validation_end(self, outputs):\n        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n        return {'val_loss': avg_loss}\n\n\ndef get_args():\n    \"\"\" These are common arguments such as\n    1. Path to dataset (imagenet and alov)\n    2. Architecture, learning rate, batch size\n    3. Optimizers: learning rate, momentum, weight decay, learning step,\n    gamma\n    4. Seed for reproducibility\n    5. save path for the model\n    \"\"\"\n\n    ap = argparse.ArgumentParser(add_help=False,\n                                 description='Arguments for training Goturn Tracker')\n    ap.add_argument('--gpus', type=int, default=1,\n                    help='number of gpus, 0: means no gpu, -1 to use all \\\n                    gpus, 1 = use one gpu, 2 = use two gpus')\n\n    # Data settings\n    ap.add_argument('--imagenet_path', type=str,\n                    required=True, help='path to imagenet folder, this \\\n                    folder should have images and gt folder')\n    ap.add_argument('--alov_path', type=str,\n                    required=True, help='path to ALOV folder, this \\\n                    folder should have images and gt folder')\n\n    # architecture and hyperparameters\n    ap.add_argument('--arch', default='alexnet',\n                    choices={'alexnet'}, help='model architecture, \\\n                    default: alexnet, currently only alexnet is \\\n                    supported')\n    ap.add_argument('--pretrained_model',\n                    default='../goturn/models/pretrained/alexnet.pth.tar',\n                    help='Path to pretrained model')\n    ap.add_argument('--epochs', default=90,\n                    type=int, help='number of total epochs to run')\n    ap.add_argument('--batch_size', default=3,\n                    type=int, help='number of images per batch')\n\n    # Optimizer settings\n    ap.add_argument('--lr', default=1e-6, type=float,\n                    help='initial learning rate', dest='lr')\n    ap.add_argument('--momentum', default=0.9, type=float, help='momentum')\n    ap.add_argument('--wd', default=5e-4, type=float, help='weight decay (default: 5e-4)',\n                    dest='wd')\n    ap.add_argument('--lr_step', default=1, type=int,\n                    help='Number of epochs after which we change the learning rate',\n                    dest='lr_step')\n    ap.add_argument('--gamma', default=0.1, type=float,\n                    help='multiplicative factor for learning rate',\n                    dest='gamma')\n    ap.add_argument('--finetune', default=1, type=int,\n                    help='finetune on the pre-trained model',\n                    dest='finetune')\n\n    # reproducibility\n    ap.add_argument('--seed', type=int, default=42, help='seed value')\n    # ap.add_argument('--seed', type=int, default=800, help='seed value')\n\n    # save path\n    ap.add_argument('--save_path', default=\".\", type=str, help='path to save output')\n\n    # goturn specific arguments\n    ap = GoturnTrain.add_model_specific_args(ap)\n    return ap.parse_args()\n\n\ndef read_images_dbg(idx):\n    idx = idx + 1\n    _mean = np.array([104, 117, 123])\n    images = []\n    target = []\n    bbox = []\n    parent_path = '/media/nthere/datasets/goturn_samples/0{}'.format(idx)\n    gt_path = '{}/gt.txt'.format(parent_path)\n    with open(gt_path) as f:\n        for i, line in enumerate(f):\n            prev_path = '{}/Image{}_curr.png'.format(parent_path, i)\n            curr_path = '{}/Image{}_target.png'.format(parent_path, i)\n            prev = cv2.imread(prev_path) - _mean\n            prev = np.transpose(prev, axes=(2, 0, 1))\n            curr = 
cv2.imread(curr_path) - _mean\n            curr = np.transpose(curr, axes=(2, 0, 1))\n            gt = line.strip().split(',')[0:4]\n            gt = [float(p) for p in gt]\n            images.append(prev)\n            target.append(curr)\n            bbox.append(gt)\n\n    images = torch.from_numpy(np.stack(images)).to('cuda:0')\n    targets = torch.from_numpy(np.stack(target)).to('cuda:0')\n    bboxes = torch.from_numpy(np.stack(bbox)).to('cuda:0')\n    return images, targets, bboxes\n\n\ndef main(hparams):\n    model = GoturnTrain(hparams, dbg=False)\n    # ckpt_resume_path = './caffenet-dbg-2/_ckpt_epoch_1.ckpt'\n    ckpt_cb = ModelCheckpoint(filepath=hparams.save_path, save_top_k=-1,\n                              save_weights_only=False)\n    trainer = Trainer(default_save_path=hparams.save_path,\n                      gpus=[0, ], min_nb_epochs=hparams.epochs,\n                      accumulate_grad_batches=1,\n                      train_percent_check=1,\n                      # resume_from_checkpoint=ckpt_resume_path,\n                      checkpoint_callback=ckpt_cb,\n                      val_percent_check=1, profiler=True)\n    trainer.fit(model)\n\n\nif __name__ == \"__main__\":\n    main(get_args())\n",
"\"\"\"\nFile: image_io.py\nAuthor: Nrupatunga\nEmail: [email protected]\nGithub: https://github.com/nrupatunga\nDescription: Image IO\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import get_image_backend\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\n\ndef _pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef _accimage_loader(path):\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return _pil_loader(path)\n\n\ndef _is_pil_image(img):\n if accimage is not None:\n return isinstance(img, (Image.Image, accimage.Image))\n else:\n return isinstance(img, Image.Image)\n\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\n\ndef _is_tensor_image(img):\n return torch.is_tensor(img) and img.ndimension() == 3\n\n\ndef image_to_tensor(pic, scale=255):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n See ``ToTensor`` for more details.\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n Returns:\n Tensor: Converted image.\n \"\"\"\n\n if not(_is_pil_image(pic) or _is_numpy_image(pic)):\n raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n\n if isinstance(pic, np.ndarray):\n # handle numpy array\n if pic.ndim == 2:\n pic = pic[:, :, None]\n\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(img, torch.ByteTensor):\n return img.float().div(scale)\n else:\n return img\n\n if accimage is not None and isinstance(pic, accimage.Image):\n nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)\n pic.copyto(nppic)\n return torch.from_numpy(nppic)\n\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n elif pic.mode == 'F':\n img = torch.from_numpy(np.array(pic, np.float32, copy=False))\n elif pic.mode == '1':\n img = scale * torch.from_numpy(np.array(pic, np.uint8, copy=False))\n else:\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: L, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(scale)\n else:\n return img\n\n\ndef load_grayscale(path):\n if get_image_backend() == 'accimage':\n img = _accimage_loader(path)\n else:\n img = _pil_loader(path)\n\n channels = img.split()\n if len(channels) == 3:\n img = Image.merge(\"RGB\", [channels[2], channels[1], channels[0]])\n return img.convert('L')\n\n\ndef load(path, image_size=None):\n if get_image_backend() == 'accimage':\n img = _accimage_loader(path)\n else:\n img = _pil_loader(path)\n\n channels = img.split()\n if len(channels) == 1:\n img = img.convert('L')\n else: # Make sure it is BGR for caffenet\n img = Image.merge(\"RGB\", [channels[2], channels[1], channels[0]])\n\n if image_size is not None:\n if (image_size[0] == 1 and len(channels) == 3):\n img = img.convert('L')\n if 
image_size[1] == img.width and image_size[2] == img.height:\n return img\n\n return img.resize((image_size[1], image_size[2]), Image.BILINEAR)\n else:\n return img\n\n\ndef resize(img, size):\n \"\"\"resize numpy array\n \"\"\"\n\n if _is_numpy_image(img):\n img = Image.fromarray(img)\n\n return img.resize(size, Image.BILINEAR)\n"
] |
[
[
"numpy.concatenate",
"torch.sigmoid",
"numpy.array",
"torch.optim.lr_scheduler.StepLR",
"torch.stack",
"numpy.random.seed",
"torch.nn.L1Loss",
"torch.manual_seed",
"numpy.stack",
"numpy.transpose",
"torch.utils.data.DataLoader",
"torch.load",
"torch.log"
],
[
"torch.is_tensor",
"numpy.array",
"numpy.zeros",
"torch.from_numpy"
]
] |
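A small sketch exercising `image_to_tensor` from `image_io.py` above on a synthetic HWC uint8 image; the import path mirrors the package layout recorded for this entry and is an assumption:

```python
import numpy as np
from goturn.helper.image_io import image_to_tensor  # assumes the src/ layout above

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # HWC uint8 image
t = image_to_tensor(img)   # numpy HWC -> torch CHW, bytes scaled to [0, 1]
print(t.shape, t.dtype)    # torch.Size([3, 64, 64]) torch.float32
```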
Data-Science-and-Data-Analytics-Courses/fdfff
|
[
"1aa85d18bdd8526b941a6936d24cfb57c687323a"
] |
[
"project2_digit_recognition/mnist/part2-mnist/nnet_fc.py"
] |
[
"#! /usr/bin/env python\n\nimport _pickle as cPickle, gzip\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport sys\nsys.path.append(\"..\")\nimport utils\nfrom utils import *\nfrom train_utils import batchify_data, run_epoch, train_model\n\ndef main():\n # Load the dataset\n num_classes = 10\n X_train, y_train, X_test, y_test = get_MNIST_data()\n\n # Split into train and dev\n dev_split_index = int(9 * len(X_train) / 10)\n X_dev = X_train[dev_split_index:]\n y_dev = y_train[dev_split_index:]\n X_train = X_train[:dev_split_index]\n y_train = y_train[:dev_split_index]\n\n permutation = np.array([i for i in range(len(X_train))])\n np.random.shuffle(permutation)\n X_train = [X_train[i] for i in permutation]\n y_train = [y_train[i] for i in permutation]\n\n # Split dataset into batches\n batch_size = 32\n train_batches = batchify_data(X_train, y_train, batch_size)\n dev_batches = batchify_data(X_dev, y_dev, batch_size)\n test_batches = batchify_data(X_test, y_test, batch_size)\n\n #################################\n ## Model specification TODO\n model = nn.Sequential(\n nn.Linear(784, 10),\n nn.ReLU(),\n nn.Linear(10, 10),\n )\n lr=0.1\n momentum=0\n ##################################\n\n train_model(train_batches, dev_batches, model, lr=lr, momentum=momentum)\n\n ## Evaluate the model on test data\n loss, accuracy = run_epoch(test_batches, model.eval(), None)\n\n print (\"Loss on test set:\" + str(loss) + \" Accuracy on test set: \" + str(accuracy))\n\n\nif __name__ == '__main__':\n # Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx\n np.random.seed(12321) # for reproducibility\n torch.manual_seed(12321) # for reproducibility\n main()\n"
] |
[
[
"torch.nn.Linear",
"numpy.random.seed",
"numpy.random.shuffle",
"torch.manual_seed",
"torch.nn.ReLU"
]
] |
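The "Model specification" block above fills the TODO with a two-layer MLP. A standalone sketch of the same `nn.Sequential` spec with a shape check on a dummy batch:

```python
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Linear(784, 10),
    nn.ReLU(),
    nn.Linear(10, 10),
)

x = torch.randn(32, 784)   # a batch of 32 flattened 28x28 digits
logits = model(x)
print(logits.shape)        # torch.Size([32, 10]), one score per class
```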
rpSebastian/AutoCFR
|
[
"71fd4ce6e64580c11328b9fe75f79de3fae18af3"
] |
[
"autocfr/program/executor.py"
] |
[
"import numpy as np\n\n\nclass PostCheckError(RuntimeError):\n def __init__(self, hint):\n self.hint = hint\n\n def __str__(self):\n return self.hint\n\n\nclass ProgramExecutionError(RuntimeError):\n pass\n\n\nclass AlgorithmExecutionError(RuntimeError):\n def __init__(self, error_info):\n super().__init__(self)\n self.error_info = error_info\n\n def __str__(self):\n return str(self.error_info)\n\n\ndef _execute_program(\n program_operations, input_values, data_structure_values, post_check=True\n):\n intermediate_values = {**input_values, **data_structure_values}\n output_values = []\n\n for operation in program_operations:\n input_values = [intermediate_values[i] for i in operation.inputs]\n output_value = \"UNSET\"\n try:\n output_value = operation.execute(input_values)\n output_values.append(output_value)\n intermediate_values[operation] = output_value\n except Exception as e:\n e.info = [\n operation,\n operation.cached_output_type,\n *[intermediate_values[i] for i in operation.inputs],\n output_value,\n ]\n raise ProgramExecutionError(e)\n\n for operation, output_value in zip(program_operations, output_values):\n try:\n assert operation.cached_output_type.value_class == type(output_value), (\n \"wanted\",\n operation.cached_output_type.value_class,\n \"got\",\n type(output_value),\n operation,\n )\n assert operation.cached_output_type.is_valid_value(output_value)\n except Exception as e:\n e.info = [\n operation,\n operation.cached_output_type,\n *[intermediate_values[i] for i in operation.inputs],\n output_value,\n ]\n raise ProgramExecutionError(e)\n\n if post_check:\n for k, v in data_structure_values.items():\n if k.name == \"policy\":\n policy_value = v.value()\n min_policy_value = np.min(policy_value)\n sum_policy_value = np.sum(policy_value)\n if min_policy_value < 0 or abs(sum_policy_value - 1) > 1e-3:\n e = PostCheckError(\"policy value erorr\")\n e.info = (\n \"policy\",\n policy_value,\n min_policy_value,\n sum_policy_value,\n )\n raise e\n\n if k.name == \"cum_policy\":\n cum_policy_value = v.value()\n min_cum_policy_value = np.min(cum_policy_value)\n if min_cum_policy_value < 0:\n e = PostCheckError(\"cum policy value erorr\")\n e.info = (\"policy\", cum_policy_value)\n raise e\n\n return intermediate_values\n"
] |
[
[
"numpy.sum",
"numpy.min"
]
] |
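The post-check in `_execute_program` above enforces that a policy is a valid probability distribution. A standalone sketch of that invariant; the names here are illustrative, not from the repo:

```python
import numpy as np

def check_policy(policy_value):
    # Mirrors the post-check above: non-negative entries summing to ~1.
    if np.min(policy_value) < 0 or abs(np.sum(policy_value) - 1) > 1e-3:
        raise ValueError("policy value error")

check_policy(np.array([0.25, 0.25, 0.5]))     # passes: a valid distribution
try:
    check_policy(np.array([0.7, 0.7, -0.4]))  # sums to 1 but has a negative entry
except ValueError as exc:
    print(exc)
```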
dabacon/Cirq
|
[
"54286063f679d67501ff1b905cd16b879feaae27",
"610b0d4ea3a7862169610797266734c844ddcc1f"
] |
[
"cirq-core/cirq/work/observable_measurement.py",
"cirq-core/cirq/ops/mixed_unitary_channel_test.py"
] |
[
"# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport dataclasses\nimport itertools\nimport os\nimport tempfile\nimport warnings\nfrom typing import (\n Optional,\n Union,\n Iterable,\n Dict,\n List,\n Tuple,\n TYPE_CHECKING,\n Set,\n Sequence,\n Any,\n)\n\nimport numpy as np\nimport pandas as pd\nimport sympy\nfrom cirq import circuits, study, ops, value, protocols\nfrom cirq._doc import document\nfrom cirq.work.observable_grouping import group_settings_greedy, GROUPER_T\nfrom cirq.work.observable_measurement_data import (\n BitstringAccumulator,\n ObservableMeasuredResult,\n flatten_grouped_results,\n)\nfrom cirq.work.observable_settings import (\n InitObsSetting,\n observables_to_settings,\n _MeasurementSpec,\n)\n\nif TYPE_CHECKING:\n import cirq\n from cirq.value.product_state import _NamedOneQubitState\n\nMAX_REPETITIONS_PER_JOB = 3_000_000\ndocument(\n MAX_REPETITIONS_PER_JOB,\n \"\"\"The maximum repetitions allowed in a single batch job.\n\n This depends on the Sampler executing your batch job. It is set to be\n tens of minutes assuming ~kilosamples per second.\n \"\"\",\n)\n\n\ndef _with_parameterized_layers(\n circuit: 'cirq.AbstractCircuit',\n qubits: Sequence['cirq.Qid'],\n needs_init_layer: bool,\n) -> 'cirq.Circuit':\n \"\"\"Return a copy of the input circuit with parameterized single-qubit rotations.\n\n These rotations flank the circuit: the initial two layers of X and Y gates\n are given parameter names \"{qubit}-Xi\" and \"{qubit}-Yi\" and are used\n to set up the initial state. 
If `needs_init_layer` is False,\n    these two layers of gates are omitted.\n\n    The final two layers of X and Y gates are given parameter names\n    \"{qubit}-Xf\" and \"{qubit}-Yf\" and are used to change the frame of the\n    qubit before measurement, effectively measuring in bases other than Z.\n    \"\"\"\n    x_beg_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xi') for q in qubits])\n    y_beg_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yi') for q in qubits])\n    x_end_mom = ops.Moment([ops.X(q) ** sympy.Symbol(f'{q}-Xf') for q in qubits])\n    y_end_mom = ops.Moment([ops.Y(q) ** sympy.Symbol(f'{q}-Yf') for q in qubits])\n    meas_mom = ops.Moment([ops.measure(*qubits, key='z')])\n    if needs_init_layer:\n        total_circuit = circuits.Circuit([x_beg_mom, y_beg_mom])\n        total_circuit += circuit.unfreeze()\n    else:\n        total_circuit = circuit.unfreeze()\n    total_circuit.append([x_end_mom, y_end_mom, meas_mom])\n    return total_circuit\n\n\nclass StoppingCriteria(abc.ABC):\n    \"\"\"An abstract object that queries a BitstringAccumulator to figure out\n    whether that `meas_spec` is complete.\"\"\"\n\n    @abc.abstractmethod\n    def more_repetitions(self, accumulator: BitstringAccumulator) -> int:\n        \"\"\"Return the number of additional repetitions to take.\n\n        StoppingCriteria should be respectful and have some notion of a\n        maximum number of repetitions per chunk.\n        \"\"\"\n\n\[email protected](frozen=True)\nclass VarianceStoppingCriteria(StoppingCriteria):\n    \"\"\"Stop sampling when average variance per term drops below a variance bound.\"\"\"\n\n    variance_bound: float\n    repetitions_per_chunk: int = 10_000\n\n    def more_repetitions(self, accumulator: BitstringAccumulator) -> int:\n        if len(accumulator.bitstrings) == 0:\n            return self.repetitions_per_chunk\n\n        cov = accumulator.covariance()\n        n_terms = cov.shape[0]\n        sum_variance = np.sum(cov)\n        var_of_the_e = sum_variance / len(accumulator.bitstrings)\n        vpt = var_of_the_e / n_terms\n\n        if vpt <= self.variance_bound:\n            # Done\n            return 0\n        return self.repetitions_per_chunk\n\n    def _json_dict_(self):\n        return protocols.dataclass_json_dict(self)\n\n\[email protected](frozen=True)\nclass RepetitionsStoppingCriteria(StoppingCriteria):\n    \"\"\"Stop sampling when the number of repetitions has been reached.\"\"\"\n\n    total_repetitions: int\n    repetitions_per_chunk: int = 10_000\n\n    def more_repetitions(self, accumulator: BitstringAccumulator) -> int:\n        done = accumulator.n_repetitions\n        todo = self.total_repetitions - done\n        if todo <= 0:\n            return 0\n\n        to_do_next = min(self.repetitions_per_chunk, todo)\n        return to_do_next\n\n    def _json_dict_(self):\n        return protocols.dataclass_json_dict(self)\n\n\n_OBS_TO_PARAM_VAL: Dict[Tuple['cirq.Pauli', bool], Tuple[float, float]] = {\n    (ops.X, False): (0, -1 / 2),\n    (ops.X, True): (0, +1 / 2),\n    (ops.Y, False): (1 / 2, 0),\n    (ops.Y, True): (-1 / 2, 0),\n    (ops.Z, False): (0, 0),\n    (ops.Z, True): (1, 0),\n}\n\"\"\"Mapping from single-qubit Pauli observable to the X- and Y-rotation parameter values. 
The\nsecond element in the key is whether to measure in the positive or negative (flipped) basis\nfor readout symmetrization.\"\"\"\n\n_STATE_TO_PARAM_VAL: Dict['_NamedOneQubitState', Tuple[float, float]] = {\n    value.KET_PLUS: (0, +1 / 2),\n    value.KET_MINUS: (0, -1 / 2),\n    value.KET_IMAG: (-1 / 2, 0),\n    value.KET_MINUS_IMAG: (+1 / 2, 0),\n    value.KET_ZERO: (0, 0),\n    value.KET_ONE: (1, 0),\n}\n\"\"\"Mapping from an initial _NamedOneQubitState to the X- and Y-rotation parameter values.\"\"\"\n\n\ndef _get_params_for_setting(\n    setting: InitObsSetting,\n    flips: Iterable[bool],\n    qubits: Sequence['cirq.Qid'],\n    needs_init_layer: bool,\n) -> Dict[str, float]:\n    \"\"\"Return the parameter dictionary for the given setting.\n\n    This must be used in conjunction with a circuit generated by\n    `_with_parameterized_layers`. `flips` (used for readout symmetrization)\n    should be of the same length as `qubits` and will modify the parameters\n    to also include a bit flip (`X`). Code responsible for running the\n    circuit should make sure to flip bits back prior to analysis.\n\n    Like `_with_parameterized_layers`, we omit params for initialization gates\n    if we know that `setting.init_state` is the all-zeros state and\n    `needs_init_layer` is False.\n    \"\"\"\n    setting = _pad_setting(setting, qubits)\n    params = {}\n    for qubit, flip in itertools.zip_longest(qubits, flips):\n        if qubit is None or flip is None:\n            raise ValueError(\"`qubits` and `flips` must be equal length\")\n        # When getting the one-qubit state / observable for this qubit,\n        # you may be wondering what if there's no observable specified\n        # for that qubit. We mandate that by the time you get to this stage,\n        # each _max_setting has\n        # weight(in_state) == weight(out_operator) == len(qubits)\n        # See _pad_setting\n        pauli = setting.observable[qubit]\n        xf_param, yf_param = _OBS_TO_PARAM_VAL[pauli, flip]\n        params[f'{qubit}-Xf'] = xf_param\n        params[f'{qubit}-Yf'] = yf_param\n\n        if needs_init_layer:\n            state = setting.init_state[qubit]\n            xi_param, yi_param = _STATE_TO_PARAM_VAL[state]\n            params[f'{qubit}-Xi'] = xi_param\n            params[f'{qubit}-Yi'] = yi_param\n\n    return params\n\n\ndef _pad_setting(\n    max_setting: InitObsSetting,\n    qubits: Sequence['cirq.Qid'],\n    pad_init_state_with=value.KET_ZERO,\n    pad_obs_with: 'cirq.Gate' = ops.Z,\n) -> InitObsSetting:\n    \"\"\"Pad `max_setting`'s `init_state` and `observable` with `pad_xx_with` operations\n    (defaults: |0> and Z) so each max_setting has the same qubits. We need this\n    to be the case so we can fill in all the parameters, see `_get_params_for_setting`.\n    \"\"\"\n    obs = max_setting.observable\n    assert obs.coefficient == 1, \"Only the max_setting should be padded.\"\n    for qubit in qubits:\n        if qubit not in obs:\n            obs *= pad_obs_with(qubit)\n\n    init_state = max_setting.init_state\n    init_state_original_qubits = init_state.qubits\n    for qubit in qubits:\n        if qubit not in init_state_original_qubits:\n            init_state *= pad_init_state_with(qubit)\n\n    return InitObsSetting(init_state=init_state, observable=obs)\n\n\ndef _aggregate_n_repetitions(next_chunk_repetitions: Set[int]) -> int:\n    \"\"\"A stopping criteria can request a different number of more_repetitions for each\n    measurement spec. For batching efficiency, we take the max and issue a warning in this case.\"\"\"\n    if len(next_chunk_repetitions) == 1:\n        return list(next_chunk_repetitions)[0]\n\n    reps = max(next_chunk_repetitions)\n    warnings.warn(\n        f\"The stopping criteria specified various numbers of \"\n        f\"repetitions to perform next. 
To be able to submit as a single \"\n        f\"sweep, the largest value will be used: {reps}.\"\n    )\n    return reps\n\n\ndef _check_meas_specs_still_todo(\n    meas_specs: List[_MeasurementSpec],\n    accumulators: Dict[_MeasurementSpec, BitstringAccumulator],\n    stopping_criteria: StoppingCriteria,\n) -> Tuple[List[_MeasurementSpec], int]:\n    \"\"\"Filter `meas_specs` in case some are done.\n\n    In the sampling loop in `measure_grouped_settings`, we submit\n    each `meas_spec` in chunks. This function contains the logic for\n    removing `meas_spec`s from the loop if they are done.\n    \"\"\"\n    still_todo = []\n    repetitions_set: Set[int] = set()\n    for meas_spec in meas_specs:\n        accumulator = accumulators[meas_spec]\n        more_repetitions = stopping_criteria.more_repetitions(accumulator)\n\n        if more_repetitions < 0:\n            raise ValueError(\n                \"Stopping criteria's `more_repetitions` should return 0 or a positive number.\"\n            )\n        if more_repetitions == 0:\n            continue\n\n        repetitions_set.add(more_repetitions)\n        still_todo.append(meas_spec)\n\n    if len(still_todo) == 0:\n        return still_todo, 0\n\n    repetitions = _aggregate_n_repetitions(repetitions_set)\n    total_repetitions = len(still_todo) * repetitions\n    if total_repetitions > MAX_REPETITIONS_PER_JOB:\n        old_repetitions = repetitions\n        repetitions = MAX_REPETITIONS_PER_JOB // len(still_todo)\n\n        if repetitions < 10:\n            raise ValueError(\n                \"You have requested too many parameter settings to batch your job effectively. \"\n                \"Consider fewer sweeps or manually splitting sweeps into multiple jobs.\"\n            )\n\n        warnings.warn(\n            f\"The number of requested sweep parameters is high. To avoid a batched job with more \"\n            f\"than {MAX_REPETITIONS_PER_JOB} shots, the number of shots per call to run_sweep \"\n            f\"(per parameter value) will be throttled from {old_repetitions} to {repetitions}.\"\n        )\n\n    return still_todo, repetitions\n\n\[email protected](frozen=True)\nclass _FlippyMeasSpec:\n    \"\"\"Internally, each MeasurementSpec class is split into two\n    _FlippyMeasSpecs to support readout symmetrization.\n\n    Bitstring results are combined, so this should be opaque to the user.\n    \"\"\"\n\n    meas_spec: _MeasurementSpec\n    flips: np.ndarray\n    qubits: Sequence['cirq.Qid']\n\n    def param_tuples(self, *, needs_init_layer=True):\n        yield from _get_params_for_setting(\n            self.meas_spec.max_setting,\n            flips=self.flips,\n            qubits=self.qubits,\n            needs_init_layer=needs_init_layer,\n        ).items()\n        yield from self.meas_spec.circuit_params.items()\n\n\ndef _subdivide_meas_specs(\n    meas_specs: Iterable[_MeasurementSpec],\n    repetitions: int,\n    qubits: Sequence['cirq.Qid'],\n    readout_symmetrization: bool,\n) -> Tuple[List[_FlippyMeasSpec], int]:\n    \"\"\"Split measurement specs into sub-jobs for readout symmetrization\n\n    In readout symmetrization, we first run the \"normal\" circuit followed\n    by running the circuit with flipped measurement.\n    One _MeasurementSpec is split into two _FlippyMeasSpecs. 
These are run\n    separately but accumulated according to their shared _MeasurementSpec.\n    \"\"\"\n    n_qubits = len(qubits)\n    flippy_mspecs = []\n    for meas_spec in meas_specs:\n        all_normal = np.zeros(n_qubits, dtype=bool)\n        flippy_mspecs.append(\n            _FlippyMeasSpec(\n                meas_spec=meas_spec,\n                flips=all_normal,\n                qubits=qubits,\n            )\n        )\n\n        if readout_symmetrization:\n            all_flipped = np.ones(n_qubits, dtype=bool)\n            flippy_mspecs.append(\n                _FlippyMeasSpec(\n                    meas_spec=meas_spec,\n                    flips=all_flipped,\n                    qubits=qubits,\n                )\n            )\n\n    if readout_symmetrization:\n        repetitions //= 2\n\n    return flippy_mspecs, repetitions\n\n\ndef _to_sweep(param_tuples):\n    \"\"\"Turn param tuples into a sweep.\"\"\"\n    to_sweep = [dict(pt) for pt in param_tuples]\n    to_sweep = study.to_sweep(to_sweep)\n    return to_sweep\n\n\n# TODO(#3388) Add documentation for Raises.\n# pylint: disable=missing-raises-doc\ndef _parse_checkpoint_options(\n    checkpoint: bool, checkpoint_fn: Optional[str], checkpoint_other_fn: Optional[str]\n) -> Tuple[Optional[str], Optional[str]]:\n    \"\"\"Parse the checkpoint-oriented options in `measure_grouped_settings`.\n\n    This function contains the validation and defaults logic. Please see\n    `measure_grouped_settings` for documentation on these args.\n\n    Returns:\n        checkpoint_fn, checkpoint_other_fn: Parsed or default filenames for primary and previous\n            checkpoint files.\n    \"\"\"\n    if not checkpoint:\n        if checkpoint_fn is not None or checkpoint_other_fn is not None:\n            raise ValueError(\n                \"Checkpoint filenames were provided but `checkpoint` was set to False.\"\n            )\n        return None, None\n\n    if checkpoint_fn is None:\n        checkpoint_dir = tempfile.mkdtemp()\n        chk_basename = 'observables'\n        checkpoint_fn = f'{checkpoint_dir}/{chk_basename}.json'\n\n    if checkpoint_other_fn is None:\n        checkpoint_dir = os.path.dirname(checkpoint_fn)\n        chk_basename = os.path.basename(checkpoint_fn)\n        chk_basename, dot, ext = chk_basename.rpartition('.')\n        if chk_basename == '' or dot != '.' or ext == '':\n            raise ValueError(\n                f\"You specified `checkpoint_fn={checkpoint_fn!r}` which does not follow the \"\n                f\"pattern of 'filename.extension'. Please follow this pattern or fully specify \"\n                f\"`checkpoint_other_fn`.\"\n            )\n\n        if ext != 'json':\n            raise ValueError(\n                \"Please use a `.json` filename or fully \"\n                \"specify checkpoint_fn and checkpoint_other_fn\"\n            )\n        if checkpoint_dir == '':\n            checkpoint_other_fn = f'{chk_basename}.prev.json'\n        else:\n            checkpoint_other_fn = f'{checkpoint_dir}/{chk_basename}.prev.json'\n\n    if checkpoint_fn == checkpoint_other_fn:\n        raise ValueError(\n            f\"`checkpoint_fn` and `checkpoint_other_fn` were set to the same \"\n            f\"filename: {checkpoint_fn}. Please use two different filenames.\"\n        )\n\n    return checkpoint_fn, checkpoint_other_fn\n\n\[email protected](frozen=True)\nclass CheckpointFileOptions:\n    \"\"\"Options to configure \"checkpointing\" to save intermediate results.\n\n    Args:\n        checkpoint: If set to True, save cumulative raw results at the end\n            of each iteration of the sampling loop. Load in these results\n            with `cirq.read_json`.\n        checkpoint_fn: The filename for the checkpoint file. If `checkpoint`\n            is set to True and this is not specified, a file in a temporary\n            directory will be used.\n        checkpoint_other_fn: The filename for another checkpoint file, which\n            contains the previous checkpoint. This lets us avoid losing data if\n            a failure occurs during checkpoint writing. If `checkpoint`\n            is set to True and this is not specified, a file in a temporary\n            directory will be used. 
If `checkpoint` is set to True and\n            `checkpoint_fn` is specified but this argument is *not* specified,\n            \"{checkpoint_fn}.prev.json\" will be used.\n    \"\"\"\n\n    checkpoint: bool = False\n    checkpoint_fn: Optional[str] = None\n    checkpoint_other_fn: Optional[str] = None\n\n    def __post_init__(self):\n        fn, other_fn = _parse_checkpoint_options(\n            self.checkpoint, self.checkpoint_fn, self.checkpoint_other_fn\n        )\n        object.__setattr__(self, 'checkpoint_fn', fn)\n        object.__setattr__(self, 'checkpoint_other_fn', other_fn)\n\n    def maybe_to_json(self, obj: Any):\n        \"\"\"Call `cirq.to_json` with `obj` according to the configuration options in this class.\n\n        If `checkpoint=False`, nothing will happen. Otherwise, we will use `checkpoint_fn` and\n        `checkpoint_other_fn` as the destination JSON file as described in the class docstring.\n        \"\"\"\n        if not self.checkpoint:\n            return\n        assert self.checkpoint_fn is not None, 'mypy'\n        assert self.checkpoint_other_fn is not None, 'mypy'\n        if os.path.exists(self.checkpoint_fn):\n            os.replace(self.checkpoint_fn, self.checkpoint_other_fn)\n        protocols.to_json(obj, self.checkpoint_fn)\n\n\n# pylint: enable=missing-raises-doc\ndef _needs_init_layer(grouped_settings: Dict[InitObsSetting, List[InitObsSetting]]) -> bool:\n    \"\"\"Helper function to go through init_states and determine if any of them need an\n    initialization layer of single-qubit gates.\"\"\"\n    for max_setting in grouped_settings.keys():\n        if any(st is not value.KET_ZERO for _, st in max_setting.init_state):\n            return True\n    return False\n\n\n# TODO(#3388) Add documentation for Raises.\n# pylint: disable=missing-raises-doc\ndef measure_grouped_settings(\n    circuit: 'cirq.AbstractCircuit',\n    grouped_settings: Dict[InitObsSetting, List[InitObsSetting]],\n    sampler: 'cirq.Sampler',\n    stopping_criteria: StoppingCriteria,\n    *,\n    readout_symmetrization: bool = False,\n    circuit_sweep: 'cirq.Sweepable' = None,\n    readout_calibrations: Optional[BitstringAccumulator] = None,\n    checkpoint: CheckpointFileOptions = CheckpointFileOptions(),\n) -> List[BitstringAccumulator]:\n    \"\"\"Measure a suite of grouped InitObsSetting settings.\n\n    This is a low-level API for accessing the observable measurement\n    framework. See also `measure_observables` and `measure_observables_df`.\n\n    Args:\n        circuit: The circuit. This can contain parameters, in which case\n            you should also specify `circuit_sweep`.\n        grouped_settings: A series of setting groups expressed as a dictionary.\n            The key is the max-weight setting used for preparing single-qubit\n            basis-change rotations. The value is a list of settings\n            compatible with the maximal setting you desire to measure.\n            Automated routing algorithms like `group_settings_greedy` can\n            be used to construct this input.\n        sampler: A sampler.\n        stopping_criteria: A StoppingCriteria object that can report\n            whether enough samples have been sampled.\n        readout_symmetrization: If set to True, each `meas_spec` will be\n            split into two runs: one normal and one where a bit flip is\n            incorporated prior to measurement. In the latter case, the\n            measured bit will be flipped back classically and accumulated\n            together. This causes readout error to appear symmetric,\n            p(0|0) = p(1|1).\n        circuit_sweep: Additional parameter sweeps for parameters contained\n            in `circuit`. 
The total sweep is the product of the circuit sweep\n            with parameter settings for the single-qubit basis-change rotations.\n        readout_calibrations: The result of `calibrate_readout_error`.\n        checkpoint: Options to set up optional checkpointing of intermediate\n            data for each iteration of the sampling loop. See the documentation\n            for `CheckpointFileOptions` for more. Load in these results with\n            `cirq.read_json`.\n    \"\"\"\n    if readout_calibrations is not None and not readout_symmetrization:\n        raise ValueError(\"Readout calibration only works if `readout_symmetrization` is enabled.\")\n\n    qubits = sorted({q for ms in grouped_settings.keys() for q in ms.init_state.qubits})\n    qubit_to_index = {q: i for i, q in enumerate(qubits)}\n\n    needs_init_layer = _needs_init_layer(grouped_settings)\n    measurement_param_circuit = _with_parameterized_layers(circuit, qubits, needs_init_layer)\n\n    # meas_spec provides a key for accumulators.\n    # meas_specs_todo is a mutable list. We will pop things from it as various\n    # specs are measured to the satisfaction of the stopping criteria\n    accumulators = {}\n    meas_specs_todo = []\n    for max_setting, param_resolver in itertools.product(\n        grouped_settings.keys(), study.to_resolvers(circuit_sweep)\n    ):\n        circuit_params = param_resolver.param_dict\n        meas_spec = _MeasurementSpec(max_setting=max_setting, circuit_params=circuit_params)\n        accumulator = BitstringAccumulator(\n            meas_spec=meas_spec,\n            simul_settings=grouped_settings[max_setting],\n            qubit_to_index=qubit_to_index,\n            readout_calibration=readout_calibrations,\n        )\n        accumulators[meas_spec] = accumulator\n        meas_specs_todo += [meas_spec]\n\n    while True:\n        meas_specs_todo, repetitions = _check_meas_specs_still_todo(\n            meas_specs=meas_specs_todo,\n            accumulators=accumulators,\n            stopping_criteria=stopping_criteria,\n        )\n        if len(meas_specs_todo) == 0:\n            break\n\n        flippy_meas_specs, repetitions = _subdivide_meas_specs(\n            meas_specs=meas_specs_todo,\n            repetitions=repetitions,\n            qubits=qubits,\n            readout_symmetrization=readout_symmetrization,\n        )\n\n        resolved_params = [\n            flippy_ms.param_tuples(needs_init_layer=needs_init_layer)\n            for flippy_ms in flippy_meas_specs\n        ]\n        resolved_params = _to_sweep(resolved_params)\n\n        results = sampler.run_sweep(\n            program=measurement_param_circuit, params=resolved_params, repetitions=repetitions\n        )\n\n        assert len(results) == len(\n            flippy_meas_specs\n        ), 'Not as many results received as sweeps requested!'\n\n        for flippy_ms, result in zip(flippy_meas_specs, results):\n            accumulator = accumulators[flippy_ms.meas_spec]\n            bitstrings = np.logical_xor(flippy_ms.flips, result.measurements['z'])\n            accumulator.consume_results(bitstrings.astype(np.uint8, casting='safe'))\n\n        checkpoint.maybe_to_json(list(accumulators.values()))\n\n    return list(accumulators.values())\n\n\n# pylint: enable=missing-raises-doc\n\n\n_GROUPING_FUNCS: Dict[str, GROUPER_T] = {\n    'greedy': group_settings_greedy,\n}\n\n\ndef _parse_grouper(grouper: Union[str, GROUPER_T] = group_settings_greedy) -> GROUPER_T:\n    \"\"\"Logic for turning a named grouper into one of the built-in groupers in support of the\n    high-level `measure_observables` API.\"\"\"\n    if isinstance(grouper, str):\n        try:\n            grouper = _GROUPING_FUNCS[grouper.lower()]\n        except KeyError:\n            raise ValueError(f\"Unknown grouping function {grouper}\")\n    return grouper\n\n\ndef _get_all_qubits(\n    circuit: 'cirq.AbstractCircuit',\n    observables: Iterable['cirq.PauliString'],\n) -> List['cirq.Qid']:\n    \"\"\"Helper function for `measure_observables` to get all qubits from a 
circuit and a\n collection of observables.\"\"\"\n qubit_set = set()\n for obs in observables:\n qubit_set |= set(obs.qubits)\n qubit_set |= circuit.all_qubits()\n return sorted(qubit_set)\n\n\ndef measure_observables(\n circuit: 'cirq.AbstractCircuit',\n observables: Iterable['cirq.PauliString'],\n sampler: Union['cirq.Simulator', 'cirq.Sampler'],\n stopping_criteria: StoppingCriteria,\n *,\n readout_symmetrization: bool = False,\n circuit_sweep: Optional['cirq.Sweepable'] = None,\n grouper: Union[str, GROUPER_T] = group_settings_greedy,\n readout_calibrations: Optional[BitstringAccumulator] = None,\n checkpoint: CheckpointFileOptions = CheckpointFileOptions(),\n) -> List[ObservableMeasuredResult]:\n \"\"\"Measure a collection of PauliString observables for a state prepared by a Circuit.\n\n If you need more control over the process, please see `measure_grouped_settings` for a\n lower-level API. If you would like your results returned as a pandas DataFrame,\n please see `measure_observables_df`.\n\n Args:\n circuit: The circuit used to prepare the state to measure. This can contain parameters,\n in which case you should also specify `circuit_sweep`.\n observables: A collection of PauliString observables to measure. These will be grouped\n into simultaneously-measurable groups, see `grouper` argument.\n sampler: The sampler.\n stopping_criteria: A StoppingCriteria object to indicate how precisely to sample\n measurements for estimating observables.\n readout_symmetrization: If set to True, each run will be split into two: one normal and\n one where a bit flip is incorporated prior to measurement. In the latter case, the\n measured bit will be flipped back classically and accumulated together. This causes\n readout error to appear symmetric, p(0|0) = p(1|1).\n circuit_sweep: Additional parameter sweeps for parameters contained in `circuit`. The\n total sweep is the product of the circuit sweep with parameter settings for the\n single-qubit basis-change rotations.\n grouper: Either \"greedy\" or a function that groups lists of `InitObsSetting`. See the\n documentation for the `grouped_settings` argument of `measure_grouped_settings` for\n full details.\n readout_calibrations: The result of `calibrate_readout_error`.\n checkpoint: Options to set up optional checkpointing of intermediate data for each\n iteration of the sampling loop. See the documentation for `CheckpointFileOptions` for\n more. 
Load in these results with `cirq.read_json`.\n\n Returns:\n A list of ObservableMeasuredResult; one for each input PauliString.\n \"\"\"\n qubits = _get_all_qubits(circuit, observables)\n settings = list(observables_to_settings(observables, qubits))\n actual_grouper = _parse_grouper(grouper)\n grouped_settings = actual_grouper(settings)\n\n accumulators = measure_grouped_settings(\n circuit=circuit,\n grouped_settings=grouped_settings,\n sampler=sampler,\n stopping_criteria=stopping_criteria,\n circuit_sweep=circuit_sweep,\n readout_symmetrization=readout_symmetrization,\n readout_calibrations=readout_calibrations,\n checkpoint=checkpoint,\n )\n return flatten_grouped_results(accumulators)\n\n\ndef measure_observables_df(\n circuit: 'cirq.AbstractCircuit',\n observables: Iterable['cirq.PauliString'],\n sampler: Union['cirq.Simulator', 'cirq.Sampler'],\n stopping_criteria: StoppingCriteria,\n *,\n readout_symmetrization: bool = False,\n circuit_sweep: Optional['cirq.Sweepable'] = None,\n grouper: Union[str, GROUPER_T] = group_settings_greedy,\n readout_calibrations: Optional[BitstringAccumulator] = None,\n checkpoint: CheckpointFileOptions = CheckpointFileOptions(),\n):\n \"\"\"Measure observables and return resulting data as a Pandas dataframe.\n\n Please see `measure_observables` for argument documentation.\n \"\"\"\n results = measure_observables(\n circuit=circuit,\n observables=observables,\n sampler=sampler,\n stopping_criteria=stopping_criteria,\n readout_symmetrization=readout_symmetrization,\n circuit_sweep=circuit_sweep,\n grouper=grouper,\n readout_calibrations=readout_calibrations,\n checkpoint=checkpoint,\n )\n df = pd.DataFrame(res.as_dict() for res in results)\n return df\n",
"# pylint: disable=wrong-or-nonexistent-copyright-notice\nimport cirq\nimport numpy as np\nimport pytest\n\n\ndef test_matrix_mixture_from_mixture():\n q0 = cirq.LineQubit(0)\n dp = cirq.depolarize(0.1)\n mm = cirq.MixedUnitaryChannel.from_mixture(dp, key='dp')\n assert cirq.measurement_key_name(mm) == 'dp'\n\n circuit = cirq.Circuit(mm.on(q0))\n sim = cirq.Simulator(seed=0)\n\n results = sim.simulate(circuit)\n assert 'dp' in results.measurements\n # The depolarizing channel is composed of four unitaries.\n assert results.measurements['dp'] in range(4)\n\n\ndef test_matrix_mixture_equality():\n dp_pt1 = cirq.depolarize(0.1)\n dp_pt2 = cirq.depolarize(0.2)\n mm_a1 = cirq.MixedUnitaryChannel.from_mixture(dp_pt1, key='a')\n mm_a2 = cirq.MixedUnitaryChannel.from_mixture(dp_pt2, key='a')\n mm_b1 = cirq.MixedUnitaryChannel.from_mixture(dp_pt1, key='b')\n\n # Even if their effect is the same, MixedUnitaryChannels are not treated\n # as equal to other channels defined in Cirq.\n assert mm_a1 != dp_pt1\n assert mm_a1 != mm_a2\n assert mm_a1 != mm_b1\n assert mm_a2 != mm_b1\n\n mix = [\n (0.5, np.array([[1, 0], [0, 1]])),\n (0.5, np.array([[0, 1], [1, 0]])),\n ]\n half_flip = cirq.MixedUnitaryChannel(mix)\n mix_inv = list(reversed(mix))\n half_flip_inv = cirq.MixedUnitaryChannel(mix_inv)\n # Even though these have the same effect on the circuit, their measurement\n # behavior differs, so they are considered non-equal.\n assert half_flip != half_flip_inv\n\n\ndef test_matrix_mixture_remap_keys():\n dp = cirq.depolarize(0.1)\n mm = cirq.MixedUnitaryChannel.from_mixture(dp)\n with pytest.raises(TypeError):\n _ = cirq.measurement_key_name(mm)\n assert cirq.with_measurement_key_mapping(mm, {'a': 'b'}) is NotImplemented\n\n mm_x = cirq.MixedUnitaryChannel.from_mixture(dp, key='x')\n assert cirq.with_measurement_key_mapping(mm_x, {'a': 'b'}) is mm_x\n assert cirq.measurement_key_name(cirq.with_key_path(mm_x, ('path',))) == 'path:x'\n\n mm_a = cirq.MixedUnitaryChannel.from_mixture(dp, key='a')\n mm_b = cirq.MixedUnitaryChannel.from_mixture(dp, key='b')\n assert mm_a != mm_b\n assert cirq.with_measurement_key_mapping(mm_a, {'a': 'b'}) == mm_b\n\n\ndef test_matrix_mixture_from_unitaries():\n q0 = cirq.LineQubit(0)\n mix = [\n (0.5, np.array([[1, 0], [0, 1]])),\n (0.5, np.array([[0, 1], [1, 0]])),\n ]\n half_flip = cirq.MixedUnitaryChannel(mix, key='flip')\n assert cirq.measurement_key_name(half_flip) == 'flip'\n\n circuit = cirq.Circuit(half_flip.on(q0), cirq.measure(q0, key='m'))\n sim = cirq.Simulator(seed=0)\n\n results = sim.simulate(circuit)\n assert 'flip' in results.measurements\n assert results.measurements['flip'] == results.measurements['m']\n\n\ndef test_matrix_mixture_str():\n mix = [\n (0.5, np.array([[1, 0], [0, 1]])),\n (0.5, np.array([[0, 1], [1, 0]])),\n ]\n half_flip = cirq.MixedUnitaryChannel(mix)\n assert (\n str(half_flip)\n == \"\"\"MixedUnitaryChannel([(0.5, array([[1, 0],\n [0, 1]])), (0.5, array([[0, 1],\n [1, 0]]))])\"\"\"\n )\n half_flip_keyed = cirq.MixedUnitaryChannel(mix, key='flip')\n assert (\n str(half_flip_keyed)\n == \"\"\"MixedUnitaryChannel([(0.5, array([[1, 0],\n [0, 1]])), (0.5, array([[0, 1],\n [1, 0]]))], key=flip)\"\"\"\n )\n\n\ndef test_matrix_mixture_repr():\n mix = [\n (0.5, np.array([[1, 0], [0, 1]], dtype=np.complex64)),\n (0.5, np.array([[0, 1], [1, 0]], dtype=np.complex64)),\n ]\n half_flip = cirq.MixedUnitaryChannel(mix, key='flip')\n assert (\n repr(half_flip)\n == \"\"\"\\\ncirq.MixedUnitaryChannel(mixture=[\\\n(0.5, np.array([[(1+0j), 0j], [0j, (1+0j)]], 
dtype=np.complex64)), \\\n(0.5, np.array([[0j, (1+0j)], [(1+0j), 0j]], dtype=np.complex64))], \\\nkey='flip')\"\"\"\n )\n\n\ndef test_mix_no_unitaries_fails():\n with pytest.raises(ValueError, match='must have at least one unitary'):\n _ = cirq.MixedUnitaryChannel(mixture=[], key='m')\n\n\ndef test_mix_bad_prob_fails():\n mix = [(0.5, np.array([[1, 0], [0, 0]]))]\n\n with pytest.raises(ValueError, match='Unitary probabilities must sum to 1'):\n _ = cirq.MixedUnitaryChannel(mixture=mix, key='m')\n\n\ndef test_mix_mismatch_fails():\n op2 = np.zeros((4, 4))\n op2[1][1] = 1\n mix = [\n (0.5, np.array([[1, 0], [0, 0]])),\n (0.5, op2),\n ]\n\n with pytest.raises(ValueError, match='Inconsistent unitary shapes'):\n _ = cirq.MixedUnitaryChannel(mixture=mix, key='m')\n\n\ndef test_nonqubit_mixture_fails():\n mix = [\n (0.5, np.array([[1, 0, 0], [0, 1, 0]])),\n (0.5, np.array([[0, 1, 0], [1, 0, 0]])),\n ]\n\n with pytest.raises(ValueError, match='Input mixture'):\n _ = cirq.MixedUnitaryChannel(mixture=mix, key='m')\n\n\ndef test_validate():\n mix = [\n (0.5, np.array([[1, 0], [0, 0]])),\n (0.5, np.array([[0, 0], [0, 1]])),\n ]\n with pytest.raises(ValueError, match='non-unitary'):\n _ = cirq.MixedUnitaryChannel(mixture=mix, key='m', validate=True)\n"
] |
[
[
"numpy.sum",
"numpy.ones",
"numpy.zeros",
"numpy.logical_xor"
],
[
"numpy.array",
"numpy.zeros"
]
] |
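A minimal sketch distilled from `mixed_unitary_channel_test.py` above: a 50/50 identity/X channel whose unitary-index record agrees with the measured bit:

```python
import numpy as np
import cirq

mix = [
    (0.5, np.array([[1, 0], [0, 1]])),  # identity
    (0.5, np.array([[0, 1], [1, 0]])),  # bit flip (X)
]
half_flip = cirq.MixedUnitaryChannel(mix, key='flip')

q0 = cirq.LineQubit(0)
circuit = cirq.Circuit(half_flip.on(q0), cirq.measure(q0, key='m'))
result = cirq.Simulator(seed=0).simulate(circuit)

# 'flip' records which unitary was applied; it matches the measured bit.
print(result.measurements['flip'], result.measurements['m'])
```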
JiacongXu666/lalala
|
[
"be513ed1dd394f2c09ce43bd377f26868ad9aa74"
] |
[
"models/pidnet_large_uni128_pappm_dfm.py"
] |
[
"import math\nimport torch\nimport numpy as np \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom collections import OrderedDict\nimport time\nfrom . import model_utils\nimport logging\n\nBatchNorm2d = nn.BatchNorm2d\nbn_mom = 0.1\n \ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.no_relu = no_relu\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n if self.no_relu:\n return out\n else:\n return self.relu(out)\n\n\nclass segmenthead(nn.Module):\n\n def __init__(self, inplanes, interplanes, outplanes, scale_factor=None):\n super(segmenthead, self).__init__()\n self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)\n self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)\n self.bn2 = BatchNorm2d(interplanes, momentum=bn_mom)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)\n self.scale_factor = scale_factor\n\n def forward(self, x):\n \n x = self.conv1(self.relu(self.bn1(x)))\n out = self.conv2(self.relu(self.bn2(x)))\n\n if self.scale_factor is not None:\n height = x.shape[-2] * self.scale_factor\n width = x.shape[-1] * self.scale_factor\n out = F.interpolate(out,\n size=[height, width],\n mode='bilinear', align_corners=False)\n\n return out\n\nclass PIDNet_L(nn.Module):\n\n def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=False):\n super(PIDNet_L, self).__init__()\n\n highres_planes = planes * 2\n self.augment = augment\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),\n BatchNorm2d(planes, momentum=bn_mom),\n 
nn.ReLU(inplace=True),\n nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),\n BatchNorm2d(planes, momentum=bn_mom),\n nn.ReLU(inplace=True),\n )\n\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, planes, planes, layers[0])\n self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)\n\n self.compression3 = nn.Sequential(\n nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),\n BatchNorm2d(highres_planes, momentum=bn_mom),\n )\n\n self.compression4 = nn.Sequential(\n nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=False),\n BatchNorm2d(highres_planes, momentum=bn_mom),\n )\n \n self.diff3 = nn.Sequential(\n nn.Conv2d(planes * 4, planes*2, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(planes*2, momentum=bn_mom),\n )\n self.diff4 = nn.Sequential(\n nn.Conv2d(planes * 8, highres_planes, kernel_size=3, padding=1, bias=False),\n BatchNorm2d(highres_planes, momentum=bn_mom),\n )\n\n \n \n\n self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 3)\n self.layer3_d = self._make_single_layer(block, planes * 2, planes * 2)\n\n self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 3)\n self.layer4_d = self._make_single_layer(block, planes*2, planes*2)\n\n self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)\n self.layer5_d = self._make_layer(Bottleneck, planes*2, highres_planes, 1)\n\n self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 2, stride=2)\n\n self.spp = model_utils.PAPPM(planes * 16, spp_planes, planes * 4)\n\n self.dfm = model_utils.DFM(planes * 4, planes * 4)\n\n if self.augment:\n self.seghead_p = segmenthead(highres_planes, head_planes, num_classes)\n self.seghead_d = segmenthead(highres_planes, highres_planes//2, 1) \n\n self.final_layer = segmenthead(planes * 4, head_planes, num_classes)\n\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n if i == (blocks-1):\n layers.append(block(inplanes, planes, stride=1, no_relu=True))\n else:\n layers.append(block(inplanes, planes, stride=1, no_relu=False))\n\n return nn.Sequential(*layers)\n \n def _make_single_layer(self, block, inplanes, planes, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\n )\n\n layer = block(inplanes, planes, stride, downsample, no_relu=True)\n \n return layer\n\n def forward(self, x):\n\n width_output = x.shape[-1] // 8\n height_output = x.shape[-2] // 8\n\n x = self.conv1(x)\n x = self.layer1(x)\n x = 
self.relu(self.layer2(self.relu(x)))\n x_ = self.layer3_(x)\n x_d = self.layer3_d(x)\n \n x = self.relu(self.layer3(x))\n x_ = x_ + F.interpolate(\n self.compression3(x),\n size=[height_output, width_output],\n mode='bilinear', align_corners=False)\n x_d = x_d + F.interpolate(\n self.diff3(x),\n size=[height_output, width_output],\n mode='bilinear', align_corners=False)\n if self.augment:\n temp_p = x_\n \n x = self.relu(self.layer4(x))\n x_ = self.layer4_(self.relu(x_))\n x_d = self.layer4_d(self.relu(x_d))\n \n x_ = x_ + F.interpolate(\n self.compression4(x),\n size=[height_output, width_output],\n mode='bilinear', align_corners=False)\n x_d = x_d + F.interpolate(\n self.diff4(x),\n size=[height_output, width_output],\n mode='bilinear', align_corners=False)\n if self.augment:\n temp_d = x_d\n \n x_ = self.layer5_(self.relu(x_))\n x_d = self.layer5_d(self.relu(x_d))\n x = F.interpolate(\n self.spp(self.layer5(x)),\n size=[height_output, width_output],\n mode='bilinear', align_corners=False)\n\n x_ = self.final_layer(self.dfm(x_, x, x_d))\n\n if self.augment: \n x_extra_p = self.seghead_p(temp_p)\n x_extra_d = self.seghead_d(temp_d)\n return [x_extra_p, x_, x_extra_d]\n else:\n return x_ \n\ndef PIDNet_imagenet(cfg, pretrained=True):\n model = PIDNet_L(BasicBlock, [3, 3, 4, 4], num_classes=19, planes=64, spp_planes=96, head_planes=256, augment=True)\n if pretrained:\n pretrained_state = torch.load(cfg.MODEL.PRETRAINED, map_location='cpu')['state_dict'] \n model_dict = model.state_dict()\n pretrained_state = {k: v for k, v in pretrained_state.items() if (k in model_dict and v.shape == model_dict[k].shape)}\n model_dict.update(pretrained_state)\n msg = 'Loaded {} parameters!'.format(len(pretrained_state))\n logging.info('Attention!!!')\n logging.info(msg)\n logging.info('Over!!!')\n model.load_state_dict(model_dict, strict = False)\n return model\n\ndef get_seg_model(cfg, **kwargs):\n\n model = PIDNet_imagenet(cfg, pretrained=True)\n return model\n\nif __name__ == '__main__':\n \"\"\"\n device = torch.device('cuda')\n #torch.backends.cudnn.enabled = True\n #torch.backends.cudnn.benchmark = True\n \n model = PIDNet_L(BasicBlock, [3, 3, 4, 4], num_classes=19, planes=64, spp_planes=96, head_planes=256, augment=False)\n model.eval()\n model.to(device)\n iterations = None\n \n input = torch.randn(1, 3, 1024, 2048).cuda()\n with torch.no_grad():\n for _ in range(10):\n model(input)\n \n if iterations is None:\n elapsed_time = 0\n iterations = 100\n while elapsed_time < 1:\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n t_start = time.time()\n for _ in range(iterations):\n model(input)\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n elapsed_time = time.time() - t_start\n iterations *= 2\n FPS = iterations / elapsed_time\n iterations = int(FPS * 6)\n \n print('=========Speed Testing=========')\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n t_start = time.time()\n for _ in range(iterations):\n model(input)\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n elapsed_time = time.time() - t_start\n latency = elapsed_time / iterations * 1000\n torch.cuda.empty_cache()\n FPS = 1000 / latency\n print(FPS)\n \"\"\"\n \n\n model = PIDNet_L(BasicBlock, [3, 3, 4, 4], num_classes=19, planes=64, spp_planes=96, head_planes=256, augment=True)\n filename = 'D:/ImageNet/imagenet_test/checkpoints/imagenet/pidnet_l_uni128_dfm/model_best.pth.tar'\n pretrained_state = torch.load(filename, map_location='cpu')['state_dict'] \n model_dict = model.state_dict()\n pretrained_state = {k: v for k, v 
in pretrained_state.items() if (k in model_dict and v.shape == model_dict[k].shape)}\n model_dict.update(pretrained_state)\n msg = 'Loaded {} parameters!'.format(len(pretrained_state))\n logging.info('Attention!!!')\n logging.info(msg)\n logging.info('Over!!!')\n model.load_state_dict(model_dict, strict = False)\n \n\n\n"
] |
[
[
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.init.kaiming_normal_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load"
]
] |
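
The record above builds PIDNet_L almost entirely out of one residual unit. Below is a minimal standalone sketch of that BasicBlock, reproduced from the record so it runs without the repo (the PAPPM/DFM modules imported from `model_utils` are not reproduced); torch is assumed installed, and the `__main__` shape check is illustrative, not part of the record.

import torch
import torch.nn as nn

bn_mom = 0.1  # same batch-norm momentum as in the record

def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding; bias is omitted since BatchNorm follows
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):
        super().__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=bn_mom)
        self.downsample = downsample
        self.no_relu = no_relu

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # blocks that feed a fusion stage skip the final ReLU (no_relu=True)
        return out if self.no_relu else self.relu(out)

if __name__ == "__main__":
    # a stride-2 block needs a 1x1 projection on the skip path,
    # mirroring the downsample built in the record's _make_layer
    down = nn.Sequential(
        nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
        nn.BatchNorm2d(128, momentum=bn_mom),
    )
    block = BasicBlock(64, 128, stride=2, downsample=down)
    y = block(torch.randn(1, 64, 56, 56))
    print(y.shape)  # torch.Size([1, 128, 28, 28])

The same pattern explains the record's _make_layer: only the first block of each stage carries a stride and a projection; the last block is built with no_relu=True so its pre-activation output can be fused downstream.
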
Myyyr/transseg2d
|
[
"7664653dec0bf63d96ad3c76fc225d2d7f607e41"
] |
[
"mmseg/models/utils/swin_unet_v2_utils_gtv5.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nfrom einops import rearrange\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\nimport numpy as np\n\nfrom einops import repeat\n# MERGING STRAT : RANDOM PERMUTE\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\ndef window_partition(x, window_size):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n \"\"\"\n Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n\nclass WindowAttention(nn.Module):\n r\"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n It supports both of shifted and non-shifted window.\n Args:\n dim (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n \"\"\"\n\n def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., gt_num=1):\n\n super().__init__()\n self.dim = dim\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_dim = dim // num_heads\n\n self.gt_num = gt_num\n # self.global_token = None\n # if first:\n # self.global_token = torch.nn.Parameter(torch.randn(gt_num,self.dim))\n # self.global_token.requires_grad = True\n\n\n self.scale = qk_scale or head_dim ** -0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask=None, gt=None):\n \"\"\"\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n \"\"\"\n B_, N_, C = x.shape\n\n # add global tokens\n # if gt==None:\n # gt = self.global_token\n if len(gt.shape) != 3:\n gt = repeat(gt, \"g c -> b g c\", b=B_)# shape of (num_windows*B, G, C)\n x = torch.cat([gt, x], dim=1) # x of shape (num_windows*B, G+N_, C)\n B_, N, C = x.shape\n\n\n\n\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n \n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n \n\n\n attn[:,:,self.gt_num:,self.gt_num:] = attn[:,:,self.gt_num:,self.gt_num:] + relative_position_bias.unsqueeze(0)\n\n\n if mask is not None:\n nW = mask.shape[0]\n attn_ = attn.view(B_ // nW, nW, self.num_heads, N, N)[:,:,:,self.gt_num:,self.gt_num:] + mask.unsqueeze(1).unsqueeze(0)\n\n attn[:,:,self.gt_num:,self.gt_num:] = attn_.view(-1, self.num_heads, N_, N_)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn) \n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n gt = x[:,:-N_,:]\n x = x[:,-N_:,:] # x of size (B_, N_, C)\n\n return x, gt\n\n def extra_repr(self) -> str:\n return 
f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'\n\n def flops(self, N):\n # calculate flops for 1 window with token length of N\n flops = 0\n # qkv = self.qkv(x)\n flops += N * self.dim * 3 * self.dim\n # attn = (q @ k.transpose(-2, -1))\n flops += self.num_heads * N * (self.dim // self.num_heads) * N\n # x = (attn @ v)\n flops += self.num_heads * N * N * (self.dim // self.num_heads)\n # x = self.proj(x)\n flops += N * self.dim * self.dim\n return flops\n\n\nclass SwinTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm, gt_num=1, first=True):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, gt_num=gt_num)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n \n\n def forward(self, x, mask_matrix, gt):\n H, W = self.input_resolution\n B, L, C = x.shape\n # L = 128 * 256\n # print(L, H, W)\n assert L == H * W, \"input feature has wrong size\"\n \n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n\n pad_l = pad_t = 0\n pad_r = (self.window_size - W % self.window_size) % self.window_size\n pad_b = (self.window_size - H % self.window_size) % self.window_size\n x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n\n _, Hp, Wp, _ = x.shape\n\n\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n attn_mask = mask_matrix\n else:\n shifted_x = x\n attn_mask = None\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows, gt = self.attn(x_windows, mask=attn_mask, gt=gt) # nW*B, window_size*window_size, C | nW*B, nGt, C\n tmp, ngt, c = gt.shape\n nw = tmp//B\n gt = gt.view(B, nw, ngt, C)\n # gt = gt.mean(dim=1)\n gt = gt[:, torch.randperm(nw), :, :]\n gt = rearrange(gt, \"b n g c -> (b n) g c\")\n # gt = repeat(gt, \"b g c -> (b n) g c\",n=nw)\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C\n\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n\n if pad_r > 0 or pad_b > 0:\n x = x[:, :H, :W, :].contiguous()\n x = x.view(B, H * W, C)\n\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x, gt\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}\"\n\n def flops(self):\n flops = 0\n H, W = self.input_resolution\n # norm1\n flops += self.dim * H * W\n # W-MSA/SW-MSA\n nW = H * W / self.window_size / self.window_size\n flops += nW * self.attn.flops(self.window_size * self.window_size)\n # mlp\n flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio\n # norm2\n flops += self.dim * H * W\n return flops\n\n\nclass PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n self.norm = norm_layer(4 * dim)\n\n def forward(self, x, H, W):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n # H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n # assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) are not even.\"\n\n x = x.view(B, H, W, C)\n\n pad_input = (H % 2 == 1) or (W % 2 == 1)\n if pad_input:\n x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))\n\n x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C\n x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C\n x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\n x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\n x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C\n\n x = self.norm(x)\n x = self.reduction(x)\n\n return x, [W % 2, H % 2]\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim\n return flops\n\nclass PatchExpand(nn.Module):\n def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.expand = nn.Linear(dim, 2*dim, bias=False) if dim_scale==2 else nn.Identity()\n self.norm = norm_layer(dim // dim_scale)\n\n def forward(self, x, H, W, padwh):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n # H, W = self.input_resolution\n\n x = self.expand(x)\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n x = x.view(B, H, W, C)\n \n\n x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4)\n x = x[:,:(x.shape[1]-padwh[1]),:(x.shape[2]-padwh[0]),:]\n Wh, Ww = x.size(1), x.size(2)\n x = x.contiguous().view(B,-1,C//4)\n x= self.norm(x)\n\n return x, Wh, Ww\n\nclass FinalPatchExpand_X4(nn.Module):\n def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.dim_scale = dim_scale\n self.expand = nn.Linear(dim, 16*dim, bias=False)\n self.output_dim = dim \n self.norm = norm_layer(self.output_dim)\n\n def forward(self, x, H, W):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n # H, W = self.input_resolution\n x = self.expand(x)\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n x = x.view(B, H, W, C)\n x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))\n x = x.view(B,-1,self.output_dim)\n x= self.norm(x)\n\n return x\n\nclass BasicLayer(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. 
Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, gt_num=1):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n self.window_size = window_size\n self.shift_size = window_size // 2\n\n self.global_token = torch.nn.Parameter(torch.randn(gt_num,self.dim))\n self.global_token.requires_grad = True\n\n # build blocks\n self.blocks = nn.ModuleList([\n SwinTransformerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer, gt_num=gt_num)\n for i in range(depth)])\n\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)\n else:\n self.downsample = None\n\n def forward(self, x, H, W):\n Hp = int(np.ceil(H / self.window_size)) * self.window_size\n Wp = int(np.ceil(W / self.window_size)) * self.window_size\n img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n\n gt = self.global_token\n for blk in self.blocks:\n blk.input_resolution = (H, W)\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x, gt= blk(x, attn_mask, gt)\n # if self.downsample is not None:\n # x = self.downsample(x)\n # return x\n if self.downsample is not None:\n x_down, padwh = self.downsample(x, H, W)\n Wh, Ww = (H + 1) // 2, (W + 1) // 2\n return x_down, Wh, Ww, padwh\n else:\n return x, H, W, [0,0]\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n def flops(self):\n flops = 0\n for blk in self.blocks:\n flops += blk.flops()\n if self.downsample is not None:\n flops += self.downsample.flops()\n return flops\n\nclass BasicLayer_up(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, 
key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False, gt_num=1):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n self.window_size = window_size\n self.shift_size = window_size // 2\n\n self.global_token = torch.nn.Parameter(torch.randn(gt_num,self.dim))\n self.global_token.requires_grad = True\n\n # build blocks\n self.blocks = nn.ModuleList([\n SwinTransformerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer, gt_num=gt_num)\n for i in range(depth)])\n\n # patch merging layer\n if upsample is not None:\n self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)\n else:\n self.upsample = None\n\n def forward(self, x, H, W, padwh):\n Hp = int(np.ceil(H / self.window_size)) * self.window_size\n Wp = int(np.ceil(W / self.window_size)) * self.window_size\n img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n\n gt = self.global_token \n for blk in self.blocks:\n blk.input_resolution = (H, W)\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x, gt = blk(x, attn_mask, gt)\n # if self.upsample is not None:\n # x = self.upsample(x)\n if self.upsample is not None:\n x_down, Wh, Ww = self.upsample(x, H, W, padwh)\n # Wh, Ww = (H) * 2, (W) * 2\n return x_down, Wh, Ww\n else:\n return x, H, W\n # return x\n\nclass PatchEmbed(nn.Module):\n r\"\"\" Image to Patch Embedding\n Args:\n img_size (int): Image size. Default: 224.\n patch_size (int): Patch token size. Default: 4.\n in_chans (int): Number of input image channels. Default: 3.\n embed_dim (int): Number of linear projection output channels. Default: 96.\n norm_layer (nn.Module, optional): Normalization layer. 
Default: None\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]\n self.img_size = img_size\n self.patch_size = patch_size\n self.patches_resolution = patches_resolution\n self.num_patches = patches_resolution[0] * patches_resolution[1]\n\n self.in_chans = in_chans\n self.embed_dim = embed_dim\n\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n if norm_layer is not None:\n self.norm = norm_layer(embed_dim)\n else:\n self.norm = None\n\n def forward(self, x):\n # B, C, H, W = x.shape\n # FIXME look at relaxing size constraints\n # assert H == self.img_size[0] and W == self.img_size[1], \\\n # f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n # padding\n _, _, H, W = x.size()\n if W % self.patch_size[1] != 0:\n x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))\n if H % self.patch_size[0] != 0:\n x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))\n\n # x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C\n # if self.norm is not None:\n # x = self.norm(x)\n\n x = self.proj(x) # B C Wh Ww\n if self.norm is not None:\n Wh, Ww = x.size(2), x.size(3)\n x = x.flatten(2).transpose(1, 2)\n x = self.norm(x)\n x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)\n\n return x\n\n def flops(self):\n Ho, Wo = self.patches_resolution\n flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])\n if self.norm is not None:\n flops += Ho * Wo * self.embed_dim\n return flops\n\nclass SwinTransformerSys(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n \"\"\"\n\n def __init__(self, pretrain_img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, final_upsample=\"expand_first\", gt_num=1, **kwargs):\n super().__init__()\n\n print(\"SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}\".format(depths,\n depths_decoder,drop_path_rate,num_classes))\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.num_features_up = int(embed_dim * 2)\n self.mlp_ratio = mlp_ratio\n self.final_upsample = final_upsample\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=pretrain_img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build encoder and bottleneck layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint, gt_num=gt_num)\n self.layers.append(layer)\n \n # build decoder layers\n self.layers_up = nn.ModuleList()\n self.concat_back_dim = nn.ModuleList()\n for i_layer in range(self.num_layers):\n concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)),\n int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity()\n if i_layer ==0 :\n layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),\n patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)\n else:\n layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),\n input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),\n patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),\n depth=depths[(self.num_layers-1-i_layer)],\n num_heads=num_heads[(self.num_layers-1-i_layer)],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n 
drop_path=dpr[sum(depths[:(self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],\n norm_layer=norm_layer,\n upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint, gt_num=gt_num)\n self.layers_up.append(layer_up)\n self.concat_back_dim.append(concat_linear)\n\n self.norm = norm_layer(self.num_features)\n self.norm_up= norm_layer(self.embed_dim)\n\n if self.final_upsample == \"expand_first\":\n print(\"---final upsample expand_first---\")\n self.up = FinalPatchExpand_X4(input_resolution=(pretrain_img_size//patch_size,pretrain_img_size//patch_size),dim_scale=4,dim=embed_dim)\n self.output = nn.Conv2d(in_channels=embed_dim,out_channels=self.num_classes,kernel_size=1,bias=False)\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n #Encoder and Bottleneck\n def forward_features(self, x):\n x = self.patch_embed(x)\n Wh, Ww = x.size(2), x.size(3)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x.flatten(2).transpose(1, 2))\n x_downsample = []\n padswh = []\n\n for layer in self.layers:\n # x_downsample.append(x)\n x_downsample.append(x)\n # x = layer(x)\n x, Wh, Ww, padwh = layer(x, Wh, Ww)\n padswh.append(padwh)\n\n x = self.norm(x) # B L C\n \n return x, x_downsample, Wh, Ww, padswh\n\n #Dencoder and Skip connection\n def forward_up_features(self, x, x_downsample, Wh, Ww, padswh):\n # exit(0)\n # Wh, Ww = x.size(2), x.size(3)\n for inx, layer_up in enumerate(self.layers_up):\n if len(self.layers_up)-(inx+2) >= 0:\n padwh = padswh[-(inx+2)]\n else: padwh = [0,0]\n if inx == 0:\n x, Wh, Ww = layer_up(x, Wh, Ww, padwh)\n else:\n x = torch.cat([x,x_downsample[3-inx]],-1)\n x = self.concat_back_dim[inx](x)\n # x = layer_up(x)\n x, Wh, Ww = layer_up(x, Wh, Ww, padwh)\n\n x = self.norm_up(x) # B L C\n \n return x, Wh, Ww\n\n def up_x4(self, x, H, W):\n # H, W = self.patches_resolution\n B, L, C = x.shape\n assert L == H*W, \"input features has wrong size\"\n\n if self.final_upsample==\"expand_first\":\n x = self.up(x, H, W)\n x = x.view(B,4*H,4*W,-1)\n x = x.permute(0,3,1,2) #B,C,H,W\n x = self.output(x)\n \n return x\n\n def forward(self, x):\n # x, x_downsample = self.forward_features(x)\n x, x_downsample, Wh, Ww, padswh = self.forward_features(x)\n x, Wh, Ww = self.forward_up_features(x,x_downsample, Wh, Ww, padswh)\n x = self.up_x4(x, Wh, Ww)\n\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops\n\n\n\n\n"
] |
[
[
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Identity",
"torch.roll",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.ModuleList",
"numpy.ceil",
"torch.nn.init.constant_",
"torch.randperm",
"torch.nn.Conv2d",
"torch.meshgrid",
"torch.utils.checkpoint.checkpoint",
"torch.flatten",
"torch.nn.functional.pad",
"torch.randn"
]
] |
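
Everything in the Swin record hinges on window_partition and window_reverse being exact inverses when H and W are multiples of the window size. A minimal round-trip check, with both functions copied from the record so it runs standalone (torch assumed installed; the sizes in `__main__` are arbitrary test values):

import torch

def window_partition(x, window_size):
    # (B, H, W, C) -> (num_windows*B, window_size, window_size, C)
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

def window_reverse(windows, window_size, H, W):
    # (num_windows*B, window_size, window_size, C) -> (B, H, W, C)
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

if __name__ == "__main__":
    B, H, W, C, ws = 2, 14, 14, 96, 7
    x = torch.randn(B, H, W, C)
    windows = window_partition(x, ws)  # -> (8, 7, 7, 96): 2x2 windows per image
    assert windows.shape == (B * (H // ws) * (W // ws), ws, ws, C)
    # pure permute/view, so the reverse reconstructs x exactly
    assert torch.equal(window_reverse(windows, ws, H, W), x)
    print("round-trip OK:", windows.shape)

This is why SwinTransformerBlock pads the feature map to multiples of window_size before partitioning and crops the padding back off after window_reverse.
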
tw991/pytorch-lightning
|
[
"a6de1b8d75c67cdf18e3cc0a24a1f471d4069613"
] |
[
"pytorch_lightning/trainer/trainer.py"
] |
[
"import inspect\nimport os\nfrom argparse import ArgumentParser\nfrom typing import Union, Optional, List, Dict, Tuple, Iterable, Any\n\nimport torch\nimport torch.distributed as torch_distrib\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback, ProgressBarBase\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler, BaseProfiler\nfrom pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin\nfrom pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin\nfrom pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin\nfrom pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin\nfrom pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9\nfrom pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin\nfrom pytorch_lightning.trainer.distrib_parts import (\n TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device, pick_multiple_gpus)\nfrom pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin\nfrom pytorch_lightning.trainer.logging import TrainerLoggingMixin\nfrom pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin\nfrom pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.trainer.training_io import TrainerIOMixin\nfrom pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin\nfrom pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin\nfrom pytorch_lightning.trainer.lr_finder import TrainerLRFinderMixin\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities import parsing\n\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\ntry:\n import horovod.torch as hvd\nexcept ImportError:\n HOROVOD_AVAILABLE = False\nelse:\n HOROVOD_AVAILABLE = True\n\n\nclass Trainer(\n TrainerIOMixin,\n TrainerOptimizersMixin,\n TrainerAMPMixin,\n TrainerDPMixin,\n TrainerDDPMixin,\n TrainerLoggingMixin,\n TrainerModelHooksMixin,\n TrainerTrainingTricksMixin,\n TrainerDataLoadingMixin,\n TrainerEvaluationLoopMixin,\n TrainerTrainLoopMixin,\n TrainerCallbackConfigMixin,\n TrainerCallbackHookMixin,\n TrainerLRFinderMixin,\n TrainerDeprecatedAPITillVer0_8,\n TrainerDeprecatedAPITillVer0_9,\n):\n DEPRECATED_IN_0_8 = (\n 'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs',\n 'add_row_log_interval', 'nb_sanity_val_steps', 'tng_tqdm_dic',\n )\n DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar', 'training_tqdm_dict')\n\n def __init__(\n self,\n logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,\n checkpoint_callback: Union[ModelCheckpoint, bool] = True,\n early_stop_callback: Optional[Union[EarlyStopping, bool]] = False,\n callbacks: Optional[List[Callback]] = None,\n default_root_dir: Optional[str] = None,\n 
gradient_clip_val: float = 0,\n process_position: int = 0,\n num_nodes: int = 1,\n num_processes: int = 1,\n gpus: Optional[Union[List[int], str, int]] = None,\n auto_select_gpus: bool = False,\n num_tpu_cores: Optional[int] = None,\n log_gpu_memory: Optional[str] = None,\n progress_bar_refresh_rate: int = 1,\n overfit_pct: float = 0.0,\n track_grad_norm: int = -1,\n check_val_every_n_epoch: int = 1,\n fast_dev_run: bool = False,\n accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,\n max_epochs: int = 1000,\n min_epochs: int = 1,\n max_steps: Optional[int] = None,\n min_steps: Optional[int] = None,\n train_percent_check: float = 1.0,\n val_percent_check: float = 1.0,\n test_percent_check: float = 1.0,\n val_check_interval: float = 1.0,\n log_save_interval: int = 100,\n row_log_interval: int = 10,\n add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0\n distributed_backend: Optional[str] = None,\n precision: int = 32,\n print_nan_grads: bool = False, # backward compatible, todo: remove in v0.9.0\n weights_summary: Optional[str] = 'full',\n weights_save_path: Optional[str] = None,\n num_sanity_val_steps: int = 5,\n truncated_bptt_steps: Optional[int] = None,\n resume_from_checkpoint: Optional[str] = None,\n profiler: Optional[BaseProfiler] = None,\n benchmark: bool = False,\n reload_dataloaders_every_epoch: bool = False,\n auto_lr_find: Union[bool, str] = False,\n replace_sampler_ddp: bool = True,\n progress_bar_callback: Optional[Union[ProgressBarBase, bool]] = True,\n amp_level: str = 'O1', # backward compatible, todo: remove in v0.8.0\n default_save_path=None, # backward compatible, todo: remove in v0.8.0\n gradient_clip=None, # backward compatible, todo: remove in v0.8.0\n nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0\n max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0\n min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0\n use_amp=None, # backward compatible, todo: remove in v0.9.0\n show_progress_bar=None, # backward compatible, todo: remove in v0.9.0\n nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0\n terminate_on_nan: bool = False,\n **kwargs\n ):\n r\"\"\"\n\n Customize every aspect of training via flags\n\n Args:\n logger: Logger (or iterable collection of loggers) for experiment tracking.\n\n checkpoint_callback: Callback for checkpointing.\n\n early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):\n\n callbacks: Add a list of callbacks.\n\n default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed\n\n default_save_path:\n .. warning:: .. deprecated:: 0.7.3\n\n Use `default_root_dir` instead. Will remove 0.9.0.\n\n gradient_clip_val: 0 means don't clip.\n\n gradient_clip:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `gradient_clip_val` instead. Will remove 0.9.0.\n\n process_position: orders the progress bar when running multiple models on same machine.\n\n num_nodes: number of GPU nodes for distributed training.\n\n nb_gpu_nodes:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `num_nodes` instead. Will remove 0.9.0.\n\n gpus: Which GPUs to train on.\n\n auto_select_gpus:\n\n If enabled and `gpus` is an integer, pick available\n gpus automatically. This is especially useful when\n GPUs are configured to be in \"exclusive mode\", such\n that only one process at a time can access them.\n\n num_tpu_cores: How many TPU cores to train on (1 or 8).\n\n log_gpu_memory: None, 'min_max', 'all'. 
Might slow performance\n\n show_progress_bar:\n .. warning:: .. deprecated:: 0.7.2\n\n Set `progress_bar_refresh_rate` to postive integer to enable. Will remove 0.9.0.\n\n progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.\n Ignored when a custom callback is passed to :paramref:`~Trainer.callbacks`.\n\n overfit_pct: How much of training-, validation-, and test dataset to check.\n\n track_grad_norm: -1 no tracking. Otherwise tracks that norm\n\n check_val_every_n_epoch: Check val every n train epochs.\n\n fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).\n\n accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.\n\n max_epochs: Stop training once this number of epochs is reached.\n\n max_nb_epochs:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `max_epochs` instead. Will remove 0.9.0.\n\n min_epochs: Force training for at least these many epochs\n\n min_nb_epochs:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `min_epochs` instead. Will remove 0.9.0.\n\n max_steps: Stop training after this number of steps. Disabled by default (None).\n\n min_steps: Force training for at least these number of steps. Disabled by default (None).\n\n train_percent_check: How much of training dataset to check.\n\n val_percent_check: How much of validation dataset to check.\n\n test_percent_check: How much of test dataset to check.\n\n val_check_interval: How often within one training epoch to check the validation set\n\n log_save_interval: Writes logs to disk this often\n\n row_log_interval: How often to add logging rows (does not write to disk)\n\n add_row_log_interval:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `row_log_interval` instead. Will remove 0.9.0.\n\n distributed_backend: The distributed backend to use.\n\n use_amp:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `precision` instead. Will remove 0.9.0.\n\n precision: Full precision (32), half precision (16).\n\n print_nan_grads:\n .. warning:: .. deprecated:: 0.7.2\n\n Has no effect. When detected, NaN grads will be printed automatically.\n Will remove 0.9.0.\n\n weights_summary: Prints a summary of the weights when training begins.\n\n weights_save_path: Where to save weights if specified. Will override default_root_dir\n for checkpoints only. Use this if for whatever reason you need the checkpoints\n stored in a different place than the logs written in `default_root_dir`.\n\n amp_level: The optimization level to use (O1, O2, etc...).\n\n num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine.\n\n nb_sanity_val_steps:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `num_sanity_val_steps` instead. Will remove 0.8.0.\n\n truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of\n\n resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.\n\n profiler: To profile individual steps during training and assist in\n\n reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch\n\n auto_lr_find: If set to True, will `initially` run a learning rate finder,\n trying to optimize initial learning for faster convergence. 
Sets learning\n rate in self.hparams.lr | self.hparams.learning_rate in the lightning module.\n To use a different key, set a string instead of True with the key name.\n\n replace_sampler_ddp: Explicitly enables or disables sampler replacement.\n If not specified this will toggled automatically ddp is used\n\n benchmark: If true enables cudnn.benchmark.\n\n terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the\n end of each training batch, if any of the parameters or the loss are NaN or +/-inf.\n \"\"\"\n\n # Init callbacks\n self.callbacks = callbacks or []\n self.on_init_start()\n\n # benchmarking\n self.benchmark = benchmark\n torch.backends.cudnn.benchmark = self.benchmark\n\n # Transfer params\n self.num_nodes = num_nodes\n # Backward compatibility, TODO: remove in v0.8.0\n if nb_gpu_nodes is not None:\n rank_zero_warn(\"Argument `nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.num_gpu_nodes = nb_gpu_nodes\n self.log_gpu_memory = log_gpu_memory\n\n self.gradient_clip_val = gradient_clip_val\n # Backward compatibility, TODO: remove in v0.8.0\n if gradient_clip is not None:\n rank_zero_warn(\"Argument `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.gradient_clip = gradient_clip\n\n self.check_val_every_n_epoch = check_val_every_n_epoch\n self.track_grad_norm = track_grad_norm\n self.on_gpu = True if (gpus and torch.cuda.is_available()) else False\n\n # tpu config\n self.on_tpu = num_tpu_cores is not None\n self.num_tpu_cores = num_tpu_cores\n assert num_tpu_cores in [1, 8, None], 'num_tpu_cores can only be 1 or 8'\n\n if num_processes != 1 and distributed_backend != \"ddp_cpu\":\n rank_zero_warn(\"num_processes is only used for distributed_backend=\\\"ddp_cpu\\\". 
Ignoring it.\")\n self.num_processes = num_processes\n\n self.process_position = process_position\n self.weights_summary = weights_summary\n\n self.max_epochs = max_epochs\n # Backward compatibility, TODO: remove in v0.8.0\n if max_nb_epochs is not None:\n rank_zero_warn(\"Argument `max_nb_epochs` has renamed to `max_epochs` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.max_nb_epochs = max_nb_epochs\n\n self.min_epochs = min_epochs\n # Backward compatibility, TODO: remove in v0.8.0\n if min_nb_epochs is not None:\n rank_zero_warn(\"Argument `min_nb_epochs` has renamed to `min_epochs` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.min_nb_epochs = min_nb_epochs\n\n self.max_steps = max_steps\n self.min_steps = min_steps\n\n self.num_sanity_val_steps = num_sanity_val_steps\n # Backward compatibility, TODO: remove in v0.8.0\n if nb_sanity_val_steps is not None:\n rank_zero_warn(\"Argument `nb_sanity_val_steps` has renamed to \"\n \"`num_sanity_val_steps` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.nb_sanity_val_steps = nb_sanity_val_steps\n\n # Backward compatibility, TODO: remove in v0.9.0\n if print_nan_grads:\n rank_zero_warn(\"Argument `print_nan_grads` has no effect and will be removed in v0.9.0.\"\n \" NaN grads will be printed automatically when detected.\", DeprecationWarning)\n\n self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch\n\n self.auto_lr_find = auto_lr_find\n self.replace_sampler_ddp = replace_sampler_ddp\n\n self.truncated_bptt_steps = truncated_bptt_steps\n self.resume_from_checkpoint = resume_from_checkpoint\n self.terminate_on_nan = terminate_on_nan\n self.shown_warnings = set()\n\n self.fast_dev_run = fast_dev_run\n if self.fast_dev_run:\n self.num_sanity_val_steps = 0\n self.max_epochs = 1\n log.info('Running in fast_dev_run mode: will run a full train,'\n ' val and test loop using a single batch')\n\n # set default save path if user didn't provide one\n self.default_root_dir = default_root_dir\n\n # Backward compatibility, TODO: remove in v0.8.0\n if default_save_path is not None:\n self.default_root_dir = default_save_path\n\n if self.default_root_dir is None:\n self.default_root_dir = os.getcwd()\n\n # training bookeeping\n self.total_batch_idx = 0\n self.running_loss = TensorRunningAccum(window_length=20)\n self.batch_idx = 0\n self.progress_bar_metrics = {}\n self.callback_metrics = {}\n self.num_val_batches = 0\n self.num_training_batches = 0\n self.num_test_batches = 0\n self.train_dataloader = None\n self.test_dataloaders = None\n self.val_dataloaders = None\n\n # training state\n self.model = None\n self.testing = False\n self.disable_validation = False\n self.lr_schedulers = []\n self.optimizers = None\n self.optimizer_frequencies = []\n self.global_step = 0\n self.current_epoch = 0\n self.interrupted = False\n\n # configure logger\n self.configure_logger(logger)\n\n # configure profiler\n if profiler is True:\n profiler = SimpleProfiler()\n self.profiler = profiler or PassThroughProfiler()\n\n # configure early stop callback\n # creates a default one if none passed in\n self.configure_early_stopping(early_stop_callback)\n\n # configure checkpoint callback\n self.checkpoint_callback = checkpoint_callback\n self.weights_save_path = weights_save_path\n\n # accumulated grads\n self.accumulate_grad_batches = accumulate_grad_batches\n self.configure_accumulated_gradients(accumulate_grad_batches)\n\n # for gpus 
allow int, string and gpu list\n if auto_select_gpus and isinstance(gpus, int):\n self.gpus = pick_multiple_gpus(gpus)\n else:\n self.gpus = gpus\n\n self.data_parallel_device_ids = parse_gpu_ids(self.gpus)\n self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)\n self.root_device = torch.device(\"cpu\")\n\n # tpu state flags\n self.use_tpu = False\n self.tpu_local_core_rank = None\n self.tpu_global_core_rank = None\n\n # distributed backend choice\n self.distributed_backend = distributed_backend\n self.set_distributed_mode(distributed_backend)\n\n # override dist backend when using tpus\n if self.on_tpu:\n self.init_tpu()\n self.current_tpu_idx = None\n\n # init flags for SLURM+ddp to work\n self.proc_rank = 0\n self.world_size = 1\n self.node_rank = 0\n self.configure_slurm_ddp(self.num_nodes)\n\n # nvidia setup\n self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)\n\n # backward compatibility\n if show_progress_bar is not None:\n self.show_progress_bar = show_progress_bar\n\n self.progress_bar_refresh_rate = progress_bar_refresh_rate\n self.progress_bar_callback = None\n self.configure_progress_bar()\n\n # logging\n self.log_save_interval = log_save_interval\n self.val_check_interval = val_check_interval\n\n # backward compatibility\n if add_row_log_interval is not None:\n rank_zero_warn(\"`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n if not row_log_interval: # in case you did not set the proper value\n row_log_interval = add_row_log_interval\n self.row_log_interval = row_log_interval\n\n # how much of the data to use\n self.overfit_pct = overfit_pct\n self.determine_data_use_amount(train_percent_check, val_percent_check,\n test_percent_check, overfit_pct)\n\n # AMP init\n # These are the only lines needed after v0.8.0\n # we wrap the user's forward with autocast and give it back at the end of fit\n self.autocast_original_forward = None\n self.use_native_amp = hasattr(torch.cuda, \"amp\") and hasattr(torch.cuda.amp, \"autocast\")\n self.precision = precision\n if self.use_native_amp and self.precision == 16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n # TODO: remove for v0.8.0\n self.amp_level = amp_level\n self.init_amp(use_amp)\n\n # Callback system\n self.on_init_end()\n\n @property\n def slurm_job_id(self) -> int:\n try:\n job_id = os.environ['SLURM_JOB_ID']\n job_id = int(job_id)\n\n # in interactive mode, don't make logs use the same job id\n in_slurm_interactive_mode = os.environ['SLURM_JOB_NAME'] == 'bash'\n if in_slurm_interactive_mode:\n job_id = None\n\n except Exception:\n job_id = None\n return job_id\n\n @classmethod\n def default_attributes(cls):\n init_signature = inspect.signature(Trainer)\n\n args = {}\n for param_name in init_signature.parameters:\n value = init_signature.parameters[param_name].default\n args[param_name] = value\n\n return args\n\n @classmethod\n def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n r\"\"\"Scans the Trainer signature and returns argument names, types and default values.\n\n Returns:\n List with tuples of 3 values:\n (argument name, set with argument types, argument default value).\n\n Examples:\n >>> args = Trainer.get_init_arguments_and_types()\n >>> import pprint\n >>> pprint.pprint(sorted(args)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [('accumulate_grad_batches',\n (<class 'int'>, typing.Dict[int, int], typing.List[list]),\n 1),\n ...\n ('callbacks',\n 
(typing.List[pytorch_lightning.callbacks.base.Callback],\n <class 'NoneType'>),\n None),\n ('check_val_every_n_epoch', (<class 'int'>,), 1),\n ...\n ('max_epochs', (<class 'int'>,), 1000),\n ...\n ('precision', (<class 'int'>,), 32),\n ('print_nan_grads', (<class 'bool'>,), False),\n ('process_position', (<class 'int'>,), 0),\n ('profiler',\n (<class 'pytorch_lightning.profiler.profilers.BaseProfiler'>,\n <class 'NoneType'>),\n None),\n ...\n \"\"\"\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default\n\n @classmethod\n def get_deprecated_arg_names(cls) -> List:\n \"\"\"Returns a list with deprecated Trainer arguments.\"\"\"\n depr_arg_names = []\n for name, val in cls.__dict__.items():\n if name.startswith('DEPRECATED') and isinstance(val, (tuple, list)):\n depr_arg_names.extend(val)\n return depr_arg_names\n\n @classmethod\n def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:\n r\"\"\"Extends existing argparse by default `Trainer` attributes.\n\n Args:\n parent_parser:\n The custom cli arguments parser, which will be extended by\n the Trainer default arguments.\n\n Only arguments of the allowed types (str, float, int, bool) will\n extend the `parent_parser`.\n \"\"\"\n parser = ArgumentParser(parents=[parent_parser], add_help=False, )\n\n blacklist = ['kwargs']\n depr_arg_names = cls.get_deprecated_arg_names() + blacklist\n\n allowed_types = (str, float, int, bool)\n\n # TODO: get \"help\" from docstring :)\n for arg, arg_types, arg_default in (at for at in cls.get_init_arguments_and_types()\n if at[0] not in depr_arg_names):\n\n for allowed_type in (at for at in allowed_types if at in arg_types):\n if allowed_type is bool:\n def allowed_type(x):\n return bool(parsing.strtobool(x))\n\n if arg == 'gpus':\n allowed_type = Trainer.allowed_type\n arg_default = Trainer.arg_default\n\n parser.add_argument(\n f'--{arg}',\n default=arg_default,\n type=allowed_type,\n dest=arg,\n help='autogenerated by pl.Trainer'\n )\n break\n\n return parser\n\n def allowed_type(x):\n if ',' in x:\n return str(x)\n else:\n return int(x)\n\n def arg_default(x):\n if ',' in x:\n return str(x)\n else:\n return int(x)\n\n @classmethod\n def from_argparse_args(cls, args, **kwargs):\n\n params = vars(args)\n params.update(**kwargs)\n\n return cls(**params)\n\n @property\n def num_gpus(self) -> int:\n gpus = self.data_parallel_device_ids\n if gpus is None:\n return 0\n return len(gpus)\n\n @property\n def data_parallel(self) -> bool:\n return self.use_dp or self.use_ddp or self.use_ddp2\n\n @property\n def progress_bar_dict(self) -> dict:\n \"\"\" Read-only for progress bar metrics. 
\"\"\"\n ref_model = self.model if not self.data_parallel else self.model.module\n return dict(**ref_model.get_progress_bar_dict(), **self.progress_bar_metrics)\n\n # -----------------------------\n # MODEL TRAINING\n # -----------------------------\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None\n ):\n r\"\"\"\n Runs the full optimization routine.\n\n Args:\n model: Model to fit.\n\n train_dataloader: A Pytorch\n DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single\n Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n Example::\n\n # Option 1,\n # Define the train_dataloader() and val_dataloader() fxs\n # in the lightningModule\n # RECOMMENDED FOR MOST RESEARCH AND APPLICATIONS TO MAINTAIN READABILITY\n trainer = Trainer()\n model = LightningModule()\n trainer.fit(model)\n\n # Option 2\n # in production cases we might want to pass different datasets to the same model\n # Recommended for PRODUCTION SYSTEMS\n train, val = DataLoader(...), DataLoader(...)\n trainer = Trainer()\n model = LightningModule()\n trainer.fit(model, train_dataloader=train, val_dataloader=val)\n\n # Option 1 & 2 can be mixed, for example the training set can be\n # defined as part of the model, and validation can then be feed to .fit()\n\n \"\"\"\n # bind logger and other properties\n model.logger = self.logger\n self.copy_trainer_model_properties(model)\n\n # clean hparams\n if hasattr(model, 'hparams'):\n parsing.clean_namespace(model.hparams)\n\n # set up the passed in dataloaders (if needed)\n self.__attach_dataloaders(model, train_dataloader, val_dataloaders)\n\n # check that model is configured correctly\n self.check_model_configuration(model)\n\n # download the data and do whatever transforms we need\n # do before any spawn calls so that the model can assign properties\n # only on proc 0 because no spawn has happened yet\n model.prepare_data()\n\n # Run learning rate finder:\n if self.auto_lr_find:\n self._run_lr_finder_internally(model)\n\n # route to appropriate start method\n # when using multi-node or DDP within a node start each module in a separate process\n if self.use_ddp2:\n task = int(os.environ['SLURM_LOCALID'])\n self.ddp_train(task, model)\n\n elif self.use_ddp:\n if self.is_slurm_managing_tasks:\n task = int(os.environ['SLURM_LOCALID'])\n self.ddp_train(task, model)\n else:\n self.__set_random_port()\n # track for predict\n self.model = model\n # train\n mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,))\n # load weights if not interrupted\n if os.getenv('COLAB_GPU') or os.getenv('KAGGLE_URL_BASE'):\n self.load_spawn_weights(model)\n self.model = model\n\n # 1 gpu or dp option triggers training using DP module\n # easier to avoid NCCL issues\n elif self.use_dp:\n self.dp_train(model)\n\n elif self.use_horovod:\n self.horovod_train(model)\n\n elif self.single_gpu:\n self.single_gpu_train(model)\n\n elif self.use_tpu: # pragma: no-cover\n log.info(f'training on {self.num_tpu_cores} TPU cores')\n\n # COLAB_GPU is an env var available by default in Colab environments.\n start_method = 'fork' if os.getenv('COLAB_GPU') or os.getenv('KAGGLE_URL_BASE') else 'spawn'\n\n # track for predict\n self.model = model\n\n # train\n xmp.spawn(self.tpu_train, args=(model,), 
nprocs=self.num_tpu_cores, start_method=start_method)\n\n # load weights if not interrupted\n self.load_spawn_weights(model)\n self.model = model\n\n # ON CPU\n else:\n # run through amp wrapper\n if self.use_amp:\n raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)\n\n self.run_pretrain_routine(model)\n\n # return 1 when finished\n # used for testing or when we need to know that training succeeded\n return 1\n\n def __set_random_port(self):\n \"\"\"\n When running DDP NOT managed by SLURM, the ports might collide\n :return:\n \"\"\"\n try:\n default_port = os.environ['MASTER_PORT']\n except Exception:\n import random\n default_port = random.randint(10000, 19000)\n os.environ['MASTER_PORT'] = str(default_port)\n\n def __attach_dataloaders(self, model, train_dataloader=None, val_dataloaders=None, test_dataloaders=None):\n # when dataloader is passed via fit, patch the train_dataloader\n # functions to overwrite with these implementations\n if train_dataloader is not None:\n model.train_dataloader = _PatchDataLoader(train_dataloader)\n\n if val_dataloaders is not None:\n model.val_dataloader = _PatchDataLoader(val_dataloaders)\n\n if test_dataloaders is not None:\n model.test_dataloader = _PatchDataLoader(test_dataloaders)\n\n def run_pretrain_routine(self, model: LightningModule):\n \"\"\"Sanity check a few things before starting actual training.\n\n Args:\n model: The model to run sanity test on.\n \"\"\"\n ref_model = model\n if self.data_parallel:\n ref_model = model.module\n\n # give model convenience properties\n ref_model.trainer = self\n\n # set local properties on the model\n self.copy_trainer_model_properties(ref_model)\n\n # log hyper-parameters\n if self.logger is not None:\n # save exp to get started\n if hasattr(ref_model, \"hparams\"):\n self.logger.log_hyperparams(ref_model.hparams)\n\n self.logger.save()\n\n if self.use_ddp or self.use_ddp2:\n torch_distrib.barrier()\n\n # wait for all models to restore weights\n if self.on_tpu and XLA_AVAILABLE:\n # wait for all processes to catch up\n torch_xla.core.xla_model.rendezvous(\"pl.Trainer.run_pretrain_routine\")\n\n elif self.use_horovod:\n # wait for all processes to catch up\n hvd.join()\n\n # register auto-resubmit when on SLURM\n self.register_slurm_signal_handlers()\n\n # print model summary\n # TODO: remove self.testing condition because model.summarize() is wiping out the weights\n if self.proc_rank == 0 and self.weights_summary is not None and not self.testing:\n if self.weights_summary in ['full', 'top']:\n ref_model.summarize(mode=self.weights_summary)\n else:\n raise MisconfigurationException(\"weights_summary can be None, 'full' or 'top'\")\n\n # track model now.\n # if cluster resets state, the model will update with the saved weights\n self.model = model\n\n # set up checkpoint callback\n self.configure_checkpoint_callback()\n\n # restore training and model before hpc call\n self.restore_weights(model)\n\n # when testing requested only run test and return\n if self.testing:\n # only load test dataloader for testing\n # self.reset_test_dataloader(ref_model)\n self.run_evaluation(test_mode=True)\n return\n\n # check if we should run validation during training\n self.disable_validation = not (self.is_overriden('validation_step') and self.val_percent_check > 0) \\\n and not self.fast_dev_run\n\n # run tiny validation (if validation 
defined)\n # to make sure program won't crash during val\n if not self.disable_validation and self.num_sanity_val_steps > 0:\n self.reset_val_dataloader(ref_model)\n\n # hook and callback\n ref_model.on_sanity_check_start()\n self.on_sanity_check_start()\n\n eval_results = self._evaluate(model,\n self.val_dataloaders,\n self.num_sanity_val_steps,\n False)\n _, _, _, callback_metrics, _ = self.process_output(eval_results)\n\n self.on_sanity_check_end()\n\n # verify that early stop has conditioned on a metric that exists\n if self.enable_early_stop:\n self.early_stop_callback._validate_condition_metric(callback_metrics)\n\n # clear cache before training\n if self.on_gpu:\n torch.cuda.empty_cache()\n\n # CORE TRAINING LOOP\n self.train()\n\n def test(\n self,\n model: Optional[LightningModule] = None,\n test_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None\n ):\n r\"\"\"\n\n Separates from fit to make sure you never run on your test set until you want to.\n\n Args:\n model: The model to test.\n\n test_dataloaders: Either a single\n Pytorch Dataloader or a list of them, specifying validation samples.\n\n Example::\n\n # Option 1\n # run test after fitting\n test = DataLoader(...)\n trainer = Trainer()\n model = LightningModule()\n\n trainer.fit(model)\n trainer.test(test_dataloaders=test)\n\n # Option 2\n # run test from a loaded model\n test = DataLoader(...)\n model = LightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')\n trainer = Trainer()\n trainer.test(model, test_dataloaders=test)\n \"\"\"\n\n self.testing = True\n\n if test_dataloaders is not None:\n if model:\n self.__attach_dataloaders(model, test_dataloaders=test_dataloaders)\n else:\n self.__attach_dataloaders(self.model, test_dataloaders=test_dataloaders)\n\n # give proper warnings if user only passed in loader without hooks\n self.check_testing_model_configuration(model if model else self.model)\n\n if model is not None:\n self.model = model\n self.fit(model)\n elif self.use_ddp or self.use_tpu: # pragma: no-cover\n # attempt to load weights from a spawn\n path = os.path.join(self.default_root_dir, '__temp_weight_ddp_end.ckpt')\n test_model = self.model\n if os.path.exists(path):\n test_model = self.load_spawn_weights(self.model)\n\n self.fit(test_model)\n else:\n self.run_evaluation(test_mode=True)\n\n self.testing = False\n\n def check_model_configuration(self, model: LightningModule):\n r\"\"\"\n Checks that the model is configured correctly before training is started.\n\n Args:\n model: The model to test.\n\n \"\"\"\n # Check training_step, train_dataloader, configure_optimizer methods\n if not self.is_overriden('training_step', model):\n raise MisconfigurationException(\n 'No `training_step()` method defined. Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n if not self.is_overriden('train_dataloader', model):\n raise MisconfigurationException(\n 'No `train_dataloader()` method defined. Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n if not self.is_overriden('configure_optimizers', model):\n raise MisconfigurationException(\n 'No `configure_optimizers()` method defined. 
Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n # Check val_dataloader, validation_step and validation_epoch_end\n if self.is_overriden('val_dataloader', model):\n if not self.is_overriden('validation_step', model):\n raise MisconfigurationException('You have passed in a `val_dataloader()`'\n ' but have not defined `validation_step()`.')\n else:\n if not self.is_overriden('validation_epoch_end', model):\n rank_zero_warn(\n 'You have defined a `val_dataloader()` and have defined a `validation_step()`,'\n ' you may also want to define `validation_epoch_end()` for accumulating stats.',\n RuntimeWarning\n )\n else:\n if self.is_overriden('validation_step', model):\n raise MisconfigurationException('You have defined `validation_step()`,'\n ' but have not passed in a val_dataloader().')\n\n # Check test_dataloader, test_step and test_epoch_end\n if self.is_overriden('test_dataloader', model):\n if not self.is_overriden('test_step', model):\n raise MisconfigurationException('You have passed in a `test_dataloader()`'\n ' but have not defined `test_step()`.')\n else:\n if not self.is_overriden('test_epoch_end', model):\n rank_zero_warn(\n 'You have defined a `test_dataloader()` and have defined a `test_step()`, you may also want to'\n ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning\n )\n\n def check_testing_model_configuration(self, model: LightningModule):\n\n has_test_step = self.is_overriden('test_step', model)\n has_test_epoch_end = self.is_overriden('test_epoch_end', model)\n gave_test_loader = hasattr(model, 'test_dataloader') and model.test_dataloader()\n\n if gave_test_loader and not has_test_step:\n raise MisconfigurationException('You passed in a `test_dataloader` but did not implement `test_step()`')\n\n if has_test_step and not gave_test_loader:\n raise MisconfigurationException('You defined `test_step()` but did not implement'\n ' `test_dataloader` nor passed in `.fit(test_dataloaders`.')\n\n if has_test_step and gave_test_loader and not has_test_epoch_end:\n rank_zero_warn(\n 'You passed in a `test_dataloader` and have defined a `test_step()`, you may also want to'\n ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning\n )\n\n\nclass _PatchDataLoader(object):\n r\"\"\"\n Callable object for patching dataloaders passed into trainer.fit().\n Use this class to override model.*_dataloader() and be pickle-compatible.\n\n Args:\n dataloader: Dataloader object to return when called.\n\n \"\"\"\n\n def __init__(self, dataloader: Union[List[DataLoader], DataLoader]):\n self.dataloader = dataloader\n\n # cannot pickle __code__ so cannot verify if PatchDataloader\n # exists which shows dataloader methods have been overwritten.\n # so, we hack it by using the string representation\n self.patch_loader_code = str(self.__call__.__code__)\n\n def __call__(self) -> Union[List[DataLoader], DataLoader]:\n return self.dataloader\n"
] |
[
[
"torch.device",
"torch.multiprocessing.spawn",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.cuda.amp.GradScaler",
"torch.distributed.barrier"
]
] |
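The `get_init_arguments_and_types` classmethod in the record above drives both the doctest output and `add_argparse_args`: it walks the constructor signature and unpacks `Union` annotations through `__args__`. A minimal sketch of that introspection pattern; `init_arguments_and_types` is a hypothetical standalone helper, not part of the repo:

```python
# Sketch of the signature-scanning pattern used by
# Trainer.get_init_arguments_and_types: collect (name, types, default)
# triples, unpacking Union[...] annotations through __args__.
# `init_arguments_and_types` is a hypothetical helper name.
import inspect

def init_arguments_and_types(cls):
    triples = []
    for name, param in inspect.signature(cls.__init__).parameters.items():
        if name == 'self':
            continue
        # Union annotations expose their members via __args__;
        # plain annotations fall back to a one-element tuple.
        types = getattr(param.annotation, '__args__', (param.annotation,))
        triples.append((name, tuple(types), param.default))
    return triples
```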
janfreyberg/healthy-brain-eeg
|
[
"3354700053bc9133901799b7a628ffa16ab77995"
] |
[
"data/__init__.py"
] |
[
"from pathlib import Path\nimport pandas as pd\n\n\ndatafolder = Path('/') / 'Volumes' / 'Seagate Expansion Drive' / 'cmi-hbn'\n\nif not datafolder.exists():\n datafolder = Path('/') / 'Users' / 'jan' / \\\n 'Documents' / 'eeg-data' / 'cmi-hbn'\n\nif not datafolder.exists():\n # try the windows option\n datafolder = Path('d:') / 'cmi-hbn'\n\nphenotypes = pd.read_csv('data/HBN_S1_Pheno_data.csv')\n"
] |
[
[
"pandas.read_csv"
]
] |
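The `data/__init__.py` record above probes a sequence of candidate data roots and keeps the first that exists. A minimal sketch of that pattern, assuming a hypothetical helper name `first_existing`:

```python
# The try-each-candidate data-root pattern from data/__init__.py,
# condensed into one helper. `first_existing` is a hypothetical name.
from pathlib import Path

def first_existing(*candidates: Path) -> Path:
    for root in candidates:
        if root.exists():
            return root
    return candidates[-1]  # keep the last guess even if nothing exists

datafolder = first_existing(
    Path('/Volumes/Seagate Expansion Drive/cmi-hbn'),
    Path('/Users/jan/Documents/eeg-data/cmi-hbn'),
    Path('d:') / 'cmi-hbn',  # Windows fallback, as in the original
)
```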
thanhtd91/mt-dnn
|
[
"f9e1fc3edb4a44f0cd70e973fd93583999b517d9"
] |
[
"predict.py"
] |
[
"import argparse\nimport json\nimport os\nimport torch\n\nfrom data_utils.task_def import TaskType\nfrom experiments.exp_def import TaskDefs\nfrom experiments.glue.glue_utils import eval_model\nfrom mt_dnn.batcher import BatchGen\nfrom mt_dnn.model import MTDNNModel\n\ndef dump(path, data):\n with open(path, 'w') as f:\n json.dump(data, f)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--task_def\", type=str, default=\"experiments/glue/glue_task_def.yml\")\nparser.add_argument(\"--task\", type=str)\nparser.add_argument(\"--task_id\", type=int, help=\"the id of this task when training\")\n\nparser.add_argument(\"--prep_input\", type=str)\nparser.add_argument(\"--with_label\", action=\"store_true\")\nparser.add_argument(\"--score\", type=str, help=\"score output path\")\n\nparser.add_argument('--max_seq_len', type=int, default=512)\nparser.add_argument('--batch_size_eval', type=int, default=8)\nparser.add_argument('--cuda', type=bool, default=torch.cuda.is_available(),\n help='whether to use GPU acceleration.')\n\nparser.add_argument(\"--checkpoint\", default='mt_dnn_models/bert_model_base_uncased.pt', type=str)\n\nargs = parser.parse_args()\n\n# load task info\ntask_defs = TaskDefs(args.task_def)\nassert args.task in task_defs.task_type_map\nassert args.task in task_defs.data_type_map\nassert args.task in task_defs.metric_meta_map\ndata_type = task_defs.data_type_map[args.task]\ntask_type = task_defs.task_type_map[args.task]\nmetric_meta = task_defs.metric_meta_map[args.task]\npw_task = False\nif task_type == TaskType.Ranking:\n pw_task = True\n\n# load data\ntest_data = BatchGen(BatchGen.load(args.prep_input, False, pairwise=pw_task, maxlen=args.max_seq_len),\n batch_size=args.batch_size_eval,\n gpu=args.cuda, is_train=False,\n task_id=args.task_id,\n maxlen=args.max_seq_len,\n pairwise=pw_task,\n data_type=data_type,\n task_type=task_type)\n\n# load model\ncheckpoint_path = args.checkpoint\nassert os.path.exists(checkpoint_path)\nif args.cuda:\n state_dict = torch.load(checkpoint_path)\nelse:\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\nconfig = state_dict['config']\nconfig[\"cuda\"] = args.cuda\nmodel = MTDNNModel(config, state_dict=state_dict)\nwith torch.no_grad():\n test_metrics, test_predictions, scores, golds, test_ids = eval_model(model, test_data,\n metric_meta=metric_meta,\n use_cuda=args.cuda, with_label=args.with_label)\n\nresults = {'metrics': test_metrics, 'predictions': test_predictions, 'uids': test_ids, 'scores': scores}\ndump(args.score, results)\nif args.with_label:\n print(test_metrics)\n"
] |
[
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] |
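`predict.py` above remaps the checkpoint to CPU when CUDA is unavailable; otherwise `torch.load` would try to restore CUDA tensors onto devices that do not exist. The same pattern in isolation, with `load_state` as a hypothetical wrapper name:

```python
# Device-aware checkpoint loading, as in predict.py above.
# `load_state` is a hypothetical wrapper name.
import torch

def load_state(path: str, use_cuda: bool):
    if use_cuda:
        return torch.load(path)  # keep tensors on their saved devices
    return torch.load(path, map_location="cpu")  # remap GPU tensors to CPU
```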
johnzhang1999/Spatial-Attention
|
[
"9e8e90ba624e52dcccba47c7289bb305765f5da6"
] |
[
"main.py"
] |
[
"from __future__ import print_function, absolute_import\nimport argparse\nimport os.path as osp\nimport numpy as np\nimport sys\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\n\nfrom reid import datasets\nfrom reid import models\nfrom reid.trainers_partloss_4stage import Trainer\nfrom reid.evaluators import Evaluator\nfrom reid.utils.data import transforms as T\nfrom reid.utils.data.preprocessor import Preprocessor\nfrom reid.utils.logging import Logger\nfrom reid.utils.serialization import load_checkpoint, save_checkpoint\n\n'''\nThis is the code for paper 'parameter-free spatial attention network for Person Re-Identification'\nOur code is mainly based on PCB \n'''\ndef get_data(name, data_dir, height, width, batch_size, workers):\n root = osp.join(data_dir, name)\n root = data_dir\n dataset = datasets.create(name, root)\n\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n num_classes = dataset.num_train_ids\n\n train_transformer = T.Compose([\n T.RectScale(height, width),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n normalizer,\n ])\n\n test_transformer = T.Compose([\n T.RectScale(height, width),\n T.ToTensor(),\n normalizer,\n ])\n\n train_loader = DataLoader(\n Preprocessor(dataset.train, root=osp.join(dataset.images_dir,dataset.train_path),\n transform=train_transformer,random_mask=False),\n batch_size=batch_size, num_workers=workers,\n shuffle=True, pin_memory=True, drop_last=True)\n\n query_loader = DataLoader(\n Preprocessor(dataset.query, root=osp.join(dataset.images_dir,dataset.query_path),\n transform=test_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n gallery_loader = DataLoader(\n Preprocessor(dataset.gallery, root=osp.join(dataset.images_dir,dataset.gallery_path),\n transform=test_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n\n return dataset, num_classes, train_loader, query_loader, gallery_loader\n\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n cudnn.benchmark = True\n\n # Redirect print to both console and log file\n if not args.evaluate:\n sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))\n\n # Create data loaders\n if args.height is None or args.width is None:\n args.height, args.width = (144, 56) if args.arch == 'inception' else \\\n (256, 128)\n dataset, num_classes, train_loader, query_loader, gallery_loader = \\\n get_data(args.dataset, args.data_dir, args.height,\n args.width, args.batch_size, args.workers,\n )\n\n\n # Create model\n model = models.create(args.arch, num_features=args.features,\n dropout=args.dropout, num_classes=num_classes)\n\n # Load from checkpoint\n start_epoch = best_top1 = 0\n if args.resume:\n checkpoint = load_checkpoint(args.resume)\n model_dict = model.state_dict()\n checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}\n model_dict.update(checkpoint_load)\n model.load_state_dict(model_dict)\n# model.load_state_dict(checkpoint['state_dict'])\n start_epoch = checkpoint['epoch']\n best_top1 = checkpoint['best_top1']\n print(\"=> Start epoch {} best top1 {:.1%}\"\n .format(start_epoch, best_top1))\n\n model = nn.DataParallel(model).cuda()\n\n\n # Evaluator\n evaluator = Evaluator(model)\n if args.evaluate:\n print(\"Test:\")\n evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)\n return\n\n # 
Criterion\n criterion = nn.CrossEntropyLoss().cuda()\n\n # Optimizer\n if hasattr(model.module, 'base'):\n base_param_ids = set(map(id, model.module.base.parameters()))\n new_params = [p for p in model.parameters() if\n id(p) not in base_param_ids]\n param_groups = [\n {'params': model.module.base.parameters(), 'lr_mult': 0.1},\n {'params': new_params, 'lr_mult': 1.0}]\n else:\n param_groups = model.parameters()\n optimizer = torch.optim.SGD(param_groups, lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n nesterov=True)\n # optimizer = torch.optim.Adam(param_groups,lr=args.lr)\n # Trainer\n trainer = Trainer(model, criterion, 0, 0, SMLoss_mode=0)\n\n # Schedule learning rate\n def adjust_lr(epoch):\n step_size = 60 if args.arch == 'inception' else args.step_size\n lr = args.lr * (0.1 ** (epoch // step_size))\n # if epoch>70:\n # lr = 0.01\n for g in optimizer.param_groups:\n g['lr'] = lr * g.get('lr_mult', 1)\n\n # Start training\n for epoch in range(start_epoch, args.epochs):\n adjust_lr(epoch)\n trainer.train(epoch, train_loader, optimizer)\n is_best = True\n save_checkpoint({\n 'state_dict': model.module.state_dict(),\n 'epoch': epoch + 1,\n 'best_top1': best_top1,\n }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))\n\n # Final test\n print('Test with best model:')\n checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))\n model.module.load_state_dict(checkpoint['state_dict'])\n evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Softmax loss classification\")\n # data\n parser.add_argument('-d', '--dataset', type=str, default='market',\n choices=datasets.names())\n parser.add_argument('-b', '--batch-size', type=int, default=256)\n parser.add_argument('-j', '--workers', type=int, default=4)\n parser.add_argument('--split', type=int, default=0)\n parser.add_argument('--height', type=int, default=384,\n help=\"input height, default: 256 for resnet*, \"\n \"144 for inception\")\n parser.add_argument('--width', type=int, default=128,\n help=\"input width, default: 128 for resnet*, \"\n \"56 for inception\")\n parser.add_argument('--combine-trainval', action='store_true',\n help=\"train and val sets together for training, \"\n \"val set alone for validation\")\n # model\n parser.add_argument('-a', '--arch', type=str, default='resnet50',\n choices=models.names())\n parser.add_argument('--features', type=int, default=256)\n parser.add_argument('--dropout', type=float, default=0.5)\n # optimizer\n parser.add_argument('--lr', type=float, default=0.1,\n help=\"learning rate of new parameters, for pretrained \"\n \"parameters it is 10 times smaller than this\")\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--weight-decay', type=float, default=5e-4)\n # training configs\n parser.add_argument('--resume', type=str, default='', metavar='PATH')\n parser.add_argument('--evaluate', action='store_true',\n help=\"evaluation only\")\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--step-size',type=int, default=40)\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--print-freq', type=int, default=1)\n # misc\n working_dir = osp.dirname(osp.abspath(__file__))\n parser.add_argument('--data-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'data'))\n parser.add_argument('--logs-dir', type=str, metavar='PATH',\n 
default=osp.join(working_dir, 'logs'))\n main(parser.parse_args())\n"
] |
[
[
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"torch.optim.SGD",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
] |
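The `adjust_lr` closure in `main.py` above is a plain step decay with per-group multipliers, so the pretrained backbone (lr_mult 0.1) always trains at one tenth of the rate of the new layers. Restated as a standalone sketch; `step_decay_lr` is a hypothetical name:

```python
# Step decay with per-group multipliers, as in adjust_lr() above:
# the base LR drops 10x every `step_size` epochs, and each param group
# scales it by its own 'lr_mult'. `step_decay_lr` is a hypothetical name;
# `optimizer` is any torch.optim optimizer.
def step_decay_lr(optimizer, base_lr: float, epoch: int, step_size: int) -> None:
    lr = base_lr * (0.1 ** (epoch // step_size))
    for group in optimizer.param_groups:
        group['lr'] = lr * group.get('lr_mult', 1.0)
```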
leiyu1980/cavaface.pytorch
|
[
"522167004bb7792e063d1376c865d451f0895bd0"
] |
[
"backbone/common.py"
] |
[
"\"\"\"\n Common routines for models in PyTorch.\n\"\"\"\n\n__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'InterpolationBlock',\n 'IBN', 'Flatten', 'l2_norm', 'SEModule', 'bottleneck_IR', 'bottleneck_IR_SE']\n\nimport math\nfrom inspect import isfunction\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, \\\n AdaptiveAvgPool2d, Sequential, Module\nfrom collections import namedtuple\n\ndef round_channels(channels, divisor=8):\n \"\"\"\n Round weighted channel number (make divisible operation).\n\n Parameters:\n ----------\n channels : int or float\n Original number of channels.\n divisor : int, default 8\n Alignment value.\n\n Returns\n -------\n int\n Weighted number of channels.\n \"\"\"\n rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor)\n if float(rounded_channels) < 0.9 * channels:\n rounded_channels += divisor\n return rounded_channels\n\nclass Identity(nn.Module):\n \"\"\"\n Identity block.\n \"\"\"\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\nclass Swish(nn.Module):\n \"\"\"\n Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.\n \"\"\"\n def forward(self, x):\n return x * torch.sigmoid(x)\n\n\nclass HSigmoid(nn.Module):\n \"\"\"\n Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'\n https://arxiv.org/abs/1905.02244.\n \"\"\"\n def forward(self, x):\n return F.relu6(x + 3.0, inplace=True) / 6.0\n\n\nclass HSwish(nn.Module):\n \"\"\"\n H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.\n\n Parameters:\n ----------\n inplace : bool\n Whether to use inplace version of the module.\n \"\"\"\n def __init__(self, inplace=False):\n super(HSwish, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0\n\n\ndef get_activation_layer(activation):\n \"\"\"\n Create activation layer from string/function.\n\n Parameters:\n ----------\n activation : function, or str, or nn.Module\n Activation function or name of activation function.\n\n Returns\n -------\n nn.Module\n Activation layer.\n \"\"\"\n assert (activation is not None)\n if isfunction(activation):\n return activation()\n elif isinstance(activation, str):\n if activation == \"relu\":\n return nn.ReLU(inplace=True)\n elif activation == \"relu6\":\n return nn.ReLU6(inplace=True)\n elif activation == \"swish\":\n return Swish()\n elif activation == \"hswish\":\n return HSwish(inplace=True)\n elif activation == \"sigmoid\":\n return nn.Sigmoid()\n elif activation == \"hsigmoid\":\n return HSigmoid()\n elif activation == \"identity\":\n return Identity()\n else:\n raise NotImplementedError()\n else:\n assert (isinstance(activation, nn.Module))\n return activation\n\nclass InterpolationBlock(nn.Module):\n \"\"\"\n Interpolation upsampling block.\n\n Parameters:\n ----------\n scale_factor : float\n Multiplier for spatial size.\n mode : str, default 'bilinear'\n Algorithm used for upsampling.\n align_corners : bool, default True\n Whether to align the corner pixels of the input and output tensors.\n \"\"\"\n def __init__(self,\n scale_factor,\n mode=\"bilinear\",\n align_corners=True):\n super(InterpolationBlock, self).__init__()\n self.scale_factor = scale_factor\n self.mode = 
mode\n self.align_corners = align_corners\n\n def forward(self, x):\n return F.interpolate(\n input=x,\n scale_factor=self.scale_factor,\n mode=self.mode,\n align_corners=self.align_corners)\n\n def __repr__(self):\n s = \"{name}(scale_factor={scale_factor}, mode={mode}, align_corners={align_corners})\"\n return s.format(\n name=self.__class__.__name__,\n scale_factor=self.scale_factor,\n mode=self.mode,\n align_corners=self.align_corners)\n\n def calc_flops(self, x):\n assert (x.shape[0] == 1)\n if self.mode == \"bilinear\":\n num_flops = 9 * x.numel()\n else:\n num_flops = 4 * x.numel()\n num_macs = 0\n return num_flops, num_macs\n\nclass IBN(nn.Module):\n \"\"\"\n Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n inst_fraction : float, default 0.5\n The first fraction of channels for normalization.\n inst_first : bool, default True\n Whether instance normalization be on the first part of channels.\n \"\"\"\n def __init__(self,\n channels,\n first_fraction=0.5,\n inst_first=True):\n super(IBN, self).__init__()\n self.inst_first = inst_first\n h1_channels = int(math.floor(channels * first_fraction))\n h2_channels = channels - h1_channels\n self.split_sections = [h1_channels, h2_channels]\n\n if self.inst_first:\n self.inst_norm = nn.InstanceNorm2d(\n num_features=h1_channels,\n affine=True)\n self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)\n else:\n self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)\n self.inst_norm = nn.InstanceNorm2d(\n num_features=h2_channels,\n affine=True)\n\n def forward(self, x):\n x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)\n if self.inst_first:\n x1 = self.inst_norm(x1.contiguous())\n x2 = self.batch_norm(x2.contiguous())\n else:\n x1 = self.batch_norm(x1.contiguous())\n x2 = self.inst_norm(x2.contiguous())\n x = torch.cat((x1, x2), dim=1)\n return x\n\nclass Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\ndef l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n\n return output\n\n\nclass SEModule(Module):\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = Conv2d(\n channels, channels // reduction, kernel_size=1, padding=0, bias=False)\n\n nn.init.xavier_uniform_(self.fc1.weight.data)\n\n self.relu = ReLU(inplace=True)\n self.fc2 = Conv2d(\n channels // reduction, channels, kernel_size=1, padding=0, bias=False)\n\n self.sigmoid = Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n\n return module_input * x\n\n\nclass bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride=1):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), \n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False), \n BatchNorm2d(depth))\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\n\nclass 
bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride=1):\n super(bottleneck_IR_SE, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16)\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\nfrom torch.nn.modules.utils import _pair\nclass SplAtConv2d(nn.Module):\n \"\"\"Split-Attention Conv2d\n \"\"\"\n def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),\n dilation=(1, 1), groups=1, bias=True,\n radix=2, reduction_factor=4,\n rectify=False, rectify_avg=False, norm_layer=None,\n dropblock_prob=0.0, **kwargs):\n super(SplAtConv2d, self).__init__()\n padding = _pair(padding)\n self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)\n self.rectify_avg = rectify_avg\n inter_channels = max(in_channels*radix//reduction_factor, 32)\n self.radix = radix\n self.cardinality = groups\n self.channels = channels\n self.dropblock_prob = dropblock_prob\n if self.rectify:\n from rfconv import RFConv2d\n self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,\n groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)\n else:\n self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,\n groups=groups*radix, bias=bias, **kwargs)\n self.use_bn = norm_layer is not None\n if self.use_bn:\n self.bn0 = norm_layer(channels*radix)\n self.relu = ReLU(inplace=True)\n self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)\n if self.use_bn:\n self.bn1 = norm_layer(inter_channels)\n self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)\n if dropblock_prob > 0.0:\n self.dropblock = DropBlock2D(dropblock_prob, 3)\n self.rsoftmax = rSoftMax(radix, groups)\n\n def forward(self, x):\n x = self.conv(x)\n if self.use_bn:\n x = self.bn0(x)\n if self.dropblock_prob > 0.0:\n x = self.dropblock(x)\n x = self.relu(x)\n\n batch, rchannel = x.shape[:2]\n if self.radix > 1:\n splited = torch.split(x, rchannel//self.radix, dim=1) #https://github.com/pytorch/pytorch/pull/32493/files\n gap = sum(splited) \n else:\n gap = x\n gap = F.adaptive_avg_pool2d(gap, 1)\n gap = self.fc1(gap)\n\n if self.use_bn:\n gap = self.bn1(gap)\n gap = self.relu(gap)\n\n atten = self.fc2(gap)\n atten = self.rsoftmax(atten).view(batch, -1, 1, 1)\n\n if self.radix > 1:\n attens = torch.split(atten, rchannel//self.radix, dim=1)\n out = sum([att*split for (att, split) in zip(attens, splited)])\n else:\n out = atten * x\n return out.contiguous()\n\nclass rSoftMax(nn.Module):\n def __init__(self, radix, cardinality):\n super().__init__()\n self.radix = radix\n self.cardinality = cardinality\n\n def forward(self, x):\n batch = x.size(0)\n if self.radix > 1:\n x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)\n x = F.softmax(x, dim=1)\n x = x.reshape(batch, -1)\n else:\n x = torch.sigmoid(x)\n return x\n\nclass DropBlock2D(nn.Module):\n r\"\"\"Randomly zeroes 2D spatial blocks of the input tensor.\n As described in the paper\n `DropBlock: A regularization method for convolutional networks`_ ,\n dropping whole blocks of feature 
map allows to remove semantic\n    information as compared to regular dropout.\n    Args:\n        drop_prob (float): probability of an element to be dropped.\n        block_size (int): size of the block to drop\n    Shape:\n        - Input: `(N, C, H, W)`\n        - Output: `(N, C, H, W)`\n    .. _DropBlock: A regularization method for convolutional networks:\n       https://arxiv.org/abs/1810.12890\n    \"\"\"\n\n    def __init__(self, drop_prob, block_size, share_channel=False):\n        super(DropBlock2D, self).__init__()\n        self.register_buffer('i', torch.zeros(1, dtype=torch.int64))\n        self.register_buffer('drop_prob', drop_prob * torch.ones(1, dtype=torch.float32))\n        self.inited = False\n        self.step_size = 0.0\n        self.start_step = 0\n        self.nr_steps = 0\n        self.block_size = block_size\n        self.share_channel = share_channel\n\n    def reset(self):\n        \"\"\"stop DropBlock\"\"\"\n        self.inited = True\n        self.i[0] = 0\n        self.drop_prob = 0.0\n\n    def reset_steps(self, start_step, nr_steps, start_value=0, stop_value=None):\n        self.inited = True\n        stop_value = self.drop_prob.item() if stop_value is None else stop_value\n        self.i[0] = 0\n        self.drop_prob[0] = start_value\n        self.step_size = (stop_value - start_value) / nr_steps\n        self.nr_steps = nr_steps\n        self.start_step = start_step\n\n    def forward(self, x):\n        if not self.training or self.drop_prob.item() == 0.:\n            return x\n        else:\n            self.step()\n\n            # get gamma value\n            gamma = self._compute_gamma(x)\n\n            # sample mask and place on input device\n            if self.share_channel:\n                mask = (torch.rand(*x.shape[2:], device=x.device, dtype=x.dtype) < gamma).unsqueeze(0).unsqueeze(0)\n            else:\n                mask = (torch.rand(*x.shape[1:], device=x.device, dtype=x.dtype) < gamma).unsqueeze(0)\n\n            # compute block mask\n            block_mask, keeped = self._compute_block_mask(mask)\n\n            # apply block mask\n            out = x * block_mask\n\n            # scale output\n            out = out * (block_mask.numel() / keeped).to(out)\n            return out\n\n    def _compute_block_mask(self, mask):\n        block_mask = F.max_pool2d(mask,\n                                  kernel_size=(self.block_size, self.block_size),\n                                  stride=(1, 1),\n                                  padding=self.block_size // 2)\n\n        keeped = block_mask.numel() - block_mask.sum().to(torch.float32)\n        block_mask = 1 - block_mask\n\n        return block_mask, keeped\n\n    def _compute_gamma(self, x):\n        _, c, h, w = x.size()\n        gamma = self.drop_prob.item() / (self.block_size ** 2) * (h * w) / \\\n            ((w - self.block_size + 1) * (h - self.block_size + 1))\n        return gamma\n\n    def step(self):\n        assert self.inited\n        idx = self.i.item()\n        if idx > self.start_step and idx < self.start_step + self.nr_steps:\n            self.drop_prob += self.step_size\n        self.i += 1\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        idx_key = prefix + 'i'\n        drop_prob_key = prefix + 'drop_prob'\n        if idx_key not in state_dict:\n            state_dict[idx_key] = torch.zeros(1, dtype=torch.int64)\n        if drop_prob_key not in state_dict:\n            state_dict[drop_prob_key] = torch.ones(1, dtype=torch.float32)\n        super(DropBlock2D, self)._load_from_state_dict(\n            state_dict, prefix, local_metadata, strict,\n            missing_keys, unexpected_keys, error_msgs)\n\n    def _save_to_state_dict(self, destination, prefix, keep_vars):\n        \"\"\"overwrite save method\"\"\"\n        pass\n\n    def extra_repr(self):\n        return 'drop_prob={}, step_size={}'.format(self.drop_prob, self.step_size)\n\ndef reset_dropblock(start_step, nr_steps, start_value, stop_value, m):\n    \"\"\"\n    Example:\n        from functools import partial\n        apply_drop_prob = partial(reset_dropblock, 0, epochs*iters_per_epoch, 0.0, 0.1)\n        net.apply(apply_drop_prob)\n    \"\"\"\n    if 
isinstance(m, DropBlock2D):\n m.reset_steps(start_step, nr_steps, start_value, stop_value)\n\n\nclass Linear_block(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(Linear_block, self).__init__()\n self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)\n self.bn = BatchNorm2d(out_c)\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\nclass GDC(nn.Module):\n def __init__(self, in_c, embedding_size):\n super(GDC, self).__init__()\n self.conv_6_dw = Linear_block(in_c, in_c, groups=in_c, kernel=(7,7), stride=(1, 1), padding=(0, 0))\n self.conv_6_flatten = Flatten()\n self.linear = nn.Linear(in_c, embedding_size, bias=False)\n #self.bn = BatchNorm1d(embedding_size, affine=False)\n self.bn = nn.BatchNorm1d(embedding_size)\n\n def forward(self, x):\n x = self.conv_6_dw(x)\n x = self.conv_6_flatten(x)\n x = self.linear(x)\n x = self.bn(x)\n return x"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.modules.utils._pair",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.sigmoid",
"torch.nn.MaxPool2d",
"torch.norm",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.div",
"torch.zeros",
"torch.nn.functional.relu6",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.softmax",
"torch.rand",
"torch.nn.Sigmoid",
"torch.nn.functional.interpolate",
"torch.split",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU6",
"torch.nn.BatchNorm1d",
"torch.nn.PReLU",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.max_pool2d"
]
] |
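`DropBlock2D._compute_gamma` in the record above rescales the configured `drop_prob` before sampling seeds: each seed is dilated into a `block_size` x `block_size` region by max-pooling, and seeds can only sit where a full block fits. The formula in isolation, under the hypothetical name `dropblock_gamma`:

```python
# The seed probability from DropBlock2D._compute_gamma: rescale
# drop_prob so the expected dropped area matches the target once each
# sampled seed is dilated into a block_size x block_size region.
# `dropblock_gamma` is a hypothetical name.
def dropblock_gamma(drop_prob: float, block_size: int, h: int, w: int) -> float:
    valid_centers = (w - block_size + 1) * (h - block_size + 1)
    return drop_prob / (block_size ** 2) * (h * w) / valid_centers
```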
jerrypeng7773/amazon-sagemaker-examples
|
[
"c5ddecce1f739a345465b9a38b064983a129141d",
"c5ddecce1f739a345465b9a38b064983a129141d"
] |
[
"advanced_functionality/pytorch_extending_our_containers/utils/utils_cifar.py",
"training/distributed_training/tensorflow/data_parallel/mnist/code/train_tensorflow_smdataparallel_mnist.py"
] |
[
"# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nclasses = (\"plane\", \"car\", \"bird\", \"cat\", \"deer\", \"dog\", \"frog\", \"horse\", \"ship\", \"truck\")\n\n\ndef _get_transform():\n return transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n\n\ndef get_train_data_loader(data_dir=\"/tmp/pytorch/cifar-10-data\"):\n transform = _get_transform()\n trainset = torchvision.datasets.CIFAR10(\n root=data_dir, train=True, download=True, transform=transform\n )\n return torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\n\n\ndef get_test_data_loader(data_dir=\"/tmp/pytorch/cifar-10-data\"):\n transform = _get_transform()\n testset = torchvision.datasets.CIFAR10(\n root=data_dir, train=False, download=True, transform=transform\n )\n return torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n\n\n# function to show an image\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\n\n# Import SMDataParallel TensorFlow2 Modules\nimport smdistributed.dataparallel.tensorflow as dist\nimport tensorflow as tf\n\ntf.random.set_seed(42)\n\n\n# SMDataParallel: Initialize\ndist.init()\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nif gpus:\n # SMDataParallel: Pin GPUs to a single SMDataParallel process [use SMDataParallel local_rank() API]\n tf.config.experimental.set_visible_devices(gpus[dist.local_rank()], \"GPU\")\n\n(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data(\n path=\"mnist-%d.npz\" % dist.rank()\n)\n\ndataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32), tf.cast(mnist_labels, tf.int64))\n)\ndataset = dataset.repeat().shuffle(10000).batch(128)\n\nmnist_model = tf.keras.Sequential(\n [\n tf.keras.layers.Conv2D(32, [3, 3], activation=\"relu\"),\n tf.keras.layers.Conv2D(64, [3, 3], activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n)\nloss = tf.losses.SparseCategoricalCrossentropy()\n\n# SMDataParallel: dist.size()\n# LR for 8 node run : 0.000125\n# LR for single node run : 0.001\nopt = tf.optimizers.Adam(0.000125 * dist.size())\n\ncheckpoint_dir = os.environ[\"SM_MODEL_DIR\"]\n\ncheckpoint = tf.train.Checkpoint(model=mnist_model, optimizer=opt)\n\n\[email protected]\ndef training_step(images, labels, first_batch):\n with tf.GradientTape() as tape:\n probs = mnist_model(images, training=True)\n loss_value = loss(labels, probs)\n\n # SMDataParallel: Wrap tf.GradientTape with SMDataParallel's DistributedGradientTape\n tape = dist.DistributedGradientTape(tape)\n\n grads = tape.gradient(loss_value, mnist_model.trainable_variables)\n opt.apply_gradients(zip(grads, mnist_model.trainable_variables))\n\n if first_batch:\n # SMDataParallel: Broadcast model and optimizer variables\n dist.broadcast_variables(mnist_model.variables, root_rank=0)\n dist.broadcast_variables(opt.variables(), root_rank=0)\n\n # SMDataParallel: all_reduce call\n loss_value = dist.oob_allreduce(loss_value) # Average the loss across workers\n return loss_value\n\n\nfor batch, (images, labels) in enumerate(dataset.take(10000 // dist.size())):\n loss_value = training_step(images, labels, batch == 0)\n\n if batch % 50 == 0 and dist.rank() == 0:\n print(\"Step #%d\\tLoss: %.6f\" % (batch, loss_value))\n\n# SMDataParallel: Save checkpoints only from master node.\nif dist.rank() == 0:\n mnist_model.save(os.path.join(checkpoint_dir, 
\"1\"))\n"
] |
[
[
"numpy.transpose",
"torch.utils.data.DataLoader"
],
[
"tensorflow.GradientTape",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.Flatten",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.cast",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.losses.SparseCategoricalCrossentropy",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.train.Checkpoint"
]
] |
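The one non-obvious hyperparameter in the SMDataParallel script above is the learning rate: the hard-coded per-worker base is multiplied by `dist.size()`, so an 8-worker run recovers the single-node rate of 0.001. As a sketch, with `scaled_lr` as a hypothetical name:

```python
# The LR arithmetic from the SMDataParallel script: the per-worker base
# is multiplied by the worker count (0.000125 * 8 == 0.001).
# `scaled_lr` is a hypothetical name.
def scaled_lr(per_worker_base: float, world_size: int) -> float:
    return per_worker_base * world_size

assert scaled_lr(0.000125, 8) == 0.001  # matches the script's 8-worker setting
```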
aaron8tang/raster-vision
|
[
"a1af78bafb8f19ccc40c476adb551bdc23a33611",
"a1af78bafb8f19ccc40c476adb551bdc23a33611"
] |
[
"rastervision/backend/tf_deeplab.py",
"rastervision/task/semantic_segmentation.py"
] |
[
"import os\nimport glob\nimport shutil\nimport tarfile\nimport uuid\nfrom typing import (Dict, List, Tuple)\nfrom os.path import join\nfrom subprocess import Popen\nimport logging\n\nimport numpy as np\nfrom google.protobuf import (json_format)\n\nimport rastervision as rv\nfrom rastervision.core.box import Box\nfrom rastervision.core.class_map import ClassMap\nfrom rastervision.backend import Backend\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\nfrom rastervision.core.training_data import TrainingData\nfrom rastervision.backend.tf_object_detection import (write_tf_record, TRAIN,\n VALIDATION)\nfrom rastervision.protos.deeplab.train_pb2 import (TrainingParameters as\n TrainingParametersMsg)\nfrom rastervision.utils.files import (download_if_needed, get_local_path,\n make_dir, start_sync, upload_or_copy,\n sync_to_dir, sync_from_dir)\nfrom rastervision.utils.misc import (numpy_to_png, png_to_numpy, save_img,\n terminate_at_exit)\nfrom rastervision.data.label_source.utils import color_to_integer\nfrom rastervision.rv_config import RVConfig\n\nFROZEN_INFERENCE_GRAPH = 'model'\nINPUT_TENSOR_NAME = 'ImageTensor:0'\nOUTPUT_TENSOR_NAME = 'SemanticPredictions:0'\n\nlog = logging.getLogger(__name__)\n\n\ndef make_tf_examples(training_data: TrainingData, class_map: ClassMap) -> List:\n \"\"\"Take training data and a class map and return a list of TFRecords.\n\n Args:\n training_data: A rastervision.core.training_data.TrainingData\n object.\n class_map: A rastervision.core.class_map.ClassMap object.\n\n Returns:\n list(tensorflow.core.example.example_pb2.Example)\n\n \"\"\"\n tf_examples = []\n log.info('Creating TFRecord')\n for chip, window, labels in training_data:\n tf_example = create_tf_example(chip, window, labels.to_array(),\n class_map)\n tf_examples.append(tf_example)\n return tf_examples\n\n\ndef merge_tf_records(output_path: str, src_records: List[str]) -> None:\n \"\"\"Merge multiple TFRecord files into one.\n\n Args:\n output_path: Where to write the merged TFRecord file.\n src_records: A list of strings giving the location of the\n input TFRecord files.\n\n Returns:\n None\n\n \"\"\"\n import tensorflow as tf\n\n records = 0\n with tf.python_io.TFRecordWriter(output_path) as writer:\n log.info('Merging TFRecords')\n for src_record in src_records:\n for string_record in tf.python_io.tf_record_iterator(src_record):\n writer.write(string_record)\n records = records + 1\n log.info('{} records'.format(records))\n\n\ndef make_debug_images(record_path: str, output_dir: str, class_map: ClassMap,\n p: float) -> None:\n \"\"\"Render a random sample of the TFRecords in a given file as\n human-viewable PNG files.\n\n Args:\n record_path: Path to the TFRecord file.\n output_dir: Destination directory for the generated PNG files.\n p: The probability of rendering a particular record.\n\n Returns:\n None\n\n \"\"\"\n import tensorflow as tf\n make_dir(output_dir)\n\n ids = class_map.get_keys()\n color_strs = list(map(lambda c: c.color, class_map.get_items()))\n color_ints = list(map(lambda c: color_to_integer(c), color_strs))\n correspondence = dict(zip(ids, color_ints))\n\n def _label_fn(v: int) -> int:\n if v in correspondence:\n return correspondence.get(v)\n else:\n return 0\n\n label_fn = np.vectorize(_label_fn, otypes=[np.uint64])\n\n def _image_fn(pixel: int) -> int:\n if (pixel & 0x00ffffff):\n r = ((pixel >> 41 & 0x7f) + (pixel >> 17 & 0x7f)) << 16\n g = ((pixel >> 33 & 0x7f) + (pixel >> 9 & 0x7f)) << 8\n b = ((pixel >> 25 & 0x7f) 
+ (pixel >> 1 & 0x7f)) << 0\n return r + g + b\n else:\n return pixel >> 24\n\n image_fn = np.vectorize(_image_fn, otypes=[np.uint64])\n\n log.info('Generating debug chips')\n tfrecord_iter = tf.python_io.tf_record_iterator(record_path)\n for ind, example in enumerate(tfrecord_iter):\n if np.random.rand() <= p:\n example = tf.train.Example.FromString(example)\n im_unpacked, labels = parse_tf_example(example)\n\n im_r = np.array(im_unpacked[:, :, 0], dtype=np.uint64) * 1 << 40\n im_g = np.array(im_unpacked[:, :, 1], dtype=np.uint64) * 1 << 32\n im_b = np.array(im_unpacked[:, :, 2], dtype=np.uint64) * 1 << 24\n im_packed = im_r + im_g + im_b\n\n labels_packed = label_fn(np.array(labels))\n im_labels_packed = im_packed + labels_packed\n im_packed = image_fn(im_labels_packed)\n\n im_unpacked[:, :, 0] = np.bitwise_and(\n im_packed >> 16, 0xff, dtype=np.uint8)\n im_unpacked[:, :, 1] = np.bitwise_and(\n im_packed >> 8, 0xff, dtype=np.uint8)\n im_unpacked[:, :, 2] = np.bitwise_and(\n im_packed >> 0, 0xff, dtype=np.uint8)\n\n output_path = join(output_dir, '{}.png'.format(ind))\n save_img(im_unpacked, output_path)\n\n\ndef parse_tf_example(example) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Parse a TensorFlow Example into an image array and a label array.\n\n Args:\n example: A TensorFlow Example object.\n\n Returns:\n A np.ndarray × np.ndarray pair.\n\n \"\"\"\n ie = 'image/encoded'\n isce = 'image/segmentation/class/encoded'\n image_encoded = \\\n example.features.feature[ie].bytes_list.value[0]\n image_segmentation_class_encoded = \\\n example.features.feature[isce].bytes_list.value[0]\n im = png_to_numpy(image_encoded)\n labels = png_to_numpy(image_segmentation_class_encoded)\n return im, labels\n\n\ndef create_tf_example(image: np.ndarray,\n window: Box,\n labels: np.ndarray,\n class_map: ClassMap,\n chip_id: str = ''):\n \"\"\"Create a TensorFlow from an image, the labels, &c.\n\n Args:\n image: An np.ndarray containing the image data.\n window: A Box object containing the bounding box for this example.\n labels: An nd.array containing the label data.\n class_map: A ClassMap object containing mappings between\n numerical and textual labels.\n chip_id: The chip id as a string.\n\n Returns:\n A DeepLab-compatible TensorFlow Example object containing the\n given data.\n\n \"\"\"\n import tensorflow as tf\n from object_detection.utils import dataset_util\n\n class_keys = set(class_map.get_keys())\n\n def _clean(n):\n return (n if n in class_keys else 0x00)\n\n clean = np.vectorize(_clean, otypes=[np.uint8])\n\n image_encoded = numpy_to_png(image)\n image_filename = chip_id.encode('utf8')\n image_format = 'png'.encode('utf8')\n image_height, image_width, image_channels = image.shape\n image_segmentation_class_encoded = numpy_to_png(clean(labels))\n image_segmentation_class_format = 'png'.encode('utf8')\n\n features = tf.train.Features(\n feature={\n 'image/encoded':\n dataset_util.bytes_feature(image_encoded),\n 'image/filename':\n dataset_util.bytes_feature(image_filename),\n 'image/format':\n dataset_util.bytes_feature(image_format),\n 'image/height':\n dataset_util.int64_feature(image_height),\n 'image/width':\n dataset_util.int64_feature(image_width),\n 'image/channels':\n dataset_util.int64_feature(image_channels),\n 'image/segmentation/class/encoded':\n dataset_util.bytes_feature(image_segmentation_class_encoded),\n 'image/segmentation/class/format':\n dataset_util.bytes_feature(image_segmentation_class_format),\n })\n\n return tf.train.Example(features=features)\n\n\ndef 
get_record_uri(base_uri: str, split: str) -> str:\n \"\"\"Given a base URI and a split, return a filename to use.\n\n Args:\n base_uri: The directory under-which the returned record uri\n will reside.\n split: The split (\"train\", \"validate\", et cetera).\n\n Returns:\n A uri, under the base_uri, that can be used to store a record\n file.\n\n \"\"\"\n return join(base_uri, '{}-0.record'.format(split))\n\n\ndef get_latest_checkpoint(train_logdir_local: str) -> str:\n \"\"\"Return the most recently generated checkpoint.\n\n Args:\n train_logir_local: The directory in-which to look for the\n latest checkpoint.\n\n Returns:\n Returns the (local) URI to the latest checkpoint.\n\n \"\"\"\n ckpts = glob.glob(join(train_logdir_local, 'model.ckpt-*.meta'))\n times = map(os.path.getmtime, ckpts)\n latest = sorted(zip(times, ckpts))[-1][1]\n return latest[:len(latest) - len('.meta')]\n\n\ndef get_training_args(train_py: str, train_logdir_local: str, tfic_ckpt: str,\n dataset_dir_local: str, num_classes: int,\n tfdl_config) -> Tuple[List[str], Dict[str, str]]:\n \"\"\"Generate the array of arguments needed to run the training script.\n\n Args:\n train_py: The URI of the training script.\n train_logdir_local: The directory in-which checkpoints will\n be placed.\n tfic_ckpt: URI of the .ckpt \"file\" from the initial\n checkpoint tarball.\n dataset_dir_local: The directory in which the records are\n found.\n num_classes: The number of classes.\n tfdl_config: google.protobuf.Struct with fields from\n rv.protos.deeplab.train.proto containing TF Deeplab training configuration\n\n Returns:\n A tuple of two things: (1) a list of arguments suitable for\n starting the training script and (2) an environment in-which\n to start the training script.\n\n \"\"\"\n fields = [\n 'fine_tune_batch_norm',\n 'initialize_last_layer',\n 'last_layers_contain_logits_only',\n 'save_summaries_images',\n 'upsample_logits',\n 'base_learning_rate',\n 'last_layer_gradient_multiplier',\n 'learning_power',\n 'learning_rate_decay_factor',\n 'max_scale_factor',\n 'min_scale_factor',\n 'momentum',\n 'num_clones',\n 'scale_factor_step_size',\n 'slow_start_learning_rate',\n 'weight_decay',\n 'decoder_output_stride',\n 'learning_rate_decay_step',\n 'output_stride',\n 'save_interval_secs',\n 'save_summaries_secs',\n 'slow_start_step',\n 'train_batch_size',\n 'training_number_of_steps',\n 'dataset',\n 'learning_policy',\n 'model_variant',\n 'train_split',\n ]\n\n multi_fields = [\n 'atrous_rates',\n 'train_crop_size',\n ]\n\n env_fields = [\n 'dl_custom_train',\n 'dl_custom_validation',\n ]\n\n args = ['python', train_py]\n\n args.append('--train_logdir={}'.format(train_logdir_local))\n args.append('--tf_initial_checkpoint={}'.format(tfic_ckpt))\n args.append('--dataset_dir={}'.format(dataset_dir_local))\n\n for field in multi_fields:\n for item in tfdl_config.__getattribute__(field):\n args.append('--{}={}'.format(field, item))\n\n for field in fields:\n field_value = tfdl_config.__getattribute__(field)\n if (not type(field_value) is str) or (not len(field_value) == 0):\n args.append('--{}={}'.format(field, field_value))\n\n env = os.environ.copy()\n for field in env_fields:\n field_value = tfdl_config.__getattribute__(field)\n log.info('{}={}'.format(field.upper(), field_value))\n env[field.upper()] = str(field_value)\n log.info('DL_CUSTOM_CLASSES={}'.format(num_classes))\n env['DL_CUSTOM_CLASSES'] = str(num_classes)\n\n return (args, env)\n\n\ndef get_export_args(export_py: str, train_logdir_local: str, num_classes: int,\n tfdl_config) 
-> List[str]:\n \"\"\"Generate the array of arguments needed to run the export script.\n\n Args:\n export_py: The URI of the export script.\n train_logdir_local: The directory in-which checkpoints will\n be placed.\n num_classes: The number of classes.\n tfdl_config: google.protobuf.Struct with fields from\n rv.protos.deeplab.train.proto containing TF Deeplab training configuration\n\n Returns:\n A list of arguments suitable for starting the training\n script.\n \"\"\"\n\n fields = [\n 'decoder_output_stride',\n 'output_stride',\n 'model_variant',\n ]\n\n args = ['python', export_py]\n\n args.append('--checkpoint_path={}'.format(\n get_latest_checkpoint(train_logdir_local)))\n args.append('--export_path={}'.format(\n join(train_logdir_local, FROZEN_INFERENCE_GRAPH)))\n args.append('--num_classes={}'.format(num_classes))\n\n for field in fields:\n field_value = tfdl_config.__getattribute__(field)\n args.append('--{}={}'.format(field, field_value))\n\n for item in tfdl_config.__getattribute__('atrous_rates'):\n args.append('--{}={}'.format('atrous_rates', item))\n\n for item in tfdl_config.__getattribute__('train_crop_size'):\n args.append('--{}={}'.format('crop_size', item))\n\n return args\n\n\nclass TFDeeplab(Backend):\n \"\"\"Backend-derived type that implements the TensorFlow DeepLab\n backend.\n\n \"\"\"\n\n def __init__(self, backend_config, task_config):\n \"\"\"Constructor.\n\n Args:\n backend_config: rv.backend.TFDeeplabConfig\n task_config: rv.task.SemanticSegmentationConfig\n \"\"\"\n self.sess = None\n self.backend_config = backend_config\n self.task_config = task_config\n self.class_map = task_config.class_map\n\n def process_scene_data(self, scene: Scene, data: TrainingData,\n tmp_dir: str) -> str:\n \"\"\"Process the given scene and data into a TFRecord file specifically\n associated with that file.\n\n Args:\n scene: The scene data (labels stores, the raster sources,\n and so on).\n data: The training data.\n tmp_dir: (str) temporary directory to use\n Returns:\n The local path to the generated file.\n \"\"\"\n # Currently TF Deeplab can only handle uint8\n if scene.raster_source.get_dtype() != np.uint8:\n raise Exception('Cannot use {} backend for imagery that does '\n 'not have data type uint8. 
'\n 'Use the StatsAnalyzer and StatsTransformer '\n 'to turn the raster data into uint8 data'.format(\n rv.TF_DEEPLAB))\n\n tf_examples = make_tf_examples(data, self.class_map)\n\n base_uri = self.backend_config.training_data_uri\n split = '{}-{}'.format(scene.id, uuid.uuid4())\n record_path = join(base_uri, '{}.record'.format(split))\n record_path = get_local_path(record_path, tmp_dir)\n\n make_dir(record_path, use_dirname=True)\n write_tf_record(tf_examples, record_path)\n\n return record_path\n\n def process_sceneset_results(self, training_results: List[str],\n validation_results: List[str],\n tmp_dir: str) -> None:\n \"\"\"Merge TFRecord files from individual scenes into two at-large files\n (one for training data and one for validation data).\n\n Args:\n training_results: A list of paths to TFRecords containing\n training data.\n validation_results: A list of paths to TFRecords\n containing validation data.\n tmp_dir: (str) temporary directory to use\n Returns:\n None\n\n \"\"\"\n base_uri = self.backend_config.training_data_uri\n training_record_path = get_record_uri(base_uri, TRAIN)\n training_record_path_local = get_local_path(training_record_path,\n tmp_dir)\n validation_record_path = get_record_uri(base_uri, VALIDATION)\n validation_record_path_local = get_local_path(validation_record_path,\n tmp_dir)\n\n make_dir(training_record_path_local, use_dirname=True)\n make_dir(validation_record_path_local, use_dirname=True) # sic\n merge_tf_records(training_record_path_local, training_results)\n merge_tf_records(validation_record_path_local, validation_results)\n upload_or_copy(training_record_path_local, training_record_path)\n upload_or_copy(validation_record_path_local, validation_record_path)\n\n if self.backend_config.debug:\n training_zip_path = join(base_uri, '{}'.format(TRAIN))\n training_zip_path_local = get_local_path(training_zip_path,\n tmp_dir)\n validation_zip_path = join(base_uri, '{}'.format(VALIDATION))\n validation_zip_path_local = get_local_path(validation_zip_path,\n tmp_dir)\n\n training_debug_dir = join(tmp_dir, 'training-debug')\n make_debug_images(\n training_record_path_local, training_debug_dir, self.class_map,\n self.task_config.chip_options.debug_chip_probability)\n shutil.make_archive(training_zip_path_local, 'zip',\n training_debug_dir)\n\n validation_debug_dir = join(tmp_dir, 'validation-debug')\n make_debug_images(\n validation_record_path_local, validation_debug_dir,\n self.class_map,\n self.task_config.chip_options.debug_chip_probability)\n shutil.make_archive(validation_zip_path_local, 'zip',\n validation_debug_dir)\n\n upload_or_copy('{}.zip'.format(training_zip_path_local),\n '{}.zip'.format(training_zip_path))\n upload_or_copy('{}.zip'.format(validation_zip_path_local),\n '{}.zip'.format(validation_zip_path))\n\n def train(self, tmp_dir: str) -> None:\n \"\"\"Train a DeepLab model the task and backend config.\n\n Args:\n tmp_dir: (str) temporary directory to use\n\n Returns:\n None\n \"\"\"\n train_py = self.backend_config.script_locations.train_py\n export_py = self.backend_config.script_locations.export_py\n\n # Setup local input and output directories\n log.info('Setting up local input and output directories')\n train_logdir = self.backend_config.training_output_uri\n train_logdir_local = get_local_path(train_logdir, tmp_dir)\n dataset_dir = self.backend_config.training_data_uri\n dataset_dir_local = get_local_path(dataset_dir, tmp_dir)\n make_dir(tmp_dir)\n make_dir(train_logdir_local)\n make_dir(dataset_dir_local)\n\n # Download training data\n 
log.info('Downloading training data')\n download_if_needed(get_record_uri(dataset_dir, TRAIN), tmp_dir)\n\n # Download and untar initial checkpoint.\n log.info('Downloading and untarring initial checkpoint')\n tf_initial_checkpoints_uri = self.backend_config.pretrained_model_uri\n download_if_needed(tf_initial_checkpoints_uri, tmp_dir)\n tfic_tarball = get_local_path(tf_initial_checkpoints_uri, tmp_dir)\n tfic_dir = os.path.dirname(tfic_tarball)\n with tarfile.open(tfic_tarball, 'r:gz') as tar:\n tar.extractall(tfic_dir)\n tfic_ckpt = glob.glob('{}/*/*.index'.format(tfic_dir))[0]\n tfic_ckpt = tfic_ckpt[0:-len('.index')]\n\n # Restart support\n train_restart_dir = self.backend_config.train_options.train_restart_dir\n if type(train_restart_dir) is not str or len(train_restart_dir) == 0:\n train_restart_dir = train_logdir\n\n # Get output from potential previous run so we can resume training.\n if type(train_restart_dir) is str and len(\n train_restart_dir\n ) > 0 and not self.backend_config.train_options.replace_model:\n sync_from_dir(train_restart_dir, train_logdir_local)\n else:\n if self.backend_config.train_options.replace_model:\n if os.path.exists(train_logdir_local):\n shutil.rmtree(train_logdir_local)\n make_dir(train_logdir_local)\n\n # Periodically synchronize with remote\n sync = start_sync(\n train_logdir_local,\n train_logdir,\n sync_interval=self.backend_config.train_options.sync_interval)\n\n with sync:\n # Setup TFDL config\n tfdl_config = json_format.ParseDict(\n self.backend_config.tfdl_config, TrainingParametersMsg())\n log.info('tfdl_config={}'.format(tfdl_config))\n log.info('Training steps={}'.format(\n tfdl_config.training_number_of_steps))\n\n # Additional training options\n max_class = max(\n list(map(lambda c: c.id, self.class_map.get_items())))\n num_classes = len(self.class_map.get_items())\n num_classes = max(max_class, num_classes) + 1\n (train_args, train_env) = get_training_args(\n train_py, train_logdir_local, tfic_ckpt, dataset_dir_local,\n num_classes, tfdl_config)\n\n # Start training\n log.info('Starting training process')\n train_process = Popen(train_args, env=train_env)\n terminate_at_exit(train_process)\n\n if self.backend_config.train_options.do_monitoring:\n # Start tensorboard\n log.info('Starting tensorboard process')\n tensorboard_process = Popen(\n ['tensorboard', '--logdir={}'.format(train_logdir_local)])\n terminate_at_exit(tensorboard_process)\n\n # Wait for training and tensorboard\n log.info('Waiting for training and tensorboard processes')\n train_process.wait()\n if self.backend_config.train_options.do_monitoring:\n tensorboard_process.terminate()\n\n # Export frozen graph\n log.info(\n 'Exporting frozen graph ({}/model)'.format(train_logdir_local))\n export_args = get_export_args(export_py, train_logdir_local,\n num_classes, tfdl_config)\n export_process = Popen(export_args)\n terminate_at_exit(export_process)\n export_process.wait()\n\n # Package up the model files for usage as fine tuning checkpoints\n fine_tune_checkpoint_name = self.backend_config.fine_tune_checkpoint_name\n latest_checkpoints = get_latest_checkpoint(train_logdir_local)\n model_checkpoint_files = glob.glob(\n '{}*'.format(latest_checkpoints))\n inference_graph_path = os.path.join(train_logdir_local, 'model')\n\n with RVConfig.get_tmp_dir() as tmp_dir:\n model_dir = os.path.join(tmp_dir, fine_tune_checkpoint_name)\n make_dir(model_dir)\n model_tar = os.path.join(\n train_logdir_local,\n '{}.tar.gz'.format(fine_tune_checkpoint_name))\n 
shutil.copy(inference_graph_path,\n '{}/frozen_inference_graph.pb'.format(model_dir))\n for path in model_checkpoint_files:\n shutil.copy(path, model_dir)\n with tarfile.open(model_tar, 'w:gz') as tar:\n tar.add(model_dir, arcname=os.path.basename(model_dir))\n\n # Perform final sync\n sync_to_dir(train_logdir_local, train_logdir, delete=False)\n\n def load_model(self, tmp_dir: str):\n \"\"\"Load the model in preparation for one or more prediction calls.\n\n Args:\n tmp_dir: (str) temporary directory to use\n \"\"\"\n # noqa Courtesy of https://github.com/tensorflow/models/blob/cbbb2ffcde66e646d4a47628ffe2ece2322b64e8/research/deeplab/deeplab_demo.ipynb\n import tensorflow as tf\n if self.sess is None:\n FROZEN_GRAPH_NAME = download_if_needed(\n self.backend_config.model_uri, tmp_dir)\n graph = tf.Graph()\n with open(FROZEN_GRAPH_NAME, 'rb') as data:\n graph_def = tf.GraphDef.FromString(data.read())\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n self.sess = tf.Session(graph=graph)\n\n def predict(self, chips: np.ndarray, windows: List[Box],\n tmp_dir: str) -> SemanticSegmentationLabels:\n \"\"\"Predict using an already-trained DeepLab model.\n\n Args:\n chips: An np.ndarray containing the image data.\n windows: A list of windows corresponding to the respective\n training chips.\n tmp_dir: (str) temporary directory to use\n Returns:\n A list of Box × np.ndarray pairs.\n\n \"\"\"\n self.load_model(tmp_dir)\n labels = SemanticSegmentationLabels()\n\n # Feeding in one chip at a time because the model doesn't seem to\n # accept > 1.\n # TODO fix this\n for ind, window in enumerate(windows):\n class_labels = self.sess.run(\n OUTPUT_TENSOR_NAME,\n feed_dict={INPUT_TENSOR_NAME: [chips[ind]]})[0]\n labels.add_label_pair(window, class_labels)\n\n return labels\n",
"import numpy as np\n\nfrom typing import List\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as packed pixels.\n\n \"\"\"\n label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n"
] |
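The sliding-window logic in the SemanticSegmentation task above (training defaults to a stride of half the chip size; prediction uses a stride equal to the chip size) reduces to a grid walk over the extent. A minimal standalone sketch, using plain (ymin, xmin, ymax, xmax) tuples in place of rastervision's Box and ignoring edge clipping, which Box.get_windows may handle differently:

    import itertools

    def sliding_windows(height, width, chip_size, stride):
        # Walk a regular grid of top-left corners; each tuple stands in
        # for a Box(ymin, xmin, ymax, xmax).
        return [(y, x, y + chip_size, x + chip_size)
                for y, x in itertools.product(range(0, height, stride),
                                              range(0, width, stride))]

    # Prediction-style windows: stride == chip_size, no overlap.
    print(len(sliding_windows(1000, 1000, 300, 300)))   # 16
    # Training-style windows: stride == chip_size // 2, 50% overlap.
    print(len(sliding_windows(1000, 1000, 300, 150)))   # 49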
[
[
"numpy.array",
"numpy.random.rand",
"tensorflow.python_io.tf_record_iterator",
"numpy.vectorize",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.import_graph_def",
"tensorflow.python_io.TFRecordWriter",
"numpy.bitwise_and",
"tensorflow.train.Example",
"tensorflow.train.Example.FromString"
],
[
"numpy.random.rand"
]
] |
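get_latest_checkpoint in the TF DeepLab backend above selects the newest model.ckpt-*.meta file by modification time and strips the suffix. The same idea as a standalone sketch; logdir is a hypothetical local directory that TensorFlow has already written checkpoints into:

    import glob
    import os
    from os.path import join

    def latest_checkpoint(logdir: str) -> str:
        # Every checkpoint leaves a model.ckpt-NNNN.meta file; the newest
        # one by mtime names the most recent checkpoint.
        metas = glob.glob(join(logdir, 'model.ckpt-*.meta'))
        newest = max(metas, key=os.path.getmtime)
        return newest[:-len('.meta')]   # e.g. '<logdir>/model.ckpt-30000'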
gab50000/OpenAI
|
[
"c842631a88a72256349916548153c8b388584f17"
] |
[
"cartpole/manuell.py"
] |
[
"from collections import deque\nimport random\n\nimport gym\n\nimport numpy as np\nimport tensorflow as tf\nimport daiquiri\n\n\ndaiquiri.setup(level=daiquiri.logging.DEBUG)\nlogger = daiquiri.getLogger(__name__)\n\n\nRENDER = False\n\n\nclass Neuro:\n def __init__(self):\n\n n_in = 4\n n_hidden = 4\n n_out = 2\n\n initializer = tf.contrib.layers.variance_scaling_initializer()\n self.input_ = tf.placeholder(dtype=tf.float32, shape=[None, n_in])\n hidden = tf.layers.dense(self.input_, n_hidden, activation=tf.nn.tanh)\n self.probs = tf.nn.softmax(tf.layers.dense(hidden, 2))\n self.gradients = tf.gradients(self.probs[0, 0], tf.trainable_variables())\n\n def run(self, input_arr):\n return self.sess.run(self.probs, feed_dict={self.input_: input_arr})\n\n @property\n def sess(self):\n if not hasattr(self, \"_sess\"):\n self._sess = tf.get_default_session()\n return self._sess\n\n def update_gradients(self, states, actions, scores):\n scores = np.array(scores)\n scores = (scores - scores.mean()) / scores.std()\n training_step = 0.005\n sess = self.sess\n logger.debug(\"Calculating gradients\")\n grads = [sess.run(self.gradients, feed_dict={self.input_: state[None, :]})\n for state in states]\n \n weights = tf.trainable_variables()\n logger.debug(\"Calculating weight updates\")\n score_signs = np.sign(scores)\n for grad, action, score in zip(grads, actions, scores):\n sign = 1 - (2 * action)\n weights = [w + score * training_step * g \n for w, g in zip(weights, grad)]\n logger.debug(\"Apply weight updates\")\n sess.run([tf.assign(w, update) for w, update in zip(tf.trainable_variables(), weights)])\n logger.debug(\"New weights: %s\", sess.run(tf.trainable_variables()))\n\n\ndef calculate_mean_gradient(states, actions, input_, dp0dW):\n grads = [g.eval(feed_dict={input_: np.vstack(states[:-1])}) for g, v in dp0dW]\n import ipdb; ipdb.set_trace()\n\n\ndef evaluate_game(states, actions, immediate_rewards):\n dr = discounted_reward(immediate_rewards)\n return [(state, action, r) for state, action, r in zip(states, actions, dr)]\n\n\ndef discounted_reward(immediate_rewards, discount_factor=0.9):\n discounted_arr = np.zeros(len(immediate_rewards))\n discounted_arr[-1] = immediate_rewards[-1]\n for i in reversed(range(discounted_arr.size - 1)):\n discounted_arr[i] = immediate_rewards[i] + discount_factor * discounted_arr[i+1]\n return discounted_arr\n\n\ndef main():\n no_of_training_episodes = 1_000_000\n no_of_training_steps = 1000\n update_frequency = 10\n reward_gameover = -5\n reward_stillplaying = 1\n neuro = Neuro()\n init = tf.global_variables_initializer() \n\n env = gym.make('CartPole-v0')\n\n sess = tf.InteractiveSession()\n init.run()\n evaluations = deque(maxlen=1000)\n step_counter = 0\n for training_episode in range(1, no_of_training_episodes + 1):\n logger.info(\"Training episode %i\", training_episode)\n obs = env.reset()\n states, actions, immediate_rewards = [obs], [], []\n\n for training_step in range(no_of_training_steps):\n step_counter += 1\n if RENDER:\n env.render()\n # pos, vel, angle, angle_vel = obs\n # step = 1 if angle_vel >= 0 else 0\n step = np.argmax(neuro.run(obs[None, :]))\n actions.append(step)\n obs, reward, done, info = env.step(step)\n states.append(obs)\n if done:\n immediate_rewards.append(reward)\n evalu = evaluate_game(states, actions, immediate_rewards)\n evaluations += evalu\n break\n immediate_rewards.append(reward)\n\n if training_episode % update_frequency == 0:\n # logger.info(\"Average number of training steps: %f\", len(evaluations) / training_episode)\n 
logger.info(\"Steps in this episode: %f\", step_counter / update_frequency)\n neuro.update_gradients(*zip(*evaluations))\n step_counter = 0\n\nif __name__ == \"__main__\":\n main()\n"
] |
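The discounted_reward recurrence in the cartpole script above propagates reward backwards through an episode; copied out so it runs standalone, with a small worked example:

    import numpy as np

    def discounted_reward(immediate_rewards, discount_factor=0.9):
        discounted = np.zeros(len(immediate_rewards))
        discounted[-1] = immediate_rewards[-1]
        # Work backwards: each step earns its own reward plus the
        # discounted value of everything that follows.
        for i in reversed(range(discounted.size - 1)):
            discounted[i] = immediate_rewards[i] + discount_factor * discounted[i + 1]
        return discounted

    print(discounted_reward([1.0, 1.0, 1.0]))   # [2.71 1.9  1.  ]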
[
[
"tensorflow.trainable_variables",
"numpy.array",
"tensorflow.get_default_session",
"tensorflow.assign",
"tensorflow.contrib.layers.variance_scaling_initializer",
"numpy.sign",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"tensorflow.global_variables_initializer",
"numpy.vstack",
"tensorflow.InteractiveSession"
]
] |
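The script above picks actions with np.argmax over the policy's softmax output, which makes the policy deterministic; classic REINFORCE instead samples an action from those probabilities. A sketch of the sampling variant (probs is a made-up softmax output, not taken from the file):

    import numpy as np

    rng = np.random.default_rng(0)
    probs = np.array([0.7, 0.3])               # hypothetical softmax output
    action = rng.choice(len(probs), p=probs)   # stochastic instead of argmax
    print(action)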
pierresegonne/ncp
|
[
"2decbf7dbf2125353be6f5f030c5bce12beadefd",
"2decbf7dbf2125353be6f5f030c5bce12beadefd"
] |
[
"ncp/scripts/toy_regression.py",
"ncp/scripts/flights_active.py"
] |
[
"import argparse\nimport itertools\nimport os\nimport warnings\n\nimport matplotlib as mpl\n\nmpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport ruamel.yaml as yaml\nimport tensorflow.compat.v1 as tf\n\ntf.disable_v2_behavior()\n\nfrom ncp import datasets, models, tools\n\n# NOTE\n# Possible datasets\nVARGRAD = \"vargrad\"\nOURS = \"ours\"\n\n\ndef default_schedule(model):\n config = tools.AttrDict()\n config.num_epochs = 5000\n _range = range(0, config.num_epochs + 1, 500)\n config.eval_after_epochs = _range\n config.log_after_epochs = _range\n config.visualize_after_epochs = _range\n config.batch_size = 32\n config.filetype = \"pdf\"\n config.save_model = True\n config.record_tensorboard = False\n if model == \"det\":\n config.has_uncertainty = False\n return config\n\n\ndef default_config(model):\n config = tools.AttrDict()\n config.num_inputs = 1\n config.layer_sizes = [200, 200]\n if model == \"bbb\":\n config.divergence_scale = 0.1\n if model == \"bbb_ncp\":\n config.noise_std = 1.5\n config.ncp_scale = 1.5\n config.divergence_scale = 0\n config.ood_std_prior = 0.1\n config.center_at_target = True\n if model == \"det_mix_ncp\":\n config.noise_std = 0.5\n config.center_at_target = True\n config.learning_rate = 3e-4 # 3e-4\n config.weight_std = 0.1\n config.clip_gradient = 100\n return config\n\n\ndef plot_results(args):\n load_results = lambda x: tools.load_results(\n os.path.join(args.logdir + \"/\" + args.dataset, x) + \"-*/*.npz\"\n )\n results = [\n (\"BBB+NCP\", load_results(\"bbb_ncp\")),\n (\"ODC+NCP\", load_results(\"det_mix_ncp\")),\n (\"BBB\", load_results(\"bbb\")),\n (\"Det\", load_results(\"det\")),\n ]\n tools.pretty_print_results(results)\n fig, ax = plt.subplots(ncols=4, figsize=(8, 2))\n for a in ax:\n a.xaxis.set_major_locator(plt.MaxNLocator(5))\n a.yaxis.set_major_locator(plt.MaxNLocator(5))\n tools.plot_distance(ax[0], results, \"train_distances\", {})\n ax[0].set_xlabel(\"Epochs\")\n ax[0].set_title(\"Train RMSE\")\n tools.plot_likelihood(ax[1], results, \"train_likelihoods\", {})\n ax[1].set_xlabel(\"Epochs\")\n ax[1].set_title(\"Train NLPD\")\n tools.plot_distance(ax[2], results, \"test_distances\", {})\n ax[2].set_xlabel(\"Epochs\")\n ax[2].set_title(\"Test RMSE\")\n tools.plot_likelihood(ax[3], results, \"test_likelihoods\", {})\n ax[3].set_xlabel(\"Epochs\")\n ax[3].set_title(\"Test NLPD\")\n ax[3].legend(frameon=False, labelspacing=0.2, borderpad=0)\n fig.tight_layout(pad=0, w_pad=0.5)\n filename = os.path.join(args.logdir, \"results.pdf\")\n fig.savefig(filename)\n\n\ndef main(args):\n if args.replot:\n plot_results(args)\n return\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning) # TensorFlow.\n # NOTE\n # Here we define the models\n # We only want to experiment against *_ncp\n models_ = [\n (\"bbb\", models.bbb.define_graph),\n # (\"det\", models.det.define_graph),\n (\"bbb_ncp\", models.bbb_ncp.define_graph),\n # ('det_mix_ncp', models.det_mix_ncp.define_graph),\n ]\n assert args.dataset in [VARGRAD, OURS]\n if args.dataset == VARGRAD:\n dataset = datasets.generate_vargrad_dataset()\n elif args.dataset == OURS:\n dataset = datasets.generate_toy_ours_dataset()\n experiments = itertools.product(range(args.seeds), models_)\n for seed, (model, define_graph) in experiments:\n schedule = globals()[args.schedule](model)\n config = globals()[args.config](model)\n logdir = os.path.join(\n f\"{args.logdir}/{args.dataset}\", \"{}-{}\".format(model, seed)\n )\n tf.gfile.MakeDirs(logdir)\n if os.path.exists(os.path.join(logdir, 
\"metrics.npz\")):\n if args.resume:\n continue\n elif args.save_outputs_for_aistats_plot:\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n tools.save_outputs_for_aistats_plot(\n logdir, define_graph(config), dataset, **schedule, seed=seed\n )\n break\n raise RuntimeError(\"The log directory is not empty.\")\n with open(os.path.join(logdir, \"schedule.yaml\"), \"w\") as file_:\n yaml.dump(schedule.copy(), file_)\n with open(os.path.join(logdir, \"config.yaml\"), \"w\") as file_:\n yaml.dump(config.copy(), file_)\n message = \"\\n{0}\\n# Model {1} seed {2}\\n{0}\"\n print(message.format(\"#\" * 79, model, seed))\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n graph = define_graph(config)\n tools.run_experiment(logdir, graph, dataset, **schedule, seed=seed)\n plot_results(args)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--schedule\", default=\"default_schedule\")\n parser.add_argument(\"--config\", default=\"default_config\")\n parser.add_argument(\"--logdir\", required=True)\n parser.add_argument(\"--seeds\", type=int, default=5)\n parser.add_argument(\"--resume\", action=\"store_true\", default=False)\n parser.add_argument(\"--replot\", action=\"store_true\", default=False)\n parser.add_argument(\n \"--save_outputs_for_aistats_plot\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--dataset\", default=VARGRAD, choices=[VARGRAD, OURS])\n args = parser.parse_args()\n args.logdir = os.path.expanduser(args.logdir)\n main(args)\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport itertools\nimport os\nimport warnings\n\nimport matplotlib as mpl\n\nmpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport ruamel.yaml as yaml\nimport tensorflow as tf\n\nfrom ncp import datasets, models, tools\n\n\ndef default_schedule(model):\n config = tools.AttrDict()\n config.num_epochs = 2000\n config.num_initial = 10\n config.num_select = 10\n config.select_after_epochs = range(50, 2000, 50)\n config.eval_after_epochs = range(0, 2000, 50)\n config.log_after_epochs = range(0, 2000, 500)\n config.visualize_after_epochs = range(50, 2000, 500)\n config.batch_size = 10\n config.temperature = 0.5\n config.filetype = \"png\"\n if model == \"det\":\n config.has_uncertainty = False\n return config\n\n\ndef default_config(model):\n config = tools.AttrDict()\n config.num_inputs = 8\n config.layer_sizes = [50, 50]\n if model == \"bbb\":\n config.divergence_scale = 1.0\n if model == \"bbb_ncp\":\n config.noise_std = 0.1\n config.ncp_scale = 0.1\n config.divergence_scale = 0\n config.ood_std_prior = None\n config.center_at_target = True\n if model == \"det_mix_ncp\":\n config.noise_std = 0.1\n config.center_at_target = True\n config.learning_rate = 1e-4\n config.weight_std = 0.1\n config.clip_gradient = 100.0\n return config\n\n\ndef plot_results(args):\n load_results = lambda x: tools.load_results(\n os.path.join(args.logdir, x) + \"-*/*.npz\"\n )\n results = [\n (\"BBB+NCP\", load_results(\"bbb_ncp\")),\n (\"ODC+NCP\", load_results(\"det_mix_ncp\")),\n (\"BBB\", load_results(\"bbb\")),\n (\"Det\", load_results(\"det\")),\n ]\n fig, ax = plt.subplots(ncols=4, figsize=(8, 2))\n tools.plot_distance(ax[0], results, \"train_distances\", {})\n ax[0].set_xlabel(\"Data points seen\")\n ax[0].set_title(\"Train RMSE\")\n ax[0].set_ylim(15, 40)\n tools.plot_likelihood(ax[1], results, \"train_likelihoods\", {})\n ax[1].set_xlabel(\"Data points seen\")\n ax[1].set_title(\"Train NLPD\")\n ax[1].set_ylim(3.2, 5.0)\n tools.plot_distance(ax[2], results, \"test_distances\", {})\n ax[2].set_xlabel(\"Data points seen\")\n ax[2].set_title(\"Test RMSE\")\n ax[2].set_ylim(29.5, 32)\n tools.plot_likelihood(ax[3], results, \"test_likelihoods\", {})\n ax[3].set_xlabel(\"Data points seen\")\n ax[3].set_title(\"Test NLPD\")\n ax[3].set_ylim(4.6, 5.6)\n ax[3].legend(frameon=False, labelspacing=0.2, borderpad=0)\n fig.tight_layout(pad=0, w_pad=0.5)\n filename = os.path.join(args.logdir, \"results.pdf\")\n fig.savefig(filename)\n\n\ndef main(args):\n if args.replot:\n plot_results(args)\n return\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning) # TensorFlow.\n dataset = datasets.load_numpy_dataset(\n args.dataset, args.train_amount, args.test_amount\n )\n models_ = [\n (\"bbb_ncp\", models.bbb_ncp.define_graph),\n (\"det_mix_ncp\", models.det_mix_ncp.define_graph),\n (\"bbb\", models.bbb.define_graph),\n (\"det\", models.det.define_graph),\n ]\n experiments = itertools.product(range(args.seeds), 
models_)\n for seed, (model, define_graph) in experiments:\n schedule = globals()[args.schedule](model)\n config = globals()[args.config](model)\n logdir = os.path.join(args.logdir, \"{}-{}\".format(model, seed))\n tf.gfile.MakeDirs(logdir)\n if os.path.exists(os.path.join(logdir, \"metrics.npz\")):\n if args.resume:\n continue\n raise RuntimeError(\"The log directory is not empty.\")\n with open(os.path.join(logdir, \"schedule.yaml\"), \"w\") as file_:\n yaml.dump(schedule.copy(), file_)\n with open(os.path.join(logdir, \"config.yaml\"), \"w\") as file_:\n yaml.dump(config.copy(), file_)\n message = \"\\n{0}\\n# Model {1} seed {2}\\n{0}\"\n print(message.format(\"#\" * 79, model, seed))\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n graph = define_graph(config)\n tools.run_experiment(logdir, graph, dataset, **schedule, seed=seed)\n plot_results(args)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--schedule\", default=\"default_schedule\")\n parser.add_argument(\"--config\", default=\"default_config\")\n parser.add_argument(\"--logdir\", required=True)\n parser.add_argument(\"--dataset\", required=True)\n parser.add_argument(\"--seeds\", type=int, default=5)\n parser.add_argument(\"--train_amount\", type=int)\n parser.add_argument(\"--test_amount\", type=int)\n parser.add_argument(\"--resume\", action=\"store_true\", default=False)\n parser.add_argument(\"--replot\", action=\"store_true\", default=False)\n args = parser.parse_args()\n args.logdir = os.path.expanduser(args.logdir)\n main(args)\n"
] |
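Both ncp scripts above enumerate runs as the Cartesian product of seeds and models and name each log directory '{model}-{seed}'. A minimal sketch of that grid, with placeholder entries instead of the real define_graph functions:

    import itertools
    import os

    models_ = [('bbb_ncp', None), ('det_mix_ncp', None),
               ('bbb', None), ('det', None)]   # (name, define_graph) placeholders
    seeds = 2

    for seed, (model, define_graph) in itertools.product(range(seeds), models_):
        logdir = os.path.join('logs', '{}-{}'.format(model, seed))
        print(logdir)   # logs/bbb_ncp-0, logs/det_mix_ncp-0, ..., logs/det-1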
[
[
"matplotlib.use",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.disable_v2_behavior",
"matplotlib.pyplot.MaxNLocator",
"matplotlib.pyplot.subplots",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.compat.v1.set_random_seed"
],
[
"matplotlib.use",
"tensorflow.set_random_seed",
"tensorflow.reset_default_graph",
"matplotlib.pyplot.subplots",
"tensorflow.gfile.MakeDirs"
]
] |
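The configs above are built with tools.AttrDict, whose implementation is not shown here. A minimal stand-in (an assumption, not ncp's actual class) that supports the attribute-style assignment used in default_config:

    class AttrDict(dict):
        # Attribute access delegates straight to the dict entries
        # (a missing key raises KeyError rather than AttributeError).
        __getattr__ = dict.__getitem__
        __setattr__ = dict.__setitem__

    config = AttrDict()
    config.num_epochs = 2000
    config.layer_sizes = [50, 50]
    print(config['num_epochs'], config.layer_sizes)   # 2000 [50, 50]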
JFChi/Understanding-and-Mitigating-Accuracy-Disparity-in-Regression
|
[
"30bd1c82a5ccbcb84a23eb12da9071596796283b"
] |
[
"utils.py"
] |
[
"\nimport logging\nimport sys\n\nimport numpy as np\nimport torch\n\ndef get_logger(filename):\n\t# Logging configuration: set the basic configuration of the logging system\n\tlog_formatter = logging.Formatter(fmt='%(asctime)s [%(processName)s, %(process)s] [%(levelname)-5.5s] %(message)s',\n\t\t\t\t\t\t\t\t\t datefmt='%m-%d %H:%M')\n\tlogger = logging.getLogger()\n\tlogger.setLevel(logging.DEBUG)\n\t# File logger\n\tfile_handler = logging.FileHandler(\"{}.log\".format(filename))\n\tfile_handler.setFormatter(log_formatter)\n\tfile_handler.setLevel(logging.DEBUG)\n\tlogger.addHandler(file_handler)\n\t# Stderr logger\n\tstd_handler = logging.StreamHandler(sys.stdout)\n\tstd_handler.setFormatter(log_formatter)\n\tstd_handler.setLevel(logging.DEBUG)\n\tlogger.addHandler(std_handler)\n\treturn logger\n\n\n# def conditional_errors(preds, labels, attrs):\n# \"\"\"\n# Compute the conditional errors of A = 0/1. All the arguments need to be one-dimensional vectors.\n# :param preds: The predicted label given by a model.\n# :param labels: The groundtruth label.\n# :param attrs: The label of sensitive attribute.\n# :return: Overall classification error, error | A = 0, error | A = 1.\n# \"\"\"\n# assert preds.shape == labels.shape and labels.shape == attrs.shape\n# cls_error = 1 - np.mean(preds == labels)\n# idx = attrs == 0\n# error_0 = 1 - np.mean(preds[idx] == labels[idx])\n# error_1 = 1 - np.mean(preds[~idx] == labels[~idx])\n# return cls_error, error_0, error_1\n\ndef conditional_mse_errors(preds, labels, attrs):\n\t\"\"\"\n\tCompute the conditional errors of A = 0/1. All the arguments need to be one-dimensional vectors.\n\t:param preds: The predicted label given by a model.\n\t:param labels: The groundtruth label.\n\t:param attrs: The label of sensitive attribute.\n\t:return: Overall classification error, error | A = 0, error | A = 1.\n\t\"\"\"\n\tassert preds.shape == labels.shape\n\tcls_error = np.mean((preds-labels)**2)\n\tidx = attrs == 0\n\terror_0 = np.mean((preds[idx]-labels[idx])**2)\n\terror_1 = np.mean((preds[~idx]-labels[~idx])**2)\n\treturn cls_error, error_0, error_1\n\n# MMD unbiasd distance\n# code adapted from https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/statistics_diff.py\nclass MMDStatistic:\n\tr\"\"\"The *unbiased* MMD test of :cite:`gretton2012kernel`.\n\n\tThe kernel used is equal to:\n\n\t.. 
math ::\n\t\tk(x, x') = \\sum_{j=1}^k e^{-\\alpha_j\\|x - x'\\|^2},\n\n\tfor the :math:`\\alpha_j` proved in :py:meth:`~.MMDStatistic.__call__`..\"\"\"\n\n\tdef __init__(self, alphas, kernel_name=\"gaussian\"):\n\t\tself.alphas = alphas\n\t\tself.kernel_name = kernel_name\n\t\tassert kernel_name in [\"gaussian\", \"laplacian\"]\n\n\tdef __call__(self, sample_1, sample_2, ret_matrix=False):\n\t\tr\"\"\"\n\t\tArguments\n\t\t---------\n\t\tsample_1: :class:`torch:torch.autograd.Variable`\n\t\t\tThe first sample, of size ``(n_1, d)``.\n\t\tsample_2: variable of shape (n_2, d)\n\t\t\tThe second sample, of size ``(n_2, d)``.\n\t\talphas : list of :class:`float`\n\t\t\tThe kernel parameters.\n\t\tret_matrix: bool\n\t\t\tIf set, the call with also return a second variable.\n\n\t\t\tThis variable can be then used to compute a p-value using\n\t\t\t:py:meth:`~.MMDStatistic.pval`.\n\n\t\tReturns\n\t\t-------\n\t\t:class:`float`\n\t\t\tThe test statistic.\n\t\t:class:`torch:torch.autograd.Variable`\n\t\t\tReturned only if ``ret_matrix`` was set to true.\"\"\"\n\n\t\tself.n_1 = sample_1.shape[0]\n\t\tself.n_2 = sample_2.shape[0]\n\n\t\t# The three constants used in the test.\n\t\tself.a00 = 1. / (self.n_1 * (self.n_1 - 1))\n\t\tself.a11 = 1. / (self.n_2 * (self.n_2 - 1))\n\t\tself.a01 = - 1. / (self.n_1 * self.n_2)\n\n\n\t\tsample_12 = torch.cat((sample_1, sample_2), 0)\n\t\tif self.kernel_name == \"gaussian\":\n\t\t\tdistances = pdist(sample_12, sample_12, norm=2)\n\t\telif self.kernel_name == \"laplacian\":\n\t\t\tdistances = pdist(sample_12, sample_12, norm=1)\n\t\telse:\n\t\t\traise NotImplementedError\n\n\t\tkernels = None\n\t\tfor alpha in self.alphas:\n\t\t\t# For single kernel\n\t\t\tif self.kernel_name == \"gaussian\":\n\t\t\t\tkernels_a = torch.exp(- alpha * distances ** 2)\n\t\t\telif self.kernel_name == \"laplacian\":\n\t\t\t\tkernels_a = torch.exp(- alpha * distances)\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\t\t\t# For multiple kernel, append kernel\n\t\t\tif kernels is None:\n\t\t\t\tkernels = kernels_a\n\t\t\telse:\n\t\t\t\tkernels = kernels + kernels_a\n\n\t\tk_1 = kernels[:self.n_1, :self.n_1]\n\t\tk_2 = kernels[self.n_1:, self.n_1:]\n\t\tk_12 = kernels[:self.n_1, self.n_1:]\n\n\t\tmmd = (2 * self.a01 * k_12.sum() +\n\t\t\t self.a00 * (k_1.sum() - torch.trace(k_1)) +\n\t\t\t self.a11 * (k_2.sum() - torch.trace(k_2)))\n\t\tif ret_matrix:\n\t\t\treturn mmd, kernels\n\t\telse:\n\t\t\treturn mmd\n\ndef pdist(sample_1, sample_2, norm=2, eps=1e-9):\n\tr\"\"\"Compute the matrix of all squared pairwise distances.\n\n\tArguments\n\t---------\n\tsample_1 : torch.Tensor or Variable\n\t\tThe first sample, should be of shape ``(n_1, d)``.\n\tsample_2 : torch.Tensor or Variable\n\t\tThe second sample, should be of shape ``(n_2, d)``.\n\tnorm : float\n\t\tThe l_p norm to be used.\n\n\tReturns\n\t-------\n\ttorch.Tensor or Variable\n\t\tMatrix of shape (n_1, n_2). 
The [i, j]-th entry is equal to\n\t\t``|| sample_1[i, :] - sample_2[j, :] ||_p``.\"\"\"\n\tn_1, n_2 = sample_1.size(0), sample_2.size(0)\n\tnorm = float(norm)\n\tif norm == 2.:\n\t\tnorms_1 = torch.sum(sample_1**2, dim=1, keepdim=True)\n\t\tnorms_2 = torch.sum(sample_2**2, dim=1, keepdim=True)\n\t\tnorms = (norms_1.expand(n_1, n_2) +\n\t\t\t\t norms_2.transpose(0, 1).expand(n_1, n_2))\n\t\tdistances_squared = norms - 2 * sample_1.mm(sample_2.t())\n\n\t\t### test shape ####\n\t\t# print(\"In pdist\")\n\t\t# print(norms_1)\n\t\t# print(norms_2)\n\t\t# print(norms_1.expand(n_1, n_2))\n\t\t# print(norms_2.transpose(0, 1).expand(n_1, n_2))\n\t\t# print(norms_1.shape, norms_2.shape, norms.shape)\n\t\t# print(distances_squared)\n\t\t# print(distances_squared.shape)\n\t\t###################\n\n\t\treturn torch.sqrt(eps + torch.abs(distances_squared))\n\telse:\n\t\tdim = sample_1.size(1)\n\t\texpanded_1 = sample_1.unsqueeze(1).expand(n_1, n_2, dim)\n\t\texpanded_2 = sample_2.unsqueeze(0).expand(n_1, n_2, dim)\n\t\tdifferences = torch.abs(expanded_1 - expanded_2) ** norm\n\t\tinner = torch.sum(differences, dim=2, keepdim=False)\n\t\treturn (eps + inner) ** (1. / norm)\n\n# MMD unbiasd distance\n# code adapted from https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/statistics_diff.py\nclass MMDBiasedStatistic:\n\tr\"\"\"The *biased* MMD test of :cite:`gretton2012kernel`.\n\t\"\"\"\n\n\tdef __init__(self, alphas, kernel_name=\"gaussian\"):\n\t\tself.alphas = alphas\n\t\tself.kernel_name = kernel_name\n\t\tassert kernel_name in [\"gaussian\", \"laplacian\"]\n\n\tdef __call__(self, sample_1, sample_2, ret_matrix=False):\n\n\t\tself.n_1 = sample_1.shape[0]\n\t\tself.n_2 = sample_2.shape[0]\n\n\t\t# The three constants used in the test.\n\t\tself.a00 = 1. / (self.n_1 * self.n_1)\n\t\tself.a11 = 1. / (self.n_2 * self.n_2)\n\t\tself.a01 = - 1. 
/ (self.n_1 * self.n_2)\n\n\n\t\tsample_12 = torch.cat((sample_1, sample_2), 0)\n\t\tif self.kernel_name == \"gaussian\":\n\t\t\tdistances = pdist(sample_12, sample_12, norm=2)\n\t\telif self.kernel_name == \"laplacian\":\n\t\t\tdistances = pdist(sample_12, sample_12, norm=1)\n\t\telse:\n\t\t\traise NotImplementedError\n\n\t\tkernels = None\n\t\tfor alpha in self.alphas:\n\t\t\t# For single kernel\n\t\t\tif self.kernel_name == \"gaussian\":\n\t\t\t\tkernels_a = torch.exp(- alpha * distances ** 2)\n\t\t\telif self.kernel_name == \"laplacian\":\n\t\t\t\tkernels_a = torch.exp(- alpha * distances)\n\t\t\telse:\n\t\t\t\traise NotImplementedError\n\t\t\t# For multiple kernel, append kernel\n\t\t\tif kernels is None:\n\t\t\t\tkernels = kernels_a\n\t\t\telse:\n\t\t\t\tkernels = kernels + kernels_a\n\n\t\tk_1 = kernels[:self.n_1, :self.n_1]\n\t\tk_2 = kernels[self.n_1:, self.n_1:]\n\t\tk_12 = kernels[:self.n_1, self.n_1:]\n\n\t\tmmd = (2 * self.a01 * k_12.sum() +\n\t\t\t self.a00 * k_1.sum() +\n\t\t\t self.a11 * k_2.sum())\n\t\tif ret_matrix:\n\t\t\treturn mmd, kernels\n\t\telse:\n\t\t\treturn mmd\n\nif __name__ == \"__main__\":\n\t# test MMD\n\ttorch.manual_seed(42)\n\tx = torch.FloatTensor([[3], [4], [5]])\n\ty = torch.FloatTensor([[1], [2]])\n\t# y = torch.FloatTensor([[1,2,3,4], [3,7,1,6], [3,5,1,6]]) * 0.1\n\n\t#### for gaussian kernel ####\n\tprint(\"test gaussian kernels\")\n\talphas = [1.0] # coefficient of rbf kernel \n\tprint(x)\n\tprint(y)\n\tn1, n2 = x.shape[0], y.shape[0]\n\tprint(\"n1, n2\", n1, n2)\n\tmmd_dist = MMDBiasedStatistic(alphas, kernel_name=\"gaussian\") # MMDStatistic(alphas, kernel_name=\"gaussian\")\n\tmmd, dist_matrix = mmd_dist(x, y, ret_matrix=True)\n\tprint(\"dist_matrix\")\n\tprint(dist_matrix)\n\tprint(\"mmd\", mmd)\n\t############################\n\n\t#### for laplacian kernel ####\n\tprint(\"test laplacian kernels\")\n\talphas = [1.0] # coefficient of laplacian kernel \n\tprint(x)\n\tprint(y)\n\tn1, n2 = x.shape[0], y.shape[0]\n\tprint(\"n1, n2\", n1, n2)\n\tmmd_dist = MMDBiasedStatistic(alphas, kernel_name=\"laplacian\") # MMDStatistic(alphas, kernel_name=\"laplacian\")\n\tmmd, dist_matrix = mmd_dist(x, y, ret_matrix=True)\n\tprint(\"dist_matrix\")\n\tprint(dist_matrix)\n\tprint(\"mmd\", mmd)\n\t############################\n\n\n\n"
] |
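The hand-rolled pdist above uses the expand-and-subtract identity (‖x‖² + ‖y‖² − 2x·y) for the p=2 case; it can be cross-checked against torch.cdist on the same toy vectors the file's __main__ block uses, agreeing up to the eps stabiliser:

    import torch

    x = torch.FloatTensor([[3], [4], [5]])
    y = torch.FloatTensor([[1], [2]])
    # torch.cdist computes the same (n_1, n_2) distance matrix directly.
    print(torch.cdist(x, y, p=2))
    # tensor([[2., 1.],
    #         [3., 2.],
    #         [4., 3.]])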
[
[
"torch.cat",
"torch.trace",
"torch.FloatTensor",
"numpy.mean",
"torch.manual_seed",
"torch.abs",
"torch.exp",
"torch.sum"
]
] |
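The three constants a00, a11, a01 in MMDStatistic implement the unbiased estimator of Gretton et al. (2012); written out, with the trace subtraction in the code removing the i = j diagonal terms:

    \mathrm{MMD}_u^2(X, Y) =
      \frac{1}{n_1(n_1 - 1)} \sum_{i \neq j} k(x_i, x_j)
    + \frac{1}{n_2(n_2 - 1)} \sum_{i \neq j} k(y_i, y_j)
    - \frac{2}{n_1 n_2} \sum_{i, j} k(x_i, y_j)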
pongnguy/CS230_Spring-2020
|
[
"e1f75075da6c25b15fcabefa1eab901e8efc5744"
] |
[
"final_project/bert_pytorch.py"
] |
[
"\"\"\"\nFile to do a baseline BERT model fine-tuning using PyTorch\n\nSee this article https://mccormickml.com/2019/07/22/BERT-fine-tuning/\n\n\nA. Wechselberger, 2020\n\"\"\"\n\nimport tensorflow as tf\n\n# Get the GPU device name.\ndevice_name = tf.test.gpu_device_name()\n\n# The device name should look like the following:\nif device_name == '/device:GPU:0':\n print('Found GPU at: {}'.format(device_name))\nelse:\n raise SystemError('GPU device not found')"
] |
[
[
"tensorflow.test.gpu_device_name"
]
] |
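The file above describes itself as PyTorch fine-tuning but probes the GPU through TensorFlow. The equivalent probe in PyTorch itself, as a sketch rather than part of the original file:

    import torch

    if torch.cuda.is_available():
        print('Found GPU at: {}'.format(torch.cuda.get_device_name(0)))
    else:
        raise SystemError('GPU device not found')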
vskrachkov/master-degree
|
[
"be1c7f67afaf5cecb96b51bb2065eac87fd4cd4e"
] |
[
"src/aco/graphs.py"
] |
[
"import csv\nimport random\nimport itertools\nimport time\nimport functools\nimport copy\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport pants\n\n\nMAX_EDGE_WEIGHT = 25\nMIN_EDGE_WEIGHT = 12\n\n\ndef create_complete_graph(n=1):\n graph = nx.Graph()\n nodes = range(n)\n\n # create nodes\n for node in nodes:\n graph.add_node(node, **{'attr': 122})\n\n # create edges\n for edge in itertools.permutations(nodes, 2):\n graph.add_edge(*edge, weight=random.randint(MIN_EDGE_WEIGHT,\n MAX_EDGE_WEIGHT))\n\n return graph\n\n\ndef plot_weighted_graph(graph):\n pos = nx.spring_layout(graph)\n nx.draw(graph, pos)\n labels = nx.get_edge_attributes(graph, 'weight')\n nx.draw_networkx_edge_labels(graph, pos, edge_labels=labels)\n plt.show()\n\n\nclass Graph(nx.Graph):\n def __init__(self, *args, **kwargs):\n self.__max_val_map = {}\n super().__init__(*args, **kwargs)\n\n @classmethod\n def create_complete(cls, n):\n return cls(create_complete_graph(n=n))\n\n @classmethod\n def load_from_file(cls, file_path):\n return nx.read_gpickle(file_path)\n\n def dump_to_file(self, file_name=None):\n file_name = file_name or f'accets/graphs/{time.time()}.gz'\n nx.write_gpickle(self, file_name)\n return file_name\n\n @classmethod\n def initialize_complete_graph(cls, csv_file_path):\n f = open(csv_file_path)\n rows = csv.reader(f)\n _G = cls()\n\n for row in rows:\n _G.add_node(row[0])\n\n for edge in itertools.permutations(_G.nodes, 2):\n _G.add_edge(*edge)\n\n f.close()\n return _G\n\n def load_own_attrs(self, csv_file_path):\n f = open(csv_file_path)\n rows = csv.reader(f, delimiter=',')\n id_col, *head_row = next(rows)\n for row in rows:\n n = row[0]\n for i, *attrs in enumerate(row[1:]):\n edges = [(n, neighbor) for neighbor in self.neighbors(n)]\n for attr in attrs:\n attr = float(attr)\n for e in edges:\n self.edges[e][head_row[i]] = attr\n _p = self.__max_val_map.get(head_row[i])\n if _p is None or attr > _p:\n self.__max_val_map[head_row[i]] = attr\n f.close()\n\n def load_relative_properties(self, csv_file_path):\n f = open(csv_file_path)\n rows = csv.reader(f, delimiter=',')\n id_col1, id_col2, *head_row = next(rows)\n for row in rows:\n from_n, to_n = row[0], row[1]\n for i, *attrs in enumerate(row[2:]):\n for attr in attrs:\n attr = float(attr)\n self.edges[(from_n, to_n)] [head_row[i]] = attr\n self.edges[(to_n, from_n)] [head_row[i]] = attr\n _p = self.__max_val_map.get(head_row[i])\n if _p is None or attr > _p:\n self.__max_val_map[head_row[i]] = attr\n f.close()\n\n def show_on_plot(self):\n pos = nx.spring_layout(self)\n nx.draw(self, pos)\n labels = nx.get_edge_attributes(self, '__weight__')\n nx.draw_networkx_edge_labels(self, pos, edge_labels=labels)\n nx.draw_networkx_labels(self, pos)\n plt.show()\n\n def calc_weight(self):\n for e in self.edges:\n attrs = copy.copy(self.edges[e])\n attrs.pop('__weight__', None)\n w = []\n for key, attr in attrs.items():\n w.append(attr / self.__max_val_map[key])\n weight = functools.reduce(lambda x, y: x*y, w)\n self.edges[e]['__weight__'] = round(weight, 3)\n\n def get_weight(self, start, end):\n return self.edges[(start, end)].get('__weight__')\n\n def _solve(self):\n world = pants.World(list(self.nodes), self.get_weight)\n solver = pants.Solver()\n return solver, world\n\n def solutions(self):\n solver, world = self._solve()\n return solver.solutions(world)\n\n def solution(self):\n solver, world = self._solve()\n return solver.solve(world)\n\n\nif __name__ == '__main__':\n G = Graph.initialize_complete_graph('accets/csv/machine_list.csv')\n 
G.load_own_attrs('accets/csv/own_attrs.csv')\n G.load_relative_properties('accets/csv/related_attrs.csv')\n G.calc_weight()\n\n # path = G.dump_to_file()\n # G = Graph.load_from_file(path)\n\n print(G.solution().distance)\n for sol in G.solutions():\n print(sol.distance)\n\n G.show_on_plot()\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
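calc_weight above turns every edge attribute into a ratio against that attribute's column maximum and multiplies the ratios into a single edge weight. A worked example with hypothetical attribute values and maxima:

    import functools

    max_val_map = {'cost': 10.0, 'time': 4.0}   # hypothetical column maxima
    edge_attrs = {'cost': 5.0, 'time': 2.0}     # hypothetical edge attributes

    ratios = [edge_attrs[key] / max_val_map[key] for key in edge_attrs]
    weight = functools.reduce(lambda x, y: x * y, ratios)
    print(round(weight, 3))   # 0.5 * 0.5 = 0.25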
Rubtsowa/numba-dppy
|
[
"20f9825b144913ebe1f7635c785b334f3743c4cb"
] |
[
"numba_dppy/examples/sum_ndarray.py"
] |
[
"#! /usr/bin/env python\n# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom _helper import has_cpu, has_gpu\nimport numpy as np\nimport numba_dppy as dppy\nimport dpctl\n\n\[email protected](\n access_types={\"read_only\": [\"a\", \"b\"], \"write_only\": [\"c\"], \"read_write\": []}\n)\ndef data_parallel_sum(a, b, c):\n i = dppy.get_global_id(0)\n c[i] = a[i] + b[i]\n\n\nglobal_size = 64\nlocal_size = 32\nN = global_size * local_size\n\na = np.array(np.random.random(N), dtype=np.float32)\nb = np.array(np.random.random(N), dtype=np.float32)\nc = np.ones_like(a)\n\n\ndef main():\n if has_gpu():\n with dpctl.device_context(\"opencl:gpu\") as queue:\n print(\"Offloading to ...\")\n queue.get_sycl_device().print_device_info()\n print(\"before A: \", a)\n print(\"before B: \", b)\n data_parallel_sum[global_size, local_size](a, b, c)\n print(\"after C: \", c)\n else:\n print(\"Could not find an OpenCL GPU device\")\n\n if has_cpu():\n with dpctl.device_context(\"opencl:cpu\") as queue:\n print(\"Offloading to ...\")\n queue.get_sycl_device().print_device_info()\n print(\"before A: \", a)\n print(\"before B: \", b)\n data_parallel_sum[global_size, local_size](a, b, c)\n print(\"after C: \", c)\n else:\n print(\"Could not find an OpenCL CPU device\")\n\n if has_gpu(\"level_zero\"):\n with dpctl.device_context(\"level_zero:gpu\") as queue:\n print(\"Offloading to ...\")\n queue.get_sycl_device().print_device_info()\n print(\"before A: \", a)\n print(\"before B: \", b)\n data_parallel_sum[global_size, local_size](a, b, c)\n print(\"after C: \", c)\n else:\n print(\"Could not find an Level Zero GPU device\")\n print(\"Done...\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.random.random",
"numpy.ones_like"
]
] |
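The dppy kernel above assigns one work item per index i. Its plain-Python reference semantics, useful for checking an offloaded result against NumPy:

    import numpy as np

    def data_parallel_sum_ref(a, b, c):
        # One loop iteration per work item: c[i] = a[i] + b[i].
        for i in range(len(c)):
            c[i] = a[i] + b[i]

    a = np.random.random(4).astype(np.float32)
    b = np.random.random(4).astype(np.float32)
    c = np.empty_like(a)
    data_parallel_sum_ref(a, b, c)
    np.testing.assert_allclose(c, a + b)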
flegac/deep-experiments
|
[
"e1b12e724f2c8340cbe9c51396cf3f42e3b4e934"
] |
[
"mydeep-lib/tests/test_monitoring.py"
] |
[
"import pandas as pd\n\nfrom mydeep_api.data import Data\nfrom mydeep_api.monitoring.confusion_viewer import ConfusionViewer\nfrom mydeep_api.monitoring.dataset_viewer import DatasetViewer\nfrom mydeep_api.monitoring.history_viewer import HistoryViewer\nfrom surili_core.workspace import Workspace\n\nws = Workspace.from_path('resources')\n\n\ndef test_history_monitoring():\n hist = HistoryViewer.from_path(ws.path_to('training_logs.csv'))\n\n hist.show('loss', 'acc')\n\n\ndef test_dataset_monitoring():\n db = Data.from_folder_tree(x_path=ws.path_to('dataset/folder_tree'))\n DatasetViewer(\n column=db.x,\n label='dataset',\n scale=2\n ).show(n=None)\n\n\ndef test_confusion_viewer():\n expected = pd.DataFrame({\n 'x': [0, 1, 2, 3],\n 'y': [0, 0, 1, 1],\n\n })\n\n predictions = pd.DataFrame({\n 'x': [0, 1, 2, 3],\n 'y': [.1, .5, 1, .3],\n })\n pred = (predictions['y'] > .5).astype(int)\n\n cm = ConfusionViewer(ground_truth=expected['y'], predicted=pred)\n cm.show(normalize=True)\n"
] |
[
[
"pandas.DataFrame"
]
] |
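test_confusion_viewer above thresholds scores at 0.5 before handing them to ConfusionViewer. ConfusionViewer's internals are not shown here, but pandas' crosstab exposes the raw counts such a viewer is built on, using the same data as the test:

    import pandas as pd

    expected = pd.Series([0, 0, 1, 1], name='expected')
    scores = pd.Series([0.1, 0.5, 1.0, 0.3])
    pred = (scores > 0.5).astype(int).rename('predicted')
    print(pd.crosstab(expected, pred))
    # predicted  0  1
    # expected
    # 0          2  0
    # 1          1  1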
humaohai/mars
|
[
"11373f64c3039d424f9276e610ae5ad108ea0eb1"
] |
[
"mars/learn/neighbors/_faiss.py"
] |
[
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport atexit\nimport os\nimport operator\nimport tempfile\nfrom enum import Enum\n\nimport numpy as np\ntry:\n import faiss\nexcept ImportError: # pragma: no cover\n faiss = None\n\nfrom ... import opcodes as OperandDef\nfrom ...context import RunningMode\nfrom ...operands import OperandStage\nfrom ...serialize import KeyField, StringField, Int64Field, \\\n Int32Field, BoolField, Int8Field\nfrom ...tiles import TilesError\nfrom ...tensor import tensor as astensor\nfrom ...tensor.core import TensorOrder\nfrom ...tensor.random import RandomState\nfrom ...tensor.array_utils import as_same_device, device\nfrom ...tensor.utils import check_random_state, gen_random_seeds\nfrom ...utils import check_chunks_unknown_shape, require_not_none, recursive_tile\nfrom ..operands import LearnOperand, LearnOperandMixin, OutputType\n\n\nclass MemoryRequirementGrade(Enum):\n minimum = 0\n low = 1\n high = 2\n maximum = 3\n\n\nif faiss is not None:\n METRIC_TO_FAISS_METRIC_TYPE = {\n 'l2': faiss.METRIC_L2,\n 'euclidean': faiss.METRIC_L2,\n 'innerproduct': faiss.METRIC_INNER_PRODUCT,\n 'cosine': faiss.METRIC_INNER_PRODUCT,\n }\nelse: # pragma: no cover\n METRIC_TO_FAISS_METRIC_TYPE = {}\n\n\n@require_not_none(faiss)\nclass FaissBuildIndex(LearnOperand, LearnOperandMixin):\n _op_type_ = OperandDef.FAISS_BUILD_INDEX\n\n _input = KeyField('input')\n _metric = StringField('metric')\n _faiss_index = StringField('faiss_index')\n _n_sample = Int64Field('n_sample')\n _seed = Int32Field('seed')\n _same_distribution = BoolField('same_distribution')\n _accuracy = BoolField('accuracy')\n _memory_require = Int8Field('memory_require',\n on_serialize=operator.attrgetter('value'),\n on_deserialize=MemoryRequirementGrade)\n # for test purpose, could be 'object', 'filename' or 'bytes'\n _return_index_type = StringField('return_index_type')\n\n def __init__(self, metric=None, faiss_index=None, n_sample=None, seed=None,\n same_distribution=None, return_index_type=None,\n accuracy=None, memory_require=None,\n stage=None, output_types=None, gpu=None, **kw):\n super().__init__(_metric=metric, _faiss_index=faiss_index, _n_sample=n_sample,\n _seed=seed, _same_distribution=same_distribution,\n _return_index_type=return_index_type,\n _accuracy=accuracy, _memory_require=memory_require, _gpu=gpu,\n _stage=stage, _output_types=output_types, **kw)\n if self._output_types is None:\n self._output_types = [OutputType.object]\n\n @property\n def input(self):\n return self._input\n\n @property\n def metric(self):\n return self._metric\n\n @property\n def faiss_metric_type(self):\n return METRIC_TO_FAISS_METRIC_TYPE[self._metric]\n\n @property\n def faiss_index(self):\n return self._faiss_index\n\n @property\n def n_sample(self):\n return self._n_sample\n\n @property\n def seed(self):\n return self._seed\n\n @property\n def same_distribution(self):\n return self._same_distribution\n\n @property\n def accuracy(self):\n return self._accuracy\n\n 
@property\n def memory_require(self):\n return self._memory_require\n\n @property\n def return_index_type(self):\n return self._return_index_type\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, X):\n return self.new_tileable([X])\n\n @classmethod\n def tile(cls, op):\n check_chunks_unknown_shape(op.inputs, TilesError)\n\n in_tensor = astensor(op.input, np.dtype(np.float32))._inplace_tile()\n if op.faiss_index == 'auto':\n faiss_index, n_sample = _gen_index_string_and_sample_count(\n in_tensor.shape, op.n_sample, op.accuracy, op.memory_require,\n gpu=op.gpu, **op.extra_params)\n op._n_sample = n_sample\n else:\n faiss_index, n_sample = op.faiss_index, op.n_sample\n\n if len(in_tensor.chunks) == 1:\n return cls._tile_one_chunk(op, faiss_index, n_sample)\n\n if in_tensor.chunk_shape[1] != 1:\n # make sure axis 1 has 1 chunk\n in_tensor = in_tensor.rechunk({1: in_tensor.shape[1]})._inplace_tile()\n return cls._tile_chunks(op, in_tensor, faiss_index, n_sample)\n\n @classmethod\n def _tile_one_chunk(cls, op, faiss_index, n_sample):\n in_chunk = op.input.chunks[0]\n chunk_op = op.copy().reset_key()\n chunk_op._faiss_index = faiss_index\n chunk_op._n_sample = n_sample\n chunk = chunk_op.new_chunk([in_chunk], index=in_chunk.index)\n\n new_op = op.copy()\n kw = op.outputs[0].params\n kw['chunks'] = [chunk]\n kw['nsplits'] = ((1,),)\n return new_op.new_tileables(op.inputs, kws=[kw])\n\n @classmethod\n def _tile_chunks(cls, op, in_tensor, faiss_index, n_sample):\n \"\"\"\n If the distribution on each chunk is the same,\n refer to:\n https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-distribute-index-building-on-several-machines\n\n 1. train an IndexIVF* on a representative sample of the data, store it.\n 2. for each node, load the trained index, add the local data to it, store the resulting populated index\n 3. on a central node, load all the populated indexes and merge them.\n \"\"\"\n faiss_index_ = faiss.index_factory(in_tensor.shape[1], faiss_index,\n op.faiss_metric_type)\n # Training on sample data when two conditions meet\n # 1. the index type requires for training, e.g. Flat does not require\n # 2. 
distributions of chunks are the same, in not,\n # train separately on each chunk data\n need_sample_train = not faiss_index_.is_trained and op.same_distribution\n\n train_chunk = None\n if need_sample_train:\n # sample data to train\n rs = RandomState(op.seed)\n sampled_index = rs.choice(in_tensor.shape[0], size=n_sample,\n replace=False, chunk_size=n_sample)\n sample_tensor = recursive_tile(in_tensor[sampled_index])\n assert len(sample_tensor.chunks) == 1\n sample_chunk = sample_tensor.chunks[0]\n train_op = FaissTrainSampledIndex(faiss_index=faiss_index, metric=op.metric,\n return_index_type=op.return_index_type)\n train_chunk = train_op.new_chunk([sample_chunk])\n elif op.gpu: # pragma: no cover\n # if not need train, and on gpu, just merge data together to train\n in_tensor = in_tensor.rechunk(in_tensor.shape)._inplace_tile()\n\n # build index for each input chunk\n build_index_chunks = []\n for i, chunk in enumerate(in_tensor.chunks):\n build_index_op = op.copy().reset_key()\n build_index_op._stage = OperandStage.map\n build_index_op._faiss_index = faiss_index\n if train_chunk is not None:\n build_index_chunk = build_index_op.new_chunk(\n [chunk, train_chunk], index=(i,))\n else:\n build_index_chunk = build_index_op.new_chunk([chunk], index=(i,))\n build_index_chunks.append(build_index_chunk)\n\n out_chunks = []\n if need_sample_train:\n assert op.n_sample is not None\n # merge all indices into one, do only when trained on sample data\n out_chunk_op = op.copy().reset_key()\n out_chunk_op._faiss_index = faiss_index\n out_chunk_op._stage = OperandStage.agg\n out_chunk = out_chunk_op.new_chunk(build_index_chunks, index=(0,))\n out_chunks.append(out_chunk)\n else:\n out_chunks.extend(build_index_chunks)\n\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, chunks=out_chunks,\n nsplits=((len(out_chunks),),))\n\n @classmethod\n def _execute_one_chunk(cls, ctx, op):\n (inp,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n # create index\n index = faiss.index_factory(inp.shape[1], op.faiss_index,\n op.faiss_metric_type)\n # GPU\n if device_id >= 0: # pragma: no cover\n index = _index_to_gpu(index, device_id)\n\n # train index\n if not index.is_trained:\n assert op.n_sample is not None\n sample_indices = xp.random.choice(inp.shape[0],\n size=op.n_sample, replace=False)\n sampled = inp[sample_indices]\n index.train(sampled)\n\n if op.metric == 'cosine':\n # faiss does not support cosine distances directly,\n # data needs to be normalize before adding to index,\n # refer to:\n # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance\n faiss.normalize_L2(inp)\n # add vectors to index\n if device_id >= 0: # pragma: no cover\n # gpu\n inp = inp.astype(np.float32, copy=False)\n index.add_c(inp.shape[0], _swig_ptr_from_cupy_float32_array(inp))\n else:\n index.add(inp)\n\n ctx[op.outputs[0].key] = _store_index(ctx, op, index, device_id)\n\n @classmethod\n def _execute_map(cls, ctx, op):\n (data,), device_id, _ = as_same_device(\n [ctx[op.inputs[0].key]], device=op.device, ret_extra=True)\n index = ctx[op.inputs[1].key] if len(op.inputs) == 2 else None\n\n with device(device_id):\n if index is not None:\n # fetch the trained index\n trained_index = _load_index(ctx, op, index, device_id)\n return_index_type = _get_index_type(op.return_index_type, ctx)\n if return_index_type == 'object':\n # clone a new one,\n # because faiss does not ensure thread-safe for operations that 
change index\n # https://github.com/facebookresearch/faiss/wiki/Threads-and-asynchronous-calls#thread-safety\n trained_index = faiss.clone_index(trained_index)\n else:\n trained_index = faiss.index_factory(data.shape[1], op.faiss_index,\n op.faiss_metric_type)\n if op.same_distribution:\n # no need to train, just create index\n pass\n else:\n # distribution no the same, train on each chunk\n trained_index.train(data)\n\n if device_id >= 0: # pragma: no cover\n trained_index = _index_to_gpu(trained_index, device_id)\n if op.metric == 'cosine':\n # faiss does not support cosine distances directly,\n # data needs to be normalize before adding to index,\n # refer to:\n # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance\n faiss.normalize_L2(data)\n\n # add data into index\n if device_id >= 0: # pragma: no cover\n # gpu\n trained_index.add_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data))\n else:\n trained_index.add(data)\n\n ctx[op.outputs[0].key] = _store_index(ctx, op, trained_index, device_id)\n\n @classmethod\n def _execute_agg(cls, ctx, op):\n device_id = op.device\n if device_id is None:\n device_id = -1\n inputs = [ctx[inp.key] for inp in op.inputs]\n\n with device(device_id):\n merged_index = None\n indexes = []\n for index in inputs:\n index = _load_index(ctx, op, index, device_id)\n indexes.append(index)\n assert hasattr(index, 'merge_from')\n if merged_index is None:\n merged_index = index\n else:\n merged_index.merge_from(index, index.ntotal)\n\n ctx[op.outputs[0].key] = _store_index(ctx, op, merged_index, device_id)\n\n @classmethod\n def execute(cls, ctx, op):\n if op.stage == OperandStage.map:\n cls._execute_map(ctx, op)\n elif op.stage == OperandStage.agg:\n cls._execute_agg(ctx, op)\n else:\n assert op.stage is None\n cls._execute_one_chunk(ctx, op)\n\n\ndef _get_index_type(return_index_type, ctx):\n if return_index_type is None: # pragma: no cover\n if ctx.running_mode == RunningMode.local:\n return_index_type = 'object'\n elif ctx.running_mode == RunningMode.local_cluster:\n return_index_type = 'filename'\n else:\n return_index_type = 'bytes'\n return return_index_type\n\n\ndef _store_index(ctx, op, index, device_id):\n return_index_type = _get_index_type(op.return_index_type, ctx)\n\n if return_index_type == 'object':\n # no need to serialize\n return index\n elif return_index_type == 'filename':\n # save to file, then return filename\n if device_id >= 0: # pragma: no cover\n # for gpu, convert to cpu first\n index = faiss.index_gpu_to_cpu(index)\n fn = tempfile.mkstemp('.index', prefix='faiss_')[1]\n faiss.write_index(index, fn)\n\n atexit.register(lambda: os.remove(fn))\n\n return fn\n else:\n if device_id >= 0: # pragma: no cover\n # for gpu, convert to cpu first\n index = faiss.index_gpu_to_cpu(index)\n # distributed, save to file, then return in memory bytes\n fn = tempfile.mkstemp('.index', prefix='faiss_')[1]\n faiss.write_index(index, fn)\n try:\n with open(fn, 'rb') as f:\n return f.read()\n finally:\n os.remove(fn)\n\n\ndef _load_index(ctx, op, index, device_id):\n return_index_type = _get_index_type(op.return_index_type, ctx)\n\n if return_index_type == 'object':\n # local\n return index\n elif return_index_type == 'filename':\n # local cluster\n return faiss.read_index(index)\n else:\n # distributed\n fn = tempfile.mkstemp('.index', prefix='faiss_')[1]\n with open(fn, 'wb') as f:\n f.write(index)\n index = faiss.read_index(f.name)\n if device_id >= 0: # pragma: no cover\n index = _index_to_gpu(index, 
device_id)\n return index\n\n\ndef _index_to_gpu(index, device_id): # pragma: no cover\n res = faiss.StandardGpuResources()\n return faiss.index_cpu_to_gpu(res, device_id, index)\n\n\ndef _swig_ptr_from_cupy_float32_array(x): # pragma: no cover\n assert x.flags.c_contiguous\n assert x.dtype == np.float32\n data_ptr = x.__cuda_array_interface__['data'][0]\n return faiss.cast_integer_to_float_ptr(data_ptr)\n\n\ndef _swig_ptr_from_cupy_int64_array(x): # pragma: no cover\n assert x.flags.c_contiguous\n assert x.dtype == np.int64\n data_ptr = x.__cuda_array_interface__['data'][0]\n return faiss.cast_integer_to_long_ptr(data_ptr)\n\n\n@require_not_none(faiss)\nclass FaissTrainSampledIndex(LearnOperand, LearnOperandMixin):\n _op_type_ = OperandDef.FAISS_TRAIN_SAMPLED_INDEX\n\n _input = KeyField('input')\n _metric = StringField('metric')\n _faiss_index = StringField('faiss_index')\n # for test purpose, could be 'object', 'filename' or 'bytes'\n _return_index_type = StringField('return_index_type')\n\n def __init__(self, faiss_index=None, metric=None,\n return_index_type=None, output_types=None, **kw):\n super().__init__(_faiss_index=faiss_index, _metric=metric,\n _return_index_type=return_index_type,\n _output_types=output_types, **kw)\n if self._output_types is None:\n self._output_types = [OutputType.object]\n\n @property\n def input(self):\n return self._input\n\n @property\n def metric(self):\n return self._metric\n\n @property\n def faiss_metric_type(self):\n return METRIC_TO_FAISS_METRIC_TYPE[self.metric]\n\n @property\n def faiss_index(self):\n return self._faiss_index\n\n @property\n def return_index_type(self):\n return self._return_index_type\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n @classmethod\n def execute(cls, ctx, op):\n (data,), device_id, _ = as_same_device(\n [ctx[op.input.key]], device=op.device, ret_extra=True)\n\n with device(device_id):\n index = faiss.index_factory(data.shape[1], op.faiss_index,\n op.faiss_metric_type)\n\n if device_id >= 0: # pragma: no cover\n # GPU\n index = _index_to_gpu(index, device_id)\n index.train_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data))\n else:\n index.train(data)\n\n ctx[op.outputs[0].key] = _store_index(\n ctx, op, index, device_id)\n\n\ndef _gen_index_string_and_sample_count(shape, n_sample, accuracy, memory_require, gpu=False, **kw):\n \"\"\"\n Generate index string and sample count according to guidance of faiss:\n https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index\n \"\"\"\n size, dim = shape\n memory_require = _get_memory_require(memory_require)\n\n if accuracy or size < 10 ** 5:\n # Flat is the only index that guarantees exact results\n # no need to train, thus sample count is None\n return 'Flat', None\n\n if memory_require == MemoryRequirementGrade.maximum and not gpu:\n x = kw.get('M', 32) # get medium number by default\n if x < 4 or x > 64:\n raise ValueError('HNSWx requires M that between 4 and 64, '\n 'got {}'.format(x))\n return 'HNSW%d' % x, None\n\n if memory_require in (MemoryRequirementGrade.high, MemoryRequirementGrade.maximum):\n basement = '{},Flat'\n elif memory_require == MemoryRequirementGrade.low:\n x = kw.get('dim', dim // 2)\n basement = 'PCAR%d,{},SQ8' % x\n elif memory_require == MemoryRequirementGrade.minimum:\n x = kw.get('M', min(64, dim // 2))\n if x > 64:\n raise ValueError('PQx requires M <= 64, got {}'.format(x))\n y = kw.get('dim', None)\n if y is not None and y % x != 0:\n raise ValueError('OPQx_y requires 
dim is a multiple of M({}), '\n 'got dim: {}'.format(x, y))\n y = min(dim, 4 * x)\n y = x * (y // x) # make sure y is a multiple of x\n basement = 'OPQ%(x)d_%(y)d,{},PQ%(x)d' % {'x': x, 'y': y}\n else: # pragma: no cover\n raise ValueError('unknown memory require')\n\n # now choose the clustering options\n if size < 10 ** 6 or (size < 10 ** 7 and gpu):\n # < 1M, or <10M but need GPU\n k = kw.get('k', 5 * int(np.sqrt(size)))\n if k < 4 * int(np.sqrt(size)) or k > 16 * int(np.sqrt(size)):\n raise ValueError('k should be between 4 * sqrt(N) and 16 * sqrt(N), '\n 'got {}'.format(k))\n index_str = basement.format('IVF%d' % k)\n if n_sample is None:\n # 30 * k - 256 * k\n n_sample = min(30 * k, size)\n elif size < 10 ** 7 and not gpu:\n # 1M - 10M\n index_str = basement.format('IVF65536_HNSW32')\n if n_sample is None:\n # between 30 * 65536 and 256 * 65536\n n_sample = 32 * 65536\n elif size < 10 ** 8:\n index_str = basement.format('IVF65536_HNSW32')\n n_sample = 64 * 65536 if n_sample is None else n_sample\n else:\n index_str = basement.format('IVF1048576_HNSW32')\n n_sample = 64 * 65536 if n_sample is None else n_sample\n\n return index_str, n_sample\n\n\ndef _get_memory_require(memory_require):\n if isinstance(memory_require, str):\n return getattr(MemoryRequirementGrade, memory_require)\n elif isinstance(memory_require, MemoryRequirementGrade):\n return memory_require\n return MemoryRequirementGrade(memory_require)\n\n\n@require_not_none(faiss)\ndef build_faiss_index(X, index_name='auto', n_sample=None, metric=\"euclidean\",\n random_state=None, same_distribution=True,\n accuracy=False, memory_require=None, **kw):\n X = astensor(X)\n\n if metric not in METRIC_TO_FAISS_METRIC_TYPE:\n raise ValueError('unknown metric: {}'.format(metric))\n if index_name != 'auto':\n try:\n faiss.index_factory(X.shape[1], index_name,\n METRIC_TO_FAISS_METRIC_TYPE[metric])\n except RuntimeError:\n raise ValueError('illegal faiss index: {}'.format(index_name))\n\n rs = check_random_state(random_state)\n if isinstance(rs, RandomState):\n rs = rs.to_numpy()\n seed = gen_random_seeds(1, rs)[0]\n if memory_require is None:\n memory_require = MemoryRequirementGrade.low\n else:\n memory_require = _get_memory_require(memory_require)\n op = FaissBuildIndex(faiss_index=index_name, metric=metric,\n n_sample=n_sample, gpu=X.op.gpu, seed=seed,\n same_distribution=same_distribution,\n accuracy=accuracy, memory_require=memory_require, **kw)\n return op(X)\n\n\nclass FaissQuery(LearnOperand, LearnOperandMixin):\n _op_type_ = OperandDef.FAISS_QUERY\n\n _input = KeyField('input')\n _faiss_index = KeyField('faiss_index')\n _metric = StringField('metric')\n _n_neighbors = Int32Field('n_neighbors')\n _return_distance = BoolField('return_distance')\n _nprobe = Int64Field('nprobe')\n # for test purpose, could be 'object', 'filename' or 'bytes'\n _return_index_type = StringField('return_index_type')\n\n def __init__(self, faiss_index=None, metric=None, n_neighbors=None,\n return_distance=None, return_index_type=None,\n nprobe=None, output_types=None, gpu=None, **kw):\n super().__init__(_faiss_index=faiss_index, _n_neighbors=n_neighbors, _metric=metric,\n _return_distance=return_distance, _output_types=output_types,\n _nprobe=nprobe, _return_index_type=return_index_type, _gpu=gpu, **kw)\n if self._output_types is None:\n self._output_types = [OutputType.tensor] * self.output_limit\n\n @property\n def input(self):\n return self._input\n\n @property\n def faiss_index(self):\n return self._faiss_index\n\n @property\n def metric(self):\n 
return self._metric\n\n @property\n def n_neighbors(self):\n return self._n_neighbors\n\n @property\n def nprobe(self):\n return self._nprobe\n\n @property\n def return_distance(self):\n return self._return_distance\n\n @property\n def return_index_type(self):\n return self._return_index_type\n\n @property\n def output_limit(self):\n return 2 if self._return_distance else 1\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n if self._faiss_index is not None:\n self._faiss_index = self._inputs[1]\n\n def __call__(self, y):\n kws = []\n if self._return_distance:\n kws.append({'shape': (y.shape[0], self._n_neighbors),\n 'dtype': np.dtype(np.float32),\n 'order': TensorOrder.C_ORDER,\n 'type': 'distance'})\n kws.append({\n 'shape': (y.shape[0], self._n_neighbors),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'type': 'indices'\n })\n return self.new_tileables([y, self._faiss_index], kws=kws)\n\n @classmethod\n def tile(cls, op):\n in_tensor = astensor(op.input)\n\n if in_tensor.chunk_shape[1] != 1:\n check_chunks_unknown_shape([in_tensor], TilesError)\n in_tensor = in_tensor.rechunk({1: in_tensor.shape[1]})._inplace_tile()\n\n out_chunks = [], []\n for chunk in in_tensor.chunks:\n chunk_op = op.copy().reset_key()\n chunk_kws = []\n if op.return_distance:\n chunk_kws.append({\n 'shape': (chunk.shape[0], op.n_neighbors),\n 'dtype': np.dtype(np.float32),\n 'order': TensorOrder.C_ORDER,\n 'index': chunk.index,\n 'type': 'distance'\n })\n chunk_kws.append({\n 'shape': (chunk.shape[0], op.n_neighbors),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'index': chunk.index,\n 'type': 'indices'\n })\n in_chunks = [chunk]\n in_chunks.extend(op.faiss_index.chunks)\n chunks = chunk_op.new_chunks(in_chunks, kws=chunk_kws)\n if op.return_distance:\n out_chunks[0].append(chunks[0])\n out_chunks[1].append(chunks[-1])\n\n new_op = op.copy()\n kws = [out.params for out in op.outputs]\n if op.return_distance:\n kws[0]['chunks'] = out_chunks[0]\n kws[0]['nsplits'] = (in_tensor.nsplits[0], (op.n_neighbors,))\n kws[-1]['chunks'] = out_chunks[1]\n kws[-1]['nsplits'] = (in_tensor.nsplits[0], (op.n_neighbors,))\n return new_op.new_tileables(op.inputs, kws=kws)\n\n @classmethod\n def execute(cls, ctx, op):\n (y,), device_id, xp = as_same_device(\n [ctx[op.input.key]], device=op.device, ret_extra=True)\n indexes = [_load_index(ctx, op, ctx[index.key], device_id)\n for index in op.inputs[1:]]\n\n with device(device_id):\n y = xp.ascontiguousarray(y, dtype=np.float32)\n\n if len(indexes) == 1:\n index = indexes[0]\n else:\n index = faiss.IndexShards(indexes[0].d)\n [index.add_shard(ind) for ind in indexes]\n\n if op.metric == 'cosine':\n # faiss does not support cosine distances directly,\n # data needs to be normalize before searching,\n # refer to:\n # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance\n faiss.normalize_L2(y)\n\n if op.nprobe is not None:\n index.nprobe = op.nprobe\n\n if device_id >= 0: # pragma: no cover\n n = y.shape[0]\n k = op.n_neighbors\n distances = xp.empty((n, k), dtype=xp.float32)\n indices = xp.empty((n, k), dtype=xp.int64)\n index.search_c(n, _swig_ptr_from_cupy_float32_array(y),\n k, _swig_ptr_from_cupy_float32_array(distances),\n _swig_ptr_from_cupy_int64_array(indices))\n else:\n distances, indices = index.search(y, op.n_neighbors)\n if op.return_distance:\n if index.metric_type == faiss.METRIC_L2:\n # make it equivalent to `pairwise.euclidean_distances`\n 
distances = xp.sqrt(distances, out=distances)\n elif op.metric == 'cosine':\n # make it equivalent to `pairwise.cosine_distances`\n distances = xp.subtract(1, distances, out=distances)\n ctx[op.outputs[0].key] = distances\n ctx[op.outputs[-1].key] = indices\n\n\n@require_not_none(faiss)\ndef faiss_query(faiss_index, data, n_neighbors, return_distance=True, nprobe=None):\n data = astensor(data)\n op = FaissQuery(faiss_index=faiss_index, n_neighbors=n_neighbors,\n metric=faiss_index.op.metric, return_distance=return_distance,\n return_index_type=faiss_index.op.return_index_type,\n nprobe=nprobe, gpu=data.op.gpu)\n ret = op(data)\n if not return_distance:\n return ret[0]\n return ret\n"
] |
[
[
"numpy.dtype",
"numpy.sqrt"
]
] |
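The Mars faiss operand above leans on a few faiss conventions that are easy to miss inside the tiling code: cosine similarity is emulated by L2-normalizing vectors and searching an inner-product index (hence the `1 - distances` conversion in `FaissQuery.execute`), `METRIC_L2` searches return squared distances (hence the `xp.sqrt`), and indexes are shipped between workers via `write_index`/`read_index`. A minimal standalone sketch of those conventions — illustrative sizes and a `Flat` index string, not the Mars operand itself:

```python
import faiss
import numpy as np

d = 64
xb = np.random.rand(1000, d).astype(np.float32)  # database vectors
xq = np.random.rand(5, d).astype(np.float32)     # query vectors

# cosine: normalize in place, then search an inner-product index
faiss.normalize_L2(xb)
faiss.normalize_L2(xq)
index = faiss.index_factory(d, "Flat", faiss.METRIC_INNER_PRODUCT)
index.add(xb)
sims, ids = index.search(xq, 10)
cos_dist = 1.0 - sims  # same conversion as FaissQuery's return path

# the operand also ships indexes between workers as files or bytes
fn = "/tmp/demo.index"  # illustrative path
faiss.write_index(index, fn)
restored = faiss.read_index(fn)
```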
lj-ecjtu/Faster-RCNN-TensorFlow-Python3-master-RSDDs
|
[
"33371985133c93d9a7a5ef0a8a60a558ccfa1ae2"
] |
[
"lib/nets/network.py"
] |
[
"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import arg_scope\n\nfrom lib.config import config as cfg\nfrom lib.layer_utils.anchor_target_layer import anchor_target_layer\nfrom lib.layer_utils.proposal_layer import proposal_layer\nfrom lib.layer_utils.proposal_target_layer import proposal_target_layer\nfrom lib.layer_utils.proposal_top_layer import proposal_top_layer\nfrom lib.layer_utils.snippets import generate_anchors_pre\n\n\nclass Network(object):\n def __init__(self, batch_size=1):\n self._feat_stride = [16, ]\n self._feat_compress = [1. / 16., ]\n self._batch_size = batch_size\n self._predictions = {}\n self._losses = {}\n self._anchor_targets = {}\n self._proposal_targets = {}\n self._layers = {}\n self._act_summaries = []\n self._score_summaries = {}\n self._train_summaries = []\n self._event_summaries = {}\n self._variables_to_fix = {}\n\n # Summaries #\n def _add_image_summary(self, image, boxes):\n # add back mean\n '''\n tf.stack()这是一个矩阵拼接的函数,tf.unstack()则是一个矩阵分解的函数\n '''\n image += cfg.FLAGS2[\"pixel_means\"]\n # bgr to rgb (opencv uses bgr)\n channels = tf.unstack(image, axis=-1)\n image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)\n # dims for normalization\n width = tf.to_float(tf.shape(image)[2])\n height = tf.to_float(tf.shape(image)[1])\n # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1]\n cols = tf.unstack(boxes, axis=1)\n boxes = tf.stack([cols[1] / height,\n cols[0] / width,\n cols[3] / height,\n cols[2] / width], axis=1)\n # add batch dimension (assume batch_size==1)\n #assert image.get_shape()[0] == 1\n boxes = tf.expand_dims(boxes, dim=0)\n image = tf.image.draw_bounding_boxes(image, boxes) # 在image上画gt_truth\n\n return tf.summary.image('ground_truth', image)\n\n def _add_act_summary(self, tensor):\n tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)\n tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',\n tf.nn.zero_fraction(tensor))\n\n def _add_score_summary(self, key, tensor):\n tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)\n\n def _add_train_summary(self, var):\n tf.summary.histogram('TRAIN/' + var.op.name, var)\n\n # Custom Layers #\n def _reshape_layer(self, bottom, num_dim, name):\n input_shape = tf.shape(bottom)\n with tf.variable_scope(name):\n # change the channel to the caffe format\n # 18个通道[,18,none,none],分别显示得分,前9个为前景得分,后9个为背景得分\n # 第二次[1,2,none,none]\n to_caffe = tf.transpose(bottom, [0, 3, 1, 2])\n # then force it to have channel 2\n #[1,2,none.none],将9个anchor的前景得分和背景得分分开\n # 第二次[1,18,none,none]\n reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))\n # then swap the channel back\n # [1,none,none,2], 第一个none应该为(行*9)\n # 第二次[1,none,none,18]\n to_tf = tf.transpose(reshaped, [0, 2, 3, 1])\n return to_tf\n\n def _softmax_layer(self, bottom, name):\n if name == 'rpn_cls_prob_reshape':\n input_shape = tf.shape(bottom)\n\n # tf.reshape()中-1的应用,-1表示不知道该填什么数字合适的情况下,可以选择,由python通过原数组和其他的值推测出来\n # 每一行是1个anchor的前景、背景得分,先显示所有点产生的第一种anchor,然后是所有点产生的第二种anchor,........\n bottom_reshaped = tf.reshape(bottom, 
[-1, input_shape[-1]])\n reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)\n return tf.reshape(reshaped_score, input_shape) # [1,none,none,2]\n return tf.nn.softmax(bottom, name=name)\n\n def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):\n with tf.variable_scope(name):\n rois, rpn_scores = tf.py_func(proposal_top_layer,\n [rpn_cls_prob, rpn_bbox_pred, self._im_info,\n self._feat_stride, self._anchors, self._num_anchors],\n [tf.float32, tf.float32])\n rois.set_shape([cfg.FLAGS.rpn_top_n, 5])\n rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1])\n\n return rois, rpn_scores\n\n def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):\n with tf.variable_scope(name):\n # 返回的rois中多加了一列0在第一列\n rois, rpn_scores = tf.py_func(proposal_layer,\n [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,\n self._feat_stride, self._anchors, self._num_anchors],\n [tf.float32, tf.float32])\n rois.set_shape([None, 5])\n rpn_scores.set_shape([None, 1])\n\n return rois, rpn_scores\n\n def _crop_pool_layer(self, bottom, rois, name):\n with tf.variable_scope(name):\n # tf.squeeze()返回一个张量,这个张量是将原始input中所有维度中为1的那些维都删掉的结果\n batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name=\"batch_id\"), [1])\n # Get the normalized coordinates of bboxes\n bottom_shape = tf.shape(bottom)\n height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])\n width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])\n # rois除以h,w就得到了rois在特征图上的位置\n x1 = tf.slice(rois, [0, 1], [-1, 1], name=\"x1\") / width\n y1 = tf.slice(rois, [0, 2], [-1, 1], name=\"y1\") / height\n x2 = tf.slice(rois, [0, 3], [-1, 1], name=\"x2\") / width\n y2 = tf.slice(rois, [0, 4], [-1, 1], name=\"y2\") / height\n # Won't be backpropagated to rois anyway, but to save time\n bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))\n # 'roi_pooling_size', 7\n pre_pool_size = cfg.FLAGS.roi_pooling_size * 2\n # 把rois对于的特征图上的部分crop出来,然后resize打破14*14的大小\n crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name=\"crops\")\n\n return slim.max_pool2d(crops, [2, 2], padding='SAME')\n\n def _dropout_layer(self, bottom, name, ratio=0.5):\n return tf.nn.dropout(bottom, ratio, name=name)\n\n def _anchor_target_layer(self, rpn_cls_score, name):\n with tf.variable_scope(name):\n # 这里的index是对于所有anchor而言\n # (1, 1, A * height, width)\n # (1, height, width, A * 4)\n # (1, height, width, A * 4)\n # (1, height, width, A * 4)\n rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(\n anchor_target_layer,\n [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],\n [tf.float32, tf.float32, tf.float32, tf.float32])\n #self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5]) #gt_boxes缩放之后的坐标以及所属类别的标号\n \n rpn_labels.set_shape([1, 1, None, None])\n rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4])\n\n rpn_labels = tf.to_int32(rpn_labels, name=\"to_int32\")\n self._anchor_targets['rpn_labels'] = rpn_labels\n self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets\n self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights\n self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights\n\n self._score_summaries.update(self._anchor_targets)\n\n return rpn_labels\n\n def 
_proposal_target_layer(self, rois, roi_scores, name):\n with tf.variable_scope(name):\n # 这里的index是对于cfg.FLAGS.batch_size=256 而言\n # rois (0, x1, y1, x2, y2),coming from RPN 然后再减少至256个\n # bbox_target (ndarray): N x 4K blob of regression targets\n # bbox_inside_weights (ndarray): N x 4K blob of loss weights\n rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(\n proposal_target_layer,\n [rois, roi_scores, self._gt_boxes, self._num_classes],\n [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])\n\n rois.set_shape([cfg.FLAGS.batch_size, 5])\n roi_scores.set_shape([cfg.FLAGS.batch_size])\n labels.set_shape([cfg.FLAGS.batch_size, 1])\n bbox_targets.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])\n bbox_inside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])\n bbox_outside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])\n\n self._proposal_targets['rois'] = rois\n self._proposal_targets['labels'] = tf.to_int32(labels, name=\"to_int32\")\n self._proposal_targets['bbox_targets'] = bbox_targets\n self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights\n self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights\n\n self._score_summaries.update(self._proposal_targets) #self._score_summaries.update(self._anchor_targets) \n\n return rois, roi_scores\n\n def _anchor_component(self):\n with tf.variable_scope('ANCHOR_' + 'default'):\n # just to get the shape right\n # 根据原始输入图片通过VGG16的conv5_3后,缩小16倍,得到RPN的输入feature map大小\n height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))\n width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))\n\n #得到一张输入图片的所有anchor在原输入image上的坐标,以及anchor的数量\n anchors, anchor_length = tf.py_func(generate_anchors_pre,\n [height, width,\n self._feat_stride, self._anchor_scales, self._anchor_ratios],\n [tf.float32, tf.int32], name=\"generate_anchors\")\n anchors.set_shape([None, 4])\n anchor_length.set_shape([])\n self._anchors = anchors\n self._anchor_length = anchor_length\n\n def build_network(self, sess, is_training=True):\n raise NotImplementedError\n \n # sigma=sigma_rpn=3, dim=[1, 2, 3]\n def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):\n sigma_2 = sigma ** 2\n box_diff = bbox_pred - bbox_targets\n in_box_diff = bbox_inside_weights * box_diff #属于前景的行不为0,其他的行都为0\n abs_in_box_diff = tf.abs(in_box_diff)\n\n # 决定哪些位置是权重是1(包括的本身为0的位置,即非前景),哪些位置权重为0\n smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))\n # Smooth L1函数 (和论文有点不一样)\n in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign + (abs_in_box_diff - (0.5 / sigma_2)) * (1. 
- smoothL1_sign)\n out_loss_box = bbox_outside_weights * in_loss_box\n \n loss_box = tf.reduce_mean(tf.reduce_sum(\n out_loss_box,\n axis=dim\n ))\n return loss_box\n\n def _add_losses(self, sigma_rpn=3.0):\n with tf.variable_scope('loss_' + self._tag):\n # RPN, class loss\n rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])\n rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])\n\n # 得到前景和背景anchor的index\n rpn_select = tf.where(tf.not_equal(rpn_label, -1))\n rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])\n rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])\n\n rpn_cross_entropy = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))\n\n # RPN, bbox loss\n rpn_bbox_pred = self._predictions['rpn_bbox_pred']\n rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']\n rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']\n rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']\n\n rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])\n\n # RCNN, class loss\n cls_score = self._predictions[\"cls_score\"]\n label = tf.reshape(self._proposal_targets[\"labels\"], [-1])\n\n cross_entropy = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=tf.reshape(cls_score, [-1, self._num_classes]), labels=label)) # logits仍然是向量,label只含正确答案\n\n # RCNN, bbox loss\n bbox_pred = self._predictions['bbox_pred']\n bbox_targets = self._proposal_targets['bbox_targets']\n bbox_inside_weights = self._proposal_targets['bbox_inside_weights']\n bbox_outside_weights = self._proposal_targets['bbox_outside_weights']\n\n loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)\n\n self._losses['cross_entropy'] = cross_entropy\n self._losses['loss_box'] = loss_box\n self._losses['rpn_cross_entropy'] = rpn_cross_entropy\n self._losses['rpn_loss_box'] = rpn_loss_box\n\n loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box\n self._losses['total_loss'] = loss\n\n self._event_summaries.update(self._losses)\n\n return loss\n\n def create_architecture(self, sess, mode, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3])\n self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3]) #缩放之后的图片尺寸和缩放的比例\n self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5]) #gt_boxes缩放之后的坐标以及所属类别的标号\n self._tag = tag\n\n self._num_classes = num_classes\n self._mode = mode\n self._anchor_scales = anchor_scales\n self._num_scales = len(anchor_scales)\n\n self._anchor_ratios = anchor_ratios\n self._num_ratios = len(anchor_ratios)\n \n # anchor的种数\n self._num_anchors = self._num_scales * self._num_ratios\n\n training = mode == 'TRAIN'\n testing = mode == 'TEST'\n\n assert tag != None\n\n # handle most of the regularizer here\n weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.FLAGS.weight_decay)\n if cfg.FLAGS.bias_decay:\n biases_regularizer = weights_regularizer\n else:\n biases_regularizer = tf.no_regularizer\n\n # list as many types of layers as possible, even if they are not used now\n \n # slim.arg_scope函数可以用于设置默认的参数取值,第一个参数是一个函数列表,在这个列表中的函数使用默认的参数取值\n # 默认stride=1, padding='SAME', activation_fn=nn.relu\n with arg_scope([slim.conv2d, slim.conv2d_in_plane,\n 
slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],\n weights_regularizer=weights_regularizer,\n biases_regularizer=biases_regularizer,\n biases_initializer=tf.constant_initializer(0.0)):\n rois, cls_prob, bbox_pred = self.build_network(sess, training)\n\n layers_to_output = {'rois': rois}\n layers_to_output.update(self._predictions)\n\n for var in tf.trainable_variables():\n self._train_summaries.append(var)\n\n if mode == 'TEST':\n # FLAGS2[\"bbox_normalize_means\"] = (0.0, 0.0, 0.0, 0.0)\n # FLAGS2[\"bbox_normalize_stds\"] = (0.1, 0.1, 0.1, 0.1)\n stds = np.tile(np.array(cfg.FLAGS2[\"bbox_normalize_stds\"]), (self._num_classes))\n means = np.tile(np.array(cfg.FLAGS2[\"bbox_normalize_means\"]), (self._num_classes))\n self._predictions[\"bbox_pred\"] *= stds\n self._predictions[\"bbox_pred\"] += means\n else:\n self._add_losses()\n layers_to_output.update(self._losses)\n\n val_summaries = [] # 保存添加tf.summary.image和添加self._losses的操作\n with tf.device(\"/cpu:0\"):\n val_summaries.append(self._add_image_summary(self._image, self._gt_boxes))\n for key, var in self._event_summaries.items(): #添加self._losses\n val_summaries.append(tf.summary.scalar(key, var))\n for key, var in self._score_summaries.items(): #self._score_summaries.update(self._anchor_targets) self._score_summaries.update(self._proposal_targets) \n self._add_score_summary(key, var) \n for var in self._act_summaries: # 添加head网络和rpn层\n self._add_act_summary(var)\n '''\n for var in tf.trainable_variables():\n self._train_summaries.append(var)\n '''\n for var in self._train_summaries: #添加tf.trainable_variables(),显示张量分布监控数据随着迭代轮数的变化趋势\n self._add_train_summary(var)\n\n self._summary_op = tf.summary.merge_all() # tf.summary.merge_all()函数来整理所有的日志生成操作\n if not testing:\n self._summary_op_val = tf.summary.merge(val_summaries)\n\n return layers_to_output\n\n def get_variables_to_restore(self, variables, var_keep_dic):\n raise NotImplementedError\n\n def fix_variables(self, sess, pretrained_model):\n raise NotImplementedError\n\n # Extract the head feature maps, for example for vgg16 it is conv5_3\n # only useful during testing mode\n def extract_head(self, sess, image):\n feed_dict = {self._image: image}\n feat = sess.run(self._layers[\"head\"], feed_dict=feed_dict)\n return feat\n\n # only useful during testing mode\n def test_image(self, sess, image, im_info):\n feed_dict = {self._image: image,\n self._im_info: im_info}\n cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions[\"cls_score\"],\n self._predictions['cls_prob'],\n self._predictions['bbox_pred'],\n self._predictions['rois']],\n feed_dict=feed_dict)\n return cls_score, cls_prob, bbox_pred, rois\n\n def get_summary(self, sess, blobs):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n summary = sess.run(self._summary_op_val, feed_dict=feed_dict)\n\n return summary\n\n def get_summary_2(self, sess, blobs):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n summary = sess.run(self._summary_op, feed_dict=feed_dict)\n\n return summary\n\n def train_step(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, _ = sess.run([self._losses[\"rpn_cross_entropy\"],\n self._losses['rpn_loss_box'],\n self._losses['cross_entropy'],\n self._losses['loss_box'],\n self._losses['total_loss'],\n 
train_op],\n feed_dict=feed_dict)\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss\n\n def train_step_with_summary(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary, _ = sess.run([self._losses[\"rpn_cross_entropy\"],\n self._losses['rpn_loss_box'],\n self._losses['cross_entropy'],\n self._losses['loss_box'],\n self._losses['total_loss'],\n self._summary_op,\n train_op],\n feed_dict=feed_dict)\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary\n\n def train_step_no_return(self, sess, blobs, train_op):\n feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],\n self._gt_boxes: blobs['gt_boxes']}\n sess.run([train_op], feed_dict=feed_dict)\n"
] |
[
[
"tensorflow.constant_initializer",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.reshape",
"tensorflow.stack",
"tensorflow.to_float",
"tensorflow.nn.softmax",
"tensorflow.gather",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.less",
"tensorflow.summary.histogram",
"tensorflow.transpose",
"tensorflow.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.abs",
"numpy.array",
"tensorflow.summary.merge",
"tensorflow.expand_dims",
"tensorflow.py_func",
"tensorflow.summary.scalar",
"numpy.float32",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.image.draw_bounding_boxes",
"tensorflow.summary.merge_all",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.unstack",
"tensorflow.nn.zero_fraction",
"tensorflow.to_int32",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.summary.image",
"tensorflow.not_equal",
"tensorflow.device",
"tensorflow.slice",
"tensorflow.pow"
]
] |
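The `_smooth_l1_loss` in the network above is built from TF1 graph ops, which makes the piecewise form hard to read at a glance. A NumPy restatement of the same branch logic — a sketch, not the repo's API; `sigma=3.0` mirrors the RPN default `sigma_rpn`:

```python
import numpy as np

def smooth_l1(pred, target, sigma=3.0):
    # Same piecewise form as the graph code:
    #   (sigma^2 / 2) * x^2     if |x| < 1 / sigma^2
    #   |x| - 0.5 / sigma^2     otherwise
    sigma2 = sigma ** 2
    diff = np.abs(pred - target)
    return np.where(diff < 1.0 / sigma2,
                    0.5 * sigma2 * diff ** 2,
                    diff - 0.5 / sigma2)
```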
vmarceau/jupytext
|
[
"72aa6c4968da714323fbd7a7c548ee4b1274c946"
] |
[
"tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/Notebook_with_more_R_magic_111.py"
] |
[
"# ---\n# jupyter:\n# jupytext:\n# cell_markers: '{{{,}}}'\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# {{{\n# %load_ext rpy2.ipython\nimport pandas as pd\n\ndf = pd.DataFrame(\n {\n \"Letter\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"],\n \"X\": [4, 3, 5, 2, 1, 7, 7, 5, 9],\n \"Y\": [0, 4, 3, 6, 7, 10, 11, 9, 13],\n \"Z\": [1, 2, 3, 1, 2, 3, 1, 2, 3],\n }\n)\n# }}}\n\n# {{{ {\"magic_args\": \"-i df\", \"language\": \"R\"}\n# library(\"ggplot2\")\n# ggplot(data = df) + geom_point(aes(x = X, y = Y, color = Letter, size = Z))\n# }}}\n"
] |
[
[
"pandas.DataFrame"
]
] |
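The mirror file above is what jupytext emits when a notebook is paired with a script using `{{{,}}}` cell markers: magics are commented out and the R cell survives as comment lines with its magic arguments stored in the cell marker. A tiny round-trip sketch with the public `jupytext` API — the percent format here is just one of several targets, and the inline script is illustrative:

```python
import jupytext

text = "# %%\nimport pandas as pd\n\n# %%\nprint('hi')\n"
nb = jupytext.reads(text, fmt="py:percent")          # script -> notebook object
print(len(nb.cells))                                 # 2
round_trip = jupytext.writes(nb, fmt="py:percent")   # notebook -> script text
```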
patrickcgray/florence_mapping
|
[
"99e2d452a0b2d32a75b42a94086c9a7272e8788e"
] |
[
"rcnn/rnn_tiles.py"
] |
[
"import rasterio\nimport numpy as np\nimport random\nimport math\nfrom rasterio.windows import Window\nfrom pyproj import Proj, transform\nfrom rasterio.plot import reshape_as_raster, reshape_as_image\nimport os\nimport sys\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nimport utilities as util\nimport importlib\n\nimportlib.reload(util)\n \n \nclass rnn_tile_gen(): \n def __init__(self, landsat, lc_label, canopy_label, tile_size, class_count):\n self.l8_dict = landsat\n self.lc_label = lc_label\n self.canopy_label = canopy_label\n self.tile_length = tile_size\n self.class_count = class_count\n \n def get_tile_shape(self):\n return (self.tile_length, self.tile_length, self.landsat[0].count)\n \n def final_tile_generator(self, pixel_gdf, batch_size, flatten=False, canopy=False):\n ### this is a keras compatible data generator which generates data and labels on the fly \n ### from a set of pixel locations, a list of image datasets, and a label dataset\n bad_tiles = 0\n tile_size = self.tile_length\n i = 0\n lc_proj = Proj(self.lc_label.crs)\n canopy_proj = Proj(self.canopy_label.crs)\n l8_proj = Proj(self.l8_dict['028012'][0].crs)\n # assuming all images have the same num of bands\n band_count = self.l8_dict['028012'][0].count - 1\n time_steps = len(self.l8_dict['028012'])\n class_count = self.class_count\n buffer = math.floor(tile_size / 2)\n while True:\n lc_batch = np.zeros((batch_size, class_count))\n canopy_batch = np.zeros((batch_size, 1))\n image_batch = np.zeros((batch_size, time_steps, tile_size, tile_size, band_count))\n rnn_image_batch = np.zeros((batch_size, time_steps, band_count))\n b = 0\n while b < batch_size:\n # if we're at the end of the data just restart\n if i >= len(pixel_locations):\n i=0\n r, c = pixel_locations[i][0]\n tile_num = pixel_locations[i][1]\n i += 1\n tiles_to_read = self.l8_dict[tile_num]\n tiles_read = util.read_windows(tiles_to_read, c ,r, buffer, tile_size)\n rnn_tiles_read = util.read_windows(tiles_to_read, c ,r, 0, 1)\n reshaped_tiles = []\n rnn_reshaped_tiles = []\n band_avg = [ 345.72448081, 352.93755735, 319.34257128, 899.39728239,\n 649.46125258, 370.53562465, -1084.8218946 ]\n band_std = [ 661.75737932, 363.32761072, 425.28671553, 959.63442896,\n 838.86193201, 501.96992987, 3562.42995552]\n for index in range(len(tiles_read)):\n rnn_tile = rnn_tiles_read[index][0:7]\n tile = tiles_read[index][0:7]\n reshaped_tile = reshape_as_image(tile).astype(np.float64)\n rnn_reshaped_tile = reshape_as_image(rnn_tile).astype(np.float64)\n #rnn_reshaped_tile = np.divide(np.subtract(rnn_reshaped_tile,band_avg),band_std) \n #reshaped_tile= np.divide(np.subtract(reshaped_tile, band_avg),band_std)\n reshaped_tiles.append(reshaped_tile)\n rnn_reshaped_tiles.append(rnn_reshaped_tile)\n ### get label data\n # find gps of that pixel within the image\n (x, y) = self.l8_dict[tile_num][0].xy(r, c) \n # convert the point we're sampling from to the same projection as the label dataset if necessary\n lc_x,lc_y = x,y\n canopy_x, canopy_y = x,y\n # reference gps in label_image\n lc_row, lc_col = self.lc_label.index(lc_x,lc_y)\n lc_data = self.lc_label.read(1, window=Window(lc_col, lc_row, 1, 1))\n canopy_row, canopy_col = self.canopy_label.index(canopy_x,canopy_y)\n canopy_data = self.canopy_label.read(1, window=Window(canopy_col, canopy_row, 1, 1))\n lc_label = self.one_hot_encode(lc_data, 1, class_count)\n lc_batch[b] = lc_label.reshape(class_count)\n canopy_batch[b] = canopy_data.reshape(1) / 100\n 
rnn_total_tile = np.array((*rnn_reshaped_tiles,))\n rnn_image_batch[b] = rnn_total_tile.reshape((len(tiles_read),7))\n total_tile = np.array((*reshaped_tiles,))\n image_batch[b] = total_tile\n b += 1\n if canopy:\n yield ({\"rnn_input\":rnn_image_batch, \"tile_input\":image_batch}, {'landcover': lc_batch, 'canopy': canopy_batch})\n else: \n yield (image_batch, lc_batch)\n \n def tile_generator(self, pixel_locations, batch_size, flatten=False, canopy=False, normalize=True):\n ### this is a keras compatible data generator which generates data and labels on the fly \n ### from a set of pixel locations, a list of image datasets, and a label dataset\n bad_tiles = 0\n tile_size = self.tile_length\n i = 0\n lc_proj = Proj(self.lc_label.crs)\n canopy_proj = Proj(self.canopy_label.crs)\n l8_proj = Proj(self.l8_dict['028012'][0].crs)\n # assuming all images have the same num of bands\n band_count = self.l8_dict['028012'][0].count - 1\n time_steps = len(self.l8_dict['028012'])\n class_count = self.class_count\n buffer = math.floor(tile_size / 2)\n while True:\n lc_batch = np.zeros((batch_size, class_count))\n canopy_batch = np.zeros((batch_size, 1))\n image_batch = np.zeros((batch_size, time_steps, tile_size, tile_size, band_count))\n rnn_image_batch = np.zeros((batch_size, time_steps, band_count))\n b = 0\n while b < batch_size:\n # if we're at the end of the data just restart\n if i >= len(pixel_locations):\n i=0\n r, c = pixel_locations[i][0]\n tile_num = pixel_locations[i][1]\n #lc_data = pixel_locations[i][2]\n i += 1\n tiles_to_read = self.l8_dict[tile_num]\n tiles_read = util.read_windows(tiles_to_read, c ,r, buffer, tile_size)\n rnn_tiles_read = util.read_windows(tiles_to_read, c ,r, 0, 1)\n reshaped_tiles = []\n rnn_reshaped_tiles = []\n band_avg = [ 366.61408946, 473.45691342, 499.77979682, 1712.39411433,\n 1351.56817468, 746.1391345 , 3994.48731099 ]\n band_std = [ 168.1579065 , 223.37955379, 303.91997082, 1005.7843712 ,\n 919.80111362, 626.10023407, 3846.60259933]\n offset_scale = 0.0001\n for index in range(len(tiles_read)):\n rnn_tile = rnn_tiles_read[index][0:7]\n tile = tiles_read[index][0:7]\n reshaped_tile = reshape_as_image(tile).astype(np.float64)\n rnn_reshaped_tile = reshape_as_image(rnn_tile).astype(np.float64)\n if normalize:\n rnn_reshaped_tile = np.divide(np.subtract(rnn_reshaped_tile,band_avg),band_std) \n reshaped_tile= np.divide(np.subtract(reshaped_tile, band_avg),band_std)\n else:\n rnn_reshaped_tile = rnn_reshaped_tile*offset_scale \n reshaped_tile = reshaped_tile*offset_scale\n reshaped_tiles.append(reshaped_tile)\n rnn_reshaped_tiles.append(rnn_reshaped_tile)\n ### get label data from the image, not doing this because it is in the shapefile\n #find gps of that pixel within the image\n (x, y) = self.l8_dict[tile_num][0].xy(r, c) \n #convert the point we're sampling from to the same projection as the label dataset if necessary\n lc_x,lc_y = x,y\n canopy_x, canopy_y = x,y\n #reference gps in label_image\n lc_row, lc_col = self.lc_label.index(lc_x,lc_y)\n lc_data = self.lc_label.read(1, window=Window(lc_col, lc_row, 1, 1))\n #canopy_row, canopy_col = self.canopy_label.index(canopy_x,canopy_y)\n #canopy_data = self.canopy_label.read(1, window=Window(canopy_col, canopy_row, 1, 1))\n \n lc_label = self.one_hot_encode(lc_data, 1, class_count)\n lc_batch[b] = lc_label.reshape(class_count)\n #canopy_batch[b] = canopy_data.reshape(1) / 100\n rnn_total_tile = np.array((*rnn_reshaped_tiles,))\n rnn_image_batch[b] = rnn_total_tile.reshape((len(tiles_read),7))\n total_tile = 
np.array((*reshaped_tiles,))\n image_batch[b] = total_tile\n b += 1\n if canopy:\n yield ({\"rnn_input\":rnn_image_batch, \"tile_input\":image_batch}, {'landcover': lc_batch})\n else: \n #yield (image_batch, lc_batch)\n yield (image_batch.reshape(batch_size, tile_size, tile_size, -1), lc_batch)\n #yield (image_batch.reshape(batch_size,-1), lc_batch)\n # TODO there is probably an efficient scikit learn fcn for this\n # also merging can be done more efficiently with something like \n # lc_data_merged = np.vectorize(util.class_to_index.get)(lc_data)\n def one_hot_encode(self, data, tile_size, class_count):\n label = np.zeros((tile_size, tile_size, class_count))\n flag = True\n count = 0 \n for i in range(tile_size):\n for j in range(tile_size):\n label_index = util.class_to_index[data[i][j]]\n label[i][j][label_index] = 1\n #label[i][j][data] = 1\n return label \n "
] |
[
[
"numpy.array",
"numpy.subtract",
"numpy.zeros"
]
] |
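The generator's `one_hot_encode` above walks every pixel in a nested Python loop; the file's own TODO comment already points at the vectorized version via `np.vectorize(util.class_to_index.get)`. A sketch of that rewrite — the mapping dict below is illustrative, standing in for the repo's `util.class_to_index`:

```python
import numpy as np

def one_hot_encode(data, class_count, class_to_index):
    # Map raw label values to class indices, then index an identity
    # matrix -- replaces the nested per-pixel loop in rnn_tile_gen.
    idx = np.vectorize(class_to_index.get)(data)
    return np.eye(class_count)[idx]

labels = np.array([[11, 21], [21, 41]])
encoded = one_hot_encode(labels, 3, {11: 0, 21: 1, 41: 2})
print(encoded.shape)  # (2, 2, 3)
```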
halostorm/OpenPCDet
|
[
"9650c9d71e7f03815e925fee6e16909fb219336a"
] |
[
"pcdet/models/backbones_3d/spconv_backbone.py"
] |
[
"import torch.nn as nn\nimport spconv\nfrom functools import partial\n\n\ndef post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,\n conv_type='subm', norm_fn=None):\n if conv_type == 'subm':\n conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)\n elif conv_type == 'spconv':\n conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,\n bias=False, indice_key=indice_key)\n elif conv_type == 'inverseconv':\n conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)\n else:\n raise NotImplementedError\n\n m = spconv.SparseSequential(\n conv,\n norm_fn(out_channels),\n nn.ReLU(),\n )\n\n return m\n\n\nclass VoxelBackBone8x(nn.Module):\n def __init__(self, model_cfg, input_channels, grid_size, **kwargs):\n super().__init__()\n self.model_cfg = model_cfg\n norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)\n\n self.sparse_shape = grid_size[::-1] + [1, 0, 0]\n\n self.conv_input = spconv.SparseSequential(\n spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),\n norm_fn(16),\n nn.ReLU(),\n )\n block = post_act_block\n\n self.conv1 = spconv.SparseSequential(\n block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),\n )\n\n self.conv2 = spconv.SparseSequential(\n # [1600, 1408, 41] <- [800, 704, 21]\n block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n )\n\n self.conv3 = spconv.SparseSequential(\n # [800, 704, 21] <- [400, 352, 11]\n block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n )\n\n self.conv4 = spconv.SparseSequential(\n # [400, 352, 11] <- [200, 176, 5]\n block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n )\n\n last_pad = 0\n last_pad = self.model_cfg.get('last_pad', last_pad)\n self.conv_out = spconv.SparseSequential(\n # [200, 150, 5] -> [200, 150, 2]\n spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,\n bias=False, indice_key='spconv_down2'),\n norm_fn(128),\n nn.ReLU(),\n )\n self.num_point_features = 128\n\n def forward(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size: int\n vfe_features: (num_voxels, C)\n voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]\n Returns:\n batch_dict:\n encoded_spconv_tensor: sparse tensor\n \"\"\"\n voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']\n batch_size = batch_dict['batch_size']\n input_sp_tensor = spconv.SparseConvTensor(\n features=voxel_features,\n indices=voxel_coords.int(),\n spatial_shape=self.sparse_shape,\n batch_size=batch_size\n )\n\n x = self.conv_input(input_sp_tensor)\n\n x_conv1 = self.conv1(x)\n x_conv2 = self.conv2(x_conv1)\n x_conv3 = self.conv3(x_conv2)\n x_conv4 = self.conv4(x_conv3)\n\n # for detection head\n # [200, 176, 5] -> [200, 176, 2]\n out = self.conv_out(x_conv4)\n\n batch_dict.update({\n 'encoded_spconv_tensor': out,\n 'encoded_spconv_tensor_stride': 
8\n })\n batch_dict.update({\n 'multi_scale_3d_features': {\n 'x_conv1': x_conv1,\n 'x_conv2': x_conv2,\n 'x_conv3': x_conv3,\n 'x_conv4': x_conv4,\n }\n })\n\n return batch_dict\n"
] |
[
[
"torch.nn.ReLU"
]
] |
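`post_act_block` above is a conv → norm → ReLU factory where the normalization layer is injected through `functools.partial`; spconv uses `BatchNorm1d` because sparse tensors store their features as an (N, C) matrix. A dense `torch.nn` analogue of the same pattern, runnable without spconv — the switch to `BatchNorm3d` for dense NCDHW tensors is my adaptation, not the repo's code:

```python
import torch
import torch.nn as nn
from functools import partial

def post_act_block(in_ch, out_ch, k, stride=1, padding=0, norm_fn=None):
    # conv -> norm -> ReLU, with the norm layer injected from outside
    return nn.Sequential(
        nn.Conv3d(in_ch, out_ch, k, stride=stride, padding=padding, bias=False),
        norm_fn(out_ch),
        nn.ReLU(),
    )

norm_fn = partial(nn.BatchNorm3d, eps=1e-3, momentum=0.01)
block = post_act_block(16, 32, 3, stride=2, padding=1, norm_fn=norm_fn)
print(block(torch.randn(1, 16, 8, 8, 8)).shape)  # torch.Size([1, 32, 4, 4, 4])
```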
tinybug/zvt
|
[
"05c17e390f8c1c611286885ec9d0f6db2c8e0f1d"
] |
[
"zvt/recorders/joinquant/meta/stock_trade_day_recorder.py"
] |
[
"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom jqdatasdk import auth, get_trade_days\n\nfrom zvt.contract.api import df_to_db\nfrom zvt.contract.recorder import TimeSeriesDataRecorder\nfrom zvt.utils.time_utils import to_time_str\nfrom zvt import zvt_env\nfrom zvt.domain import StockTradeDay, Stock\n\n\nclass StockTradeDayRecorder(TimeSeriesDataRecorder):\n entity_provider = 'joinquant'\n entity_schema = Stock\n\n provider = 'joinquant'\n data_schema = StockTradeDay\n\n def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,\n force_update=False, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',\n start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:\n super().__init__(entity_type, exchanges, entity_ids, ['000001'], batch_size, force_update, sleeping_time,\n default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,\n close_minute)\n auth(zvt_env['jq_username'], zvt_env['jq_password'])\n\n def record(self, entity, start, end, size, timestamps):\n df = pd.DataFrame()\n dates = get_trade_days(start_date=start)\n self.logger.info(f'add dates:{dates}')\n df['timestamp'] = pd.to_datetime(dates)\n df['id'] = [to_time_str(date) for date in dates]\n df['entity_id'] = 'stock_sz_000001'\n\n df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)\n\n\n__all__ = ['StockTradeDayRecorder']\n\nif __name__ == '__main__':\n r = StockTradeDayRecorder()\n r.run()\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
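The recorder above persists trading days as one row per date under a single pseudo entity. A pandas-only sketch of the frame it hands to `df_to_db` — the dates stand in for `jqdatasdk.get_trade_days()`, and the ISO-date `id` column assumes that is what `to_time_str` produces:

```python
import pandas as pd

dates = pd.date_range("2020-01-02", periods=3, freq="B")  # illustrative trade days
df = pd.DataFrame({
    "timestamp": pd.to_datetime(dates),
    "id": [d.strftime("%Y-%m-%d") for d in dates],  # assumed to_time_str format
    "entity_id": "stock_sz_000001",
})
print(df)
```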
cjh1/hexrdgui
|
[
"eb8968ba763cebbffce61164f1bda1e2cc622461"
] |
[
"hexrd/ui/image_tab_widget.py"
] |
[
"from PySide2.QtCore import Signal, Slot, Qt\nfrom PySide2.QtWidgets import QMessageBox, QTabWidget, QHBoxLayout\n\nimport numpy as np\n\nfrom hexrd.ui.constants import ViewType, ZOOM, PAN\nfrom hexrd.ui.hexrd_config import HexrdConfig\nfrom hexrd.ui.image_canvas import ImageCanvas\nfrom hexrd.ui.image_series_toolbar import ImageSeriesToolbar\nfrom hexrd.ui.navigation_toolbar import NavigationToolbar\nfrom hexrd.ui import utils\n\n\nclass ImageTabWidget(QTabWidget):\n\n # Tell the main window that an update is needed\n update_needed = Signal()\n\n # Emitted when the mouse is moving on the canvas, but outside\n # an image/plot. Intended to clear the status bar.\n clear_mouse_position = Signal()\n\n # Emitted when the mouse moves on top of an image/plot\n # Arguments are: x, y, xdata, ydata, intensity\n new_mouse_position = Signal(dict)\n\n def __init__(self, parent=None):\n super(ImageTabWidget, self).__init__(parent)\n self.image_canvases = [ImageCanvas(self)]\n\n # Set up a mouse move connection to use with the status bar\n cid = self.image_canvases[0].mpl_connect(\n 'motion_notify_event',\n self.on_motion_notify_event)\n self.mpl_connections = [cid]\n\n self.image_names = []\n self.current_index = 0\n\n # These will get set later\n self.cmap = None\n self.norm = None\n self.toolbars = []\n self.toolbar_visible = True\n\n self.setup_connections()\n\n def setup_connections(self):\n self.tabBarClicked.connect(self.switch_toolbar)\n HexrdConfig().tab_images_changed.connect(self.load_images)\n HexrdConfig().detectors_changed.connect(self.reset_index)\n\n def clear(self):\n # This removes all canvases except the first one,\n # and it calls super().clear()\n\n for canvas, cid in zip(self.image_canvases[1:],\n self.mpl_connections[1:]):\n canvas.mpl_disconnect(cid)\n canvas.deleteLater()\n\n del self.image_canvases[1:]\n del self.toolbars[1:]\n del self.mpl_connections[1:]\n\n super().clear()\n\n def reset_index(self):\n self.current_index = 0\n\n def allocate_canvases(self):\n while len(self.image_canvases) < len(self.image_names):\n self.image_canvases.append(ImageCanvas(self))\n\n # Make connections to use with the status bar\n while len(self.mpl_connections) < len(self.image_canvases):\n ind = len(self.mpl_connections)\n cid = self.image_canvases[ind].mpl_connect(\n 'motion_notify_event',\n self.on_motion_notify_event)\n\n self.mpl_connections.append(cid)\n\n def load_images_tabbed(self):\n self.clear()\n self.allocate_canvases()\n self.allocate_toolbars()\n for i, name in enumerate(self.image_names):\n self.image_canvases[i].load_images(image_names=[name])\n self.addTab(self.image_canvases[i], name)\n\n self.update_canvas_cmaps()\n self.update_canvas_norms()\n self.tabBar().show()\n self.setCurrentIndex(self.current_index)\n\n def load_images_untabbed(self):\n self.clear()\n self.image_canvases[0].load_images(\n image_names=self.image_names)\n self.allocate_toolbars()\n self.addTab(self.image_canvases[0], '')\n\n self.update_canvas_cmaps()\n self.update_canvas_norms()\n self.tabBar().hide()\n\n def update_image_names(self):\n if self.image_names != list(HexrdConfig().imageseries_dict.keys()):\n self.image_names = list(HexrdConfig().imageseries_dict.keys())\n\n def load_images(self):\n self.update_image_names()\n self.update_ims_toolbar()\n\n if HexrdConfig().tab_images:\n self.load_images_tabbed()\n else:\n self.load_images_untabbed()\n\n self.switch_toolbar(self.currentIndex())\n\n def change_ims_image(self, pos):\n HexrdConfig().current_imageseries_idx = pos\n 
self.update_needed.emit()\n\n @Slot(bool)\n def show_toolbar(self, b):\n self.toolbar_visible = b\n\n if self.current_index < 0 or not self.toolbars:\n return\n\n self.toolbars[self.current_index]['tb'].setVisible(b)\n self.toolbars[self.current_index]['sb'].set_visible(b)\n\n def allocate_toolbars(self):\n parent = self.parent()\n while len(self.toolbars) != len(self.image_canvases):\n # The new one to add\n idx = len(self.toolbars)\n tb = NavigationToolbar(self.image_canvases[idx], parent, False)\n # Current detector\n name = self.image_names[idx]\n sb = ImageSeriesToolbar(name, self)\n\n # This will put it at the bottom of the central widget\n toolbar = QHBoxLayout()\n toolbar.addWidget(tb)\n toolbar.addWidget(sb.widget)\n parent.layout().addLayout(toolbar)\n parent.layout().setAlignment(toolbar, Qt.AlignCenter)\n self.toolbars.append({'tb': tb, 'sb': sb})\n\n def switch_toolbar(self, idx):\n if idx < 0:\n return\n\n self.current_index = idx\n\n # None should be visible except the current one\n for i, toolbar in enumerate(self.toolbars):\n status = self.toolbar_visible if idx == i else False\n toolbar['tb'].setVisible(status)\n toolbar['sb'].set_visible(status)\n self.update_ims_toolbar()\n\n def update_ims_toolbar(self):\n idx = self.current_index\n if self.toolbars:\n self.toolbars[idx]['sb'].update_name(self.image_names[idx])\n self.toolbars[idx]['sb'].update_range(True)\n\n def toggle_off_toolbar(self):\n toolbars = [bars['tb'] for bars in self.toolbars]\n for tb in toolbars:\n if tb.mode == ZOOM:\n tb.zoom()\n if tb.mode == PAN:\n tb.pan()\n\n def show_cartesian(self):\n self.update_image_names()\n self.update_ims_toolbar()\n\n # Make sure we actually have images\n if len(self.image_names) == 0:\n msg = 'Cannot show Cartesian view without images!'\n QMessageBox.warning(self, 'HEXRD', msg)\n return\n\n self.clear()\n self.image_canvases[0].show_cartesian()\n self.addTab(self.image_canvases[0], '')\n self.tabBar().hide()\n self.switch_toolbar(self.currentIndex())\n\n def show_polar(self):\n self.update_image_names()\n self.update_ims_toolbar()\n\n # Make sure we actually have images\n if len(self.image_names) == 0:\n msg = 'Cannot show Polar view without images!'\n QMessageBox.warning(self, 'HEXRD', msg)\n return\n\n self.clear()\n self.image_canvases[0].show_polar()\n self.addTab(self.image_canvases[0], '')\n self.tabBar().hide()\n self.switch_toolbar(self.currentIndex())\n\n def active_canvases(self):\n \"\"\"Get the canvases that are actively being used\"\"\"\n if not HexrdConfig().tab_images:\n return [self.image_canvases[0]]\n\n return self.image_canvases[:len(self.image_names)]\n\n def update_canvas_cmaps(self):\n if self.cmap is not None:\n for canvas in self.active_canvases():\n canvas.set_cmap(self.cmap)\n\n def update_canvas_norms(self):\n if self.norm is not None:\n for canvas in self.active_canvases():\n canvas.set_norm(self.norm)\n\n def set_cmap(self, cmap):\n self.cmap = cmap\n self.update_canvas_cmaps()\n\n def set_norm(self, norm):\n self.norm = norm\n self.update_canvas_norms()\n\n def on_motion_notify_event(self, event):\n # Clear the info if the mouse leaves a plot\n if event.inaxes is None:\n self.clear_mouse_position.emit()\n return\n\n mode = self.image_canvases[0].mode\n\n if mode is None:\n mode = 'images'\n\n info = {\n 'x': event.x,\n 'y': event.y,\n 'x_data': event.xdata,\n 'y_data': event.ydata,\n 'mode': mode\n }\n\n # TODO: we are currently calculating the pixel intensity\n # mathematically, because I couldn't find any other way\n # to obtain it. 
If we find a better way, let's do it.\n\n if event.inaxes.get_images():\n # Image was created with imshow()\n artist = event.inaxes.get_images()[0]\n i, j = utils.coords2index(artist, info['x_data'], info['y_data'])\n intensity = artist.get_array()[i, j]\n else:\n # This is probably just a plot. Do not calculate intensity.\n intensity = None\n\n info['intensity'] = intensity\n\n # intensity being None implies here that the mouse is on top of the\n # azimuthal integration plot in the polar view.\n if (mode in [ViewType.cartesian, ViewType.polar] and\n intensity is not None):\n\n iviewer = self.image_canvases[0].iviewer\n\n if mode == ViewType.cartesian:\n xy_data = iviewer.dpanel.pixelToCart(np.vstack([i, j]).T)\n ang_data, gvec = iviewer.dpanel.cart_to_angles(xy_data)\n tth = ang_data[:, 0][0]\n eta = ang_data[:, 1][0]\n else:\n tth = np.radians(info['x_data'])\n eta = np.radians(info['y_data'])\n\n # We will only display the active material's hkls\n plane_data = HexrdConfig().active_material.planeData\n dsp = 0.5 * plane_data.wavelength / np.sin(0.5 * tth)\n hkl = str(plane_data.getHKLs(asStr=True, allHKLs=True,\n thisTTh=tth))\n\n info['tth'] = np.degrees(tth)\n info['eta'] = np.degrees(eta)\n info['dsp'] = dsp\n info['hkl'] = hkl\n\n self.new_mouse_position.emit(info)\n\n def export_polar_plot(self, filename):\n self.image_canvases[0].export_polar_plot(filename)\n\n def polar_show_snip1d(self):\n self.image_canvases[0].polar_show_snip1d()\n\n\nif __name__ == '__main__':\n import sys\n from PySide2.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n\n # This will just test for __init__ errors\n ImageTabWidget()\n"
] |
[
[
"numpy.radians",
"numpy.sin",
"numpy.vstack",
"numpy.degrees"
]
] |
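Note on the row above: its `on_motion_notify_event` handler recovers the pixel intensity under the cursor by mapping the event's data coordinates to an array index with `utils.coords2index` and indexing the image artist's array. Below is a minimal self-contained stand-in for that mapping, assuming a uniform grid and `origin='lower'`; it is a sketch, not hexrd's `coords2index` implementation.

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless-safe for a quick test
import matplotlib.pyplot as plt

def coords_to_index(artist, x_data, y_data):
    """Map data coordinates to the (row, col) index of an imshow() artist."""
    arr = artist.get_array()
    xmin, xmax, ymin, ymax = artist.get_extent()
    ny, nx = arr.shape[:2]
    j = int(np.clip((x_data - xmin) / (xmax - xmin) * nx, 0, nx - 1))
    i = int(np.clip((y_data - ymin) / (ymax - ymin) * ny, 0, ny - 1))
    return i, j

fig, ax = plt.subplots()
img = ax.imshow(np.arange(24).reshape(4, 6), extent=(0, 6, 0, 4), origin="lower")
i, j = coords_to_index(img, 3.2, 1.7)
print(i, j, img.get_array()[i, j])  # (1, 3) -> intensity 9
```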
willcerny/ugali
|
[
"00b30c5ac84e4cada338fb77d33aa31afabca7bb"
] |
[
"ugali/analysis/results.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nCalculate output results dictionary.\n\"\"\"\n\nfrom collections import OrderedDict as odict\n\nimport numpy as np\nimport yaml\nimport numpy.lib.recfunctions as recfuncs\n\nimport astropy.coordinates\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n\nimport ugali.analysis.source\nimport ugali.analysis.loglike\n\nimport ugali.utils.stats\nfrom ugali.utils.stats import Samples\nfrom ugali.utils.projector import dist2mod,mod2dist,gal2cel,gal2cel_angle\nfrom ugali.utils.projector import ang2const, ang2iau\nfrom ugali.utils.config import Config\nfrom ugali.utils.logger import logger\n\nclass Results(object):\n \"\"\"\n Calculate results from a MCMC chain.\n \"\"\"\n def __init__(self, config, loglike, samples=None):\n self.config = Config(config)\n self.alpha = self.config['results'].get('alpha',0.10)\n self.nwalkers = self.config['mcmc'].get('nwalkers',100)\n self.nburn = self.config['results'].get('nburn',10)\n self.coordsys = self.config['coords']['coordsys'].lower()\n \n self.loglike = loglike\n self.source = self.loglike.source\n self.params = list(self.source.get_free_params().keys())\n self.samples = samples\n\n def load_samples(self,filename):\n samples = Samples(filename)\n self.samples = samples.supplement(coordsys=self.coordsys)\n\n\n def get_mle(self):\n mle = self.source.get_params()\n # FIXME: For composite isochrones\n if 'age' not in mle:\n mle['age'] = np.average(self.source.isochrone.age)\n if 'metallicity' not in mle:\n mle['metallicity'] = np.average(self.source.isochrone.metallicity)\n \n return mle\n\n def estimate(self,param,burn=None,clip=10.0,alpha=0.32):\n \"\"\" Estimate parameter value and uncertainties \"\"\"\n # FIXME: Need to add age and metallicity to composite isochrone params (currently properties)\n if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']:\n msg = 'Unrecognized parameter: %s'%param\n raise KeyError(msg)\n\n # If the parameter is in the samples\n if param in self.samples.names:\n if param.startswith('position_angle'):\n return self.estimate_position_angle(param,burn=burn,\n clip=clip,alpha=alpha)\n\n return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha)\n \n mle = self.get_mle()\n errors = [np.nan,np.nan] \n\n # Set default value to the MLE value\n if param in self.source.params:\n err = self.source.params[param].errors\n if err is not None: errors = err\n\n # For age and metallicity from composite isochrone\n return [float(mle[param]),errors]\n\n ### if (param not in self.params) or (param not in : \n ### return [float(mle[param]),errors]\n ### \n ### if param not in self.samples.names: \n ### return [float(mle[param]),errors]\n ### \n ### msg = \"Unrecognized parameter: %s\"%param\n ### raise KeyError(msg)\n \n def estimate_params(self,burn=None,clip=10.0,alpha=0.32):\n \"\"\" Estimate all source parameters \"\"\"\n mle = self.get_mle()\n out = odict()\n for param in mle.keys():\n out[param] = self.estimate(param,burn=burn,clip=clip,alpha=alpha)\n return out\n \n def estimate_position_angle(self,param='position_angle',burn=None,clip=10.0,alpha=0.32):\n \"\"\" Estimate the position angle from the posterior dealing\n with periodicity.\n \"\"\"\n # Transform so peak in the middle of the distribution\n pa = self.samples.get(param,burn=burn,clip=clip)\n peak = ugali.utils.stats.kde_peak(pa)\n shift = 180.*((pa+90-peak)>180)\n pa -= shift\n # Get the kde interval\n ret = ugali.utils.stats.peak_interval(pa,alpha)\n if ret[0] < 0: \n ret[0] += 
180.; ret[1][0] += 180.; ret[1][1] += 180.;\n return ret\n \n def bayes_factor(self,param,burn=None,clip=10.0,bins=50):\n # CAREFUL: Assumes a flat prior...\n try: \n data = self.samples.get(param,burn=burn,clip=clip)\n except ValueError as msg:\n logger.warning(msg)\n return ugali.utils.stats.interval(np.nan)\n \n bmin,bmax = self.source.params[param].bounds\n bins = np.linspace(bmin,bmax,bins)\n n,b = np.histogram(data,bins=bins,normed=True)\n prior = 1.0/(bmax-bmin)\n posterior = n[0]\n # Excluding the null hypothesis\n bf = prior/posterior\n return ugali.utils.stats.interval(bf)\n \n def get_results(self,**kwargs):\n kwargs.setdefault('alpha',self.alpha)\n kwargs.setdefault('burn',self.nburn*self.nwalkers)\n\n # Calculate best-fit parameters from MCMC chain\n logger.debug('Estimating parameters...')\n estimate = self.estimate_params(**kwargs)\n params = {k:v[0] for k,v in estimate.items()}\n results = dict(estimate)\n\n # Extra parameters from the MCMC chain\n logger.debug('Estimating auxiliary parameters...')\n try: \n results['ra'] = self.estimate('ra',**kwargs)\n results['dec'] = self.estimate('dec',**kwargs)\n except KeyError:\n logger.warn(\"Didn't find 'ra' or 'dec'\")\n ra,dec = gal2cel(results['lon'][0],results['lat'][0])\n results['ra'] = ugali.utils.stats.interval(ra)\n results['dec'] = ugali.utils.stats.interval(dec)\n\n ra,dec = results['ra'][0],results['dec'][0]\n glon,glat = lon,lat = results['lon'][0],results['lat'][0]\n results.update(gal=[float(glon),float(glat)])\n results.update(cel=[float(ra),float(dec)])\n\n try:\n results['position_angle_cel'] = self.estimate('position_angle_cel',**kwargs)\n except KeyError:\n results['position_angle_cel'] = ugali.utils.stats.interval(np.nan)\n\n # Update the loglike to the best-fit parameters from the chain\n logger.debug('Calculating TS...')\n ts = 2*self.loglike.value(**params)\n results['ts'] = ugali.utils.stats.interval(ts,np.nan,np.nan)\n \n #lon,lat = estimate['lon'][0],estimate['lat'][0]\n # \n #results.update(gal=[float(lon),float(lat)])\n #ra,dec = gal2cel(lon,lat)\n #results.update(cel=[float(ra),float(dec)])\n #results['ra'] = ugali.utils.stats.interval(ra,np.nan,np.nan)\n #results['dec'] = ugali.utils.stats.interval(dec,np.nan,np.nan)\n \n # Celestial position angle\n # Break ambiguity in direction with '% 180.'\n pa,pa_err = results['position_angle']\n pa_cel = gal2cel_angle(lon,lat,pa) % 180.\n pa_cel_err = np.array(pa_err) - pa + pa_cel\n results['position_angle_cel'] = ugali.utils.stats.interval(pa_cel,pa_cel_err[0],pa_cel_err[1])\n \n mod,mod_err = estimate['distance_modulus']\n dist = mod2dist(mod)\n dist_lo,dist_hi = [mod2dist(mod_err[0]),mod2dist(mod_err[1])]\n results['distance'] = ugali.utils.stats.interval(dist,dist_lo,dist_hi)\n dist,dist_err = results['distance']\n \n ext,ext_err = estimate['extension']\n ext_sigma = np.nan_to_num(np.array(ext_err) - ext)\n results['extension_arcmin'] = ugali.utils.stats.interval(60*ext,60*ext_err[0],60*ext_err[1])\n \n # Radially symmetric extension (correct for ellipticity).\n ell,ell_err = estimate['ellipticity']\n rext,rext_err = ext*np.sqrt(1-ell),np.array(ext_err)*np.sqrt(1-ell)\n rext_sigma = np.nan_to_num(np.array(rext_err) - rext)\n results['extension_radial'] = ugali.utils.stats.interval(rext,rext_err[0],rext_err[1])\n results['extension_radial_arcmin'] = ugali.utils.stats.interval(60*rext,60*rext_err[0],60*rext_err[1])\n \n # Bayes factor for ellipticity\n results['ellipticity_bayes_factor'] = self.bayes_factor('ellipticity',burn=kwargs['burn'])\n \n # Physical 
Size (should do this with the posteriors)\n # Radially symmetric\n dist_sigma = np.nan_to_num(np.array(dist_err) - dist)\n\n size = np.arctan(np.radians(ext)) * dist\n size_sigma = size * np.sqrt((ext_sigma/ext)**2 + (dist_sigma/dist)**2)\n size_err = [size-size_sigma[0],size+size_sigma[1]]\n results['physical_size'] = ugali.utils.stats.interval(size,size_err[0],size_err[1])\n\n rsize = np.arctan(np.radians(rext)) * dist\n rsize_sigma = rsize * np.sqrt((rext_sigma/rext)**2 + (dist_sigma/dist)**2)\n rsize_err = [rsize-rsize_sigma[0],rsize+rsize_sigma[1]]\n results['physical_size_radial'] = ugali.utils.stats.interval(rsize,rsize_err[0],rsize_err[1])\n \n # Richness\n rich,rich_err = estimate['richness']\n \n # Number of observed stars (sum of p-values)\n nobs = self.loglike.p.sum()\n nobs_lo,nobs_hi = nobs + np.sqrt(nobs)*np.array([-1,1])\n results['nobs'] = ugali.utils.stats.interval(nobs,nobs_lo,nobs_hi)\n \n # Number of predicted stars (pixelization effects?)\n npred = self.loglike.f*rich\n npred_lo,npred_hi = rich_err[0]*self.loglike.f,rich_err[1]*self.loglike.f\n results['npred'] = ugali.utils.stats.interval(npred,npred_lo,npred_hi)\n \n # Careful, depends on the isochrone...\n stellar_mass = self.source.stellar_mass()\n mass = rich*stellar_mass\n mass_lo,mass_hi = rich_err[0]*stellar_mass,rich_err[1]*stellar_mass\n results['mass'] = ugali.utils.stats.interval(mass,mass_lo,mass_hi)\n \n stellar_luminosity = self.source.stellar_luminosity()\n lum = rich*stellar_luminosity\n lum_lo,lum_hi = rich_err[0]*stellar_luminosity,rich_err[1]*stellar_luminosity\n results['luminosity'] = ugali.utils.stats.interval(lum,lum_lo,lum_hi)\n\n # Absolute magnitude only calculated for DES isochrones with g,r \n try:\n Mv = self.source.absolute_magnitude(rich)\n Mv_lo = self.source.absolute_magnitude(rich_err[0])\n Mv_hi = self.source.absolute_magnitude(rich_err[1])\n results['Mv'] = ugali.utils.stats.interval(Mv,Mv_lo,Mv_hi)\n except ValueError as e:\n logger.warning(\"Skipping absolute magnitude\")\n logger.warn(str(e))\n Mv = np.nan\n results['Mv'] = Mv\n\n # ADW: WARNING this is very fragile.\n # Also, this is not quite right, should cut on the CMD available space\n kwargs = dict(richness=rich,mag_bright=16., mag_faint=23.,\n n_trials=5000,alpha=self.alpha, seed=0)\n martin = self.config['results'].get('martin')\n if martin:\n logger.info(\"Calculating Martin magnitude...\")\n if martin > 1: kwargs['n_trials'] = martin\n Mv_martin = self.source.isochrone.absolute_magnitude_martin(**kwargs)\n results['Mv_martin'] = Mv_martin\n else:\n logger.warning(\"Skipping Martin magnitude\")\n results['Mv_martin'] = np.nan\n \n mu = surfaceBrightness(Mv, size, dist)\n results['surface_brightness'] = ugali.utils.stats.interval(mu,np.nan,np.nan)\n \n try: \n results['constellation'] = ang2const(lon,lat,self.coordsys)[1]\n except:\n pass\n results['iau'] = ugali.utils.projector.ang2iau(lon,lat,self.coordsys)\n \n coord = SkyCoord(ra*u.deg,dec*u.deg,distance=dist*u.kpc)\n results['ra_sex'] = str(coord.ra.to_string())\n results['dec_sex'] = str(coord.dec.to_string())\n \n # Calculate some separations from GC, LMC, SMC\n #NED coordinates with de Grisj distance\n LMC = SkyCoord(80.8939*u.deg,-69.7561*u.deg,distance=49.89*u.kpc)\n #NED coordinates with de Grisj distance\n SMC = SkyCoord(13.1866*u.deg,-72.8286*u.deg,distance=61.94*u.kpc)\n # GC from astropy?\n GC = SkyCoord(266.4168262*u.deg,-29.0077969*u.deg,distance=8.0*u.kpc)\n \n results['d_gc'] = coord.separation_3d(GC).value\n results['d_lmc'] = 
coord.separation_3d(LMC).value\n results['d_smc'] = coord.separation_3d(SMC).value\n\n try:\n results['feh'] = float(self.source.isochrone.feh)\n except:\n results['feh'] = np.nan\n \n output = dict()\n output['params'] = params\n output['results'] = results\n return output\n\n def write(self,filename):\n if self.samples is not None:\n results = dict(self.get_results())\n params = dict(params=results.pop('params'))\n else:\n results = dict(results=dict())\n params = dict(params=dict())\n source = dict(source=self.source.todict())\n \n out = open(filename,'w')\n out.write(yaml.dump(params,default_flow_style=False))\n out.write(yaml.dump(results))\n out.write(yaml.dump(source))\n out.close()\n\ndef surfaceBrightness(abs_mag, r_physical, distance):\n \"\"\"\n Compute the average surface brightness [mag arcsec^-2] within the half-light radius\n\n abs_mag = absolute magnitude [mag]\n r_physical = half-light radius [kpc] \n distance = [kpc]\n\n The factor 2 in the c_v equation below account for half the luminosity \n within the half-light radius. The 3600.**2 is conversion from deg^2 to arcsec^2\n\n c_v = 2.5 * np.log10(2.) + 2.5 * np.log10(np.pi * 3600.**2) = 19.78\n \"\"\"\n r_angle = np.degrees(np.arctan(r_physical / distance))\n c_v = 19.78 # mag/arcsec^2\n return abs_mag + dist2mod(distance) + c_v + 2.5 * np.log10(r_angle**2)\n\ndef createResults(config,srcfile,section='source',samples=None):\n \"\"\" Create an MCMC instance \"\"\"\n source = ugali.analysis.source.Source()\n source.load(srcfile,section=section)\n loglike = ugali.analysis.loglike.createLoglike(config,source)\n\n results = Results(config,loglike,samples)\n\n if samples is not None:\n results.load_samples(samples)\n\n return results\n\ndef write_results(filename,config,srcfile,samples):\n \"\"\" Package everything nicely \"\"\" \n results = createResults(config,srcfile,samples=samples)\n results.write(filename)\n\n\nif __name__ == \"__main__\":\n import ugali.utils.parser\n parser = ugali.utils.parser.Parser(description=__doc__)\n parser.add_config()\n parser.add_verbose()\n parser.add_argument('--srcmdl',required=True,\n help='Source model file')\n parser.add_argument('--section',default='source',\n help='Section of source file')\n parser.add_argument('--samples',required=True,\n help='Posterior samples file')\n parser.add_argument('outfile',default=None,\n help=\"Output file name\")\n \n args = parser.parse_args()\n\n #write_results(args.outfile,args.config,args.srcmdl,args.samples)\n results = createResults(args.config,args.srcmdl,samples=args.samples)\n results.write(args.outfile)\n\n"
] |
[
[
"numpy.log10",
"numpy.histogram",
"numpy.array",
"numpy.radians",
"numpy.arctan",
"numpy.sqrt",
"numpy.average",
"numpy.linspace"
]
] |
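Note on the row above: `estimate_position_angle` handles the 180-degree periodicity of the position angle by shifting samples so the posterior peak sits mid-range before taking an interval. (Separately, `bayes_factor` calls `np.histogram(..., normed=True)`; newer NumPy releases have removed that keyword in favor of `density=True`.) Below is a sketch of the wrap-around logic using plain-NumPy stand-ins — a histogram peak and the median — for ugali's `kde_peak`/`peak_interval`; those stand-ins are assumptions, not ugali's estimators.

```python
import numpy as np

def position_angle_interval(pa, alpha=0.32, period=180.0):
    """Percentile interval for a periodic angle, wrapping mass across 0/period."""
    pa = np.asarray(pa, dtype=float) % period
    counts, edges = np.histogram(pa, bins=90)             # crude peak; ugali uses a KDE
    k = int(np.argmax(counts))
    peak = 0.5 * (edges[k] + edges[k + 1])
    # samples more than half a period above the peak wrap down by one period
    pa = pa - period * ((pa + period / 2.0 - peak) > period)
    center = float(np.median(pa))                         # stand-in for the KDE peak
    lo, hi = np.percentile(pa, [50.0 * alpha, 100.0 - 50.0 * alpha])
    if center < 0:                                        # shift back into [0, period)
        center, lo, hi = center + period, lo + period, hi + period
    return center, (float(lo), float(hi))

rng = np.random.default_rng(0)
samples = rng.normal(2.0, 4.0, 20000) % 180.0             # posterior straddles the wrap point
print(position_angle_interval(samples))                   # roughly (2.0, (-2.0, 6.0))
```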
PearCoding/SimpleImageIO
|
[
"922599defcaf70cfadf453198c7f35948dc349d3"
] |
[
"PyWrapper/simpleimageio/error_metrics.py"
] |
[
"from ctypes import *\nimport numpy as np\nfrom . import corelib\n\n# Define the call signatures\n_compute_mse = corelib.core.ComputeMSE\n_compute_mse.argtypes = [POINTER(c_float), c_int, POINTER(c_float), c_int, c_int, c_int, c_int ]\n_compute_mse.restype = c_float\n\n_compute_rel_mse = corelib.core.ComputeRelMSE\n_compute_rel_mse.argtypes = [POINTER(c_float), c_int, POINTER(c_float), c_int, c_int, c_int, c_int ]\n_compute_rel_mse.restype = c_float\n\n_compute_rel_mse_outlier_reject = corelib.core.ComputeRelMSEOutlierReject\n_compute_rel_mse_outlier_reject.argtypes = [\n POINTER(c_float), c_int, POINTER(c_float), c_int, c_int, c_int, c_int, c_float ]\n_compute_rel_mse_outlier_reject.restype = c_float\n\n_compute_mse_outlier_reject = corelib.core.ComputeMSEOutlierReject\n_compute_mse_outlier_reject.argtypes = [\n POINTER(c_float), c_int, POINTER(c_float), c_int, c_int, c_int, c_int, c_float ]\n_compute_mse_outlier_reject.restype = c_float\n\ndef mse(img, ref):\n img = np.array(img, dtype=np.float32, copy=False)\n ref = np.array(ref, dtype=np.float32, copy=False)\n assert img.shape[0] == ref.shape[0], \"Images must have the same height\"\n assert img.shape[1] == ref.shape[1], \"Images must have the same width\"\n return corelib.invoke_on_pair(_compute_mse, img, ref)\n\ndef mse_outlier_rejection(img, ref, percentage=0.1):\n img = np.array(img, dtype=np.float32, copy=False)\n ref = np.array(ref, dtype=np.float32, copy=False)\n assert img.shape[0] == ref.shape[0], \"Images must have the same height\"\n assert img.shape[1] == ref.shape[1], \"Images must have the same width\"\n return corelib.invoke_on_pair(_compute_mse_outlier_reject, img, ref, percentage)\n\ndef relative_mse(img, ref):\n img = np.array(img, dtype=np.float32, copy=False)\n ref = np.array(ref, dtype=np.float32, copy=False)\n assert img.shape[0] == ref.shape[0], \"Images must have the same height\"\n assert img.shape[1] == ref.shape[1], \"Images must have the same width\"\n return corelib.invoke_on_pair(_compute_rel_mse, img, ref)\n\ndef relative_mse_outlier_rejection(img, ref, percentage=0.1):\n img = np.array(img, dtype=np.float32, copy=False)\n ref = np.array(ref, dtype=np.float32, copy=False)\n assert img.shape[0] == ref.shape[0], \"Images must have the same height\"\n assert img.shape[1] == ref.shape[1], \"Images must have the same width\"\n return corelib.invoke_on_pair(_compute_rel_mse_outlier_reject, img, ref, percentage)"
] |
[
[
"numpy.array"
]
] |
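Note on the row above: the metrics are thin ctypes wrappers, so the actual arithmetic lives in the native library; the asserts also check only height and width, so mismatched channel counts pass straight through to the C call. Below is a plain-NumPy reference for what `ComputeRelMSEOutlierReject` plausibly computes — per-pixel squared error relative to the reference, with the largest `percentage` fraction of errors discarded. The relative-error denominator and the epsilon are assumptions, not the library's definition.

```python
import numpy as np

def relative_mse_outlier_reject(img, ref, percentage=0.1, eps=1e-2):
    img = np.asarray(img, dtype=np.float32)
    ref = np.asarray(ref, dtype=np.float32)
    err = ((img - ref) ** 2 / (ref ** 2 + eps)).ravel()
    err.sort()
    keep = int(err.size * (1.0 - percentage))  # drop the largest `percentage` of errors
    return float(err[:keep].mean())

a = np.random.rand(8, 8, 3).astype(np.float32)
print(relative_mse_outlier_reject(a, a + 0.01))
```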
jerrykuo7727/transformers
|
[
"6df0d5861ab1bd845932dc24de8d7e6b836729ad"
] |
[
"src/transformers/benchmark/benchmark.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n Benchmarking the library on inference and training in PyTorch.\n\"\"\"\n\n\nimport logging\nimport timeit\nfrom typing import Callable, Optional\n\nfrom transformers import (\n MODEL_MAPPING,\n MODEL_WITH_LM_HEAD_MAPPING,\n PretrainedConfig,\n is_py3nvml_available,\n is_torch_available,\n)\n\nfrom .benchmark_utils import (\n Benchmark,\n Memory,\n MemorySummary,\n measure_peak_memory_cpu,\n start_memory_tracing,\n stop_memory_tracing,\n)\n\n\nif is_torch_available():\n import torch\n from .benchmark_args import PyTorchBenchmarkArguments\n\n\nif is_py3nvml_available():\n import py3nvml.py3nvml as nvml\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PyTorchBenchmark(Benchmark):\n\n args: PyTorchBenchmarkArguments\n configs: PretrainedConfig\n framework: str = \"PyTorch\"\n\n @property\n def framework_version(self):\n return torch.__version__\n\n def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:\n _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)\n return self._measure_speed(_inference)\n\n def _inference_memory(\n self, model_name: str, batch_size: int, sequence_length: int\n ) -> [Memory, Optional[MemorySummary]]:\n _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)\n return self._measure_memory(_inference)\n\n def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:\n _train = self._prepare_train_func(model_name, batch_size, sequence_length)\n return self._measure_speed(_train)\n\n def _train_memory(\n self, model_name: str, batch_size: int, sequence_length: int\n ) -> [Memory, Optional[MemorySummary]]:\n _train = self._prepare_train_func(model_name, batch_size, sequence_length)\n return self._measure_memory(_train)\n\n def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:\n config = self.config_dict[model_name]\n\n if self.args.torchscript:\n config.torchscript = True\n\n has_model_class_in_config = hasattr(config, \"architectures\") and len(config.architectures) > 0\n if not self.args.only_pretrain_model and has_model_class_in_config:\n try:\n model_class = config.architectures[0]\n transformers_module = __import__(\"transformers\", fromlist=[model_class])\n model_cls = getattr(transformers_module, model_class)\n model = model_cls(config)\n except ImportError:\n raise ImportError(\n f\"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.\"\n )\n else:\n model = MODEL_MAPPING[config.__class__](config)\n\n model.eval()\n model.to(self.args.device)\n\n # encoder-decoder has vocab size saved differently\n vocab_size = config.vocab_size if hasattr(config, \"vocab_size\") else config.encoder.vocab_size\n input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)\n\n if self.args.fp16:\n logger.info(\"Running training in Mixed Precision...\")\n assert self.args.is_gpu, \"Mixed precision is possible only for GPU.\"\n # amp seems to have memory leaks so that memory usage\n # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439\n model.half()\n\n if self.args.torchscript:\n with torch.no_grad():\n inference_model = torch.jit.trace(model, input_ids)\n else:\n inference_model = model\n\n def encoder_decoder_forward():\n with torch.no_grad():\n outputs = inference_model(input_ids, decoder_input_ids=input_ids)\n return outputs\n\n def encoder_forward():\n with torch.no_grad():\n outputs = inference_model(input_ids)\n return outputs\n\n _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward\n return _forward\n\n def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:\n config = self.config_dict[model_name]\n\n has_model_class_in_config = hasattr(config, \"architectures\") and len(config.architectures) > 0\n if not self.args.only_pretrain_model and has_model_class_in_config:\n try:\n model_class = config.architectures[0]\n transformers_module = __import__(\"transformers\", fromlist=[model_class])\n model_cls = getattr(transformers_module, model_class)\n model = model_cls(config)\n except ImportError:\n raise ImportError(\n f\"{model_class} does not exist. 
If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.\"\n )\n else:\n model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)\n\n if self.args.torchscript:\n raise NotImplementedError(\"Training for torchscript is currently not implemented\")\n else:\n train_model = model\n\n model.train()\n model.to(self.args.device)\n\n # encoder-decoder has vocab size saved differently\n vocab_size = config.vocab_size if hasattr(config, \"vocab_size\") else config.encoder.vocab_size\n input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)\n\n if self.args.fp16:\n logger.info(\"Running training in Mixed Precision...\")\n assert self.args.is_gpu, \"Mixed precision is possible only for GPU.\"\n\n # amp seems to have memory leaks so that memory usage\n # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439\n model.half()\n\n def compute_loss_and_backprob_encoder():\n loss = train_model(input_ids, labels=input_ids)[0]\n loss.backward()\n return loss\n\n def compute_loss_and_backprob_encoder_decoder():\n loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]\n loss.backward()\n return loss\n\n _train = (\n compute_loss_and_backprob_encoder_decoder\n if config.is_encoder_decoder\n else compute_loss_and_backprob_encoder\n )\n return _train\n\n def _measure_speed(self, func) -> float:\n try:\n if self.args.is_tpu or self.args.torchscript:\n # run additional 10 times to stabilize compilation for tpu and torchscript\n logger.info(\"Do inference on TPU or torchscript. Running model 5 times to stabilize compilation\")\n timeit.repeat(\n func, repeat=1, number=5,\n )\n\n # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average\n runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10,)\n\n if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:\n import torch_xla.debug.metrics as met\n\n self.print_fn(met.metrics_report())\n\n return min(runtimes) / 10.0\n except RuntimeError as e:\n self.print_fn(\"Doesn't fit on GPU. {}\".format(e))\n return \"N/A\"\n\n def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:\n try:\n if self.args.trace_memory_line_by_line:\n trace = start_memory_tracing(\"transformers\")\n\n if self.args.is_tpu:\n # tpu\n raise NotImplementedError(\n \"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no_memory` or `args.no_memory=True`\"\n )\n elif self.args.is_gpu:\n if not is_py3nvml_available():\n logger.warning(\n \"py3nvml not installed, we won't log GPU memory usage. \"\n \"Install py3nvml (pip install py3nvml) to log information about GPU.\"\n )\n memory = \"N/A\"\n else:\n logger.info(\n \"Measuring total GPU usage on GPU device. 
Make sure to not have additional processes running on the same GPU.\"\n )\n # init nvml\n nvml.nvmlInit()\n func()\n handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)\n meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)\n max_bytes_in_use = meminfo.used\n memory = Memory(max_bytes_in_use)\n # shutdown nvml\n nvml.nvmlShutdown()\n else:\n # cpu\n memory_bytes = measure_peak_memory_cpu(func)\n memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes\n\n if self.args.trace_memory_line_by_line:\n summary = stop_memory_tracing(trace)\n else:\n summary = None\n\n return memory, summary\n except RuntimeError as e:\n self.print_fn(\"Doesn't fit on GPU. {}\".format(e))\n return \"N/A\", None\n"
] |
[
[
"torch.randint",
"torch.jit.trace",
"torch.no_grad"
]
] |
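Note on the row above: `_measure_speed` follows the `timeit` documentation's advice to report the minimum of several repeats rather than the mean. (The surrounding comment mentions ten warm-up runs while the code executes five; the log message matches the code.) Below is a stripped-down sketch of the same timing convention, with a toy matmul standing in for the model's forward pass.

```python
import timeit
import torch

def measure_speed(func, repeat=3, number=10):
    """Seconds per call: the minimum over `repeat` trials, as timeit's docs advise."""
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number

x = torch.randn(256, 256)
print(f"{measure_speed(lambda: x @ x):.6f} s per matmul")
```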
brettkoonce/ReAgent
|
[
"dcaa16e0bdc5e1cecf816a6683e8909a9859855d"
] |
[
"ml/rl/readers/data_streamer.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport collections\nimport dataclasses\nimport queue\nimport random\nimport sys\nimport threading\nfrom typing import NamedTuple\n\nimport torch\nimport torch.multiprocessing as multiprocessing\nimport torch.utils.data._utils\nfrom torch._six import string_classes\nfrom torch.utils.data._utils import ExceptionWrapper\nfrom torch.utils.data._utils.signal_handling import (\n _remove_worker_pids,\n _set_SIGCHLD_handler,\n _set_worker_pids,\n _set_worker_signal_handlers,\n)\nfrom torch.utils.data._utils.worker import ManagerWatchdog\n\n\nMANAGER_STATUS_CHECK_INTERVAL = 5.0\n\n\nWorkerDone = collections.namedtuple(\"WorkerDone\", [\"worker_id\"])\n\n\ndef _worker_loop(\n data_reader,\n batch_queue,\n data_queue,\n global_done_event,\n worker_done_event,\n seed,\n init_fn,\n worker_id,\n):\n # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal\n # module's handlers are executed after Python returns from C low-level\n # handlers, likely when the same fatal signal happened again already.\n # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1\n _set_worker_signal_handlers()\n\n torch.set_num_threads(1)\n random.seed(seed)\n # TODO: numpy doesn't take seed bigger than INT32\n # np.random.seed(seed)\n torch.manual_seed(seed)\n\n # Do not wait for putting thread to join when this worker exits. Otherwise,\n # this worker may always be waiting to put and doesn't check batch_queue\n # and global_done_event for termination signal.\n data_queue.cancel_join_thread()\n\n if init_fn is not None:\n init_fn(worker_id)\n\n watchdog = ManagerWatchdog()\n\n shard = data_reader.get_shard(worker_id)\n shard_itr = iter(shard)\n\n shard_done = False\n\n while True:\n if shard_done:\n # Wait until the main thread acknowledge the WorkerDone message or\n # it signals shutdown.\n if (\n not watchdog.is_alive()\n or global_done_event.is_set()\n or worker_done_event.wait(0.1)\n ):\n break\n continue\n\n try:\n idx = batch_queue.get(timeout=MANAGER_STATUS_CHECK_INTERVAL)\n except queue.Empty:\n if watchdog.is_alive() and not global_done_event.is_set():\n continue\n else:\n break\n # use global_done_event so that we can get faster exiting signal even if there\n # are still batches in batch_queue\n if idx is None or global_done_event.is_set():\n break\n try:\n samples = next(shard_itr)\n except StopIteration:\n # Signal to the main thread that this worker has run out of data.\n # The worker cannot exit immediately because the queue might not be\n # flushed immediately.\n data_queue.put((idx, WorkerDone(worker_id)))\n shard_done = True\n except Exception:\n data_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n data_queue.put((idx, samples))\n del samples\n\n\ndef _pin_memory_loop(in_queue, out_queue, done_event, pin_memory_param, device_id):\n \"\"\"\n This is copied from dataloader. 
It uses a different `pin_memory()`.\n It'd probably be best to merge.\n \"\"\"\n if pin_memory_param:\n torch.cuda.set_device(device_id)\n\n while True:\n try:\n r = in_queue.get()\n except Exception:\n if done_event.is_set():\n return\n raise\n if r is None or done_event.is_set():\n break\n if isinstance(r[1], ExceptionWrapper):\n out_queue.put(r)\n continue\n idx, batch = r\n try:\n if pin_memory_param:\n batch = pin_memory(batch)\n except Exception:\n out_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n out_queue.put((idx, batch))\n\n\ndef pin_memory(batch):\n \"\"\"\n This is ripped off from dataloader. The only difference is that it preserves\n the type of Mapping so that the OrderedDict is maintained.\n \"\"\"\n if isinstance(batch, torch.Tensor):\n return batch.pin_memory().cuda(non_blocking=True)\n elif isinstance(batch, string_classes):\n return batch\n elif dataclasses.is_dataclass(batch):\n return dataclasses.replace(\n batch,\n **{\n field.name: pin_memory(getattr(batch, field.name))\n for field in dataclasses.fields(batch)\n }\n )\n elif isinstance(batch, collections.Mapping):\n # NB: preserving OrderedDict\n return type(batch)((k, pin_memory(sample)) for k, sample in batch.items())\n elif isinstance(batch, NamedTuple) or hasattr(batch, \"_asdict\"):\n # This is mainly for WorkerDone\n return type(batch)(\n **{name: pin_memory(value) for name, value in batch._asdict().items()}\n )\n elif isinstance(batch, collections.Sequence):\n return [pin_memory(sample) for sample in batch]\n else:\n return batch\n\n\nclass _DataStreamerIter(object):\n r\"\"\"Iterates once over the DataStreamer's data_reader\"\"\"\n\n def __init__(self, streamer):\n self.data_reader = streamer.data_reader\n self.num_workers = streamer.num_workers\n self.pin_memory = streamer.pin_memory and torch.cuda.is_available()\n self.timeout = streamer.timeout\n\n base_seed = torch.LongTensor(1).random_().item()\n\n if self.num_workers > 0:\n self.worker_init_fn = streamer.worker_init_fn\n self.worker_result_queue = multiprocessing.Queue()\n self.batch_queue = multiprocessing.Queue()\n self.batches_outstanding = 0\n self.worker_pids_set = False\n self.shutdown = False\n self.send_idx = 0\n self.done_event = multiprocessing.Event()\n\n self.worker_done_events = [\n multiprocessing.Event() for _i in range(self.num_workers)\n ]\n self.workers = []\n for i in range(self.num_workers):\n w = multiprocessing.Process(\n target=_worker_loop,\n args=(\n self.data_reader,\n self.batch_queue,\n self.worker_result_queue,\n self.done_event,\n self.worker_done_events[i],\n base_seed + i,\n self.worker_init_fn,\n i,\n ),\n )\n w.daemon = True # ensure that the worker exits on process exit\n # Process.start() actually take some time as it needs to start a\n # process and pass the arguments over via a pipe. 
Therefore, we\n # only add a worker to self.workers list after it started, so\n # that we do not call .join() if program dies before it starts,\n # and __del__ tries to join it but will get:\n # AssertionError: can only join a started process.\n w.start()\n self.workers.append(w)\n\n self.num_live_workers = self.num_workers\n\n if self.pin_memory:\n self.data_queue = queue.Queue()\n self.pin_memory_thread = threading.Thread(\n target=_pin_memory_loop,\n args=(\n self.worker_result_queue,\n self.data_queue,\n self.done_event,\n self.pin_memory,\n torch.cuda.current_device(),\n ),\n )\n self.pin_memory_thread.daemon = True\n self.pin_memory_thread.start()\n else:\n self.data_queue = self.worker_result_queue\n\n _set_worker_pids(id(self), tuple(w.pid for w in self.workers))\n _set_SIGCHLD_handler()\n self.worker_pids_set = True\n\n # prime the prefetch loop\n for _ in range(2 * self.num_workers):\n self._put_indices()\n else:\n # No workers\n self.data_reader_iter = iter(self.data_reader)\n\n def _get_batch(self):\n if self.timeout > 0:\n try:\n return self.data_queue.get(timeout=self.timeout)\n except queue.Empty:\n raise RuntimeError(\n \"DataReader timed out after {} seconds\".format(self.timeout)\n )\n else:\n return self.data_queue.get()\n\n def __next__(self):\n if self.num_workers == 0: # same-process loading\n batch = next(self.data_reader_iter) # May raise StopIteration\n if self.pin_memory:\n batch = pin_memory(batch)\n return batch\n\n if self.batches_outstanding == 0:\n self._shutdown_workers()\n raise StopIteration\n\n while True:\n assert not self.shutdown and self.batches_outstanding > 0\n idx, batch = self._get_batch()\n self.batches_outstanding -= 1\n self._put_indices()\n if isinstance(batch, WorkerDone):\n # Acknowledge receiving so worker can terminate early\n self.worker_done_events[batch.worker_id].set()\n self.num_live_workers -= 1\n if self.num_live_workers == 0:\n self._shutdown_workers()\n raise StopIteration\n else:\n continue\n\n if isinstance(batch, ExceptionWrapper):\n raise batch.exc_type(batch.exc_msg)\n return batch\n\n def __iter__(self):\n return self\n\n def _put_indices(self):\n assert self.batches_outstanding < 2 * self.num_workers\n self.batch_queue.put(self.send_idx)\n self.batches_outstanding += 1\n self.send_idx += 1\n\n def __getstate__(self):\n # TODO: add limited pickling support for sharing an iterator\n # across multiple threads for HOGWILD.\n # Probably the best way to do this is by moving the sample pushing\n # to a separate thread and then just sharing the data queue\n # but signalling the end is tricky without a non-blocking API\n raise NotImplementedError(\"_DataReaderIter cannot be pickled\")\n\n def _shutdown_workers(self):\n if not self.shutdown:\n self.shutdown = True\n # removes pids from the C side data structure first so worker\n # termination afterwards won't trigger false positive error report.\n if self.worker_pids_set:\n _remove_worker_pids(id(self))\n self.worker_pids_set = False\n self.done_event.set()\n if self.pin_memory:\n # Sending `None` to `pin_memory_thread` must be before\n # stopping worker processes because the workers may leave\n # corrupted data in `worker_result_queue`, causing\n # `pin_memory_thread` unable to read and terminate properly.\n self.worker_result_queue.put(None)\n # Workers can't be waiting to put be cause their output queue\n # is a multiprocessing.Queue and its .put is non-blocking.\n # They can only be waiting to get, so we put `None` here.\n for _w in self.workers:\n # Putting as many None as 
workers to ensure worker will get one\n self.batch_queue.put(None)\n for w in self.workers:\n w.join()\n if self.pin_memory:\n self.pin_memory_thread.join()\n\n def __del__(self):\n if self.num_workers > 0:\n self._shutdown_workers()\n\n\nclass DataStreamer(object):\n r\"\"\"\n Data streamer. Provides single- or multi-process iterators over the data_reader.\n\n Arguments:\n data_reader (DataReader): data_reader from which to stream the data.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n pin_memory (bool, optional): If ``True``, the data streamer will copy tensors\n into CUDA pinned memory before returning them.\n timeout (numeric, optional): if positive, the timeout value for collecting a\n batch from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraies\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`datastreamer-workers-random-seed` section in FAQ.) You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n def __init__(\n self,\n data_reader,\n num_workers=0,\n pin_memory=False,\n timeout=0,\n worker_init_fn=None,\n ):\n self.data_reader = data_reader\n self.num_workers = num_workers\n self.pin_memory = pin_memory\n self.timeout = timeout\n self.worker_init_fn = worker_init_fn\n\n if timeout < 0:\n raise ValueError(\"timeout option should be non-negative\")\n\n if self.num_workers < 0:\n raise ValueError(\n \"num_workers option cannot be negative; \"\n \"use num_workers=0 to disable multiprocessing.\"\n )\n\n def __iter__(self):\n return _DataStreamerIter(self)\n"
] |
[
[
"torch.multiprocessing.Process",
"torch.utils.data._utils.signal_handling._set_worker_signal_handlers",
"torch.utils.data._utils.signal_handling._set_SIGCHLD_handler",
"torch.cuda.current_device",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.multiprocessing.Event",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.utils.data._utils.worker.ManagerWatchdog",
"torch.multiprocessing.Queue",
"torch.set_num_threads"
]
] |
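Note on the row above: `pin_memory` recurses through nested batches while preserving container types, but two details have aged. `collections.Mapping`/`collections.Sequence` moved to `collections.abc` (the old aliases are gone in Python 3.10+), and on recent Pythons `isinstance(batch, NamedTuple)` raises rather than matching, so only the `hasattr(batch, "_asdict")` half of that test can catch namedtuples. Below is a modernized sketch of the same dispatch; CUDA pinning is skipped when unavailable so it runs on any machine.

```python
import collections.abc
from collections import OrderedDict
import torch

def pin_nested(batch):
    """Recursively pin tensors while preserving container types (dicts keep their class)."""
    if isinstance(batch, torch.Tensor):
        return batch.pin_memory() if torch.cuda.is_available() else batch
    if isinstance(batch, str):
        return batch
    if isinstance(batch, collections.abc.Mapping):
        return type(batch)((k, pin_nested(v)) for k, v in batch.items())
    if isinstance(batch, tuple) and hasattr(batch, "_fields"):   # namedtuple check
        return type(batch)(*(pin_nested(v) for v in batch))
    if isinstance(batch, collections.abc.Sequence):
        return [pin_nested(v) for v in batch]
    return batch

out = pin_nested(OrderedDict(a=torch.ones(2), b=[torch.zeros(3), "label"]))
print(type(out).__name__, out["a"].shape)  # OrderedDict torch.Size([2])
```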
justinchiu/NeuralDialog
|
[
"f272cc2e12ffdd44c94263ee373208a22c057129"
] |
[
"FB/train.py"
] |
[
"# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTraining script. Performs supervised training of DialogModel.\n\"\"\"\n\nimport argparse\nimport itertools\nimport logging\nimport numpy as np\nimport random\nimport re\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport sys\nimport time\n\n# local imports\nimport config\nimport data\nfrom engine import Engine\nfrom models.dialog_model import DialogModel\nimport utils\n\nlogging.basicConfig(format=config.log_format, level=config.log_level)\n\ndef main():\n parser = argparse.ArgumentParser(description='training script')\n parser.add_argument('--data', type=str, default=config.data_dir,\n help='location of the data corpus')\n parser.add_argument('--nembed_word', type=int, default=config.nembed_word,\n help='size of word embeddings')\n parser.add_argument('--nembed_ctx', type=int, default=config.nembed_ctx,\n help='size of context embeddings')\n parser.add_argument('--nhid_lang', type=int, default=config.nhid_lang,\n help='size of the hidden state for the language module')\n parser.add_argument('--nhid_ctx', type=int, default=config.nhid_ctx,\n help='size of the hidden state for the context module')\n parser.add_argument('--nhid_strat', type=int, default=config.nhid_strat,\n help='size of the hidden state for the strategy module')\n parser.add_argument('--nhid_attn', type=int, default=config.nhid_attn,\n help='size of the hidden state for the attention module')\n parser.add_argument('--nhid_sel', type=int, default=config.nhid_sel,\n help='size of the hidden state for the selection module')\n parser.add_argument('--lr', type=float, default=config.lr,\n help='initial learning rate')\n parser.add_argument('--min_lr', type=float, default=config.min_lr,\n help='min threshold for learning rate annealing')\n parser.add_argument('--decay_rate', type=float, default=config.decay_rate,\n help='decrease learning rate by this factor')\n parser.add_argument('--decay_every', type=int, default=config.decay_every,\n help='decrease learning rate after decay_every epochs')\n parser.add_argument('--momentum', type=float, default=config.momentum,\n help='momentum for sgd')\n parser.add_argument('--nesterov', action='store_true', default=config.nesterov,\n help='enable nesterov momentum')\n parser.add_argument('--clip', type=float, default=config.clip,\n help='gradient clipping')\n parser.add_argument('--dropout', type=float, default=config.dropout,\n help='dropout rate in embedding layer')\n parser.add_argument('--init_range', type=float, default=config.init_range,\n help='initialization range')\n parser.add_argument('--max_epoch', type=int, default=config.max_epoch,\n help='max number of epochs')\n parser.add_argument('--bsz', type=int, default=config.bsz,\n help='batch size')\n parser.add_argument('--unk_threshold', type=int, default=config.unk_threshold,\n help='minimum word frequency to be in dictionary')\n parser.add_argument('--temperature', type=float, default=config.temperature,\n help='temperature')\n parser.add_argument('--sel_weight', type=float, default=config.sel_weight,\n help='selection weight')\n parser.add_argument('--seed', type=int, default=config.seed,\n help='random seed')\n parser.add_argument('--cuda', action='store_true', default=config.cuda,\n help='use CUDA')\n parser.add_argument('--model_file', type=str, default='',\n help='path to 
save the final model')\n parser.add_argument('--visual', action='store_true', default=config.plot_graphs,\n help='plot graphs')\n parser.add_argument('--domain', type=str, default=config.domain,\n help='domain for the dialogue')\n parser.add_argument('--rnn_ctx_encoder', action='store_true', default=config.rnn_ctx_encoder,\n help='whether to use RNN for encoding the context')\n args = parser.parse_args()\n\n device_id = utils.use_cuda(args.cuda)\n logging.info(\"Starting training using pytorch version:%s\" % (str(torch.__version__)))\n logging.info(\"CUDA is %s\" % (\"enabled. Using device_id:\"+str(device_id) + \" version:\" \\\n +str(torch.version.cuda) + \" on gpu:\" + torch.cuda.get_device_name(0) if args.cuda else \"disabled\"))\n utils.set_seed(args.seed)\n\n logging.info(\"Building word corpus, requiring minimum word frequency of %d for dictionary\" % (args.unk_threshold))\n corpus = data.WordCorpus(args.data, freq_cutoff=args.unk_threshold, verbose=True)\n\n logging.info(\"Building RNN-based dialogue model from word corpus\")\n model = DialogModel(corpus.word_dict, corpus.item_dict, corpus.context_dict,\n corpus.output_length, args, device_id)\n if device_id is not None:\n model.cuda(device_id)\n \n engine = Engine(model, args, device_id, verbose=True)\n logging.info(\"Training model\")\n train_loss, valid_loss, select_loss = engine.train(corpus)\n logging.info('final select_ppl %.3f' % np.exp(select_loss))\n\n # utils.save_model(engine.get_model(), args.model_file)\n torch.save(engine.get_model().state_dict(), args.model_file)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cuda.get_device_name",
"numpy.exp"
]
] |
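Note on the row above: the script ends by saving only the model's `state_dict` rather than pickling the whole model object (the commented-out `utils.save_model` call was the old path). Restoring therefore requires rebuilding the architecture first. A minimal sketch, with a toy module standing in for `DialogModel`:

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), "model.pt")        # what train.py does at the end

restored = nn.Linear(4, 2)                        # must reconstruct the same architecture
restored.load_state_dict(torch.load("model.pt"))
restored.eval()
```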
Yaoming95/neurst
|
[
"f7e2a043f20b6724310b048035e0a6075f60032a"
] |
[
"neurst/utils/converters/google_bert.py"
] |
[
"# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport re\nimport zipfile\n\nimport numpy\nimport tensorflow as tf\nimport yaml\nfrom absl import logging\n\nfrom neurst.models.bert import Bert\nfrom neurst.utils.converters import Converter, register_converter\nfrom neurst.utils.misc import download_with_tqdm\n\n_URL_PREFIX = \"https://storage.googleapis.com/bert_models\"\n_BERT_PRETRAIN_MODELS = {\n \"bert-base-uncased\": _URL_PREFIX + \"/2018_10_18/uncased_L-12_H-768_A-12.zip\",\n # \"bert-base-uncased\": _URL_PREFIX + \"/2020_02_20/uncased_L-12_H-768_A-12.zip\"\n \"bert-large-uncased\": _URL_PREFIX + \"/2018_10_18/uncased_L-24_H-1024_A-16.zip\",\n \"bert-base-chinese\": _URL_PREFIX + \"/2018_11_03/chinese_L-12_H-768_A-12.zip\",\n\n}\n\n_DIRECT_MAPPINGS = {\n \"bert/embeddings/word_embeddings\": \"bert/bert_embedding/word_embedding\",\n \"bert/embeddings/token_type_embeddings\": \"bert/bert_embedding/token_type_embedding\",\n \"bert/embeddings/position_embeddings\": \"bert/bert_embedding/position_embedding\",\n \"bert/pooler/dense/bias\": \"bert/pooler/bias\",\n \"bert/pooler/dense/kernel\": \"bert/pooler/kernel\",\n \"bert/embeddings/LayerNorm/beta\": \"bert/bert_embedding/ln/beta\",\n \"bert/embeddings/LayerNorm/gamma\": \"bert/bert_embedding/ln/gamma\",\n}\n\n_POSTFIX_MAPPINGS = {\n \"attention/output/dense/kernel\": \"self_attention_prepost_wrapper/self_attention/output_transform/kernel\",\n \"attention/output/dense/bias\": \"self_attention_prepost_wrapper/self_attention/output_transform/bias\",\n \"attention/output/LayerNorm/beta\": \"self_attention_prepost_wrapper/ln/beta\",\n \"attention/output/LayerNorm/gamma\": \"self_attention_prepost_wrapper/ln/gamma\",\n \"intermediate/dense/kernel\": \"ffn_prepost_wrapper/ffn/dense1/kernel\",\n \"intermediate/dense/bias\": \"ffn_prepost_wrapper/ffn/dense1/bias\",\n \"output/dense/kernel\": \"ffn_prepost_wrapper/ffn/dense2/kernel\",\n \"output/dense/bias\": \"ffn_prepost_wrapper/ffn/dense2/bias\",\n \"output/LayerNorm/beta\": \"ffn_prepost_wrapper/ln/beta\",\n \"output/LayerNorm/gamma\": \"ffn_prepost_wrapper/ln/gamma\",\n}\n\n\n@register_converter\nclass GoogleBert(Converter):\n \"\"\" Converts from the google bert.\n https://github.com/google-research/bert\n \"\"\"\n\n @staticmethod\n def convert_model_config(path):\n with tf.io.gfile.GFile(os.path.join(path, \"bert_config.json\")) as fp:\n cfg = yaml.load(fp, Loader=yaml.FullLoader)\n return {\n \"model.class\": Bert.__name__,\n \"model.params\": {\n \"max_position_embeddings\": cfg[\"max_position_embeddings\"],\n \"num_layers\": cfg[\"num_hidden_layers\"],\n \"hidden_size\": cfg[\"hidden_size\"],\n \"num_attention_heads\": cfg[\"num_attention_heads\"],\n \"filter_size\": cfg[\"intermediate_size\"],\n \"ffn_activation\": cfg[\"hidden_act\"],\n \"attention_dropout_rate\": cfg[\"attention_probs_dropout_prob\"],\n \"attention_type\": \"dot_product\",\n \"ffn_dropout_rate\": cfg[\"hidden_dropout_prob\"],\n \"layer_postprocess_dropout_rate\": 
cfg[\"hidden_dropout_prob\"]\n }\n }\n\n @staticmethod\n def convert_task_config(path):\n raise NotImplementedError\n\n @staticmethod\n def download(key):\n if key in _BERT_PRETRAIN_MODELS:\n url = _BERT_PRETRAIN_MODELS[key]\n elif key.startswith(\"http://\") or key.startswith(\"https://\"):\n url = key\n else:\n return None\n bert_name = os.path.basename(url).split(\".\")[0]\n this_dir = os.path.dirname(__file__)\n extract_path = os.path.join(this_dir, bert_name)\n if not os.path.exists(extract_path):\n logging.info(f\"Downloading google bert: {key}\")\n tarball = os.path.join(this_dir, os.path.basename(url))\n download_with_tqdm(url, tarball)\n tf.io.gfile.makedirs(extract_path)\n with zipfile.ZipFile(tarball) as zip_ref:\n zip_ref.extractall(extract_path)\n os.remove(tarball)\n if os.path.isdir(os.path.join(extract_path, bert_name)):\n return os.path.join(extract_path, bert_name)\n return extract_path\n\n @staticmethod\n def convert_checkpoint(path, save_path):\n ckpt = os.path.join(path, \"bert_model.ckpt\")\n bert_var_names = [x[0] for x in tf.train.list_variables(ckpt)]\n new_vars = []\n processed_var_names = []\n for var_name in bert_var_names:\n var_value = tf.train.load_variable(ckpt, var_name)\n new_var_name = None\n if var_name in _DIRECT_MAPPINGS:\n new_var_name = _DIRECT_MAPPINGS[var_name]\n elif var_name.startswith(\"bert/encoder/layer_\"):\n lid = re.search(r\"layer_\\d+\", var_name).group().split(\"_\")[-1]\n postfix = var_name.split(f\"layer_{lid}/\")[1]\n if postfix in _POSTFIX_MAPPINGS:\n new_var_name = f\"bert/encoder/layer_{lid}/{_POSTFIX_MAPPINGS[postfix]}\"\n elif (postfix.startswith(\"attention/self/key\")\n or postfix.startswith(\"attention/self/query\")\n or postfix.startswith(\"attention/self/value\")):\n tensor_name = postfix.split(\"/\")[-1]\n new_var_name = (f\"bert/encoder/layer_{lid}/self_attention_prepost_wrapper\"\n f\"/self_attention/qkv_transform/{tensor_name}\")\n if new_var_name in processed_var_names:\n continue\n q_value = tf.train.load_variable(\n ckpt, f\"bert/encoder/layer_{lid}/attention/self/query/{tensor_name}\")\n k_value = tf.train.load_variable(\n ckpt, f\"bert/encoder/layer_{lid}/attention/self/key/{tensor_name}\")\n v_value = tf.train.load_variable(\n ckpt, f\"bert/encoder/layer_{lid}/attention/self/value/{tensor_name}\")\n var_value = numpy.concatenate([q_value, k_value, v_value], axis=-1)\n\n if new_var_name:\n processed_var_names.append(new_var_name)\n logging.info(f\"convert {var_name}\")\n logging.info(f\"\\t ==> {new_var_name}\")\n new_vars.append(tf.Variable(\n initial_value=var_value,\n trainable=False,\n name=new_var_name,\n dtype=str(var_value.dtype)))\n else:\n logging.info(f\"No matching variable for {var_name}\")\n ckpt_saver = tf.train.Checkpoint(**{x.name.split(\":\")[0]: x for x in new_vars})\n save_ckpt = os.path.join(save_path, \"ckpt\")\n logging.info(f\"Saving checkpoint to {save_ckpt}\")\n ckpt_saver.save(save_ckpt)\n"
] |
[
[
"tensorflow.train.load_variable",
"tensorflow.train.list_variables",
"numpy.concatenate",
"tensorflow.io.gfile.makedirs"
]
] |
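Note on the row above: the most involved step in `convert_checkpoint` is fusing BERT's separate query/key/value projections into the single `qkv_transform` variable that neurst expects, by concatenating the three kernels (and biases) along the last axis. A toy illustration of that shape transformation — the hidden size here is a made-up value, not BERT's:

```python
import numpy as np

hidden = 4
q_kernel = np.full((hidden, hidden), 1.0)
k_kernel = np.full((hidden, hidden), 2.0)
v_kernel = np.full((hidden, hidden), 3.0)
qkv_kernel = np.concatenate([q_kernel, k_kernel, v_kernel], axis=-1)

q_bias, k_bias, v_bias = (np.zeros(hidden),) * 3
qkv_bias = np.concatenate([q_bias, k_bias, v_bias], axis=-1)
print(qkv_kernel.shape, qkv_bias.shape)  # (4, 12) (12,) -> one fused qkv transform
```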
juyingnan/KaggleTest
|
[
"27e9114711eb0b6d6ba8c368a6079ff3feb1ca16"
] |
[
"classification/ships.py"
] |
[
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Activation, Dropout, Flatten\nfrom tensorflow.compat.v2.keras.layers import BatchNormalization\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint\nfrom tensorflow.keras.regularizers import l2, l1\nfrom skimage import io\nimport math\nimport os\nimport shutil\n\n\ndef make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n\n\ninput_dir = r'C:\\Users\\bunny\\Desktop\\ships-in-satellite-imagery'\nprint(os.listdir(input_dir))\n\ndata = pd.read_json(os.path.join(input_dir, 'shipsnet.json'))\ndata.head()\n\nx = []\nfor d in data['data']:\n d = np.array(d)\n x.append(d.reshape((3, 80 * 80)).T.reshape((80, 80, 3)))\nx = np.array(x)\ny = np.array(data['labels'])\nprint(x.shape)\nprint(y.shape)\n\n# splitting the data into training ans test sets\nx_train, x_temp, y_train, y_temp = train_test_split(x, y, test_size=0.20)\nx_val, x_test, y_val, y_test = train_test_split(x_temp, y_temp, test_size=0.50)\n# Normalizing the data\nscalar = MinMaxScaler()\nscalar.fit(x_train.reshape(x_train.shape[0], -1))\n\nx_train = scalar.transform(x_train.reshape(x_train.shape[0], -1)).reshape(x_train.shape)\nx_val = scalar.transform(x_val.reshape(x_val.shape[0], -1)).reshape(x_val.shape)\nx_test = scalar.transform(x_test.reshape(x_test.shape[0], -1)).reshape(x_test.shape)\n\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_val.shape)\nprint(y_val.shape)\nprint(x_test.shape)\nprint(y_test.shape)\n\n\n# creating the convolutional model\n\ndef create_cnn(data_shape):\n kernel_size = 3\n\n model = Sequential()\n\n model.add(Conv2D(16, (kernel_size), strides=(1, 1), padding='valid',\n input_shape=(data_shape[1], data_shape[2], data_shape[3])))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(MaxPooling2D((2,2)))\n\n model.add(Conv2D(32, (kernel_size), strides=(1, 1), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(MaxPooling2D((2,2)))\n\n model.add(Conv2D(64, (kernel_size), strides=(1, 1), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # model.add(MaxPooling2D((2,2)))\n\n model.add(Conv2D(64, (kernel_size), strides=(1, 1), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D((2, 2)))\n\n model.add(Conv2D(64, (kernel_size), strides=(1, 1), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D((2, 2)))\n\n model.add(Flatten())\n # model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.5))\n\n model.add(Dense(1, activation='sigmoid'))\n\n return model\n\n\ncnn_model = create_cnn(x_train.shape)\nprint(cnn_model.summary())\n\n\n# training the model\ndef step_decay(epoch):\n initial_lrate = 0.0001\n drop = 0.5\n epochs_drop = 10.0\n lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))\n # if epoch:\n # lrate = initial_lrate/np.sqrt(epoch)\n # else:\n # return initial_lrate\n return lrate\n\n\nopt = 
Adam(lr=0.0001)\nlrate = LearningRateScheduler(step_decay)\ncnn_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\ncheckpoint_path = os.path.join(input_dir, \"model.h5\")\ncheckpoint_dir = input_dir\n\n# # Create a callback that saves the model's weights\n# cp_callback = ModelCheckpoint(filepath=checkpoint_path,\n# save_weights_only=False,\n# verbose=1)\nhistory = cnn_model.fit(x_train, y_train, batch_size=64, epochs=50, shuffle=True, verbose=2,\n validation_data=(x_val, y_val), callbacks=[lrate],\n )\ncnn_model.save(checkpoint_path)\n\ny_pred = cnn_model.predict(x_test)\ny_pred[y_pred > 0.5] = 1\ny_pred[y_pred < 0.5] = 0\ny_pred_bool = np.asarray(y_pred, dtype=bool)\ntarget_names = ['No Plane', 'Plane']\nprint(classification_report(y_test, y_pred_bool, target_names=target_names))\nprint('Accuracy:', accuracy_score(y_test, y_pred_bool))\n\nsavepath = r'C:\\Users\\bunny\\Desktop\\ship_test'\nfor i in [0, 1]:\n for j in [0, 1]:\n make_dir(r'{}\\{}{}'.format(savepath, i, j))\n\nfor i in range(len(y_pred)):\n pred = int(y_pred[i])\n print(pred)\n io.imsave(r'{}\\{}{}\\{}.png'.format(savepath, pred, y_test[i], i), x_test[i])\n\n# # plotting the learning curves.\n# fig, ax = plt.subplots(1,2)\n# fig.set_size_inches((15,5))\n# ax[0].plot(range(1,51), history.history['loss'], c='blue', label='Training loss')\n# ax[0].plot(range(1,51), history.history['val_loss'], c='red', label='Validation loss')\n# ax[0].legend()\n# ax[0].set_xlabel('epochs')\n#\n# ax[1].plot(range(1,51), history.history['acc'], c='blue', label='Training accuracy')\n# ax[1].plot(range(1,51), history.history['val_acc'], c='red', label='Validation accuracy')\n# ax[1].legend()\n# ax[1].set_xlabel('epochs')\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Activation",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.keras.models.Sequential",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.optimizers.Adam",
"tensorflow.compat.v2.keras.layers.BatchNormalization"
]
] |
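Three things worth flagging in the row above: `target_names` reads `'No Plane'/'Plane'` although the data are ships; predictions exactly equal to 0.5 slip through both threshold masks (`> 0.5` and `< 0.5`); and `Adam(lr=...)` is the legacy keyword, spelled `learning_rate=` in current Keras. The `step_decay` schedule itself halves the learning rate every `epochs_drop` epochs, which is easy to verify in isolation:

```python
import math

def step_decay(epoch, initial_lrate=1e-4, drop=0.5, epochs_drop=10.0):
    return initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))

print([step_decay(e) for e in (0, 9, 10, 19, 20)])
# [0.0001, 5e-05, 5e-05, 2.5e-05, 2.5e-05]
```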
bschilder/public-resources
|
[
"54c129cc27f17d36ff512987659a20eebaa4f0da"
] |
[
"uk_biobank/DeGAs/src/py/rivas_decomposition_py/plot_biplot.py"
] |
[
"import os, logging, collections\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as mpatches\n\nfrom logging.config import dictConfig\nfrom logging import getLogger\n\ndictConfig(dict(\n version = 1,\n formatters = {'f': {'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}},\n handlers = {\n 'h': {'class': 'logging.StreamHandler','formatter': 'f',\n 'level': logging.DEBUG}},\n root = {'handlers': ['h'], 'level': logging.DEBUG,},\n))\n\nlogger = getLogger('plot_biplot')\n\n\ndef plot_biplot(\n d, pc_index1, pc_index2, \n biplot_phes, \n variants_of_interest=None,\n arrow_max_scale=1.0,\n figsize=(12,12), \n flip_xaxis=False, flip_yaxis=False, \n save=None\n):\n \"\"\"scatter plot of variants with phenotype annotation (arrows)\"\"\"\n \n if (variants_of_interest is None):\n variants_set = set([])\n else:\n variants_set = set(variants_of_interest)\n\n # prepare data\n plot_d = d.plot_data_pca_var(pc_index1, pc_index2)\n plot_d['color'] = np.where([x in variants_set for x in d.d['label_var']], 'red', 'blue') \n if(biplot_phes is not None and len(biplot_phes) > 0):\n biplot_arrow_2d = d.get_biplot_arrow_by_phenotypes([pc_index1, pc_index2], biplot_phes) \n else:\n biplot_arrow_2d = np.array([])\n \n # prepare fig grid\n fig = plt.figure(figsize=figsize)\n gs = gridspec.GridSpec(1, 1)\n fig_axs = [fig.add_subplot(sp) for sp in gs]\n \n\n # configure main and sub plots\n ax_main = fig_axs[0]\n ax_main.set_aspect('equal') \n ax_main.set_adjustable('box') \n ax_sub = ax_main.twinx().twiny()\n ax_sub.set_aspect('equal') \n ax_sub.set_adjustable('datalim')\n \n # axis range\n scatter_max = 1.1 * np.max([plot_d['x'], -plot_d['x'], plot_d['y'], -plot_d['y']]) \n arrow_max = arrow_max_scale * np.max([biplot_arrow_2d, -biplot_arrow_2d])\n \n if(flip_xaxis):\n ax_main.set_xlim([scatter_max, -scatter_max])\n ax_sub.set_xlim([arrow_max, -arrow_max]) \n else:\n ax_main.set_xlim([-scatter_max, scatter_max])\n ax_sub.set_xlim([-arrow_max, arrow_max]) \n if(flip_yaxis):\n ax_main.set_ylim([scatter_max, -scatter_max])\n ax_sub.set_ylim([arrow_max, -arrow_max]) \n else:\n ax_main.set_ylim([-scatter_max, scatter_max])\n ax_sub.set_ylim([-arrow_max, arrow_max]) \n \n \n # plot arrows\n for ar in biplot_arrow_2d:\n if((ar[1]) ** 2 < (ar[0]) ** 2 ): \n ax_sub.plot(\n np.linspace(-scatter_max, scatter_max, 1000),\n np.linspace(-scatter_max * ar[1] / ar[0], scatter_max * ar[1] / ar[0], 1000),\n linestyle='dashed',\n color='0.8' \n ) \n else:\n ax_sub.plot(\n np.linspace(-scatter_max * ar[0] / ar[1], scatter_max * ar[0] / ar[1], 1000),\n np.linspace(-scatter_max, scatter_max, 1000), \n linestyle='dashed',\n color='0.8'\n ) \n ax_sub.annotate(\n '', \n xy=(ar[0], ar[1]), \n xytext=(0, 0),\n arrowprops=dict(facecolor='red', shrinkA=0,shrinkB=0),\n ) \n\n # scatter plot \n ax_main.scatter(\n plot_d['x'], plot_d['y'], \n color=(plot_d['color'] if 'color' in plot_d else None),\n marker='x', s=(15**2)\n ) \n \n gs.tight_layout(fig, rect=[0, 0, 1, 1]) \n\n if (biplot_phes is not None and len(biplot_phes) > 0):\n # construct a data frame of arrow coordinate for manual annotation\n df = pd.DataFrame(collections.OrderedDict([\n ('phe', biplot_phes),\n ('x', biplot_arrow_2d[:, 0]),\n ('y', biplot_arrow_2d[:, 1]), \n ]))\n df['r'] = (df['y'] ** 2 + df['x'] ** 2) ** 0.5\n df['slope'] = df['y'] / df['x']\n df = df.sort_values(by='slope')\n else:\n df = None\n \n # save to file\n if save is not None:\n for ext 
in ['pdf', 'png']:\n            tmp_save_name='{}.{}'.format(save, ext)\n            logger.info('saving the image to {}'.format(tmp_save_name))\n            save_dir = os.path.dirname(tmp_save_name)\n            if(save_dir != '' and not os.path.exists(save_dir)):\n                # guard: os.makedirs('') raises when `save` has no directory component\n                os.makedirs(save_dir)\n            fig.savefig(tmp_save_name, bbox_inches=\"tight\", pad_inches=0.0)\n        if(df is not None):\n            tmp_save_name='{}.tsv'.format(save)\n            logger.info('saving the table to {}'.format(tmp_save_name))\n            df.to_csv(tmp_save_name, sep='\\t', index=False)\n\n    return df"
] |
[
[
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.linspace",
"matplotlib.gridspec.GridSpec"
]
] |
MahimaGaur31/mlModels
|
[
"ecab2cc83986f08b21bc85151cece85a08fcce82"
] |
[
"models/googlenet.py"
] |
[
"from collections import OrderedDict\nimport torch\nimport torch.nn as nn\n\n__all__ = ['googlenet']\n\nclass Inception_v1_GoogLeNet(nn.Module):\n input_side = 227\n rescale = 255.0\n rgb_mean = [122.7717, 115.9465, 102.9801]\n rgb_std = [1, 1, 1]\n\n def __init__(self, num_classes=1000):\n super(Inception_v1_GoogLeNet, self).__init__()\n self.num_classes = num_classes\n self.features = nn.Sequential(\n OrderedDict([\n ('conv1', nn.Sequential(OrderedDict([\n ('7x7_s2', nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), bias=False)),\n ('7x7_s2_bn', nn.BatchNorm2d(64, affine=True)),\n ('relu1', nn.ReLU(True)),\n ('pool1', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1)))\n ]))),\n\n ('conv2', nn.Sequential(OrderedDict([\n ('3x3_reduce', nn.Conv2d(64, 64, (1, 1), (1, 1), (0, 0), bias=False)),\n ('3x3_reduce_bn', nn.BatchNorm2d(64, affine=True)),\n ('relu1', nn.ReLU(True)),\n ('3x3', nn.Conv2d(64, 192, (3, 3), (1, 1), (1, 1), bias=False)),\n ('3x3_bn', nn.BatchNorm2d(192, affine=True)),\n ('relu2', nn.ReLU(True)),\n ('pool2', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1)))\n ]))),\n\n ('inception_3a', InceptionModule(192, 64, 96, 128, 16, 32, 32)),\n ('inception_3b', InceptionModule(256, 128, 128, 192, 32, 96, 64)),\n\n ('pool3', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1))),\n\n ('inception_4a', InceptionModule(480, 192, 96, 208, 16, 48, 64)),\n ('inception_4b', InceptionModule(512, 160, 112, 224, 24, 64, 64)),\n ('inception_4c', InceptionModule(512, 128, 128, 256, 24, 64, 64)),\n ('inception_4d', InceptionModule(512, 112, 144, 288, 32, 64, 64)),\n ('inception_4e', InceptionModule(528, 256, 160, 320, 32, 128, 128)),\n\n ('pool4', nn.MaxPool2d((3, 3), (2, 2), padding=(1,1))),\n\n ('inception_5a', InceptionModule(832, 256, 160, 320, 32, 128, 128)),\n ('inception_5b', InceptionModule(832, 384, 192, 384, 48, 128, 128)),\n\n ('pool5', nn.AvgPool2d((7, 7), (1, 1))),\n\n ('drop5', nn.Dropout(0.2))\n ]))\n\n self.classifier = nn.Linear(1024, self.num_classes)\n\n self.regime = [\n {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,\n 'weight_decay': 1e-4, 'momentum': 0.9},\n {'epoch': 30, 'lr': 1e-2},\n {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0},\n {'epoch': 90, 'lr': 1e-3, 'optimizer': 'Adam'}\n ]\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\nclass InceptionModule(nn.Module):\n def __init__(self, inplane, outplane_a1x1, outplane_b3x3_reduce, outplane_b3x3, outplane_c5x5_reduce, outplane_c5x5,\n outplane_pool_proj):\n super(InceptionModule, self).__init__()\n a = nn.Sequential(OrderedDict([\n ('1x1', nn.Conv2d(inplane, outplane_a1x1, (1, 1), (1, 1), (0, 0), bias=False)),\n ('1x1_bn', nn.BatchNorm2d(outplane_a1x1, affine=True)),\n ('1x1_relu', nn.ReLU(True))\n ]))\n\n b = nn.Sequential(OrderedDict([\n ('3x3_reduce', nn.Conv2d(inplane, outplane_b3x3_reduce, (1, 1), (1, 1), (0, 0), bias=False)),\n ('3x3_reduce_bn', nn.BatchNorm2d(outplane_b3x3_reduce, affine=True)),\n ('3x3_relu1', nn.ReLU(True)),\n ('3x3', nn.Conv2d(outplane_b3x3_reduce, outplane_b3x3, (3, 3), (1, 1), (1, 1), bias=False)),\n ('3x3_bn', nn.BatchNorm2d(outplane_b3x3, affine=True)),\n ('3x3_relu2', nn.ReLU(True))\n ]))\n\n c = nn.Sequential(OrderedDict([\n ('5x5_reduce', nn.Conv2d(inplane, outplane_c5x5_reduce, (1, 1), (1, 1), (0, 0), bias=False)),\n ('5x5_reduce_bn', nn.BatchNorm2d(outplane_c5x5_reduce, affine=True)),\n ('5x5_relu1', nn.ReLU(True)),\n ('5x5', nn.Conv2d(outplane_c5x5_reduce, outplane_c5x5, (5, 5), (1, 1), (2, 2), bias=False)),\n ('5x5_bn', nn.BatchNorm2d(outplane_c5x5, 
affine=True)),\n            ('5x5_relu2', nn.ReLU(True))\n        ]))\n\n        d = nn.Sequential(OrderedDict([\n            ('pool_pool', nn.MaxPool2d((3, 3), (1, 1), (1, 1))),\n            ('pool_proj', nn.Conv2d(inplane, outplane_pool_proj, (1, 1), (1, 1), (0, 0))),\n            ('pool_proj_bn', nn.BatchNorm2d(outplane_pool_proj, affine=True)),\n            ('pool_relu', nn.ReLU(True))\n        ]))\n\n        for container in [a, b, c, d]:\n            for name, module in container.named_children():\n                self.add_module(name, module)\n\n        self.branches = [a, b, c, d]\n\n    def forward(self, input):\n        return torch.cat([branch(input) for branch in self.branches], 1)\n\n\ndef googlenet(**kwargs):\n    # kwargs is a dict, so use .get(); getattr() on a dict would always return the default\n    num_classes = kwargs.get('num_classes', 1000)\n    return Inception_v1_GoogLeNet(num_classes)"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
smonsays/equilibrium-propagation
|
[
"45cfee96ebecb92af1cabef4b7533c82fdb2a263"
] |
[
"lib/energy.py"
] |
[
"# MIT License\n\n# Copyright (c) 2020 Simon Schug, João Sacramento\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport abc\n\nimport torch\n\nfrom lib import config\n\n\nclass EnergyBasedModel(abc.ABC, torch.nn.Module):\n \"\"\"\n Abstract base class for all energy-based models.\n\n Attributes:\n batch_size: Number of samples per batch\n c_energy: Cost function to nudge last layer\n clamp_du: List of boolean values tracking if the corresponding layer\n is clamped to a fixed value\n E: Current energy of the model. Object has the responsibility of\n maintaining the energy consistent if u or W change.\n dimensions: Dimensions of the underlying multilayer perceptron\n n_layers: Number of layers of the multilayer perceptron\n phi: List of activation functions for each layer\n W: ModuleList for the linear layers of the multilayer perceptron\n u: List of pre-activations\n\n \"\"\"\n def __init__(self, dimensions, c_energy, batch_size, phi):\n super(EnergyBasedModel, self).__init__()\n\n self.batch_size = batch_size\n self.c_energy = c_energy\n self.clamp_du = torch.zeros(len(dimensions), dtype=torch.bool)\n self.dimensions = dimensions\n self.E = None\n self.n_layers = len(dimensions)\n self.phi = phi\n self.u = None\n self.W = torch.nn.ModuleList(\n torch.nn.Linear(dim1, dim2)\n for dim1, dim2 in zip(self.dimensions[:-1], self.dimensions[1:])\n ).to(config.device)\n\n # Input (u_0) is clamped by default\n self.clamp_du[0] = True\n\n self.reset_state()\n\n @abc.abstractmethod\n def fast_init(self):\n \"\"\"\n Fast initilization of pre-activations.\n \"\"\"\n return\n\n @abc.abstractmethod\n def update_energy(self):\n \"\"\"\n Update the energy.\n \"\"\"\n return\n\n def clamp_layer(self, i, u_i):\n \"\"\"\n Clamp the specified layer.\n\n Args:\n i: Index of layer to be clamped\n u_i: Tensor to which the layer i is clamped\n \"\"\"\n self.u[i] = u_i\n self.clamp_du[i] = True\n self.update_energy()\n\n def release_layer(self, i):\n \"\"\"\n Release (i.e. 
un-clamp) the specified layer.\n\n        Args:\n            i: Index of layer to be released\n        \"\"\"\n        self.u[i].requires_grad = True\n        self.clamp_du[i] = False\n        self.update_energy()\n\n    def reset_state(self):\n        \"\"\"\n        Reset the state of the system to a random (Normal) configuration.\n        \"\"\"\n        self.u = []\n        for i in range(self.n_layers):\n            self.u.append(torch.randn((self.batch_size, self.dimensions[i]),\n                                      requires_grad=not(self.clamp_du[i]),\n                                      device=config.device))\n        self.update_energy()\n\n    def set_C_target(self, target):\n        \"\"\"\n        Set new target tensor for the cost function.\n\n        Args:\n            target: target tensor\n        \"\"\"\n        self.c_energy.set_target(target)\n        self.update_energy()\n\n    def u_relax(self, dt, n_relax, tol, tau):\n        \"\"\"\n        Relax the neural state variables until a fixed point is obtained\n        with precision < tol or until the maximum number of steps n_relax is reached.\n\n        Args:\n            dt: Step size\n            n_relax: Maximum number of steps\n            tol: Tolerance/precision of relaxation\n            tau: Time constant\n\n        Returns:\n            Change in energy after relaxation\n        \"\"\"\n        E_init = self.E.clone().detach()\n        E_prev = self.E.clone().detach()\n\n        for i in range(n_relax):\n            # Perform a single relaxation step\n            du_norm = self.u_step(dt, tau)\n            # dE = self.E.detach() - E_prev\n\n            # If change is below numerical tolerance, break\n            if du_norm < tol:\n                break\n\n            E_prev = self.E.clone().detach()\n\n        return torch.sum(E_prev - E_init)\n\n    def u_step(self, dt, tau):\n        \"\"\"\n        Perform single relaxation step on the neural state variables.\n\n        Args:\n            dt: Step size\n            tau: Time constant\n\n        Returns:\n            Absolute change in pre-activations\n        \"\"\"\n        # Compute gradients wrt current energy\n        self.zero_grad()\n        batch_E = torch.sum(self.E)\n        batch_E.backward()\n\n        with torch.no_grad():\n            # Apply the update in every layer\n            du_norm = 0\n            for i in range(self.n_layers):\n                if not self.clamp_du[i]:\n                    du = self.u[i].grad\n                    self.u[i] -= dt / tau * du\n\n                    du_norm += float(torch.mean(torch.norm(du, dim=1)))\n\n        # Recompute the energy outside the no_grad block so the next backward() has a graph\n        self.update_energy()\n\n        return du_norm\n\n    def w_get_gradients(self, loss=None):\n        \"\"\"\n        Compute the gradient on the energy w.r.t. the parameters W.\n\n        Args:\n            loss: Optional loss to optimize for.
Otherwise the mean energy is optimized.\n\n Returns:\n List of gradients for each layer\n \"\"\"\n self.zero_grad()\n if loss is None:\n loss = torch.mean(self.E)\n return torch.autograd.grad(loss, self.parameters())\n\n def w_optimize(self, free_grad, nudged_grad, w_optimizer):\n \"\"\"\n Update weights using free and nudged phase gradients.\n\n Args:\n free_grad: List of free phase gradients\n nudged_grad: List of nudged phase gradients\n w_optimizer: torch.optim.Optimizer for the model parameters\n \"\"\"\n self.zero_grad()\n w_optimizer.zero_grad()\n\n # Apply the contrastive Hebbian style update\n for p, f_g, n_g in zip(self.parameters(), free_grad, nudged_grad):\n p.grad = (1 / self.c_energy.beta) * (n_g - f_g)\n\n w_optimizer.step()\n self.update_energy()\n\n def zero_grad(self):\n \"\"\"\n Zero gradients for parameters and pre-activations.\n \"\"\"\n self.W.zero_grad()\n for u_i in self.u:\n if u_i.grad is not None:\n u_i.grad.detach_()\n u_i.grad.zero_()\n\n\nclass ConditionalGaussian(EnergyBasedModel):\n \"\"\"\n One example of an energy-based model that has a probabilistic interpretation as\n the (negative) log joint probability of a conditional-Gaussian model.\n Also see review by Bogacz and Whittington, 2019.\n \"\"\"\n def __init__(self, dimensions, c_energy, batch_size, phi):\n super(ConditionalGaussian, self).__init__(dimensions, c_energy, batch_size, phi)\n\n def fast_init(self):\n \"\"\"\n The FF init is a very handy hack when working with the ConditionalGaussian\n model, which allows reducing the number of fixed point iterations\n significantly, and results in improved training for large dt steps.\n \"\"\"\n for i in range(self.n_layers - 1):\n self.u[i + 1] = self.W[i](self.phi[i](self.u[i])).detach()\n self.u[i + 1].requires_grad = not self.clamp_du[i + 1]\n\n self.update_energy()\n\n def update_energy(self):\n \"\"\"\n Update the energy as the mean squared predictive error.\n \"\"\"\n self.E = 0\n for i in range(self.n_layers - 1):\n pred = self.W[i](self.phi[i](self.u[i]))\n loss = torch.nn.functional.mse_loss(pred, self.u[i + 1], reduction='none')\n self.E += torch.sum(loss, dim=1)\n\n if self.c_energy.target is not None:\n self.E += self.c_energy.compute_energy(self.u[-1])\n\n\nclass RestrictedHopfield(EnergyBasedModel):\n \"\"\"\n The classical Hopfield energy in a restricted feedforward model\n as used in the original equilibrium propagation paper by Scellier, 2017\n \"\"\"\n def __init__(self, dimensions, c_energy, batch_size, phi):\n super(RestrictedHopfield, self).__init__(dimensions, c_energy, batch_size, phi)\n\n def fast_init(self):\n raise NotImplementedError(\"Fast initialization not possible for the Hopfield model.\")\n\n def update_energy(self):\n \"\"\"\n Update the energy computed as the Hopfield Energy.\n \"\"\"\n self.E = 0\n\n for i, layer in enumerate(self.W):\n r_pre = self.phi[i](self.u[i])\n r_post = self.phi[i + 1](self.u[i + 1])\n\n if i == 0:\n self.E += 0.5 * torch.einsum('ij,ij->i', self.u[i], self.u[i])\n\n self.E += 0.5 * torch.einsum('ij,ij->i', self.u[i + 1], self.u[i + 1])\n self.E -= 0.5 * torch.einsum('bi,ji,bj->b', r_pre, layer.weight, r_post)\n self.E -= 0.5 * torch.einsum('bi,ij,bj->b', r_post, layer.weight, r_pre)\n self.E -= torch.einsum('i,ji->j', layer.bias, r_post)\n\n if self.c_energy.target is not None:\n self.E += self.c_energy.compute_energy(self.u[-1])\n"
] |
[
[
"torch.nn.Linear",
"torch.einsum",
"torch.norm",
"torch.no_grad",
"torch.nn.functional.mse_loss",
"torch.randn",
"torch.mean",
"torch.sum"
]
] |
azlkiniue/poliastro
|
[
"fedec503390fcb7ee375e278d1c9d7b2c570506f"
] |
[
"src/poliastro/tests/tests_twobody/test_orbit.py"
] |
[
"import pickle\n\nimport matplotlib\nimport numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import (\n ITRS,\n CartesianDifferential,\n CartesianRepresentation,\n SkyCoord,\n)\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.time import Time\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom poliastro.bodies import (\n Body,\n Earth,\n Jupiter,\n Mars,\n Mercury,\n Moon,\n Neptune,\n Pluto,\n Saturn,\n Sun,\n Uranus,\n Venus,\n)\nfrom poliastro.constants import J2000, J2000_TDB\nfrom poliastro.frames import (\n GCRS,\n HCRS,\n ICRS,\n HeliocentricEclipticJ2000,\n JupiterICRS,\n MarsICRS,\n MercuryICRS,\n NeptuneICRS,\n Planes,\n PlutoICRS,\n SaturnICRS,\n UranusICRS,\n VenusICRS,\n get_frame,\n)\nfrom poliastro.twobody.angles import M_to_nu\nfrom poliastro.twobody.orbit import Orbit, OrbitSamplingWarning, TimeScaleWarning\nfrom poliastro.twobody.propagation import cowell, kepler, mean_motion\n\n\[email protected]()\ndef hyperbolic():\n r = [1.197659243752796e09, -4.443716685978071e09, -1.747610548576734e09] * u.km\n v = (\n [5.540549267188614e00, -1.251544669134140e01, -4.848892572767733e00]\n * u.km\n / u.s\n )\n epoch = Time(\"2015-07-14 07:59\", scale=\"tdb\")\n return Orbit.from_vectors(Sun, r, v, epoch)\n\n\ndef test_default_time_for_new_state():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n expected_epoch = J2000\n ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)\n assert ss.epoch == expected_epoch\n\n\ndef test_state_raises_unitserror_if_elements_units_are_wrong():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n wrong_angle = 1.0 * u.AU\n with pytest.raises(u.UnitsError) as excinfo:\n Orbit.from_classical(Sun, _d, _, _a, _a, _a, wrong_angle)\n assert (\n \"UnitsError: Argument 'nu' to function 'from_classical' must be in units convertible to 'rad'.\"\n in excinfo.exconly()\n )\n\n\ndef test_state_raises_unitserror_if_rv_units_are_wrong():\n _d = [1.0, 0.0, 0.0] * u.AU\n wrong_v = [0.0, 1.0e-6, 0.0] * u.AU\n with pytest.raises(u.UnitsError) as excinfo:\n Orbit.from_vectors(Sun, _d, wrong_v)\n assert (\n \"UnitsError: Argument 'v' to function 'from_vectors' must be in units convertible to 'm / s'.\"\n in excinfo.exconly()\n )\n\n\ndef test_parabolic_elements_fail_early():\n attractor = Earth\n ecc = 1.0 * u.one\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(attractor, _d, ecc, _a, _a, _a, _a)\n assert (\n \"ValueError: For parabolic orbits use Orbit.parabolic instead\"\n in excinfo.exconly()\n )\n\n\ndef test_bad_inclination_raises_exception():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n bad_inc = 200 * u.deg\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(_body, _d, _, bad_inc, _a, _a, _a)\n assert (\n \"ValueError: Inclination must be between 0 and 180 degrees\" in excinfo.exconly()\n )\n\n\ndef test_bad_hyperbolic_raises_exception():\n bad_a = 1.0 * u.AU\n ecc = 1.5 * u.one\n _inc = 100 * u.deg # Unused inclination\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_classical(_body, bad_a, ecc, _inc, _a, _a, _a)\n assert \"Hyperbolic orbits have 
negative semimajor axis\" in excinfo.exconly()\n\n\ndef test_apply_maneuver_changes_epoch():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Sun, _d, _, _a, _a, _a, _a)\n dt = 1 * u.h\n dv = [0, 0, 0] * u.km / u.s\n orbit_new = ss.apply_maneuver([(dt, dv)])\n assert orbit_new.epoch == ss.epoch + dt\n\n\ndef test_orbit_from_ephem_with_no_epoch_is_today():\n # This is not that obvious http://stackoverflow.com/q/6407362/554319\n body = Earth\n ss = Orbit.from_body_ephem(body)\n assert (Time.now() - ss.epoch).sec < 1\n\n\ndef test_from_ephem_raises_warning_if_time_is_not_tdb_with_proper_time(recwarn):\n body = Earth\n epoch = Time(\"2017-09-29 07:31:26\", scale=\"utc\")\n expected_epoch_string = \"2017-09-29 07:32:35.182\" # epoch.tdb.value\n\n Orbit.from_body_ephem(body, epoch)\n\n w = recwarn.pop(TimeScaleWarning)\n assert expected_epoch_string in str(w.message)\n\n\ndef test_circular_has_proper_semimajor_axis():\n alt = 500 * u.km\n attractor = Earth\n expected_a = Earth.R + alt\n ss = Orbit.circular(attractor, alt)\n assert ss.a == expected_a\n\n\ndef test_geosync_has_proper_period():\n expected_period = 1436 * u.min\n\n ss = Orbit.circular(Earth, alt=42164 * u.km - Earth.R)\n\n assert_quantity_allclose(ss.period, expected_period, rtol=1e-4)\n\n\ndef test_parabolic_has_proper_eccentricity():\n attractor = Earth\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n expected_ecc = 1.0 * u.one\n ss = Orbit.parabolic(attractor, _d, _a, _a, _a, _a)\n assert_allclose(ss.ecc, expected_ecc)\n\n\ndef test_parabolic_has_zero_energy():\n attractor = Earth\n _d = 1.0 * u.AU # Unused distance\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.parabolic(attractor, _d, _a, _a, _a, _a)\n assert_allclose(ss.energy.value, 0.0, atol=1e-16)\n\n\ndef test_pqw_for_circular_equatorial_orbit():\n ss = Orbit.circular(Earth, 600 * u.km)\n expected_p = [1, 0, 0] * u.one\n expected_q = [0, 1, 0] * u.one\n expected_w = [0, 0, 1] * u.one\n p, q, w = ss.pqw()\n assert_allclose(p, expected_p)\n assert_allclose(q, expected_q)\n assert_allclose(w, expected_w)\n\n\[email protected](\n \"attractor,alt,argp,expected_argp,expected_inc\",\n [\n (\n Earth,\n 1e6 * u.m,\n 3 * np.pi / 2 * u.rad,\n 3 * np.pi / 2 * u.rad,\n 63.4349 * np.pi / 180 * u.rad,\n ),\n (Mars, 3e8 * u.m, 0 * u.deg, 0 * u.deg, 63.4349 * np.pi / 180 * u.rad),\n ],\n)\ndef test_frozen_orbit_argp(attractor, alt, argp, expected_argp, expected_inc):\n orbit = Orbit.frozen(attractor, alt, argp=argp)\n assert_allclose(orbit.argp, expected_argp)\n assert_allclose(orbit.inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,inc,argp,expected_inc,expected_argp\",\n [\n (\n Earth,\n 1e6 * u.m,\n 116.5651 * np.pi / 180 * u.rad,\n 3 * np.pi / 2 * u.rad,\n 116.5651 * np.pi / 180 * u.rad,\n 3 * np.pi / 2 * u.rad,\n ),\n (\n Mars,\n 3e8 * u.m,\n 63.4349 * np.pi / 180 * u.rad,\n np.pi / 2 * u.rad,\n 63.4349 * np.pi / 180 * u.rad,\n np.pi / 2 * u.rad,\n ),\n ],\n)\ndef test_frozen_orbit_with_critical_argp_and_critical_inc(\n attractor, alt, inc, argp, expected_inc, expected_argp\n):\n orbit = Orbit.frozen(attractor, alt, inc=inc, argp=argp)\n assert_allclose(orbit.argp, expected_argp)\n assert_allclose(orbit.inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,expected_inc,expected_argp\",\n [\n (Earth, 1e6 * u.m, 63.4349 * np.pi / 180 * u.rad, np.pi / 2 * u.rad),\n (Mars, 3e8 * u.m, 63.4349 * np.pi / 180 * u.rad, np.pi / 2 * u.rad),\n ],\n)\ndef 
test_frozen_orbit_no_args(attractor, alt, expected_inc, expected_argp):\n orbit = Orbit.frozen(attractor, alt)\n argp = orbit.argp\n inc = orbit.inc\n assert_allclose(argp, expected_argp)\n assert_allclose(inc, expected_inc)\n\n\[email protected](\n \"attractor,alt,argp,expected_inc,ecc,expected_ecc\",\n [\n (\n Earth,\n 1e6 * u.m,\n 2 * u.deg, # Non critical value\n 63.4349 * np.pi / 180 * u.rad,\n None,\n 0.0549 * u.one,\n ),\n (\n Mars,\n 3e8 * u.m,\n 0 * u.deg, # Non critical value\n 63.4349 * np.pi / 180 * u.rad,\n 0.04 * u.one,\n 0.04 * u.one,\n ),\n ],\n)\ndef test_frozen_orbit_with_non_critical_argp(\n attractor, alt, argp, expected_inc, ecc, expected_ecc\n):\n orbit = Orbit.frozen(attractor, alt, argp=argp, ecc=ecc) # Non-critical value\n assert_allclose(orbit.inc, expected_inc)\n assert_allclose(orbit.ecc, expected_ecc)\n\n\ndef test_frozen_orbit_non_critical_inclination():\n orbit = Orbit.frozen(Earth, 1e3 * u.km, inc=0 * u.deg) # Non-critical value\n assert orbit.argp in [np.pi / 2, 3 * np.pi / 2] * u.rad\n\n\ndef test_frozen_orbit_venus_special_case():\n with pytest.raises(NotImplementedError) as excinfo:\n Orbit.frozen(Venus, 1 * u.m)\n assert excinfo.type == NotImplementedError\n assert str(excinfo.value) == \"This has not been implemented for Venus\"\n\n\ndef test_frozen_orbit_non_spherical_arguments():\n with pytest.raises(AttributeError) as excinfo:\n Orbit.frozen(Jupiter, 1 * u.m)\n assert excinfo.type == AttributeError\n assert (\n str(excinfo.value)\n == \"Attractor Jupiter has not spherical harmonics implemented\"\n )\n\n\ndef test_frozen_orbit_altitude():\n with pytest.raises(ValueError) as excinfo:\n Orbit.frozen(Earth, -1 * u.m)\n assert excinfo.type == ValueError\n assert (\n str(excinfo.value)\n == \"The semimajor axis may not be smaller that Earth's radius\"\n )\n\n\ndef test_orbit_representation():\n ss = Orbit.circular(\n Earth, 600 * u.km, 20 * u.deg, epoch=Time(\"2018-09-08 09:04:00\", scale=\"tdb\")\n )\n expected_str = \"6978 x 6978 km x 20.0 deg (GCRS) orbit around Earth (\\u2641) at epoch 2018-09-08 09:04:00.000 (TDB)\"\n\n assert str(ss) == repr(ss) == expected_str\n\n\ndef test_orbit_no_frame_representation():\n date_launch = Time(\"2011-11-26 15:02\", scale=\"utc\")\n r = [61445.76498656, 24827.93010168, 0.0] * u.km\n v = [-0.42581645, -0.18867869, 0.0] * u.km / u.s\n ss = Orbit.from_vectors(Moon, r, v, date_launch)\n expected_str = \"106 x -142299 km x 180.0 deg orbit around Moon (\\u263E) at epoch 2011-11-26 15:02:00.000 (UTC)\"\n\n assert str(ss) == repr(ss) == expected_str\n\n\ndef test_sample_numpoints():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n _body = Sun # Unused body\n ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)\n positions = ss.sample(values=50)\n assert len(positions) == 50\n\n\[email protected](\"num_points\", [3, 5, 7, 9, 11, 101])\ndef test_sample_num_points(num_points):\n # Data from Vallado, example 2.4\n r0 = [1131.340, -2282.343, 6672.423] * u.km\n v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s\n ss0 = Orbit.from_vectors(Earth, r0, v0)\n\n # TODO: Test against the perigee and apogee\n # expected_ss = ss0.propagate(ss0.period / 2)\n\n rr = ss0.sample(num_points)\n\n assert len(rr) == num_points\n # assert_quantity_allclose(rr[num_points // 2].data.xyz, expected_ss.r)\n\n\[email protected](\"method\", [mean_motion, cowell, kepler])\ndef test_sample_big_orbits(method):\n # See https://github.com/poliastro/poliastro/issues/265\n ss = 
Orbit.from_vectors(\n Sun,\n [-9018878.6, -94116055, 22619059] * u.km,\n [-49.950923, -12.948431, -4.2925158] * u.km / u.s,\n )\n positions = ss.sample(15, method=method)\n assert len(positions) == 15\n\n\ndef test_hyperbolic_nu_value_check(hyperbolic):\n positions = hyperbolic.sample(100)\n\n assert isinstance(positions, HCRS)\n assert len(positions) == 100\n\n\ndef test_hyperbolic_modulus_wrapped_nu():\n ss = Orbit.from_vectors(\n Sun,\n [-9.77441841e07, 1.01000539e08, 4.37584668e07] * u.km,\n [23.75936985, -43.09599568, -8.7084724] * u.km / u.s,\n )\n num_values = 3\n\n positions = ss.sample(num_values)\n\n assert_quantity_allclose(positions[0].data.xyz, ss.r)\n\n\[email protected](\"min_anomaly\", [-30 * u.deg, -10 * u.deg])\[email protected](\"max_anomaly\", [10 * u.deg, 30 * u.deg])\ndef test_sample_hyperbolic_limits(hyperbolic, min_anomaly, max_anomaly):\n num_points = 50\n\n coords = hyperbolic.sample(\n num_points, min_anomaly=min_anomaly, max_anomaly=max_anomaly\n )\n\n assert len(coords) == num_points\n\n\ndef test_sample_hyperbolic_outside_limits(hyperbolic):\n with pytest.warns(OrbitSamplingWarning, match=\"anomaly outside range, clipping\"):\n hyperbolic.sample(3, min_anomaly=-np.pi * u.rad)\n\n with pytest.warns(OrbitSamplingWarning, match=\"anomaly outside range, clipping\"):\n hyperbolic.sample(3, max_anomaly=np.pi * u.rad)\n\n\ndef test_orbit_is_pickable(hyperbolic):\n pickled = pickle.dumps(hyperbolic)\n ss_result = pickle.loads(pickled)\n\n assert_array_equal(hyperbolic.r, ss_result.r)\n assert_array_equal(hyperbolic.v, ss_result.v)\n assert ss_result.epoch == hyperbolic.epoch\n\n\ndef test_orbit_plot_is_static():\n\n # Data from Curtis, example 4.3\n r = [-6045, -3490, 2500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n plot = ss.plot(static=True)\n assert isinstance(plot[0], matplotlib.lines.Line2D)\n assert isinstance(plot[1], matplotlib.lines.Line2D)\n\n\ndef test_orbit_plot_static_3d():\n # Data from Curtis, example 4.3\n r = [-6045, -3490, 2500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n with pytest.raises(ValueError, match=\"static and use_3d cannot be true\"):\n ss.plot(static=True, use_3d=True)\n\n\[email protected](\"use_3d\", [False, True])\ndef test_orbit_plot_is_not_static(use_3d):\n from plotly.graph_objs import FigureWidget\n\n # Data from Curtis, example 4.3\n r = [-6045, -3490, 2500] * u.km\n v = [-3.457, 6.618, 2.533] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n plot = ss.plot(static=False, use_3d=use_3d)\n assert isinstance(plot, FigureWidget)\n\n\[email protected](\n \"attractor, expected_frame_class\",\n [\n (Sun, HCRS),\n (Mercury, MercuryICRS),\n (Venus, VenusICRS),\n (Earth, GCRS),\n (Mars, MarsICRS),\n (Jupiter, JupiterICRS),\n (Saturn, SaturnICRS),\n (Uranus, UranusICRS),\n (Neptune, NeptuneICRS),\n (Pluto, PlutoICRS),\n ],\n)\ndef test_orbit_has_proper_frame(attractor, expected_frame_class):\n # Dummy data\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n epoch = Time(\"2015-07-14 07:59\", scale=\"tdb\")\n\n ss = Orbit.from_vectors(attractor, r, v, epoch)\n\n assert ss.frame.is_equivalent_frame(expected_frame_class(obstime=epoch))\n assert ss.frame.obstime == epoch\n\n\ndef test_orbit_from_custom_body_raises_error_when_asked_frame():\n attractor = Body(Sun, 1 * u.km ** 3 / u.s ** 2, \"_DummyPlanet\")\n\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(attractor, r, 
v)\n\n with pytest.raises(NotImplementedError) as excinfo:\n ss.frame\n assert (\n \"Frames for orbits around custom bodies are not yet supported\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"body\", [Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune]\n)\ndef test_orbit_from_ephem_is_in_icrs_frame(body):\n ss = Orbit.from_body_ephem(body)\n\n assert ss.frame.is_equivalent_frame(ICRS())\n\n\ndef test_orbit_accepts_ecliptic_plane():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v, plane=Planes.EARTH_ECLIPTIC)\n\n assert ss.frame.is_equivalent_frame(HeliocentricEclipticJ2000(obstime=J2000))\n\n\ndef test_orbit_represent_as_produces_correct_data():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v)\n\n expected_result = CartesianRepresentation(\n *r, differentials=CartesianDifferential(*v)\n )\n\n result = ss.represent_as(CartesianRepresentation)\n\n # We can't directly compare the objects, see\n # https://github.com/astropy/astropy/issues/7793\n assert (result.xyz == expected_result.xyz).all()\n assert (\n result.differentials[\"s\"].d_xyz == expected_result.differentials[\"s\"].d_xyz\n ).all()\n\n\ndef test_orbit_propagate_retains_plane():\n r = [1e09, -4e09, -1e09] * u.km\n v = [5e00, -1e01, -4e00] * u.km / u.s\n\n ss = Orbit.from_vectors(Sun, r, v, plane=Planes.EARTH_ECLIPTIC)\n\n orig_frame = ss.frame\n\n final_ss = ss.propagate(1 * u.h)\n expected_frame = orig_frame.replicate_without_data(obstime=final_ss.epoch)\n\n assert final_ss.frame.is_equivalent_frame(expected_frame)\n\n\[email protected]_data\ndef test_from_horizons_raise_valueerror():\n with pytest.raises(ValueError) as exep:\n Orbit.from_horizons(name=\"Dummy\")\n assert (\n \"ValueError: Unknown target (Dummy). 
Maybe try different id_type?\"\n in exep.exconly()\n )\n\n\[email protected]_data\ndef test_orbits_are_same():\n epoch = Time(\"2018-07-23\")\n # Orbit Parameters of Ceres\n # Taken from https://ssd.jpl.nasa.gov/horizons.cgi\n ss = Orbit.from_classical(\n Sun,\n 2.767107584017257 * u.au,\n 0.07554802949294502 * u.one,\n 27.18502520750381 * u.deg,\n 23.36913256044832 * u.deg,\n 132.2919806192451 * u.deg,\n 21.28958091587153 * u.deg,\n epoch,\n )\n ss1 = Orbit.from_horizons(name=\"Ceres\", epoch=epoch)\n assert ss.pqw()[0].value.all() == ss1.pqw()[0].value.all()\n assert ss.r_a == ss1.r_a\n assert ss.a == ss1.a\n\n\[email protected]_data\ndef test_plane_is_set_in_horizons():\n plane = Planes.EARTH_ECLIPTIC\n ss = Orbit.from_horizons(name=\"Ceres\", plane=plane)\n assert ss.plane == plane\n\n\[email protected](\n \"attractor,angular_velocity,expected_a,expected_period\",\n [\n (\n Earth,\n (2 * np.pi / 23.9345) * u.rad / u.hour,\n 42164205 * u.m,\n 23.9345 * u.hour,\n ),\n (\n Mars,\n (2 * np.pi / 24.6228) * u.rad / u.hour,\n 20427595 * u.m,\n 24.6228 * u.hour,\n ),\n ],\n)\ndef test_geostationary_creation_from_angular_velocity(\n attractor, angular_velocity, expected_a, expected_period\n):\n ss = Orbit.geostationary(attractor=attractor, angular_velocity=angular_velocity)\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, expected_period, rtol=1.0e-7)\n\n\[email protected](\n \"attractor,period,expected_a\",\n [\n (Earth, 23.9345 * u.hour, 42164205 * u.m),\n (Mars, 24.6228 * u.hour, 20427595 * u.m),\n ],\n)\ndef test_geostationary_creation_from_period(attractor, period, expected_a):\n ss = Orbit.geostationary(attractor=attractor, period=period)\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, period, rtol=1.0e-7)\n\n\[email protected](\n \"attractor,period,hill_radius,expected_a\",\n [\n (Earth, 23.9345 * u.hour, 0.01 * u.AU, 42164205 * u.m),\n (Mars, 24.6228 * u.hour, 1000000 * u.km, 20427595 * u.m),\n ],\n)\ndef test_geostationary_creation_with_Hill_radius(\n attractor, period, hill_radius, expected_a\n):\n ss = Orbit.geostationary(\n attractor=attractor, period=period, hill_radius=hill_radius\n )\n assert_quantity_allclose(ss.a, expected_a, rtol=1.0e-7)\n assert_quantity_allclose(ss.period, period, rtol=1.0e-7)\n\n\[email protected](\"attractor\", [Earth, Mars])\ndef test_geostationary_input(attractor):\n with pytest.raises(ValueError) as excinfo:\n Orbit.geostationary(attractor=attractor)\n\n assert (\n \"ValueError: At least one among angular_velocity or period must be passed\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"attractor,period,hill_radius\", [(Venus, 243.025 * u.day, 1000000 * u.km)]\n)\ndef test_geostationary_non_existence_condition(attractor, period, hill_radius):\n with pytest.raises(ValueError) as excinfo:\n Orbit.geostationary(attractor=attractor, period=period, hill_radius=hill_radius)\n\n assert (\n \"Geostationary orbit for the given parameters doesn't exist\"\n in excinfo.exconly()\n )\n\n\ndef test_perigee_and_apogee():\n expected_r_a = 500 * u.km\n expected_r_p = 300 * u.km\n a = (expected_r_a + expected_r_p) / 2\n ecc = expected_r_a / a - 1\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Earth, a, ecc, _a, _a, _a, _a)\n assert_allclose(ss.r_a.to(u.km).value, expected_r_a.to(u.km).value)\n assert_allclose(ss.r_p.to(u.km).value, expected_r_p.to(u.km).value)\n\n\ndef test_convert_from_rv_to_coe():\n # Data from Vallado, example 2.6\n attractor = Earth\n 
p = 11067.790 * u.km\n ecc = 0.83285 * u.one\n inc = 87.87 * u.deg\n raan = 227.89 * u.deg\n argp = 53.38 * u.deg\n nu = 92.335 * u.deg\n expected_r = [6525.344, 6861.535, 6449.125] * u.km\n expected_v = [4.902276, 5.533124, -1.975709] * u.km / u.s\n\n r, v = Orbit.from_classical(\n attractor, p / (1 - ecc ** 2), ecc, inc, raan, argp, nu\n ).rv()\n\n assert_quantity_allclose(r, expected_r, rtol=1e-5)\n assert_quantity_allclose(v, expected_v, rtol=1e-5)\n\n\ndef test_convert_from_coe_to_rv():\n # Data from Vallado, example 2.5\n attractor = Earth\n r = [6524.384, 6862.875, 6448.296] * u.km\n v = [4.901327, 5.533756, -1.976341] * u.km / u.s\n\n expected_p = 11067.79 * u.km\n expected_ecc = 0.832853 * u.one\n expected_inc = 87.870 * u.deg\n expected_raan = 227.89 * u.deg\n expected_argp = 53.38 * u.deg\n expected_nu = 92.335 * u.deg\n\n ss = Orbit.from_vectors(attractor, r, v)\n\n _, ecc, inc, raan, argp, nu = ss.classical()\n p = ss.p\n\n assert_quantity_allclose(p, expected_p, rtol=1e-4)\n assert_quantity_allclose(ecc, expected_ecc, rtol=1e-4)\n assert_quantity_allclose(inc, expected_inc, rtol=1e-4)\n assert_quantity_allclose(raan, expected_raan, rtol=1e-4)\n assert_quantity_allclose(argp, expected_argp, rtol=1e-4)\n assert_quantity_allclose(nu, expected_nu, rtol=1e-4)\n\n\ndef test_perifocal_points_to_perigee():\n _d = 1.0 * u.AU # Unused distance\n _ = 0.5 * u.one # Unused dimensionless value\n _a = 1.0 * u.deg # Unused angle\n ss = Orbit.from_classical(Sun, _d, _, _a, _a, _a, _a)\n p, _, _ = ss.pqw()\n assert_allclose(p, ss.e_vec / ss.ecc)\n\n\ndef test_arglat_within_range():\n r = [3539.08827417, 5310.19903462, 3066.31301457] * u.km\n v = [-6.49780849, 3.24910291, 1.87521413] * u.km / u.s\n ss = Orbit.from_vectors(Earth, r, v)\n assert 0 * u.deg <= ss.arglat <= 360 * u.deg\n\n\ndef test_pqw_returns_dimensionless():\n r_0 = ([1, 0, 0] * u.au).to(u.km)\n v_0 = ([0, 6, 0] * u.au / u.year).to(u.km / u.day)\n ss = Orbit.from_vectors(Sun, r_0, v_0)\n\n p, q, w = ss.pqw()\n\n assert p.unit == u.one\n assert q.unit == u.one\n assert w.unit == u.one\n\n\ndef test_from_coord_fails_if_no_time_differential():\n pos = [30000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos)\n\n # Method fails if coordinate instance doesn't contain a differential with\n # respect to time\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_coords(Earth, SkyCoord(cartrep))\n assert (\n \"ValueError: Coordinate instance doesn't have a differential with respect to time\"\n in excinfo.exconly()\n )\n\n\[email protected](\n \"attractor\", [Earth, Jupiter, Mars, Mercury, Neptune, Saturn, Sun, Uranus, Venus]\n)\ndef test_orbit_creation_using_skycoord(attractor):\n vel = [0, 2, 0] * u.km / u.s\n cartdiff = CartesianDifferential(*vel)\n\n pos = [30000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos, differentials=cartdiff)\n\n coord = SkyCoord(cartrep, frame=\"icrs\")\n o = Orbit.from_coords(attractor, coord)\n\n inertial_frame_at_body_centre = get_frame(\n attractor, Planes.EARTH_EQUATOR, obstime=coord.obstime\n )\n\n coord_transformed_to_irf = coord.transform_to(inertial_frame_at_body_centre)\n pos_transformed_to_irf = coord_transformed_to_irf.cartesian.xyz\n vel_transformed_to_irf = coord_transformed_to_irf.cartesian.differentials[\"s\"].d_xyz\n\n assert (o.r == pos_transformed_to_irf).all()\n assert (o.v == vel_transformed_to_irf).all()\n\n\[email protected]_data\[email protected](\n \"attractor\", [Earth, Jupiter, Mars, Mercury, Neptune, Saturn, Sun, Uranus, Venus]\n)\[email protected](\"frame\", [ITRS, 
GCRS])\[email protected](\"obstime\", [J2000, J2000_TDB])\ndef test_orbit_creation_using_frame_obj(attractor, frame, obstime):\n vel = [0, 2, 0] * u.km / u.s\n cartdiff = CartesianDifferential(*vel)\n\n pos = [30000, 0, 0] * u.km\n cartrep = CartesianRepresentation(*pos, differentials=cartdiff)\n\n coord = frame(cartrep, obstime=obstime)\n o = Orbit.from_coords(attractor, coord)\n\n inertial_frame_at_body_centre = get_frame(\n attractor, Planes.EARTH_EQUATOR, obstime=coord.obstime\n )\n\n coord_transformed_to_irf = coord.transform_to(inertial_frame_at_body_centre)\n\n pos_transformed_to_irf = coord_transformed_to_irf.cartesian.xyz\n vel_transformed_to_irf = coord_transformed_to_irf.cartesian.differentials[\"s\"].d_xyz\n\n assert_quantity_allclose(o.r, pos_transformed_to_irf, atol=1e-5 * u.km)\n assert_quantity_allclose(o.v, vel_transformed_to_irf, atol=1e-5 * u.km / u.s)\n\n\[email protected](\"obstime\", [J2000, J2000_TDB])\ndef test_from_coord_fails_for_multiple_positions(obstime):\n cartdiff = CartesianDifferential(\n [[0, 1, 0], [-0.1, 0.9, 0]] * u.km / u.s, xyz_axis=1\n )\n cartrep = CartesianRepresentation(\n [[1, 0, 0], [0.9, 0.1, 0]] * u.km, differentials=cartdiff, xyz_axis=1\n )\n coords = GCRS(cartrep, representation_type=CartesianRepresentation, obstime=obstime)\n\n with pytest.raises(ValueError) as excinfo:\n Orbit.from_coords(Earth, coords)\n assert (\n \"ValueError: Coordinate instance must represents exactly 1 position, found: 2\"\n in excinfo.exconly()\n )\n\n\ndef test_from_coord_if_coord_is_not_of_shape_zero():\n pos = [0, 1, 0]\n vel = [1, 0, 0]\n cartdiff = CartesianDifferential([vel] * u.km / u.s, xyz_axis=1)\n cartrep = CartesianRepresentation([pos] * u.km, differentials=cartdiff, xyz_axis=1)\n coords = GCRS(cartrep, representation_type=CartesianRepresentation, obstime=J2000)\n\n ss = Orbit.from_coords(Earth, coords)\n\n assert_quantity_allclose(ss.r, pos * u.km, rtol=1e-5)\n assert_quantity_allclose(ss.v, vel * u.km / u.s, rtol=1e-5)\n\n\ndef test_from_sbdb():\n\n # Dictionary with structure: 'Object': [a, e, i, raan, argp, nu, epoch]\n # Notice JPL provides Mean anomaly, a conversion is needed to obtain nu\n\n SBDB_DATA = {\n \"Ceres\": (\n 2.769165146349478 * u.AU,\n 0.07600902762923671 * u.one,\n 10.59406732590292 * u.deg,\n 80.30553084093981 * u.deg,\n 73.59769486239257 * u.deg,\n M_to_nu(77.37209773768207 * u.deg, 0.07600902762923671 * u.one),\n )\n }\n\n for target_name in SBDB_DATA.keys():\n\n ss_target = Orbit.from_sbdb(target_name)\n ss_classical = ss_target.classical()\n\n assert ss_classical == SBDB_DATA[target_name]\n\n\ndef test_from_sbdb_raise_valueerror():\n with pytest.raises(ValueError) as e:\n Orbit.from_sbdb(name=\"Halley\")\n\n assert (\n str(e.value)\n == \"2 different objects found: \\n2688 Halley (1982 HG1)\\n1P/Halley\\n\"\n )\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.testing.assert_array_equal"
]
] |
barathum000/tensorflow-upstream
|
[
"1ec5f5f5d8e25e9cdd55cb74ae5af370d653c618"
] |
[
"tensorflow/python/keras/backend_test.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras backend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gc\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport scipy.sparse\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.layers import advanced_activations\nfrom tensorflow.python.keras.layers import normalization\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import tf_inspect\n\n\ndef compare_single_input_op_to_numpy(keras_op,\n np_op,\n input_shape,\n dtype='float32',\n negative_values=True,\n keras_args=None,\n keras_kwargs=None,\n np_args=None,\n np_kwargs=None):\n keras_args = keras_args or []\n keras_kwargs = keras_kwargs or {}\n np_args = np_args or []\n np_kwargs = np_kwargs or {}\n inputs = 2. 
* np.random.random(input_shape)\n if negative_values:\n inputs -= 1.\n keras_output = keras_op(\n backend.variable(inputs, dtype=dtype), *keras_args, **keras_kwargs)\n keras_output = backend.eval(keras_output)\n np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)\n try:\n np.testing.assert_allclose(keras_output, np_output, atol=1e-4)\n except AssertionError:\n raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '\n 'Expected ' + str(np_output) + ' but got ' +\n str(keras_output))\n\n\ndef compare_two_inputs_op_to_numpy(keras_op,\n np_op,\n input_shape_a,\n input_shape_b,\n dtype='float32',\n keras_args=None,\n keras_kwargs=None,\n np_args=None,\n np_kwargs=None):\n keras_args = keras_args or []\n keras_kwargs = keras_kwargs or {}\n np_args = np_args or []\n np_kwargs = np_kwargs or {}\n input_a = np.random.random(input_shape_a)\n input_b = np.random.random(input_shape_b)\n keras_output = keras_op(\n backend.variable(input_a, dtype=dtype),\n backend.variable(input_b, dtype=dtype), *keras_args, **keras_kwargs)\n keras_output = backend.eval(keras_output)\n np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),\n *np_args, **np_kwargs)\n try:\n np.testing.assert_allclose(keras_output, np_output, atol=1e-4)\n except AssertionError:\n raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '\n 'Expected ' + str(np_output) + ' but got ' +\n str(keras_output))\n\n\nclass BackendResetTest(test.TestCase, parameterized.TestCase):\n\n def test_new_config(self):\n # User defined jit setting\n config.set_optimizer_jit(False)\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # New session has the same jit setting\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # Change respected\n config.set_optimizer_jit(True)\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # We can't use the normal parameterized decorator because the test session\n # will block graph clearing.\n @parameterized.named_parameters(('_v1', context.graph_mode),\n ('_v2', context.eager_mode))\n def test_new_graph(self, test_context):\n with test_context():\n g_old = backend.get_graph()\n backend.clear_session()\n g = backend.get_graph()\n\n assert g_old is not g\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BackendUtilsTest(test.TestCase):\n\n def test_backend(self):\n self.assertEqual(backend.backend(), 'tensorflow')\n\n def test_get_reset_uids(self):\n self.assertEqual(backend.get_uid('foo'), 1)\n self.assertEqual(backend.get_uid('foo'), 2)\n\n backend.reset_uids()\n self.assertEqual(backend.get_uid('foo'), 1)\n\n def test_learning_phase(self):\n with self.cached_session() as sess:\n with self.assertRaises(ValueError):\n backend.set_learning_phase(2)\n\n # Test running with a learning-phase-consuming layer\n with backend.learning_phase_scope(0):\n x = input_layer.Input((3,))\n y = normalization.BatchNormalization()(x)\n if not 
context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n sess.run(y, feed_dict={x: np.random.random((2, 3))})\n\n def test_learning_phase_name(self):\n with backend.name_scope('test_scope'):\n # Test that outer name scopes do not affect the learning phase's name.\n lp = backend.symbolic_learning_phase()\n self.assertEqual(lp.name, 'keras_learning_phase:0')\n\n def test_learning_phase_scope(self):\n initial_learning_phase = backend.learning_phase()\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n with backend.learning_phase_scope(0):\n self.assertEqual(backend.learning_phase(), 0)\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n with self.assertRaises(ValueError):\n with backend.learning_phase_scope(None):\n pass\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n\n new_learning_phase = 0\n backend.set_learning_phase(new_learning_phase)\n self.assertEqual(backend.learning_phase(), new_learning_phase)\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(), new_learning_phase)\n\n def test_learning_phase_scope_in_graph(self):\n initial_learning_phase_outside_graph = backend.learning_phase()\n with backend.get_graph().as_default():\n initial_learning_phase_in_graph = backend.learning_phase()\n\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n\n with backend.get_graph().as_default():\n self.assertIs(backend.learning_phase(), initial_learning_phase_in_graph)\n\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n\n def test_int_shape(self):\n x = backend.ones(shape=(3, 4))\n self.assertEqual(backend.int_shape(x), (3, 4))\n\n if not context.executing_eagerly():\n x = backend.placeholder(shape=(None, 4))\n self.assertEqual(backend.int_shape(x), (None, 4))\n\n def test_in_train_phase(self):\n y1 = backend.variable(1)\n y2 = backend.variable(2)\n if context.executing_eagerly():\n with backend.learning_phase_scope(0):\n y_val_test = backend.in_train_phase(y1, y2).numpy()\n with backend.learning_phase_scope(1):\n y_val_train = backend.in_train_phase(y1, y2).numpy()\n else:\n y = backend.in_train_phase(y1, y2)\n f = backend.function([backend.learning_phase()], [y])\n y_val_test = f([0])[0]\n y_val_train = f([1])[0]\n self.assertAllClose(y_val_test, 2)\n self.assertAllClose(y_val_train, 1)\n\n def test_is_keras_tensor(self):\n x = backend.variable(1)\n self.assertEqual(backend.is_keras_tensor(x), False)\n x = input_layer.Input(shape=(1,))\n self.assertEqual(backend.is_keras_tensor(x), True)\n x = input_layer.Input(shape=(None,), ragged=True)\n self.assertEqual(backend.is_keras_tensor(x), True)\n x = input_layer.Input(shape=(None, None), sparse=True)\n self.assertEqual(backend.is_keras_tensor(x), True)\n with self.assertRaises(ValueError):\n backend.is_keras_tensor(0)\n\n def test_stop_gradient(self):\n x = backend.variable(1)\n y = backend.stop_gradient(x)\n if not context.executing_eagerly():\n self.assertEqual(y.op.name[:12], 'StopGradient')\n\n xs = [backend.variable(1) for _ in range(3)]\n ys = backend.stop_gradient(xs)\n if not context.executing_eagerly():\n for y in ys:\n self.assertEqual(y.op.name[:12], 
'StopGradient')\n\n def test_placeholder(self):\n x = backend.placeholder(shape=(3, 4))\n self.assertEqual(x.shape.as_list(), [3, 4])\n x = backend.placeholder(shape=(3, 4), sparse=True)\n self.assertEqual(x.shape.as_list(), [3, 4])\n\n def test_is_placeholder(self):\n x = backend.placeholder(shape=(1,))\n self.assertEqual(backend.is_placeholder(x), True)\n x = backend.variable(1)\n self.assertEqual(backend.is_placeholder(x), False)\n\n def test_print_tensor(self):\n # Unfortunately it seems impossible to use `mock` (or any other method)\n # to capture stdout when used inside a graph or graph function, thus\n # we cannot test correctness.\n # The message gets correctly printed in practice.\n x = backend.placeholder(shape=())\n y = backend.print_tensor(x, 'eager=%s' % context.executing_eagerly())\n f = backend.function(x, y)\n f(0)\n\n def test_cast_to_floatx(self):\n x = backend.variable(1, dtype='float64')\n x = backend.cast_to_floatx(x)\n self.assertEqual(x.dtype.name, 'float32')\n x = backend.cast_to_floatx(2)\n self.assertEqual(x.dtype.name, 'float32')\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BackendVariableTest(test.TestCase):\n\n def test_zeros(self):\n x = backend.zeros((3, 4))\n val = backend.eval(x)\n self.assertAllClose(val, np.zeros((3, 4)))\n\n def test_ones(self):\n x = backend.ones((3, 4))\n val = backend.eval(x)\n self.assertAllClose(val, np.ones((3, 4)))\n\n def test_eye(self):\n x = backend.eye(4)\n val = backend.eval(x)\n self.assertAllClose(val, np.eye(4))\n\n def test_zeros_like(self):\n x = backend.zeros((3, 4))\n y = backend.zeros_like(x)\n val = backend.eval(y)\n self.assertAllClose(val, np.zeros((3, 4)))\n\n def test_ones_like(self):\n x = backend.zeros((3, 4))\n y = backend.ones_like(x)\n val = backend.eval(y)\n self.assertAllClose(val, np.ones((3, 4)))\n\n def test_random_uniform_variable(self):\n x = backend.random_uniform_variable((30, 20), low=1, high=2, seed=0)\n val = backend.eval(x)\n self.assertAllClose(val.mean(), 1.5, atol=1e-1)\n self.assertAllClose(val.max(), 2., atol=1e-1)\n self.assertAllClose(val.min(), 1., atol=1e-1)\n\n def test_random_normal_variable(self):\n x = backend.random_normal_variable((30, 20), 1., 0.5, seed=0)\n val = backend.eval(x)\n self.assertAllClose(val.mean(), 1., atol=1e-1)\n self.assertAllClose(val.std(), 0.5, atol=1e-1)\n\n def test_count_params(self):\n x = backend.zeros((4, 5))\n val = backend.count_params(x)\n self.assertAllClose(val, 20)\n\n def test_constant(self):\n ref_val = np.random.random((3, 4)).astype('float32')\n x = backend.constant(ref_val)\n val = backend.eval(x)\n self.assertAllClose(val, ref_val)\n\n def test_sparse_variable(self):\n val = scipy.sparse.eye(10)\n x = backend.variable(val)\n self.assertTrue(isinstance(x, sparse_tensor.SparseTensor))\n\n y = backend.to_dense(x)\n self.assertFalse(backend.is_sparse(y))\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):\n\n def test_dot(self):\n x = backend.ones(shape=(2, 3))\n y = backend.ones(shape=(3, 4))\n xy = backend.dot(x, y)\n self.assertEqual(xy.shape.as_list(), [2, 4])\n\n x = backend.ones(shape=(32, 28, 3))\n y = backend.ones(shape=(3, 4))\n xy = backend.dot(x, y)\n self.assertEqual(xy.shape.as_list(), [32, 28, 4])\n\n @parameterized.parameters(\n [(2, 3, 4, 5), (2, 5, 6, 7), (2, 3, 4, 6, 7), (3, 1)],\n [(2, 20, 1), (2, 30, 20), (2, 1, 30), (1, 2)],\n [(4, 2, 3), (4, 5, 3), (4, 2, 5), (2, 2)],\n [(4, 2), (4, 2, 3), (4, 3), (1, 
1)],\n      [(4, 2), (4, 2, 3), (4, 3), 1],\n      [(4, 2, 3), (4, 3), (4, 2), (2, 1)],\n  )\n  def test_batch_dot(self, x_shape, y_shape, output_shape, axes):\n    x_val = np.random.random(x_shape)\n    y_val = np.random.random(y_shape)\n    x = backend.variable(x_val)\n    y = backend.variable(y_val)\n    xy = backend.batch_dot(x, y, axes=axes)\n    self.assertEqual(tuple(xy.shape.as_list()), output_shape)\n    xy_val = backend.eval(xy)\n    ref_val = self._reference_batch_dot(x_val, y_val, axes)\n    self.assertAllClose(xy_val, ref_val, atol=1e-5)\n\n  def _reference_batch_dot(self, x, y, axes):\n    if isinstance(axes, int):\n      axes = [axes, axes]\n    elif isinstance(axes, tuple):\n      axes = list(axes)\n    if axes is None:\n      if y.ndim == 2:\n        axes = [x.ndim - 1, y.ndim - 1]\n      else:\n        axes = [x.ndim - 1, y.ndim - 2]\n    if axes[0] < 0:\n      axes[0] += x.ndim\n    if axes[1] < 0:\n      axes[1] += y.ndim\n    result = []\n    axes = [axes[0] - 1, axes[1] - 1]\n    for xi, yi in zip(x, y):\n      result.append(np.tensordot(xi, yi, axes))\n    result = np.array(result)\n    if result.ndim == 1:\n      result = np.expand_dims(result, -1)\n    return result\n\n  def test_reduction_ops(self):\n    ops_to_test = [\n        (backend.max, np.max),\n        (backend.min, np.min),\n        (backend.sum, np.sum),\n        (backend.prod, np.prod),\n        (backend.var, np.var),\n        (backend.std, np.std),\n        (backend.mean, np.mean),\n        (backend.argmin, np.argmin),\n        (backend.argmax, np.argmax),\n    ]\n    for keras_op, np_op in ops_to_test:\n      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),\n                                       keras_kwargs={'axis': 1},\n                                       np_kwargs={'axis': 1})\n      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),\n                                       keras_kwargs={'axis': -1},\n                                       np_kwargs={'axis': -1})\n      if 'keepdims' in tf_inspect.getargspec(keras_op).args:\n        compare_single_input_op_to_numpy(keras_op, np_op,\n                                         input_shape=(4, 7, 5),\n                                         keras_kwargs={'axis': 1,\n                                                       'keepdims': True},\n                                         np_kwargs={'axis': 1,\n                                                    'keepdims': True})\n\n  def test_elementwise_ops(self):\n    ops_to_test = [\n        (backend.square, np.square),\n        (backend.abs, np.abs),\n        (backend.round, np.round),\n        (backend.sign, np.sign),\n        (backend.sin, np.sin),\n        (backend.cos, np.cos),\n        (backend.exp, np.exp),\n    ]\n    for keras_op, np_op in ops_to_test:\n      compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7))\n\n    ops_to_test = [\n        (backend.sqrt, np.sqrt),\n        (backend.log, np.log),\n    ]\n    for keras_op, np_op in ops_to_test:\n      compare_single_input_op_to_numpy(keras_op, np_op,\n                                       input_shape=(4, 7),\n                                       negative_values=False)\n\n    compare_single_input_op_to_numpy(\n        backend.clip,\n        np.clip,\n        input_shape=(6, 4),\n        keras_kwargs={\n            'min_value': 0.1,\n            'max_value': 2.4\n        },\n        np_kwargs={\n            'a_min': 0.1,\n            'a_max': 2.4  # match 'max_value' above so the Keras and NumPy ops clip identically\n        })\n\n    compare_single_input_op_to_numpy(\n        backend.pow, np.power, input_shape=(6, 4), keras_args=[3], np_args=[3])\n\n  def test_two_tensor_ops(self):\n    ops_to_test = [\n        (backend.equal, np.equal),\n        (backend.not_equal, np.not_equal),\n        (backend.greater, np.greater),\n        (backend.greater_equal, np.greater_equal),\n        (backend.less, np.less),\n        (backend.less_equal, np.less_equal),\n        (backend.maximum, np.maximum),\n        (backend.minimum, np.minimum),\n    ]\n    for keras_op, np_op in ops_to_test:\n      compare_two_inputs_op_to_numpy(keras_op, np_op,\n                                     input_shape_a=(4, 7),\n                                     input_shape_b=(4, 7))\n\n  def test_relu(self):\n    x = ops.convert_to_tensor_v2_with_dispatch([[-4, 0], [2, 7]], 'float32')\n\n    # standard relu\n    relu_op = backend.relu(x)\n    self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n    # alpha (leaky relu used)\n    relu_op = backend.relu(x, alpha=0.5)\n    if not
context.executing_eagerly():\n self.assertTrue('LeakyRelu' in relu_op.name)\n self.assertAllClose(backend.eval(relu_op), [[-2, 0], [2, 7]])\n\n # max_value < some elements\n relu_op = backend.relu(x, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 5]])\n\n # nn.relu6 used\n relu_op = backend.relu(x, max_value=6)\n if not context.executing_eagerly():\n self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 6]])\n\n # max value > 6\n relu_op = backend.relu(x, max_value=10)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n # max value is float\n relu_op = backend.relu(x, max_value=4.3)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 4.3]])\n\n # max value == 0\n relu_op = backend.relu(x, max_value=0)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 0]])\n\n # alpha and max_value\n relu_op = backend.relu(x, alpha=0.25, max_value=3)\n self.assertAllClose(backend.eval(relu_op), [[-1, 0], [2, 3]])\n\n # threshold\n relu_op = backend.relu(x, threshold=3)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 7]])\n\n # threshold is float\n relu_op = backend.relu(x, threshold=1.5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n # threshold is negative\n relu_op = backend.relu(x, threshold=-5)\n self.assertAllClose(backend.eval(relu_op), [[-4, 0], [2, 7]])\n\n # threshold and max_value\n relu_op = backend.relu(x, threshold=3, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 5]])\n\n # threshold and alpha\n relu_op = backend.relu(x, alpha=0.25, threshold=4)\n self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 7]])\n\n # threshold, alpha, and max_value\n relu_op = backend.relu(x, alpha=0.25, threshold=4, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 5]])\n\n # Test case for GitHub issue 35430, with integer dtype\n x = input_layer.Input(shape=(), name='x', dtype='int64')\n _ = advanced_activations.ReLU(max_value=100, dtype='int64')(x)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BackendShapeOpsTest(test.TestCase):\n\n def test_reshape(self):\n compare_single_input_op_to_numpy(\n backend.reshape,\n np.reshape,\n input_shape=(4, 7),\n keras_args=[(2, 14)],\n np_args=[(2, 14)])\n\n def test_concatenate(self):\n a = backend.variable(np.ones((1, 2, 3)))\n b = backend.variable(np.ones((1, 2, 2)))\n y = backend.concatenate([a, b], axis=-1)\n self.assertEqual(y.shape.as_list(), [1, 2, 5])\n\n def test_permute_dimensions(self):\n compare_single_input_op_to_numpy(\n backend.permute_dimensions,\n np.transpose,\n input_shape=(4, 7),\n keras_args=[(1, 0)],\n np_args=[(1, 0)])\n\n def test_resize_images(self):\n height_factor = 2\n width_factor = 2\n data_format = 'channels_last'\n x = backend.variable(np.ones((1, 2, 2, 3)))\n y = backend.resize_images(x, height_factor, width_factor, data_format)\n self.assertEqual(y.shape.as_list(), [1, 4, 4, 3])\n\n data_format = 'channels_first'\n x = backend.variable(np.ones((1, 3, 2, 2)))\n y = backend.resize_images(x, height_factor, width_factor, data_format)\n self.assertEqual(y.shape.as_list(), [1, 3, 4, 4])\n\n # Invalid use:\n with self.assertRaises(ValueError):\n backend.resize_images(\n x, height_factor, width_factor, data_format='unknown')\n\n def test_resize_volumes(self):\n height_factor = 2\n width_factor = 2\n depth_factor = 2\n data_format = 'channels_last'\n x = backend.variable(np.ones((1, 2, 2, 2, 3)))\n y = 
backend.resize_volumes(x, depth_factor, height_factor, width_factor,\n data_format)\n self.assertEqual(y.shape.as_list(), [1, 4, 4, 4, 3])\n\n data_format = 'channels_first'\n x = backend.variable(np.ones((1, 3, 2, 2, 2)))\n y = backend.resize_volumes(x, depth_factor, height_factor, width_factor,\n data_format)\n self.assertEqual(y.shape.as_list(), [1, 3, 4, 4, 4])\n\n # Invalid use:\n with self.assertRaises(ValueError):\n backend.resize_volumes(\n x, depth_factor, height_factor, width_factor, data_format='unknown')\n\n def test_repeat_elements(self):\n x = backend.variable(np.ones((1, 3, 2)))\n y = backend.repeat_elements(x, 3, axis=1)\n self.assertEqual(y.shape.as_list(), [1, 9, 2])\n\n # Use with a dynamic axis:\n if not context.executing_eagerly():\n x = backend.placeholder(shape=(2, None, 2))\n y = backend.repeat_elements(x, 3, axis=1)\n self.assertEqual(y.shape.as_list(), [2, None, 2])\n\n def test_repeat(self):\n x = backend.variable(np.ones((1, 3)))\n y = backend.repeat(x, 2)\n self.assertEqual(y.shape.as_list(), [1, 2, 3])\n\n def test_flatten(self):\n compare_single_input_op_to_numpy(\n backend.flatten,\n np.reshape,\n input_shape=(4, 7, 6),\n np_args=[(4 * 7 * 6,)])\n\n def test_batch_flatten(self):\n compare_single_input_op_to_numpy(\n backend.batch_flatten,\n np.reshape,\n input_shape=(4, 7, 6),\n np_args=[(4, 7 * 6)])\n\n def test_temporal_padding(self):\n\n def ref_op(x, padding):\n shape = list(x.shape)\n shape[1] += padding[0] + padding[1]\n y = np.zeros(tuple(shape))\n y[:, padding[0]:-padding[1], :] = x\n return y\n\n compare_single_input_op_to_numpy(\n backend.temporal_padding,\n ref_op,\n input_shape=(4, 7, 6),\n keras_args=[(2, 3)],\n np_args=[(2, 3)])\n\n def test_spatial_2d_padding(self):\n\n def ref_op(x, padding, data_format='channels_last'):\n shape = list(x.shape)\n if data_format == 'channels_last':\n shape[1] += padding[0][0] + padding[0][1]\n shape[2] += padding[1][0] + padding[1][1]\n y = np.zeros(tuple(shape))\n y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x\n else:\n shape[2] += padding[0][0] + padding[0][1]\n shape[3] += padding[1][0] + padding[1][1]\n y = np.zeros(tuple(shape))\n y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x\n return y\n\n compare_single_input_op_to_numpy(\n backend.spatial_2d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3),\n keras_args=[((2, 3), (1, 2))],\n keras_kwargs={'data_format': 'channels_last'},\n np_args=[((2, 3), (1, 2))],\n np_kwargs={'data_format': 'channels_last'})\n compare_single_input_op_to_numpy(\n backend.spatial_2d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3),\n keras_args=[((2, 3), (1, 2))],\n keras_kwargs={'data_format': 'channels_first'},\n np_args=[((2, 3), (1, 2))],\n np_kwargs={'data_format': 'channels_first'})\n\n def test_spatial_3d_padding(self):\n\n def ref_op(x, padding, data_format='channels_last'):\n shape = list(x.shape)\n if data_format == 'channels_last':\n shape[1] += padding[0][0] + padding[0][1]\n shape[2] += padding[1][0] + padding[1][1]\n shape[3] += padding[2][0] + padding[2][1]\n y = np.zeros(tuple(shape))\n y[:,\n padding[0][0]:-padding[0][1],\n padding[1][0]:-padding[1][1],\n padding[2][0]:-padding[2][1],\n :] = x\n else:\n shape[2] += padding[0][0] + padding[0][1]\n shape[3] += padding[1][0] + padding[1][1]\n shape[4] += padding[2][0] + padding[2][1]\n y = np.zeros(tuple(shape))\n y[:, :,\n padding[0][0]:-padding[0][1],\n padding[1][0]:-padding[1][1],\n padding[2][0]:-padding[2][1]] = x\n return y\n\n compare_single_input_op_to_numpy(\n 
backend.spatial_3d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3, 2),\n keras_args=[((2, 3), (1, 2), (2, 3))],\n keras_kwargs={'data_format': 'channels_last'},\n np_args=[((2, 3), (1, 2), (2, 3))],\n np_kwargs={'data_format': 'channels_last'})\n compare_single_input_op_to_numpy(\n backend.spatial_3d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3, 2),\n keras_args=[((2, 3), (1, 2), (2, 3))],\n keras_kwargs={'data_format': 'channels_first'},\n np_args=[((2, 3), (1, 2), (2, 3))],\n np_kwargs={'data_format': 'channels_first'})\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass BackendNNOpsTest(test.TestCase, parameterized.TestCase):\n\n def test_bias_add(self):\n keras_op = backend.bias_add\n np_op = np.add\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 2, 7),\n input_shape_b=(7,))\n\n with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):\n x = backend.variable((3, 4))\n b = backend.variable((3, 4))\n backend.bias_add(x, b)\n with self.assertRaises(ValueError):\n x = backend.variable((3, 4))\n b = backend.variable((4,))\n backend.bias_add(x, b, data_format='unknown')\n\n def test_bias_add_channels_first(self):\n\n def keras_op(x, b):\n return backend.bias_add(x, b, data_format='channels_first')\n\n def np_op(x, b):\n if x.ndim == 3:\n b = b.reshape((1, b.shape[0], 1))\n if x.ndim == 4:\n b = b.reshape((1, b.shape[0], 1, 1))\n return x + b\n\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 7),\n input_shape_b=(3,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 7),\n input_shape_b=(3,))\n\n def test_pool2d(self):\n val = np.random.random((10, 3, 10, 10))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2),\n strides=(1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='max')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])\n\n y = backend.pool2d(\n x, (2, 2),\n strides=(1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='avg')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 9, 9, 3])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 3])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 3])\n\n with self.assertRaises(ValueError):\n y = backend.pool2d(\n x, (2, 2),\n strides=(2, 2),\n padding='other',\n data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), strides=(2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2, 2), strides=(2, 2))\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), strides=(2, 2, 2))\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), strides=(2, 
2), pool_mode='other')\n\n def test_pool3d(self):\n val = np.random.random((10, 3, 10, 10, 10))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='max')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])\n\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='avg')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 9, 9, 9, 3])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='same',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 3])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(2, 2, 2),\n padding='same',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 3])\n\n def test_conv1d(self):\n val = np.random.random((10, 4, 10))\n x = backend.variable(val)\n kernel_val = np.random.random((3, 4, 5))\n k = backend.variable(kernel_val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 5])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 5])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(2,), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5])\n\n def test_local_conv_channels_dim(self):\n filters = 3\n batch_size = 2\n\n for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]:\n channels_in = input_shape[0]\n input_spatial_shape = input_shape[1:]\n dim = len(input_spatial_shape)\n\n inputs = np.random.normal(0, 1, (batch_size,) + input_shape)\n inputs_cf = backend.variable(inputs)\n\n for kernel_size in [1, 2]:\n for stride in [1, 2]:\n kernel_sizes = (kernel_size,) * dim\n strides = (stride,) * dim\n\n output_shape = tuple([(i - kernel_size + stride) // stride\n for i in input_spatial_shape])\n\n kernel_shape = (np.prod(output_shape),\n np.prod(kernel_sizes) * channels_in,\n filters)\n\n kernel = np.random.normal(\n 0,\n 1,\n output_shape + (channels_in, np.prod(kernel_sizes), filters)\n )\n\n kernel_cf = np.reshape(kernel, kernel_shape)\n kernel_cf = backend.variable(kernel_cf)\n\n conv_cf = backend.local_conv(inputs_cf, kernel_cf, kernel_sizes,\n strides, output_shape, 'channels_first')\n\n inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +\n [1])\n inputs_cl = backend.variable(inputs_cl)\n\n kernel_cl = np.reshape(\n np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),\n kernel_shape\n )\n kernel_cl = backend.variable(kernel_cl)\n\n conv_cl = backend.local_conv(inputs_cl, kernel_cl, kernel_sizes,\n strides, output_shape, 'channels_last')\n\n conv_cf = backend.eval(conv_cf)\n conv_cl = 
backend.eval(conv_cl)\n\n self.assertAllCloseAccordingToType(\n conv_cf,\n np.transpose(conv_cl,\n [0, dim + 1] + list(range(1, dim + 1))),\n atol=1e-5\n )\n\n @parameterized.named_parameters(\n ('local_conv1d', (5, 6), (3,), (1,), (3,)),\n ('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))\n def test_local_conv_1d_and_2d(self,\n input_shape,\n kernel_sizes,\n strides,\n output_shape):\n filters = 3\n batch_size = 2\n\n inputs = np.random.normal(0, 1, (batch_size,) + input_shape)\n inputs = backend.variable(inputs)\n\n kernel = np.random.normal(0, 1, (np.prod(output_shape),\n np.prod(kernel_sizes) * input_shape[-1],\n filters))\n kernel = backend.variable(kernel)\n\n local_conv = backend.local_conv(inputs, kernel, kernel_sizes, strides,\n output_shape, 'channels_last')\n if len(output_shape) == 1:\n local_conv_dim = backend.local_conv1d(inputs, kernel, kernel_sizes,\n strides, 'channels_last')\n else:\n local_conv_dim = backend.local_conv2d(inputs, kernel, kernel_sizes,\n strides, output_shape,\n 'channels_last')\n\n local_conv = backend.eval(local_conv)\n local_conv_dim = backend.eval(local_conv_dim)\n\n self.assertAllCloseAccordingToType(local_conv, local_conv_dim)\n\n def test_conv2d(self):\n kernel_val = np.random.random((3, 3, 4, 5))\n k = backend.variable(kernel_val)\n\n # Test channels_first\n val = np.random.random((10, 4, 10, 10))\n x = backend.variable(val)\n y = backend.conv2d(x, k, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])\n\n # Test channels_last\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])\n\n # Test same padding\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(x, k, padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n # Test dilation_rate\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, dilation_rate=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n # Test strides\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])\n\n # Test invalid arguments\n with self.assertRaises(ValueError):\n y = backend.conv2d(\n x, k, (2, 2), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv2d(x, k, (2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.conv2d(x, k, (2, 2, 2))\n\n def test_conv2d_transpose(self):\n input_size = (7, 8)\n kernel_size = (3, 3)\n input_depth = 6\n filters = 6\n batch_size = 2\n\n kernel_val = np.random.random(kernel_size + (input_depth, filters))\n k = backend.variable(kernel_val)\n\n # Test channels_first\n input_val = np.random.random((batch_size, input_depth) + input_size)\n x = backend.variable(input_val)\n y = backend.conv2d_transpose(\n x,\n k, (batch_size, filters) + input_size,\n padding='same',\n data_format='channels_first')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size, filters) + input_size)\n\n # Test channels_last\n input_val = np.random.random((batch_size,) + input_size + (input_depth,))\n x = backend.variable(input_val)\n y = backend.conv2d_transpose(\n x,\n k, (batch_size,) + 
input_size + (filters,),\n padding='same',\n data_format='channels_last')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test dilation_rate\n y = backend.conv2d_transpose(\n x,\n k, (batch_size,) + input_size + (filters,),\n padding='same',\n data_format='channels_last',\n dilation_rate=(2, 2))\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test batch size of None in output_shape\n y = backend.conv2d_transpose(\n x,\n k, (None,) + input_size + (filters,),\n padding='same',\n data_format='channels_last')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test invalid values\n with self.assertRaises(ValueError):\n y = backend.conv2d_transpose(\n x, k, (2, 2, 8, 9), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv2d_transpose(x, k, (2, 2, 8, 9), data_format='other')\n\n def test_separable_conv2d(self):\n val = np.random.random((10, 4, 10, 10))\n x = backend.variable(val)\n depthwise_kernel_val = np.random.random((3, 3, 4, 1))\n pointwise_kernel_val = np.random.random((1, 1, 4, 5))\n dk = backend.variable(depthwise_kernel_val)\n pk = backend.variable(pointwise_kernel_val)\n y = backend.separable_conv2d(\n x, dk, pk, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(\n x, dk, pk, (2, 2), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(x, dk, pk, (2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(x, dk, pk, (2, 2, 2))\n\n def test_conv3d(self):\n val = np.random.random((10, 4, 10, 10, 10))\n x = backend.variable(val)\n kernel_val = np.random.random((3, 3, 3, 4, 5))\n k = backend.variable(kernel_val)\n y = backend.conv3d(x, k, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8, 8])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(1, 1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 8, 5])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(1, 1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 5])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(2, 2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 5])\n with self.assertRaises(ValueError):\n y = backend.conv3d(\n x, k, (2, 2, 2), padding='other', 
data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv3d(x, k, (2, 2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.conv3d(x, k, (2, 2))\n\n def test_rnn(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n input_val = np.random.random(\n (num_samples, timesteps, input_dim)).astype(np.float32)\n init_state_val = np.random.random(\n (num_samples, output_dim)).astype(np.float32)\n w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)\n w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)\n np_mask = np.random.randint(2, size=(num_samples, timesteps))\n\n def rnn_step_fn():\n w_i = backend.variable(w_i_val)\n w_o = backend.variable(w_o_val)\n\n def step_function(x, states):\n assert len(states) == 1\n prev_output = states[0]\n output = backend.dot(x, w_i) + backend.dot(prev_output, w_o)\n return output, [output]\n\n return step_function\n\n # test default setup\n last_output_list = [[], [], [], [], [], []]\n outputs_list = [[], [], [], [], [], []]\n state_list = [[], [], [], [], [], []]\n\n rnn_fn = rnn_step_fn()\n inputs = backend.variable(input_val)\n initial_states = [backend.variable(init_state_val)]\n mask = backend.variable(np_mask)\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True},\n {'go_backwards': False, 'mask': mask},\n {'go_backwards': False, 'mask': mask, 'unroll': True},\n ]\n for i, kwargs in enumerate(kwargs_list):\n last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,\n initial_states, **kwargs)\n # check static shape inference\n self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])\n self.assertEqual(outputs.shape.as_list(),\n [num_samples, timesteps, output_dim])\n for state in new_states:\n self.assertEqual(state.shape.as_list(), [num_samples, output_dim])\n\n last_output_list[i].append(backend.eval(last_output))\n outputs_list[i].append(backend.eval(outputs))\n self.assertLen(new_states, 1)\n state_list[i].append(backend.eval(new_states[0]))\n\n def assert_list_pairwise(z_list, atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n self.assertAllClose(z1, z2, atol=atol)\n\n assert_list_pairwise(last_output_list[0], atol=1e-04)\n assert_list_pairwise(outputs_list[0], atol=1e-04)\n assert_list_pairwise(state_list[0], atol=1e-04)\n assert_list_pairwise(last_output_list[2], atol=1e-04)\n assert_list_pairwise(outputs_list[2], atol=1e-04)\n assert_list_pairwise(state_list[2], atol=1e-04)\n\n for l, u_l in zip(last_output_list[0], last_output_list[1]):\n self.assertAllClose(l, u_l, atol=1e-04)\n\n for o, u_o in zip(outputs_list[0], outputs_list[1]):\n self.assertAllClose(o, u_o, atol=1e-04)\n\n for s, u_s in zip(state_list[0], state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):\n self.assertAllClose(b_l, b_u_l, atol=1e-04)\n\n for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):\n self.assertAllClose(b_o, b_u_o, atol=1e-04)\n\n for b_s, b_u_s in zip(state_list[2], state_list[3]):\n self.assertAllClose(b_s, b_u_s, atol=1e-04)\n\n def test_rnn_additional_states(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n input_val = np.random.random(\n (num_samples, timesteps, input_dim)).astype(np.float32)\n 
init_state_val = np.random.random(\n (num_samples, output_dim)).astype(np.float32)\n w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)\n w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)\n np_mask = np.random.randint(2, size=(num_samples, timesteps))\n\n def rnn_step_fn():\n w_i = backend.variable(w_i_val)\n w_o = backend.variable(w_o_val)\n\n def step_function(x, states):\n assert len(states) == 2\n prev_output = states[0]\n output = backend.dot(x, w_i) + backend.dot(prev_output, w_o)\n return output, [output, backend.concatenate([output, output], axis=-1)]\n\n return step_function\n\n # test default setup\n last_output_list = [[], [], [], [], [], []]\n outputs_list = [[], [], [], [], [], []]\n state_list = [[], [], [], [], [], []]\n additional_state_list = [[], [], [], [], [], []]\n\n rnn_fn = rnn_step_fn()\n inputs = backend.variable(input_val)\n initial_states = [\n backend.variable(init_state_val),\n ops.convert_to_tensor_v2_with_dispatch(\n np.concatenate([init_state_val, init_state_val], axis=-1))\n ]\n mask = backend.variable(np_mask)\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True},\n {'go_backwards': False, 'mask': mask},\n {'go_backwards': False, 'mask': mask, 'unroll': True},\n ]\n for i, kwargs in enumerate(kwargs_list):\n last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,\n initial_states, **kwargs)\n # check static shape inference\n self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])\n self.assertEqual(outputs.shape.as_list(),\n [num_samples, timesteps, output_dim])\n # for state in new_states:\n # self.assertEqual(state.shape.as_list(),\n # [num_samples, output_dim])\n self.assertEqual(new_states[0].shape.as_list(), [num_samples, output_dim])\n self.assertEqual(new_states[1].shape.as_list(),\n [num_samples, 2 * output_dim])\n\n last_output_list[i].append(backend.eval(last_output))\n outputs_list[i].append(backend.eval(outputs))\n self.assertLen(new_states, 2)\n state_list[i].append(backend.eval(new_states[0]))\n additional_state_list[i].append(backend.eval(new_states[1]))\n\n def assert_list_pairwise(z_list, atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n self.assertAllClose(z1, z2, atol=atol)\n\n assert_list_pairwise(last_output_list[0], atol=1e-04)\n assert_list_pairwise(outputs_list[0], atol=1e-04)\n assert_list_pairwise(state_list[0], atol=1e-04)\n assert_list_pairwise(additional_state_list[0], atol=1e-04)\n assert_list_pairwise(last_output_list[2], atol=1e-04)\n assert_list_pairwise(outputs_list[2], atol=1e-04)\n assert_list_pairwise(state_list[2], atol=1e-04)\n assert_list_pairwise(additional_state_list[2], atol=1e-04)\n\n for l, u_l in zip(last_output_list[0], last_output_list[1]):\n self.assertAllClose(l, u_l, atol=1e-04)\n\n for o, u_o in zip(outputs_list[0], outputs_list[1]):\n self.assertAllClose(o, u_o, atol=1e-04)\n\n for s, u_s in zip(state_list[0], state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for s, u_s in zip(additional_state_list[0], additional_state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):\n self.assertAllClose(b_l, b_u_l, atol=1e-04)\n\n for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):\n self.assertAllClose(b_o, b_u_o, atol=1e-04)\n\n for b_s, b_u_s in zip(state_list[2], state_list[3]):\n 
self.assertAllClose(b_s, b_u_s, atol=1e-04)\n\n for s, u_s in zip(additional_state_list[2], additional_state_list[3]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n def test_rnn_output_and_state_masking_independent(self):\n num_samples = 2\n num_timesteps = 4\n state_and_io_size = 2\n mask_last_num_timesteps = 2 # for second sample only\n\n # a step function that just outputs inputs,\n # but increments states +1 per timestep\n def step_function(inputs, states):\n return inputs, [s + 1 for s in states]\n\n inputs_vals = np.random.random((num_samples, num_timesteps,\n state_and_io_size))\n initial_state_vals = np.random.random((num_samples, state_and_io_size))\n # masking of two last timesteps for second sample only\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[1, -mask_last_num_timesteps:] = 0\n\n # outputs expected to be same as inputs for the first sample\n expected_outputs = inputs_vals.copy()\n # but for the second sample all outputs in masked region should be the same\n # as last output before masked region\n expected_outputs[1, -mask_last_num_timesteps:] = \\\n expected_outputs[1, -(mask_last_num_timesteps + 1)]\n\n expected_last_state = initial_state_vals.copy()\n # first state should be incremented for every timestep (no masking)\n expected_last_state[0] += num_timesteps\n # second state should not be incremented for last two timesteps\n expected_last_state[1] += (num_timesteps - mask_last_num_timesteps)\n\n # verify same expected output for `unroll=true/false`\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, outputs, last_states = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(outputs), expected_outputs)\n self.assertAllClose(backend.eval(last_states[0]), expected_last_state)\n\n def test_rnn_output_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n num_features = 5\n\n def step_function(inputs, states):\n outputs = backend.tile(backend.expand_dims(inputs), [1, 1, 2])\n return outputs, [backend.identity(s) for s in states]\n # Note: cannot just return states (which can be a problem) ->\n # tensorflow/python/ops/resource_variable_ops.py\", line 824, in set_shape\n # NotImplementedError: ResourceVariable does not implement set_shape()\n\n inputs_vals = np.random.random((num_samples, num_timesteps, num_features))\n initial_state_vals = np.random.random((num_samples, 6))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[-1, -1] = 0 # final timestep masked for last sample\n\n expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)\n # for the last sample, the final timestep (in masked region) should be the\n # same as the second to final output (before masked region)\n expected_outputs[-1, -1] = expected_outputs[-1, -2]\n\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, outputs, _ = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(outputs), expected_outputs)\n\n def test_rnn_state_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n\n def step_function(inputs, states):\n return inputs, [s + 1 for s 
in states]\n\n inputs_vals = np.random.random((num_samples, num_timesteps, 5))\n initial_state_vals = np.random.random((num_samples, 6, 7))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[0, -2:] = 0 # final two timesteps masked for first sample\n\n expected_last_state = initial_state_vals.copy()\n expected_last_state[0] += (num_timesteps - 2)\n expected_last_state[1:] += num_timesteps\n\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, _, last_states = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(last_states[0]), expected_last_state)\n\n def test_batch_normalization(self):\n g_val = np.random.random((3,))\n b_val = np.random.random((3,))\n gamma = backend.variable(g_val)\n beta = backend.variable(b_val)\n\n # 3D NHC case\n val = np.random.random((10, 5, 3))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 1), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 5, 3])\n\n # 4D NHWC case\n val = np.random.random((10, 5, 5, 3))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 1, 2), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 5, 5, 3])\n\n # 4D NCHW case\n if not context.executing_eagerly():\n # Eager CPU kernel for NCHW does not exist.\n val = np.random.random((10, 3, 5, 5))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 2, 3), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 5, 5])\n\n def test_normalize_batch_in_training(self):\n val = np.random.random((10, 3, 10, 10))\n x = backend.variable(val)\n reduction_axes = (0, 2, 3)\n\n g_val = np.random.random((3,))\n b_val = np.random.random((3,))\n gamma = backend.variable(g_val)\n beta = backend.variable(b_val)\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n # case: gamma=None\n gamma = None\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n # case: beta=None\n beta = None\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n def test_dropout(self):\n inputs = array_ops.ones((200, 200))\n outputs = backend.dropout(inputs, 0.2)\n outputs_val = backend.eval(outputs)\n self.assertEqual(np.min(outputs_val), 0)\n self.assertAllClose(np.count_nonzero(outputs_val), 32000, atol=1000)\n # Test noise shape\n outputs = backend.dropout(inputs, 0.2, noise_shape=(200, 1))\n outputs_val = backend.eval(outputs)\n self.assertAllClose(outputs_val[2, :], outputs_val[3, :], 
atol=1e-5)\n\n\nclass BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_binary_crossentropy_with_sigmoid(self):\n t = backend.constant([[0, 1, 0]])\n logits = backend.constant([[8., 1., 1.]])\n p = backend.sigmoid(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.binary_crossentropy(t, p))\n self.assertArrayNear(result[0], [8., 0.313, 1.313], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_loss(self):\n t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])\n result = backend.categorical_crossentropy(t, p)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]])\n result = backend.categorical_crossentropy(t, p, axis=0)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n result = backend.categorical_crossentropy(t, p, from_logits=True),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])\n result = backend.categorical_crossentropy(t, p, from_logits=True, axis=0),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):\n t = backend.placeholder()\n p = backend.placeholder()\n o = backend.categorical_crossentropy(t, p)\n\n t_val = ops.convert_to_tensor_v2_with_dispatch([[1., 0., 0.], [0., 1., 0.],\n [0., 0., 1.]])\n p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],\n [.05, .89, .06],\n [.05, .01, .94]])\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .116, .062], 1e-3)\n\n # With axis set\n o = backend.categorical_crossentropy(t, p, axis=0)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .065, .111], 1e-3)\n\n # from logits\n p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],\n [2., 3., 5.]])\n o = backend.categorical_crossentropy(t, p, from_logits=True)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, 0, .17], 1e-3)\n\n # from logits and axis set\n o = backend.categorical_crossentropy(t, p, from_logits=True, axis=0)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, .003, .036], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_with_softmax(self):\n t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n p = backend.softmax(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.categorical_crossentropy(t, p))\n self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_sparse_categorical_crossentropy_loss(self):\n t = backend.constant([0, 1, 2])\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])\n result = backend.sparse_categorical_crossentropy(t, p)\n 
self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]])\n result = backend.sparse_categorical_crossentropy(t, p, axis=0)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n result = backend.sparse_categorical_crossentropy(t, p, from_logits=True),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])\n result = backend.sparse_categorical_crossentropy(\n t, p, from_logits=True, axis=0),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph']))\n def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):\n # This test only runs in graph because the TF op layer is not supported yet\n # for sparse ops.\n t = backend.placeholder()\n p = backend.placeholder()\n o = backend.sparse_categorical_crossentropy(t, p)\n\n t_val = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2])\n p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],\n [.05, .89, .06],\n [.05, .01, .94]])\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .116, .062], 1e-3)\n\n # With axis set\n with self.assertRaisesRegex(\n ValueError,\n 'Cannot compute sparse categorical crossentropy with `axis=0`'):\n o = backend.sparse_categorical_crossentropy(t, p, axis=0)\n f = backend.function([t, p], o)\n\n _ = f([t_val, p_val])\n\n # from logits\n p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],\n [2., 3., 5.]])\n o = backend.sparse_categorical_crossentropy(t, p, from_logits=True)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, 0, .17], 1e-3)\n\n # from logits and axis set\n with self.assertRaisesRegex(\n ValueError,\n 'Cannot compute sparse categorical crossentropy with `axis=0`'):\n o = backend.sparse_categorical_crossentropy(\n t, p, from_logits=True, axis=0)\n f = backend.function([t, p], o)\n\n _ = f([t_val, p_val])\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_sparse_categorical_crossentropy_with_softmax(self):\n t = backend.constant([0, 1, 2])\n logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n p = backend.softmax(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.sparse_categorical_crossentropy(t, p))\n self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3)\n\n\n@test_util.with_control_flow_v2\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TestCTC(test.TestCase):\n\n def test_ctc_decode(self):\n depth = 6\n seq_len_0 = 5\n input_prob_matrix_0 = np.asarray(\n [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],\n [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],\n [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],\n [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],\n [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],\n # Random entry added in at time=5\n [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]\n for t in range(seq_len_0)] + # Pad to max_time_steps = 8\n 2 * [np.zeros((1, depth), 
dtype=np.float32)])\n\n inputs = backend.variable(np.asarray(inputs).transpose((1, 0, 2)))\n\n # batch_size length vector of sequence_lengths\n input_length = backend.variable(np.array([seq_len_0], dtype=np.int32))\n # batch_size length vector of negative log probabilities\n log_prob_truth = np.array([\n -3.5821197, # output beam 0\n -3.777835 # output beam 1\n ], np.float32)[np.newaxis, :]\n\n decode_truth = [\n np.array([1, 0, -1, -1, -1, -1, -1]),\n np.array([0, 1, 0, -1, -1, -1, -1])\n ]\n beam_width = 2\n top_paths = 2\n\n decode_pred_tf, log_prob_pred_tf = backend.ctc_decode(\n inputs,\n input_length,\n greedy=False,\n beam_width=beam_width,\n top_paths=top_paths)\n\n self.assertEqual(len(decode_pred_tf), top_paths)\n log_prob_pred = backend.eval(log_prob_pred_tf)\n for i in range(top_paths):\n self.assertTrue(\n np.alltrue(decode_truth[i] == backend.eval(decode_pred_tf[i])))\n self.assertAllClose(log_prob_truth, log_prob_pred)\n\n def test_ctc_batch_cost(self):\n with self.cached_session():\n label_lens = np.expand_dims(np.asarray([5, 4]), 1)\n input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps\n loss_log_probs = [3.34211, 5.42262]\n\n # dimensions are batch x time x categories\n labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],\n [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],\n [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],\n [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],\n [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],\n [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],\n dtype=np.float32)\n\n labels = backend.variable(labels, dtype='int32')\n inputs = backend.variable(inputs, dtype='float32')\n input_lens = backend.variable(input_lens, dtype='int32')\n label_lens = backend.variable(label_lens, dtype='int32')\n res = backend.eval(\n backend.ctc_batch_cost(labels, inputs, input_lens, label_lens))\n self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05)\n\n # test when batch_size = 1, that is, one sample only\n ref = [3.34211]\n input_lens = np.expand_dims(np.asarray([5]), 1)\n label_lens = np.expand_dims(np.asarray([5]), 1)\n\n labels = np.asarray([[0, 1, 2, 1, 0]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [\n 0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436\n ], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]\n ],\n dtype=np.float32)\n\n k_labels = backend.variable(labels, dtype='int32')\n k_inputs = backend.variable(inputs, dtype='float32')\n k_input_lens = backend.variable(input_lens, dtype='int32')\n k_label_lens = backend.variable(label_lens, dtype='int32')\n res = backend.eval(\n backend.ctc_batch_cost(k_labels, k_inputs, k_input_lens,\n k_label_lens))\n self.assertAllClose(res[:, 0], ref, atol=1e-05)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass TestRandomOps(test.TestCase):\n\n def test_random_normal(self):\n np.random.seed(123)\n x = 
backend.random_normal((500, 500))\n val = backend.eval(x)\n self.assertAllClose(np.mean(val), 0., atol=0.01)\n self.assertAllClose(np.std(val), 1., atol=0.01)\n\n def test_random_uniform(self):\n np.random.seed(123)\n x = backend.random_uniform((500, 500))\n val = backend.eval(x)\n self.assertAllClose(np.mean(val), 0.5, atol=0.01)\n self.assertAllClose(np.max(val), 1., atol=0.01)\n self.assertAllClose(np.min(val), 0., atol=0.01)\n\n def test_random_binomial(self):\n np.random.seed(123)\n x = backend.random_binomial((500, 500), p=0.5)\n self.assertAllClose(np.mean(backend.eval(x)), 0.5, atol=0.01)\n\n def test_truncated_normal(self):\n np.random.seed(123)\n x = backend.truncated_normal((500, 500), mean=0.0, stddev=1.0)\n x = backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0)\n y = backend.eval(x)\n self.assertAllClose(np.mean(y), 0., atol=0.01)\n self.assertAllClose(np.std(y), 0.88, atol=0.01)\n self.assertAllClose(np.max(y), 2., atol=0.01)\n self.assertAllClose(np.min(y), -2., atol=0.01)\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass FunctionTest(test.TestCase):\n\n def test_function_basics(self):\n if context.executing_eagerly():\n self.skipTest('eager backend.function does not support updates')\n x1 = backend.placeholder(shape=(), dtype='float32')\n x2 = backend.placeholder(shape=(), dtype='int32')\n v = backend.variable(10.)\n\n y1 = x1 + backend.cast(x2, 'float32') + v\n y2 = x1 * backend.cast(x2, 'float32')\n\n with ops.control_dependencies([y1]):\n u = backend.update(v, x1)\n\n f = backend.function([x1, x2], [y1, y2], updates=[u])\n output_values = f([2, 3])\n self.assertEqual(output_values, [15., 6.])\n self.assertEqual(backend.eval(v), 2.)\n\n def test_function_dict_outputs(self):\n x_ph = backend.placeholder(shape=(), name='x')\n y_ph = backend.placeholder(shape=(), name='y')\n outputs = {'x*y': y_ph * x_ph, 'x*x': x_ph * x_ph}\n\n f = backend.function(inputs=[x_ph, y_ph], outputs=outputs)\n x, y = 2., 5.\n results = f([x, y])\n\n self.assertEqual(results['x*y'], 10.)\n self.assertEqual(results['x*x'], 4)\n\n def test_function_dict_inputs(self):\n placeholders = {\n 'x': backend.placeholder(shape=()),\n 'y': backend.placeholder(shape=())\n }\n outputs = [placeholders['x'] * placeholders['y']]\n\n f = backend.function(inputs=placeholders, outputs=outputs)\n results = f({'x': 2., 'y': 3.})\n self.assertEqual(results[0], 6.)\n\n def test_function_single_input_output(self):\n x_ph = backend.placeholder(shape=(), name='x')\n output = x_ph * x_ph\n f = backend.function(x_ph, output)\n result = f(2.)\n self.assertEqual(result, 4.)\n\n def test_tuple_updates(self):\n if context.executing_eagerly():\n self.skipTest('eager backend.function does not support updates')\n\n x_ph = backend.placeholder(ndim=2)\n v = backend.variable(np.ones((4, 2)))\n output = x_ph ** 2 + v\n new_v = v + x_ph\n f = backend.function(x_ph, output, updates=[(v, new_v)])\n input_val = np.random.random((4, 2))\n result = f(input_val)\n self.assertAllClose(result, input_val ** 2 + 1)\n self.assertAllClose(backend.get_value(v), np.ones((4, 2)) + input_val)\n\n\nclass BackendGraphTests(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(mode=['graph']))\n def test_function_placeholder_with_default(self):\n with backend.get_graph().as_default():\n x1 = array_ops.placeholder_with_default(\n np.array(2., dtype='float32'), shape=())\n x2 = array_ops.placeholder_with_default(\n np.array(3, dtype='int32'), shape=())\n y1 = x1 + backend.cast(x2, 
'float32')\n y2 = x1 * backend.cast(x2, 'float32')\n f = backend.function([x1, x2], [y1, y2])\n output_values = f([4, 5])\n self.assertEqual(output_values, [9., 20.])\n output_values = f([None, None])\n self.assertEqual(output_values, [5., 6.])\n\n def test_function_tf_feed_symbols(self):\n # Test Keras backend functions with TF tensor inputs.\n with ops.Graph().as_default(), self.cached_session():\n # Test feeding a resource variable to `function`.\n x1 = backend.placeholder(shape=())\n x2 = backend.placeholder(shape=())\n lr = backend.learning_phase() # Include a placeholder_with_default.\n\n y1 = backend.variable(10.)\n y2 = 3\n\n f = backend.function(\n inputs=[x1, x2, lr],\n outputs=[x1 + 1, backend.in_train_phase(x2 + 2, x2 - 1)])\n outs = f([y1, y2, None]) # Use default learning_phase value.\n self.assertEqual(outs, [11., 2.])\n outs = f([y1, y2, 1]) # Set learning phase value.\n self.assertEqual(outs, [11., 5.])\n\n # Test triggering a callable refresh by changing the input.\n y3 = backend.constant(20.) # Test with tensor\n outs = f([y3, y2, None])\n self.assertEqual(outs, [21., 2.])\n\n y4 = 4 # Test with non-symbol\n outs = f([y4, y2, None])\n self.assertEqual(outs, [5., 2.])\n\n # Test with a different dtype\n y5 = backend.constant(10., dtype='float64')\n outs = f([y5, y2, None])\n self.assertEqual(outs, [11., 2.])\n\n def test_function_tf_fetches(self):\n # Additional operations can be passed to tf.compat.v1.Session().run() via\n # its `fetches` arguments. In contrast to `updates` argument of\n # backend.function() these do not have control dependency on `outputs`\n # so they can run in parallel. Also they should not contribute to output of\n # backend.function().\n with ops.Graph().as_default(), self.cached_session():\n x = backend.variable(0.)\n y = backend.variable(0.)\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n updates=[(x, x_placeholder + 1.)],\n fetches=[backend.update(y, 5.)])\n output = f([10., 20.])\n self.assertEqual(output, [30.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [11., 5.])\n\n def test_function_tf_feed_dict(self):\n # Additional substitutions can be passed to `tf.compat.v1.Session().run()`\n # via its `feed_dict` arguments. Note that the feed_dict is passed once in\n # the constructor but we can modify the values in the dictionary. 
Through\n # this feed_dict we can provide additional substitutions besides Keras\n # inputs.\n with ops.Graph().as_default(), self.cached_session():\n x = backend.variable(0.)\n y = backend.variable(0.)\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n feed_dict = {y_placeholder: 3.}\n fetches = [backend.update(y, y_placeholder * 10.)]\n f = backend.function(\n inputs=[x_placeholder],\n outputs=[x_placeholder + 1.],\n updates=[(x, x_placeholder + 10.)],\n feed_dict=feed_dict,\n fetches=fetches)\n output = f([10.])\n self.assertEqual(output, [11.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [20., 30.])\n\n # updated value in feed_dict will be modified within the K.function()\n feed_dict[y_placeholder] = 4.\n output = f([20.])\n self.assertEqual(output, [21.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [30., 40.])\n\n def test_function_tf_run_options_with_run_metadata(self):\n with ops.Graph().as_default(), self.cached_session():\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n run_metadata = config_pb2.RunMetadata()\n # enable run_options.\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n options=run_options,\n run_metadata=run_metadata)\n output = f([10., 20.])\n self.assertEqual(output, [30.])\n self.assertNotEmpty(run_metadata.partition_graphs)\n # disable run_options.\n f1 = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n run_metadata=run_metadata)\n output1 = f1([10., 20.])\n self.assertEqual(output1, [30.])\n self.assertEmpty(run_metadata.partition_graphs)\n\n def test_function_fetch_callbacks(self):\n\n class CallbackStub(object):\n\n def __init__(self):\n self.times_called = 0\n self.callback_result = 0\n\n def _fetch_callback(self, result):\n self.times_called += 1\n self.callback_result = result\n\n with ops.Graph().as_default(), self.cached_session():\n callback = CallbackStub()\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n callback_op = x_placeholder * y_placeholder\n\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder])\n f.fetches.append(callback_op)\n f.fetch_callbacks[callback_op] = callback._fetch_callback\n\n _ = f([10., 20.])\n\n self.assertEqual(callback.times_called, 1)\n self.assertEqual(callback.callback_result, 200)\n\n def test_get_session_different_graphs(self):\n with ops.Graph().as_default():\n x = backend.constant(1)\n session = backend.get_session()\n self.assertIs(session, backend.get_session((x,)))\n self.assertIs(session, backend.get_session())\n with ops.Graph().as_default():\n self.assertIs(session, backend.get_session((x,)))\n self.assertIsNot(session, backend.get_session())\n\n\[email protected](combinations.combine(mode=['graph', 'eager']))\nclass ControlOpsTests(test.TestCase):\n\n def test_function_switch_basics(self):\n x = array_ops.constant(2.0)\n y = array_ops.constant(3.0)\n\n def xpowy():\n return backend.pow(x, y)\n\n def ypowx():\n return backend.pow(y, x)\n\n tensor = backend.switch(backend.less(x, y), xpowy, ypowx)\n self.assertEqual(backend.eval(tensor), [8.0])\n\n tensor = backend.switch(backend.greater(x, y), xpowy, ypowx)\n self.assertEqual(backend.eval(tensor), [9.0])\n\n def test_unequal_rank(self):\n x = 
ops.convert_to_tensor_v2_with_dispatch(\n np.array([[1, 2, 3], [4, 5, 6]]), dtype='float32')\n y = ops.convert_to_tensor_v2_with_dispatch(\n np.array([1, 2, 3]), dtype='float32')\n\n def true_func():\n return x\n\n def false_func():\n return y\n\n with self.assertRaisesRegex(ValueError,\n 'Rank of `condition` should be less than'):\n backend.switch(backend.equal(x, x), false_func, true_func)\n\n\nclass ContextValueCacheTest(test.TestCase):\n\n def test_cache(self):\n cache = backend.ContextValueCache(list)\n graph1 = ops.Graph()\n graph2 = ops.Graph()\n\n cache[graph1].append(1)\n with graph1.as_default():\n cache[None].append(2)\n\n with graph2.as_default():\n cache[None].append(3)\n cache[graph2].append(4)\n\n self.assertAllEqual(cache[graph1], [1, 2])\n self.assertAllEqual(cache[graph2], [3, 4])\n\n with context.eager_mode():\n cache[None].append(5)\n cache[None].append(6)\n self.assertAllEqual(cache[None], [5, 6])\n\n self.assertLen(cache, 3)\n\n del graph1\n gc.collect()\n self.assertLen(cache, 2)\n\n def test_cache_in_parent_graph(self):\n cache = backend.ContextValueCache(int)\n cache.setdefault(None, backend.constant(5))\n\n with ops.Graph().as_default() as g:\n # g is not a child graph of the default test context, so the recursive\n # lookup will create a new default value.\n self.assertAllEqual(cache[g], 0)\n\n @def_function.function\n def fn():\n # The function graph is a child of the default test context, so\n # __getitem__ will return the previously saved value.\n return cache[ops.get_default_graph()]\n\n self.assertEqual(self.evaluate(fn()), 5)\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.keras.backend.sigmoid",
"tensorflow.python.keras.backend.symbolic_learning_phase",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.keras.backend.is_placeholder",
"tensorflow.python.keras.backend.ContextValueCache",
"numpy.tensordot",
"numpy.random.random",
"tensorflow.python.util.tf_inspect.getargspec",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.backend.random_binomial",
"numpy.count_nonzero",
"tensorflow.python.keras.backend.zeros_like",
"tensorflow.python.keras.backend.learning_phase_scope",
"numpy.prod",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.expand_dims",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.keras.backend.less",
"tensorflow.python.keras.backend.repeat_elements",
"tensorflow.python.keras.backend.stop_gradient",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.keras.backend.normalize_batch_in_training",
"tensorflow.python.keras.backend.reset_uids",
"tensorflow.python.keras.backend.ctc_batch_cost",
"tensorflow.python.keras.backend.eye",
"tensorflow.python.keras.backend.concatenate",
"tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch",
"tensorflow.python.keras.backend.expand_dims",
"tensorflow.python.keras.backend.conv3d",
"numpy.testing.assert_allclose",
"tensorflow.python.keras.backend.greater",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.eager.context.context",
"numpy.mean",
"tensorflow.python.keras.backend.bias_add",
"tensorflow.python.keras.backend.rnn",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.keras.backend.categorical_crossentropy",
"tensorflow.python.ops.nn.moments",
"numpy.eye",
"tensorflow.python.keras.backend.binary_crossentropy",
"tensorflow.python.keras.backend.eval",
"numpy.array",
"tensorflow.python.keras.backend.dropout",
"tensorflow.python.keras.backend.sparse_categorical_crossentropy",
"numpy.zeros",
"tensorflow.python.keras.backend.is_keras_tensor",
"tensorflow.python.keras.backend.softmax",
"tensorflow.python.keras.backend.dot",
"tensorflow.python.keras.layers.advanced_activations.ReLU",
"tensorflow.python.keras.backend.identity",
"numpy.random.seed",
"numpy.ones",
"tensorflow.python.keras.combinations.combine",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.backend.function",
"numpy.repeat",
"tensorflow.python.keras.backend.separable_conv2d",
"tensorflow.python.keras.backend.conv1d",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.engine.input_layer.Input",
"numpy.min",
"tensorflow.python.keras.backend.resize_volumes",
"tensorflow.python.keras.backend.conv2d_transpose",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.keras.backend.name_scope",
"numpy.max",
"tensorflow.python.keras.backend.equal",
"tensorflow.python.keras.backend.local_conv2d",
"tensorflow.python.keras.backend.cast",
"tensorflow.python.keras.backend.random_normal",
"tensorflow.python.keras.backend.get_uid",
"tensorflow.python.keras.backend.batch_normalization",
"tensorflow.python.keras.backend.count_params",
"numpy.std",
"tensorflow.python.keras.backend.local_conv1d",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.keras.backend.variable",
"tensorflow.python.keras.backend.local_conv",
"numpy.asarray",
"tensorflow.python.keras.backend.cast_to_floatx",
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.framework.config.set_optimizer_jit",
"tensorflow.python.keras.backend.repeat",
"tensorflow.python.keras.backend.relu",
"tensorflow.python.keras.backend.pool3d",
"tensorflow.python.keras.backend.clear_session",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.keras.backend.zeros",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.keras.backend.batch_dot",
"tensorflow.python.keras.backend.random_uniform",
"tensorflow.python.keras.layers.normalization.BatchNormalization",
"tensorflow.python.keras.backend.conv2d",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.concatenate",
"numpy.random.normal",
"tensorflow.python.keras.backend.ones",
"tensorflow.python.keras.backend.in_train_phase",
"tensorflow.python.keras.backend.pool2d",
"tensorflow.python.keras.backend.resize_images",
"numpy.random.randint",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.keras.backend.constant",
"tensorflow.python.keras.backend.random_normal_variable",
"numpy.reshape",
"tensorflow.python.keras.backend.ctc_decode",
"tensorflow.python.keras.backend.random_uniform_variable",
"tensorflow.python.keras.backend.update",
"tensorflow.python.keras.backend.ones_like",
"tensorflow.python.keras.backend.to_dense",
"tensorflow.python.keras.backend.set_learning_phase",
"tensorflow.python.keras.backend.pow",
"tensorflow.python.keras.backend.backend",
"tensorflow.python.keras.backend.truncated_normal"
]
] |
quejebo/zippy
|
[
"2f7857ab2b295f188af48d434c83a9906c48de75"
] |
[
"zippy/bloom_kmer_finder.py"
] |
[
"import cProfile\nimport os\nimport gzip\nimport csv\nimport time\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom pybloomfilter import BloomFilter\n#from pybloom import ScalableBloomFilter, BloomFilter #pybloom used cryptographic hashes in a bloom filter. This is a bad idea.\nimport numpy\n\nclass BloomKmerFinder():\n \"\"\"\n Finds all kmers that show up more than a certain number of times. Can choose to ignore dimerized reads\n or do only dimerized reads. Useful for finding common kmers in unmapped reads. We use a bloom filter\n to do this, so it is very fast, but requires a few GB of ram to keep the filter in memory.\n \"\"\"\n def __init__(self, params, k, exclude_monomers=False, exclude_dimers=False):\n self.params = params\n self.k = k #kmer for global matching\n self.primer_k = 15 #kmer for primer matching\n self.exclude_monomers = exclude_monomers\n self.exclude_dimers = exclude_dimers\n self.run_dimer_detector = False\n self.bloom = BloomFilter(1e9, 0.01, None) \n self.count_map = defaultdict(int)\n if exclude_monomers or exclude_dimers:\n self.run_dimer_detector = True\n self.reads_read = 0 # how many lines we've looked at\n self.dimer_reads_read = 0 # how many lines we've looked at with at least 2 primers\n self.monomer_reads_read = 0 # how many lines we've looked at with exactly 1 primer\n self.out_stats_file = open(os.path.join(self.params.output_dir,'count_stats'), 'w')\n\n def reverse_complement(self, seq):\n rev_map = {'A':'T','C':'G','G':'C','T':'A', 'N':'N'}\n new_seq = ''.join([rev_map[x] for x in seq]) #sub letters\n new_seq = new_seq[::-1] # reverse it\n return new_seq\n\n def parse_probe_list(self):\n \"\"\"\n Creates probe map, which is a map of probe names to sequence.\n \"\"\"\n probe_map = {}\n with open(self.params.probe_list, 'r') as f:\n c = csv.reader(f, delimiter=\"\\t\")\n for line in c:\n probe_map[line[0]] = line[1].upper()\n return probe_map\n\n def build_kmer_map(self, probe_map, k):\n \"\"\"\n Builds a map from kmer to probenames that have this kmer.\n Also does reverse complements.\n \"\"\"\n kmer_map = defaultdict(set)\n for (probe, seq) in probe_map.items():\n seq_rc = self.reverse_complement(seq)\n for i in range(0, len(seq)-k):\n kmer_map[seq[i:i+k]].add(probe)\n kmer_map[seq_rc[i:i+k]].add(probe+\"rc\")\n return kmer_map\n\n def run_matcher(self, input_file, kmer_map):\n \"\"\"\n Goes through a fastq, and registers all the kmers in it.\n \"\"\"\n if input_file is None: # we don't require the input files... 
one of them can be undefined\n return\n debug = 0\n with open(input_file, 'r') as f:\n counter = 0 # 0: header 1: sequence 2: junk 3: quality\n for line in f:\n if counter == 0:\n read_name = line.strip()\n if counter == 1:\n line = line.upper()\n if self.run_dimer_detector:\n probe_matches = self.find_matches(line.strip(), kmer_map)\n if len(probe_matches) > 1 and not self.exclude_dimers:\n self.dimer_reads_read += 1\n self.register_kmers(line.strip())\n elif len(probe_matches) == 1 and not self.exclude_monomers:\n self.monomer_reads_read += 1\n self.register_kmers(line.strip())\n elif len(probe_matches) == 0:\n debug += 1\n self.register_kmers(line.strip()) \n else:\n self.register_kmers(line.strip())\n self.reads_read += 1\n counter += 1\n counter = counter % 4\n print('{} dimer: {}'.format(input_file, self.dimer_reads_read))\n print('{} monomer: {}'.format(input_file, self.monomer_reads_read))\n print('{} none: {}'.format(input_file, debug))\n print('{} total: {}'.format(input_file, self.reads_read))\n\n def register_kmers(self, read):\n \"\"\"\n Adds the read and its reverse complement to our bloom filter, and if we have seen it before,\n adds it to the count map. The idea is that the bloom filter can approximately determine\n if we've seen something before or not, and to the count map are added all kmers that the bloom\n filter reports that we've seen before.\n \"\"\"\n for i in range(0, len(read)-self.k):\n seq = read[i:i+self.k]\n seq_rc = self.reverse_complement(seq)\n if self.bloom.add(seq):\n self.count_map[seq]+=1\n if self.bloom.add(seq_rc):\n self.count_map[seq_rc]+=1\n\n\n def find_matches(self, line, kmer_map):\n \"\"\"\n For a single read, reports all found primers\n \"\"\"\n in_primer = None\n matches = []\n for i in range(0, len(line)-self.primer_k):\n sub_seq = line[i:i+self.primer_k]\n if in_primer is None: #we are not currently in a primer. \n if len(kmer_map[sub_seq]) == 1: # If we see a uniquely mappable kmer, we enter a primer.\n (in_primer,) = kmer_map[sub_seq]\n matches.append(in_primer)\n else: # Otherwise, we continue\n continue\n else: # we are in the middle of seeing a primer sequence\n if in_primer in kmer_map[sub_seq]: # we see this primer again, and are thus still reading it. Continue.\n continue\n elif len(kmer_map[sub_seq]) == 1: # We no longer see our current primer, but this sequence is mappable to another primer. 
We are now in a different primer.\n (in_primer,) = kmer_map[sub_seq]\n matches.append(in_primer)\n else: # We aren't in our current primer, and aren't uniquely in a different primer.\n in_primer = None\n return matches\n\n\n def output_stats(self, kmer_map):\n \"\"\"\n We print the top-two unique maximal strings, and then a sorted list of all kmers that appear\n at least twice in our reads.\n \"\"\"\n sorted_map = sorted(self.count_map.items(), key=lambda x: -x[1])\n first_string = self.extend(sorted_map[0][0], self.count_map)\n for (kmer, count) in sorted_map[1:]:\n if kmer not in first_string and self.reverse_complement(kmer) not in first_string:\n second_string = self.extend(kmer, self.count_map)\n second_score = count\n break\n self.out_stats_file.write(\"{}\\t{}\\n\".format(sorted_map[0][1], first_string))\n self.out_stats_file.write(\"{}\\t{}\\n\".format(second_score, second_string))\n for (kmer, count) in sorted_map:\n probe_matches = self.find_matches(kmer, kmer_map) \n if len(probe_matches) == 0:\n self.out_stats_file.write(\"{}\\t{}\\n\".format(kmer, count))\n else:\n self.out_stats_file.write(\"{}\\t{}\\t{}\\n\".format(probe_matches, kmer, count))\n\n def extend(self, seed, kmer_map):\n \"\"\"\n Given a kmer, we greedily extend it in both directions by looking for kmers that differ by 1 on either side. We add\n the new kmer if its count is at least half of our peak kmer.\n \"\"\"\n final_string = [seed]\n value = kmer_map[seed]\n forward_extend = True\n current_seed = seed\n while forward_extend:\n extender = current_seed[1:]\n new_kmers = [extender+x for x in 'ACGT']\n new_scores = [kmer_map[x] for x in new_kmers]\n if numpy.max(new_scores)>value*0.5: #we extend\n new_kmer = new_kmers[numpy.argmax(new_scores)]\n if new_kmer == current_seed: #we hit a pathological (recursive) read\n forward_extend = False\n final_string.append(new_kmer[-1])\n current_seed = new_kmer\n else:\n forward_extend = False\n reverse_extend = True\n current_seed = seed\n while reverse_extend:\n extender = current_seed[:-1]\n new_kmers = [x+extender for x in 'ACGT']\n new_scores = [kmer_map[x] for x in new_kmers]\n if numpy.max(new_scores)>value*0.5: #we extend\n new_kmer = new_kmers[numpy.argmax(new_scores)]\n if new_kmer == current_seed: #we hit a pathological read\n reverse_extend = False\n final_string = [new_kmer[0]]+final_string\n current_seed = new_kmer\n else:\n reverse_extend = False\n return ''.join(final_string)\n\n\n def run(self):\n \"\"\"\n Main execution function for kmer finder\n \"\"\"\n if self.run_dimer_detector:\n probe_map = self.parse_probe_list()\n kmer_map = self.build_kmer_map(probe_map, self.primer_k) #kmer-map for dimer detection\n else:\n kmer_map = defaultdict(set)\n for input_file in [self.params.input_file1, self.params.input_file2]:\n self.run_matcher(input_file, kmer_map)\n self.output_stats(kmer_map)\n self.out_stats_file.close()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--probe_list', type=str, help=\"Needed if you want to filter by dimers. tsv with 2 columns: (probe_name, sequence). 
If you are looking for adapters or other short sequences, they should be added to the probe list.\")\n parser.add_argument('--kmer', type=int, default=30, help=\"How big a fragment size to count\")\n parser.add_argument('--input_file1', help=\"A fastq file with reads to analyze\")\n parser.add_argument('--input_file2', help=\"Another fastq file (Optional)\")\n parser.add_argument('--output_dir')\n parser.add_argument('--exclude_monomers', dest='exclude_monomers', action='store_true', help=\"Whether we exclude primer monomers from kmer counting\")\n parser.set_defaults(exclude_monomers=False)\n parser.add_argument('--exclude_dimers', dest='exclude_dimers', action='store_true', help=\"Whether we exclude primer dimers from kmer counting\")\n parser.set_defaults(exclude_dimers=False)\n params = parser.parse_args()\n bloomy = BloomKmerFinder(params, params.kmer, params.exclude_monomers, params.exclude_dimers)\n start = time.time()\n bloomy.run()\n print(time.time()-start)"
] |
[
[
"numpy.max",
"numpy.argmax"
]
] |
c-rbp/panoptic_segmentation
|
[
"aa212d1d6e851857e0b9563bb94fe7297c987c1a"
] |
[
"detectron2/evaluation/coco_evaluation.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport contextlib\nimport copy\nimport io\nimport itertools\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pickle\nfrom collections import OrderedDict\nimport pycocotools.mask as mask_util\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom tabulate import tabulate\n\nimport detectron2.utils.comm as comm\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.data.datasets.coco import convert_to_coco_json\nfrom detectron2.structures import Boxes, BoxMode, pairwise_iou\nfrom detectron2.utils.logger import create_small_table\n\nfrom .evaluator import DatasetEvaluator\n\n\nclass COCOEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate object proposal, instance detection/segmentation, keypoint detection\n outputs using COCO's metrics and APIs.\n \"\"\"\n\n def __init__(self, dataset_name, cfg, distributed, output_dir=None):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n It must have either the following corresponding metadata:\n\n \"json_file\": the path to the COCO format annotation\n\n Or it must be in detectron2's standard dataset format\n so it can be converted to COCO format automatically.\n cfg (CfgNode): config instance\n distributed (True): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): optional, an output directory to dump all\n results predicted on the dataset. The dump contains two files:\n\n 1. \"instance_predictions.pth\" a file in torch serialization\n format that contains all the raw original predictions.\n 2. \"coco_instances_results.json\" a json file in COCO's result\n format.\n \"\"\"\n self._tasks = self._tasks_from_config(cfg)\n self._distributed = distributed\n self._output_dir = output_dir\n if hasattr(cfg, 'ckpt_name'):\n self._ckpt_tag = cfg.ckpt_name\n else:\n self._ckpt_tag = False\n\n self._cpu_device = torch.device(\"cpu\")\n self._logger = logging.getLogger(__name__)\n\n self._metadata = MetadataCatalog.get(dataset_name)\n if not hasattr(self._metadata, \"json_file\"):\n self._logger.warning(\n f\"json_file was not found in MetaDataCatalog for '{dataset_name}'.\"\n \" Trying to convert it to COCO format ...\"\n )\n\n cache_path = os.path.join(output_dir, f\"{dataset_name}_coco_format.json\")\n self._metadata.json_file = cache_path\n convert_to_coco_json(dataset_name, cache_path)\n\n json_file = PathManager.get_local_path(self._metadata.json_file)\n with contextlib.redirect_stdout(io.StringIO()):\n self._coco_api = COCO(json_file)\n\n self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS\n # Test set json files do not contain annotations (evaluation must be\n # performed using the COCO evaluation server).\n self._do_evaluation = \"annotations\" in self._coco_api.dataset\n\n def reset(self):\n self._predictions = []\n self._coco_results = []\n\n def _tasks_from_config(self, cfg):\n \"\"\"\n Returns:\n tuple[str]: tasks that can be evaluated under the given configuration.\n \"\"\"\n tasks = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n tasks = tasks + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n tasks = tasks + (\"keypoints\",)\n return tasks\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).\n It is a list of dict. 
Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\", \"image_id\".\n outputs: the outputs of a COCO model. It is a list of dicts with key\n \"instances\" that contains :class:`Instances`.\n \"\"\"\n for input, output in zip(inputs, outputs):\n prediction = {\"image_id\": input[\"image_id\"]}\n\n # TODO this is ugly\n if \"instances\" in output:\n instances = output[\"instances\"].to(self._cpu_device)\n prediction[\"instances\"] = instances_to_coco_json(instances, input[\"image_id\"])\n if \"proposals\" in output:\n prediction[\"proposals\"] = output[\"proposals\"].to(self._cpu_device)\n self._predictions.append(prediction)\n\n def evaluate(self):\n if self._distributed:\n comm.synchronize()\n self._predictions = comm.gather(self._predictions, dst=0)\n self._predictions = list(itertools.chain(*self._predictions))\n\n if not comm.is_main_process():\n return {}\n\n if len(self._predictions) == 0:\n self._logger.warning(\"[COCOEvaluator] Did not receive valid predictions.\")\n return {}\n\n if self._output_dir:\n PathManager.mkdirs(self._output_dir)\n if self._ckpt_tag:\n file_name = \"instances_predictions_{}.pth\".format(self._ckpt_tag)\n else:\n file_name = \"instances_predictions.pth\"\n file_path = os.path.join(self._output_dir, file_name)\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(self._predictions, f)\n\n self._results = OrderedDict()\n if \"proposals\" in self._predictions[0]:\n self._eval_box_proposals()\n if \"instances\" in self._predictions[0]:\n self._eval_predictions(set(self._tasks))\n # Copy so the caller can do whatever with results\n return copy.deepcopy(self._results)\n\n def _eval_predictions(self, tasks):\n \"\"\"\n Evaluate self._predictions on the given tasks.\n Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n self._coco_results = list(itertools.chain(*[x[\"instances\"] for x in self._predictions]))\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n reverse_id_mapping = {\n v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()\n }\n for result in self._coco_results:\n category_id = result[\"category_id\"]\n assert (\n category_id in reverse_id_mapping\n ), \"A prediction has category_id={}, which is not available in the dataset.\".format(\n category_id\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n if self._ckpt_tag:\n file_name = \"coco_instances_results_{}.json\".format(self._ckpt_tag)\n else:\n file_name = \"coco_instances_results.json\"\n\n file_path = os.path.join(self._output_dir, file_name)\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\"Evaluating predictions ...\")\n for task in sorted(tasks):\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api, self._coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas\n )\n if len(self._coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res\n\n def _eval_box_proposals(self):\n \"\"\"\n Evaluate the box proposals in self._predictions.\n Fill 
self._results with the metrics for \"box_proposals\" task.\n \"\"\"\n if self._output_dir:\n # Saving generated box proposals to file.\n # Predicted box_proposals are in XYXY_ABS mode.\n bbox_mode = BoxMode.XYXY_ABS.value\n ids, boxes, objectness_logits = [], [], []\n for prediction in self._predictions:\n ids.append(prediction[\"image_id\"])\n boxes.append(prediction[\"proposals\"].proposal_boxes.tensor.numpy())\n objectness_logits.append(prediction[\"proposals\"].objectness_logits.numpy())\n\n proposal_data = {\n \"boxes\": boxes,\n \"objectness_logits\": objectness_logits,\n \"ids\": ids,\n \"bbox_mode\": bbox_mode,\n }\n if self._ckpt_tag:\n file_name = \"box_proposals_{}.pkl\".format(self._ckpt_tag)\n else:\n file_name = \"box_proposals.pkl\"\n with PathManager.open(os.path.join(self._output_dir, file_name), \"wb\") as f:\n pickle.dump(proposal_data, f)\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\"Evaluating bbox proposals ...\")\n res = {}\n areas = {\"all\": \"\", \"small\": \"s\", \"medium\": \"m\", \"large\": \"l\"}\n for limit in [100, 1000]:\n for area, suffix in areas.items():\n stats = _evaluate_box_proposals(\n self._predictions, self._coco_api, area=area, limit=limit\n )\n key = \"AR{}@{:d}\".format(suffix, limit)\n res[key] = float(stats[\"ar\"].item() * 100)\n self._logger.info(\"Proposal metrics: \\n\" + create_small_table(res))\n self._results[\"box_proposals\"] = res\n\n def _derive_coco_results(self, coco_eval, iou_type, class_names=None):\n \"\"\"\n Derive the desired score numbers from summarized COCOeval.\n\n Args:\n coco_eval (None or COCOEval): None represents no predictions from model.\n iou_type (str):\n class_names (None or list[str]): if provided, will use it to predict\n per-category AP.\n\n Returns:\n a dict of {metric name: score}\n \"\"\"\n\n metrics = {\n \"bbox\": [\"AP\", \"AP50\", \"AP75\", \"APs\", \"APm\", \"APl\"],\n \"segm\": [\"AP\", \"AP50\", \"AP75\", \"APs\", \"APm\", \"APl\"],\n \"keypoints\": [\"AP\", \"AP50\", \"AP75\", \"APm\", \"APl\"],\n }[iou_type]\n\n if coco_eval is None:\n self._logger.warn(\"No predictions from the model!\")\n return {metric: float(\"nan\") for metric in metrics}\n\n # the standard metrics\n results = {\n metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else \"nan\")\n for idx, metric in enumerate(metrics)\n }\n self._logger.info(\n \"Evaluation results for {}: \\n\".format(iou_type) + create_small_table(results)\n )\n if not np.isfinite(sum(results.values())):\n self._logger.info(\"Note that some metrics cannot be computed.\")\n\n if class_names is None or len(class_names) <= 1:\n return results\n # Compute per-category AP\n # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa\n precisions = coco_eval.eval[\"precision\"]\n # np.save('cbp_precision', precisions)\n # precision has dims (iou, recall, cls, area range, max dets)\n assert len(class_names) == precisions.shape[2]\n\n results_per_category = []\n for idx, name in enumerate(class_names):\n # area range index 0: all area ranges\n # max dets index -1: typically 100 per image\n precision = precisions[:, :, idx, 0, -1]\n precision = precision[precision > -1]\n ap = np.mean(precision) if precision.size else float(\"nan\")\n results_per_category.append((\"{}\".format(name), float(ap * 100)))\n\n # tabulate it\n N_COLS = min(6, 
len(results_per_category) * 2)\n results_flatten = list(itertools.chain(*results_per_category))\n results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])\n table = tabulate(\n results_2d,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n headers=[\"category\", \"AP\"] * (N_COLS // 2),\n numalign=\"left\",\n )\n self._logger.info(\"Per-category {} AP: \\n\".format(iou_type) + table)\n\n results.update({\"AP-\" + name: ap for name, ap in results_per_category})\n return results\n\n\ndef instances_to_coco_json(instances, img_id):\n \"\"\"\n Dump an \"Instances\" object to a COCO-format json that's used for evaluation.\n\n Args:\n instances (Instances):\n img_id (int): the image id\n\n Returns:\n list[dict]: list of json annotations in COCO format.\n \"\"\"\n num_instance = len(instances)\n if num_instance == 0:\n return []\n\n boxes = instances.pred_boxes.tensor.numpy()\n boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)\n boxes = boxes.tolist()\n scores = instances.scores.tolist()\n classes = instances.pred_classes.tolist()\n\n has_mask = instances.has(\"pred_masks\")\n if has_mask:\n # use RLE to encode the masks, because they are too large and takes memory\n # since this evaluator stores outputs of the entire dataset\n rles = [\n mask_util.encode(np.array(mask[:, :, None], order=\"F\", dtype=\"uint8\"))[0]\n for mask in instances.pred_masks\n ]\n for rle in rles:\n # \"counts\" is an array encoded by mask_util as a byte-stream. Python3's\n # json writer which always produces strings cannot serialize a bytestream\n # unless you decode it. Thankfully, utf-8 works out (which is also what\n # the pycocotools/_mask.pyx does).\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\n\n has_keypoints = instances.has(\"pred_keypoints\")\n if has_keypoints:\n keypoints = instances.pred_keypoints\n\n results = []\n for k in range(num_instance):\n result = {\n \"image_id\": img_id,\n \"category_id\": classes[k],\n \"bbox\": boxes[k],\n \"score\": scores[k],\n }\n if has_mask:\n result[\"segmentation\"] = rles[k]\n if has_keypoints:\n # In COCO annotations,\n # keypoints coordinates are pixel indices.\n # However our predictions are floating point coordinates.\n # Therefore we subtract 0.5 to be consistent with the annotation format.\n # This is the inverse of data loading logic in `datasets/coco.py`.\n keypoints[k][:, :2] -= 0.5\n result[\"keypoints\"] = keypoints[k].flatten().tolist()\n results.append(result)\n return results\n\n\n# inspired from Detectron:\n# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa\ndef _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area=\"all\", limit=None):\n \"\"\"\n Evaluate detection proposal recall metrics. This function is a much\n faster alternative to the official COCO API recall evaluation code. 
However,\n it produces slightly different results.\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = {\n \"all\": 0,\n \"small\": 1,\n \"medium\": 2,\n \"large\": 3,\n \"96-128\": 4,\n \"128-256\": 5,\n \"256-512\": 6,\n \"512-inf\": 7,\n }\n area_ranges = [\n [0 ** 2, 1e5 ** 2], # all\n [0 ** 2, 32 ** 2], # small\n [32 ** 2, 96 ** 2], # medium\n [96 ** 2, 1e5 ** 2], # large\n [96 ** 2, 128 ** 2], # 96-128\n [128 ** 2, 256 ** 2], # 128-256\n [256 ** 2, 512 ** 2], # 256-512\n [512 ** 2, 1e5 ** 2],\n ] # 512-inf\n assert area in areas, \"Unknown area range: {}\".format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = []\n num_pos = 0\n\n for prediction_dict in dataset_predictions:\n predictions = prediction_dict[\"proposals\"]\n\n # sort predictions in descending order\n # TODO maybe remove this and make it explicit in the documentation\n inds = predictions.objectness_logits.sort(descending=True)[1]\n predictions = predictions[inds]\n\n ann_ids = coco_api.getAnnIds(imgIds=prediction_dict[\"image_id\"])\n anno = coco_api.loadAnns(ann_ids)\n gt_boxes = [\n BoxMode.convert(obj[\"bbox\"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)\n for obj in anno\n if obj[\"iscrowd\"] == 0\n ]\n gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes\n gt_boxes = Boxes(gt_boxes)\n gt_areas = torch.as_tensor([obj[\"area\"] for obj in anno if obj[\"iscrowd\"] == 0])\n\n if len(gt_boxes) == 0 or len(predictions) == 0:\n continue\n\n valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])\n gt_boxes = gt_boxes[valid_gt_inds]\n\n num_pos += len(gt_boxes)\n\n if len(gt_boxes) == 0:\n continue\n\n if limit is not None and len(predictions) > limit:\n predictions = predictions[:limit]\n\n overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)\n\n _gt_overlaps = torch.zeros(len(gt_boxes))\n for j in range(min(len(predictions), len(gt_boxes))):\n # find which proposal box maximally covers each gt box\n # and get the iou amount of coverage for each gt box\n max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ovr, gt_ind = max_overlaps.max(dim=0)\n assert gt_ovr >= 0\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert _gt_overlaps[j] == gt_ovr\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n\n # append recorded iou coverage level\n gt_overlaps.append(_gt_overlaps)\n gt_overlaps = torch.cat(gt_overlaps, dim=0)\n gt_overlaps, _ = torch.sort(gt_overlaps)\n\n if thresholds is None:\n step = 0.05\n thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)\n recalls = torch.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {\n \"ar\": ar,\n \"recalls\": recalls,\n \"thresholds\": thresholds,\n \"gt_overlaps\": gt_overlaps,\n \"num_pos\": num_pos,\n }\n\n\ndef _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None):\n \"\"\"\n Evaluate the coco results using COCOEval API.\n \"\"\"\n assert len(coco_results) > 0\n\n if iou_type == \"segm\":\n coco_results = copy.deepcopy(coco_results)\n # When evaluating mask AP, if the results contain bbox, cocoapi will\n # use the box area as the area of the instance, instead of the mask area.\n # This leads to a different definition of small/medium/large.\n # We remove the bbox field to let mask AP use mask area.\n for c in coco_results:\n c.pop(\"bbox\", None)\n\n coco_dt = coco_gt.loadRes(coco_results)\n coco_eval = COCOeval(coco_gt, coco_dt, iou_type)\n # Use the COCO default keypoint OKS sigmas unless overrides are specified\n if kpt_oks_sigmas:\n coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)\n\n if iou_type == \"keypoints\":\n num_keypoints = len(coco_results[0][\"keypoints\"]) // 3\n assert len(coco_eval.params.kpt_oks_sigmas) == num_keypoints, (\n \"[COCOEvaluator] The length of cfg.TEST.KEYPOINT_OKS_SIGMAS (default: 17) \"\n \"must be equal to the number of keypoints. However the prediction has {} \"\n \"keypoints! For more information please refer to \"\n \"http://cocodataset.org/#keypoints-eval.\".format(num_keypoints)\n )\n\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n imgids = coco_eval.params.imgIds # list(np.unique(coco_eval.params.imgIds))\n ious = coco_eval.ious\n evalimgs = coco_eval.evalImgs\n # np.savez('baseline_coco', evalimgs=evalimgs, ious=ious, imgids=imgids)\n return coco_eval\n\n"
] |
[
[
"torch.device",
"torch.cat",
"numpy.array",
"torch.arange",
"torch.save",
"numpy.mean",
"torch.zeros_like",
"torch.as_tensor",
"torch.sort"
]
] |
ShannonAI/GNN-LM
|
[
"56335cd0f30b6583260622e3dc2d7ab635ab6a12"
] |
[
"fairseq_cli/convert_ckpt.py"
] |
[
"# encoding: utf-8\n\"\"\"\n@author: Yuxian Meng\n@contact: [email protected]\n@time: 2021/4/23 19:26\n@desc: convert a transformer ckpt to gnn-transformer\n\n\"\"\"\n\nimport argparse\nimport os\n\nimport faiss\nimport torch\n\nfrom fairseq import checkpoint_utils\nfrom knn.pq_wrapper import TorchPQCodec\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ckpt\", required=True, help=\"original pretrained ckpt\")\n parser.add_argument(\"--out\", required=True, help=\"new ckpt save path\")\n parser.add_argument(\"--quantizer\", required=True, help=\"quantizer generated by faiss\")\n args = parser.parse_args()\n\n # original pretrained ckpt\n # TRANSFORMER_CKPT = \"/userhome/yuxian/train_logs/lm/wiki-103/fairseq_baseline/checkpoint_best.pt\"\n TRANSFORMER_CKPT = args.ckpt\n # target save path\n # OUT_CKPT = \"/data/yuxian/wiki103-yunnao/baseline/checkpoint_best_qt128.pt\"\n OUT_CKPT = args.out\n # quantizer generated by faiss\n QUANTIZER = args.quantizer\n\n state = checkpoint_utils.load_checkpoint_to_cpu(TRANSFORMER_CKPT)\n\n # load quantizer\n QUANTIZER = TorchPQCodec(index=faiss.read_index(QUANTIZER))\n state[\"model\"][\"decoder.tgt_quantizer.centroids_torch\"] = QUANTIZER.centroids_torch\n state[\"model\"][\"decoder.tgt_quantizer.norm2_centroids_torch\"] = QUANTIZER.norm2_centroids_torch\n state[\"model\"][\"decoder.tgt_quantizer.sdc_table_torch\"] = QUANTIZER.sdc_table_torch\n if QUANTIZER.pre_torch:\n state[\"model\"][\"decoder.tgt_quantizer.A\"] = QUANTIZER.A\n state[\"model\"][\"decoder.tgt_quantizer.b\"] = QUANTIZER.b\n\n state[\"args\"].graph = True\n\n os.makedirs(os.path.dirname(OUT_CKPT), exist_ok=True)\n torch.save(state, OUT_CKPT)\n print(f\"Saved ckpt to {OUT_CKPT}\")\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.save"
]
] |
charlesjsun/rjax
|
[
"8b56f78b34f593f442db3cc0315a3b6f22191442"
] |
[
"rjax/datasets/d4rl/sequence_dataset.py"
] |
[
"import collections\nfrom typing import Callable, Optional, Tuple\n\nimport d4rl\nimport gym\nimport numpy as np\n\nfrom rjax.datasets.d4rl.utils import (get_preprocessing_fn,\n sequence_dataset_iter)\nfrom rjax.datasets.dataset import Dataset\n\nBatch = collections.namedtuple(\n 'Batch',\n [\n 'observations', # [ batch_size x (seq_len + 1) x obs_dim ]\n 'actions', # [ batch_size x seq_len x act_dim ]\n 'rewards', # [ batch_size x seq_len ]\n 'terminals', # [ batch_size x seq_len ]\n 'pad_masks', # [ batch_size x (seq_len + 1) ]\n ])\n\n\nclass D4RLSequenceDataset(Dataset):\n\n def __init__(\n self,\n env_name: str,\n env: gym.Env,\n seq_len: int = 15,\n front_pad: int = 0,\n back_pad: int = 0,\n ):\n\n self.env = env\n self.seq_len = seq_len\n self.front_pad = front_pad\n self.back_pad = back_pad\n\n dataset = self.env.get_dataset()\n dataset = get_preprocessing_fn(env_name)(dataset)\n\n dataset_iter = sequence_dataset_iter(self.env, dataset)\n\n self.path_lengths = []\n self.observations_segmented = []\n self.actions_segmented = []\n self.rewards_segmented = []\n self.terminals_segmented = []\n self.pad_masks_segmented = []\n\n for data in dataset_iter:\n assert data[\"steps\"] == data[\"rewards\"].shape[0]\n assert data[\"steps\"] + 1 == data[\"observations\"].shape[0]\n self.path_lengths.append(data[\"steps\"])\n self.observations_segmented.append(data[\"observations\"].astype(\n np.float32))\n self.actions_segmented.append(data[\"actions\"].astype(np.float32))\n self.rewards_segmented.append(data[\"rewards\"].astype(np.float32))\n self.terminals_segmented.append(data[\"terminals\"].astype(\n np.float32))\n self.pad_masks_segmented.append(\n np.ones(data[\"observations\"].shape[0], dtype=np.float32))\n\n self.n_trajectories = len(self.path_lengths)\n\n # padding\n for i in range(self.n_trajectories):\n self.path_lengths[\n i] = self.front_pad + self.path_lengths[i] + self.back_pad\n self.observations_segmented[i] = np.pad(\n self.observations_segmented[i],\n ((self.front_pad, self.back_pad), (0, 0)),\n constant_values=0.0)\n self.actions_segmented[i] = np.pad(\n self.actions_segmented[i],\n ((self.front_pad, self.back_pad), (0, 0)),\n constant_values=0.0)\n self.rewards_segmented[i] = np.pad(\n self.rewards_segmented[i], ((self.front_pad, self.back_pad), ),\n constant_values=0.0)\n self.terminals_segmented[i] = np.pad(\n self.terminals_segmented[i],\n ((self.front_pad, self.back_pad), ),\n constant_values=0.0)\n self.pad_masks_segmented[i] = np.pad(\n self.pad_masks_segmented[i],\n ((self.front_pad, self.back_pad), ),\n constant_values=0.0)\n\n # generate dataset indices\n indices = []\n for path_ind, length in enumerate(self.path_lengths):\n end = length - self.seq_len + 1\n for i in range(end):\n indices.append((path_ind, i, i + self.seq_len))\n\n self.indices = np.array(indices)\n self._size = len(self.indices)\n\n @property\n def size(self) -> int:\n return self._size\n\n def __getitem__(\n self, idx: int\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n path_ind, start_ind, end_ind = self.indices[idx]\n\n # [ (seq_len + 1) x obs_dim ]\n observations = self.observations_segmented[path_ind][\n start_ind:end_ind + 1]\n # [ seq_len x act_dim ]\n actions = self.actions_segmented[path_ind][start_ind:end_ind]\n # [ seq_len ]\n rewards = self.rewards_segmented[path_ind][start_ind:end_ind]\n # [ seq_len ]\n terminals = self.terminals_segmented[path_ind][start_ind:end_ind]\n # [ (seq_len + 1) ]\n pad_masks = self.pad_masks_segmented[path_ind][start_ind:end_ind + 
1]\n\n return observations, actions, rewards, terminals, pad_masks\n\n def get_random_batch(self, batch_size: int) -> Batch:\n indices = np.random.randint(self.size, size=batch_size)\n\n observations, actions, rewards, terminals, pad_masks = zip(\n *[self[idx] for idx in indices])\n\n return Batch(observations=np.stack(observations, axis=0),\n actions=np.stack(actions, axis=0),\n rewards=np.stack(rewards, axis=0),\n terminals=np.stack(terminals, axis=0),\n pad_masks=np.stack(pad_masks, axis=0))\n"
] |
[
[
"numpy.pad",
"numpy.array",
"numpy.ones",
"numpy.stack",
"numpy.random.randint"
]
] |
lsteffenel/IntroPython
|
[
"5a2a6aacbb850543c264e1f87eedc3955526b8c3"
] |
[
"solutions/random_number.py"
] |
[
"import numpy as np\n\ny = np.random.poisson(lam=5, size=1000)\nbins = np.bincount(y)\nbins.argmax()\n"
] |
[
[
"numpy.bincount",
"numpy.random.poisson"
]
] |
rohitrnath/Monocular-Depth-Estimation-and-Segmentation
|
[
"549aca8c581b76125349d63fb55944d7133563e3"
] |
[
"utils/global_fun.py"
] |
[
"from __future__ import print_function\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import StepLR\nimport torch.optim as optim\nimport copy\nfrom torchsummary import summary\nfrom torchvision import datasets, transforms\nimport numpy as np\n\ndef train_model(model, device, train_loader, optimizer, scheduler, epoch,train_losses,train_acc,criteria, store_mode ='epoch', doL1 = 0,doL2 = 0,LAMBDA = 0):\n print('L1=',doL1,';L2=',doL2,';LAMBDA=',LAMBDA,'epoch=',epoch)\n model.train()\n pbar = tqdm(train_loader)\n correct = 0\n processed = 0\n for batch_idx, (data, target) in enumerate(pbar):\n # get samples\n data, target = data.to(device), target.to(device)\n #print('data=',len(data),';target=',len(target))\n\n # Init\n optimizer.zero_grad()\n # In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch accumulates the gradients on subsequent backward passes. \n # Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly.\n\n # Predict\n y_pred = model(data)\n\n # Calculate loss\n #print('y_pred=',len(y_pred.dataset),'target=',len(target.dataset))\n #loss = F.nll_loss(y_pred, target)\n #criteria = nn.CrossEntropyLoss()\n loss = criteria(y_pred, target) \n reg_loss=0\n if (doL1 == 1):\n for p in model.parameters(): \n reg_loss += torch.sum(torch.abs(p.data))\n if (doL2 == 1):\n for p in model.parameters():\n reg_loss += torch.sum(p.data.pow(2)) \n \n loss+=LAMBDA*reg_loss\n \n train_losses.append(loss)\n\n # Backpropagation\n loss.backward()\n optimizer.step()\n \n #One Cyclec LR step\n scheduler.step()\n\n # Update pbar-tqdm\n \n pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n processed += len(data)\n if store_mode == 'mini_batch': # Store loss and accuracy\n batch_accuracy = 100 * correct / processed\n if not train_losses is None:\n train_losses.append(loss.item())\n if not train_acc is None:\n train_acc.append(batch_accuracy)\n \n if store_mode == 'epoch': # Store loss and accuracy\n accuracy = 100 * correct / processed\n if not train_losses is None:\n train_losses.append(loss.item())\n if not accuracies is None:\n train_acc.append(accuracy)\n\n pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')\n #train_acc.append(100*correct/processed)\n \n\ndef test_model(model, device, test_loader,test_losses,test_acc,criteria, correct_samples, incorrect_samples, sample_count=30, last_epoch=False):\n model.eval()\n test_loss = 0\n correct = 0\n #criteria = nn.CrossEntropyLoss()\n \n with torch.no_grad():\n for data, target in test_loader:\n img_batch = data\n data, target = data.to(device), target.to(device)\n #print('data=',len(data),';target=',len(target))\n output = model(data)\n #test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n #test_loss += criteria(output, target).item()\n test_loss += criteria(output, target).item()\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n result = pred.eq(target.view_as(pred))\n if last_epoch:\n #print('last_epoch=',last_epoch)\n for i in range(len(list(result))):\n if not list(result)[i] and len(incorrect_samples) < sample_count:\n incorrect_samples.append({\n 'prediction': list(pred)[i],\n 'label': list(target.view_as(pred))[i],\n 'image': 
img_batch[i]\n                            \n                        })\n                    elif list(result)[i] and len(correct_samples) < sample_count:\n                        correct_samples.append({\n                            'prediction': list(pred)[i],\n                            'label': list(target.view_as(pred))[i],\n                            'image': img_batch[i]\n                            \n                        })\n            correct += result.sum().item()\n            #correct += pred.eq(target.view_as(pred)).sum().item()\n    test_loss /= len(test_loader.dataset)\n    test_losses.append(test_loss)\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n        test_loss, correct, len(test_loader.dataset), \n        100. * correct / len(test_loader.dataset)))\n    test_acc.append(100. * correct / len(test_loader.dataset))  \n    return test_loss\n#Global functions\ndef show_summary(model,input_size = (1, 28, 28)):\n    summary(model.m_model, input_size)\n    \ndef run_model(model, device, criteria = F.nll_loss, doL1 = 0, doL2 = 0, LAMBDA = 0, EPOCHS = 40,start=0):\n    #scheduler = StepLR(optimizer, step_size=8, gamma=0.1)\n    for epoch in range(EPOCHS):\n        print(\"EPOCH:\", (start+epoch))\n        train_model(model.m_model, device, model.m_train_loader, model.m_optimizer, model.m_scheduler, epoch,model.m_train_losses,model.m_train_acc,criteria,doL1=doL1,doL2=doL2,LAMBDA=LAMBDA)\n        test_model(model.m_model, device, model.m_test_loader,model.m_test_losses,model.m_test_acc,criteria, model.m_correct_samples, model.m_incorrect_samples)\n\ndef run_model_with_entropy(model, device, criteria = nn.CrossEntropyLoss(), doL1 = 0, doL2 = 0, LAMBDA = 0, EPOCHS = 40,start=0):\n    #scheduler = StepLR(optimizer, step_size=8, gamma=0.1)\n    for epoch in range(EPOCHS):\n        print(\"EPOCH:\", (start+epoch))\n        train_model(model.m_model, device, model.m_train_loader, model.m_optimizer, model.m_scheduler, epoch,model.m_train_losses,model.m_train_acc,criteria,doL1=doL1,doL2=doL2,LAMBDA=LAMBDA)\n        #model.m_scheduler.step()\n        last_epoch = False\n        if(epoch == (EPOCHS-1)):\n            last_epoch = True\n            \n        test_loss = test_model(model.m_model, device, model.m_test_loader, model.m_test_losses, model.m_test_acc, model.m_criterion, model.m_correct_samples, model.m_incorrect_samples, 30, last_epoch)\n        \n        #model.m_scheduler.step(test_loss) #Used for LR Plateou\n        \ndef run_model_with_entropy_A11(model, device, criteria = nn.CrossEntropyLoss(), doL1 = 0, doL2 = 0, LAMBDA = 0, EPOCHS = 40,start=0):\n    #scheduler = StepLR(optimizer, step_size=8, gamma=0.1)\n    for epoch in range(EPOCHS):\n        print(\"EPOCH:\", (start+epoch))\n        print('\\nOneCyclicLR: steps: {}/{}, LR: {:.4f}, Momentum: {:.4f}%\\n'.format(model.m_scheduler.last_step,\n            model.m_scheduler.num_steps, model.m_scheduler.get_lr(), model.m_scheduler.get_momentum() ))\n        train_model(model.m_model, device, model.m_train_loader, model.m_optimizer, model.m_scheduler, epoch,model.m_train_losses,model.m_train_acc,criteria,doL1=doL1,doL2=doL2,LAMBDA=LAMBDA)\n        \n        last_epoch = False\n        if(epoch == (EPOCHS-1)):\n            last_epoch = True\n            \n        test_loss = test_model(model.m_model, device, model.m_test_loader, model.m_test_losses, model.m_test_acc, model.m_criterion, model.m_correct_samples, model.m_incorrect_samples, 30, last_epoch)\n\nimport matplotlib.pyplot as plt\ndef draw_accuracy_graph(model,metric,single_plot= True):\n    #print('train_losses=',len(train_losses))\n    #print('test_losses=',len(test_losses))\n    if(single_plot == True):\n        fig = plt.figure(figsize=(12, 6))\n        plt.plot(model.m_train_acc,color='blue',label='Training Accuracy')\n        plt.plot(model.m_test_acc,color='green',label='Test Accuracy')\n        #plt.set_title(\"Training and validation accuracy\")\n        plt.legend(loc=\"center\")\n        plt.title(f'{metric}')\n        # Label axes\n        plt.xlabel('Epoch')\n        plt.ylabel(metric)\n        return\n\ndef draw_accuracy_loss_change_graps(model_0,model_l1,model_l2,model_l1_l2, single_plot= 
True):\n    fig, axs = plt.subplots(2,2,figsize=(30,20))\n    #print('train_losses=',len(train_losses))\n    #print('test_losses=',len(test_losses))\n    if(single_plot == True):\n        fig = plt.figure(figsize=(12, 6))\n        plt.plot(model_l1_l2.m_train_acc,color='blue',label='Both L1 and L2 Regularization')\n        plt.plot(model_l1_l2.m_test_acc,color='green',label='Both L1 and L2 Regularization')\n        plt.title(\"Training and validation accuracy\")\n        plt.legend(loc=\"center\")\n        return\n        \n    axs[0,0].plot(model_0.m_test_losses,color='black',label='No Regularization')\n    axs[0,0].plot(model_l1.m_test_losses,color='red',label='L1 Regularization')\n    axs[0,0].plot(model_l2.m_test_losses,color='blue',label='L2 Regularization')\n    axs[0,0].plot(model_l1_l2.m_test_losses,color='green',label='Both L1 and L2 Regularization')\n    axs[0,0].set_title(\"Validation Loss Change\")\n    axs[0,0].legend(loc=\"center\")\n\n    axs[0,1].plot(model_0.m_test_acc,color='black',label='No Regularization')\n    axs[0,1].plot(model_l1.m_test_acc,color='red',label='L1 Regularization')\n    axs[0,1].plot(model_l2.m_test_acc,color='blue',label='L2 Regularization')\n    axs[0,1].plot(model_l1_l2.m_test_acc,color='green',label='Both L1 and L2 Regularization')\n    axs[0,1].set_title(\"Validation Accuracy Change\")\n    axs[0,1].legend(loc=\"center\")\n\n    axs[1,0].plot(model_0.m_train_losses,color='black',label='No Regularization')\n    axs[1,0].plot(model_l1.m_train_losses,color='red',label='L1 Regularization')\n    axs[1,0].plot(model_l2.m_train_losses,color='blue',label='L2 Regularization')\n    axs[1,0].plot(model_l1_l2.m_train_losses,color='green',label='Both L1 and L2 Regularization')\n    axs[1,0].set_title(\"Training Loss Change\")\n    axs[1,0].legend(loc=\"center\")\n\n    axs[1,1].plot(model_0.m_train_acc,color='black',label='No Regularization')\n    axs[1,1].plot(model_l1.m_train_acc,color='red',label='L1 Regularization')\n    axs[1,1].plot(model_l2.m_train_acc,color='blue',label='L2 Regularization')\n    axs[1,1].plot(model_l1_l2.m_train_acc,color='green',label='Both L1 and L2 Regularization')\n    axs[1,1].set_title(\"Training Accuracy Change\")\n    axs[1,1].legend(loc=\"center\")\n\ndef unnormalize(image, mean, std, out_type='array'):\n    \"\"\"Un-normalize a given image.\n    \n    Args:\n        image: A 3-D ndarray or 3-D tensor.\n            If tensor, it should be in CPU.\n        mean: Mean value. It can be a single value or\n            a tuple with 3 values (one for each channel).\n        std: Standard deviation value. 
It can be a single value or\n a tuple with 3 values (one for each channel).\n out_type: Out type of the normalized image.\n If `array` then ndarray is returned else if\n `tensor` then torch tensor is returned.\n \"\"\"\n\n if type(image) == torch.Tensor:\n image = np.transpose(image.clone().numpy(), (1, 2, 0))\n \n normal_image = image * std + mean\n if out_type == 'tensor':\n return torch.Tensor(np.transpose(normal_image, (2, 0, 1)))\n elif out_type == 'array':\n return normal_image\n return None # No valid value given\n\n\ndef to_numpy(tensor):\n \"\"\"Convert 3-D torch tensor to a 3-D numpy array.\n Args:\n tensor: Tensor to be converted.\n \"\"\"\n return np.transpose(tensor.clone().numpy(), (1, 2, 0))\n\n\ndef to_tensor(ndarray):\n \"\"\"Convert 3-D numpy array to 3-D torch tensor.\n Args:\n ndarray: Array to be converted.\n \"\"\"\n return torch.Tensor(np.transpose(ndarray, (2, 0, 1)))\n\ndef plot_accuracy_loss_graphs(data, metric):\n \"\"\"Plot accuracy graph or loss graph.\n Args:\n data (list or dict): If only single plot then this is a list, else\n for multiple plots this is a dict with keys containing.\n the plot name and values being a list of points to plot\n metric (str): Metric name which is to be plotted. Can be either\n loss or accuracy.\n \"\"\"\n\n single_plot = True\n if type(data) == dict:\n single_plot = False\n \n # Initialize a figure\n fig = plt.figure(figsize=(7, 5))\n\n # Plot data\n if single_plot:\n plt.plot(data)\n else:\n plots = []\n for value in data.values():\n plots.append(plt.plot(value)[0])\n\n # Set plot title\n plt.title(f'{metric} Change')\n\n # Label axes\n plt.xlabel('Epoch')\n plt.ylabel(metric)\n\n if not single_plot: # Set legend\n location = 'upper' if metric == 'Loss' else 'lower'\n plt.legend(\n tuple(plots), tuple(data.keys()),\n loc=f'{location} right',\n shadow=True,\n prop={'size': 15}\n )\n\n # Save plot\n fig.savefig(f'{metric.lower()}_change.png')\n\ndef plot_predictions(data, classes, plot_title, plot_path):\n \"\"\"Display data.\n Args:\n data (list): List of images, model predictions and ground truths.\n Images should be numpy arrays.\n classes (list or tuple): List of classes in the dataset.\n plot_title (str): Title for the plot.\n plot_path (str): Complete path for saving the plot.\n \"\"\"\n\n # Initialize plot\n row_count = -1\n fig, axs = plt.subplots(5, 5, figsize=(10, 10))\n fig.suptitle(plot_title)\n\n for idx, result in enumerate(data):\n\n # If 25 samples have been stored, break out of loop\n if idx > 24:\n break\n \n label = result['label'].item()\n prediction = result['prediction'].item()\n\n # Plot image\n if idx % 5 == 0:\n row_count += 1\n axs[row_count][idx % 5].axis('off')\n axs[row_count][idx % 5].set_title(f'Label: {classes[label]}\\nPrediction: {classes[prediction]}')\n axs[row_count][idx % 5].imshow(result['image'])\n \n # Set spacing\n fig.tight_layout()\n fig.subplots_adjust(top=0.88)\n\n # Save image\n fig.savefig(f'{plot_path}', bbox_inches='tight')\n\ndef save_and_show_result(classes, correct_pred=None, incorrect_pred=None, path=None):\n \"\"\"Display network predictions.\n Args:\n classes (list or tuple): List of classes in the dataset.\n correct_pred (list, optional): Contains correct model predictions and labels.\n (default: None)\n incorrect_pred (list, optional): Contains incorrect model predictions and labels.\n (default: None)\n path (str, optional): Path where the results will be saved.\n (default: None)\n \"\"\"\n\n # Create directories for saving predictions\n if path is None:\n path = 
os.path.join(\n            os.path.dirname(os.path.abspath(__file__)), 'predictions'\n        )\n    if not os.path.exists(path):\n        os.makedirs(path)\n    \n    if not correct_pred is None:  # Plot correct predictions\n        plot_predictions(\n            correct_pred, classes, 'Correct Predictions', f'{path}/correct_predictions.png'\n        )\n\n    if not incorrect_pred is None:  # Plot incorrect predictions\n        plot_predictions(\n            incorrect_pred, classes, '\\nIncorrect Predictions', f'{path}/incorrect_predictions.png'\n        )"
] |
[
[
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.set_title",
"torch.abs",
"numpy.transpose",
"matplotlib.pyplot.ylabel",
"torch.nn.CrossEntropyLoss"
]
] |
aweers/tensorflow
|
[
"6aa83398ab03bfae822f36772757097bcb98b6ed"
] |
[
"tensorflow/python/autograph/operators/control_flow.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Control flow statements: loops, conditionals, etc.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.autograph.operators import py_builtins\nfrom tensorflow.python.autograph.operators import special_values\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.util import nest\n\n\ndef for_stmt(iter_, extra_test, body, init_state):\n \"\"\"Functional form of a for statement.\n\n The loop operates on a state, which includes all symbols that are\n variant across loop iterations, excluding the iterate as well as the\n variables local to the loop.\n\n For example, given the loop below that calculates the geometric and\n arithmetic means or some numbers:\n\n geo_mean = 1\n arith_mean = 0\n for i in range(n):\n a = numbers[i]\n geo_mean *= a\n arith_mean += a\n\n The state is represented by the variables geo_mean and arith_mean. The\n argument for initial_state may contain the tuple (1, 0), the body will\n include the arguments geo_mean and arith_mean and will return a tuple\n representing the new values for geo_mean and respectively arith_mean.\n\n Args:\n iter_: The entity being iterated over.\n extra_test: Callable with the state as arguments, and boolean return type.\n An additional loop condition.\n body: Callable with the iterate and the state as arguments, and\n state as return type. The actual loop body.\n init_state: Tuple containing the initial state.\n\n Returns:\n Tuple containing the final state.\n \"\"\"\n if tensor_util.is_tensor(iter_):\n return _known_len_for_stmt(iter_, extra_test, body, init_state)\n elif isinstance(iter_, dataset_ops.DatasetV2):\n # Check for undefined symbols and report an error. This prevents the error\n # from propagating into the TF runtime. 
We have more information here and\n # can provide a clearer error message.\n undefined_symbols = _filter_undefined(init_state)\n\n if undefined_symbols:\n raise ValueError(\n 'TensorFlow requires that the following symbols must be initialized '\n 'to a Tensor, Variable or TensorArray before the loop: {}'\n .format(tuple(undefined_symbols)))\n\n return _dataset_for_stmt(iter_, extra_test, body, init_state)\n else:\n return _py_for_stmt(iter_, extra_test, body, init_state)\n\n\ndef _py_for_stmt(iter_, extra_test, body, init_state):\n \"\"\"Overload of for_stmt that executes a Python for loop.\"\"\"\n state = init_state\n for target in iter_:\n if extra_test is not None and not extra_test(*state):\n break\n state = body(target, *state)\n return state\n\n\ndef _known_len_for_stmt(iter_, extra_test, body, init_state):\n \"\"\"Overload of for_stmt that iterates over objects that admit a length.\"\"\"\n n = py_builtins.len_(iter_)\n\n def while_body(iterate_index, *state):\n iterate = iter_[iterate_index]\n new_state = body(iterate, *state)\n\n state = (iterate_index + 1,)\n if new_state:\n state += new_state\n\n return state\n\n def while_cond(iterate_index, *state):\n if extra_test is not None:\n return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))\n return iterate_index < n\n\n results = while_stmt(\n while_cond,\n while_body,\n init_state=(0,) + init_state,\n extra_deps=(iter_,),\n opts=dict(maximum_iterations=n))\n\n # Dropping the iteration index because it's not syntactically visible.\n # TODO(mdan): Don't.\n if isinstance(results, (tuple, list)):\n assert len(results) >= 1 # Has at least the iterate.\n if len(results) > 1:\n results = results[1:]\n else:\n results = ()\n\n return results\n\n\ndef _dataset_for_stmt(ds, extra_test, body, init_state):\n \"\"\"Overload of for_stmt that iterates over TF Datasets.\"\"\"\n\n if extra_test is not None:\n raise NotImplementedError(\n 'break and return statements are not yet supported in '\n 'for/Dataset loops.')\n\n def reduce_body(state, iterate):\n new_state = body(iterate, *state)\n return new_state\n\n if init_state:\n return ds.reduce(init_state, reduce_body)\n\n # Workaround for Datset.reduce not allowing empty state tensors - create\n # a dummy state variable that remains unused.\n def reduce_body_with_dummy_state(state, iterate):\n reduce_body((), iterate)\n return state\n ds.reduce((constant_op.constant(0),), reduce_body_with_dummy_state)\n return ()\n\n\ndef while_stmt(test, body, init_state, extra_deps, opts=None):\n \"\"\"Functional form of a while statement.\n\n The loop operates on a so-called state, which includes all symbols that are\n variant across loop iterations. In what follows we refer to state as either\n a tuple of entities that represent an actual state, or a list of arguments\n of the corresponding types.\n\n Args:\n test: Callable with the state as arguments, and boolean return type.\n The loop condition.\n body: Callable with the state as arguments, and state as return type.\n The actual loop body.\n init_state: Tuple containing the initial state.\n extra_deps: Tuple containing additional entities on which the loop may\n depend, such as loop invariants referenced by test. 
Used\n exclusively for dispatch control.\n opts: Optional dict of extra loop parameters.\n\n Returns:\n Tuple containing the final state.\n \"\"\"\n # TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.\n # That could be something as simple as a collection of dispatch rules, with\n # some prioritization.\n if any(tensor_util.is_tensor(v) for v in nest.flatten(extra_deps)):\n # Check for undefined symbols and report an error. This prevents the error\n # from propagating into the TF runtime. We have more information here and\n # can provide a clearer error message.\n undefined_symbols = _filter_undefined(init_state)\n\n if undefined_symbols:\n raise ValueError(\n 'TensorFlow requires that the following symbols must be initialized '\n 'to a Tensor, Variable or TensorArray before the loop: {}'\n .format(tuple(undefined_symbols)))\n return _tf_while_stmt(test, body, init_state, opts)\n else:\n return _py_while_stmt(test, body, init_state, opts)\n\n\ndef _filter_undefined(all_symbols):\n \"\"\"Returns the names of undefined symbols contained in all_symbols.\"\"\"\n undefined_symbols = [\n s.symbol_name\n for s in all_symbols\n if special_values.is_undefined(s)\n ]\n return undefined_symbols\n\n\ndef _tf_while_stmt(test, body, init_state, opts):\n \"\"\"Overload of while_stmt that stages a TF while_stmt.\"\"\"\n if opts is None:\n opts = {}\n\n # Non-v2 while_loop unpacks the results when there is only one return value.\n # This enforces consistency across versions.\n opts['return_same_structure'] = True\n\n retval = control_flow_ops.while_loop(test, body, init_state, **opts)\n return retval\n\n\ndef _py_while_stmt(test, body, init_state, opts):\n \"\"\"Overload of while_stmt that executes a Python while loop.\"\"\"\n del opts\n state = init_state\n while test(*state):\n state = body(*state)\n return state\n\n\ndef if_stmt(cond, body, orelse):\n \"\"\"Functional form of an if statement.\n\n Args:\n cond: Boolean.\n body: Callable with no arguments, and outputs of the positive (if) branch\n as return type.\n orelse: Callable with no arguments, and outputs of the negative (else)\n branch as return type.\n\n Returns:\n Tuple containing the statement outputs.\n \"\"\"\n if tensor_util.is_tensor(cond):\n return tf_if_stmt(cond, body, orelse)\n else:\n return _py_if_stmt(cond, body, orelse)\n\n\ndef tf_if_stmt(cond, body, orelse):\n \"\"\"Overload of if_stmt that stages a TF cond.\"\"\"\n protected_body = _wrap_in_protection_from_undefined(body, branch_name='if')\n protected_orelse = _wrap_in_protection_from_undefined(orelse,\n branch_name='else')\n\n return control_flow_ops.cond(cond, protected_body, protected_orelse)\n\n\ndef _wrap_in_protection_from_undefined(func, branch_name):\n \"\"\"Wraps function to raise useful error when it returns undefined symbols.\"\"\"\n def protected_func():\n \"\"\"Calls function and raises an error if undefined symbols are returned.\"\"\"\n results = func()\n undefined_symbols = None\n if isinstance(results, tuple):\n undefined_symbols = _filter_undefined(results)\n elif special_values.is_undefined(results):\n # Single return value\n undefined_symbols = results.symbol_name\n\n if undefined_symbols:\n message = ('The following symbols must also be initialized in the %s '\n 'branch: {}. 
Alternatively, you may initialize them before '\n 'the if statement.') % branch_name\n message = message.format(undefined_symbols)\n raise ValueError(message)\n return results\n return protected_func\n\n\ndef _py_if_stmt(cond, body, orelse):\n \"\"\"Overload of if_stmt that executes a Python if statement.\"\"\"\n return body() if cond else orelse()\n"
] |
[
[
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.autograph.operators.special_values.is_undefined",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.autograph.operators.py_builtins.len_"
]
] |
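Editorial note on the tensorflow record above: the functional loop forms thread every loop-variant symbol through an explicit state tuple, exactly as `_py_while_stmt` does. A minimal sketch of that convention in plain Python (illustrative only, not part of TensorFlow's public API):

# Imperative form:  i, total = 0, 0; while i < 5: total += i; i += 1
def test(i, total):
    return i < 5

def body(i, total):
    return i + 1, total + i

state = (0, 0)
while test(*state):      # mirrors _py_while_stmt(test, body, init_state, opts)
    state = body(*state)
assert state == (5, 10)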
kingium/PatternRecognitionForUndergrads
|
[
"5cd08f3a260fae4a7edaf71599433e93484863b0"
] |
[
"Project/src3/pca.py"
] |
[
"import numpy as np\nimport scipy\nimport sklearn.decomposition\nfrom sklearn.manifold import TSNE\nimport os\n\nprint('loading')\n\ndat = np.load('../data2/data2.npz')\nX_test = dat['X_test']\nX_train = dat['X_train']\nY_train = dat['Y_train']\ndat = []\n\nprint('normalizing')\n\nmean = np.mean(X_train, axis=0)\nstd = np.std(X_train, axis=0)\nx = np.divide(np.subtract(X_train, mean), std)\n\nprint('PCA_2')\n\nx_pca = sklearn.decomposition.PCA(n_components=2).fit_transform(x)\nnp.savez('pca.npz', x=x_pca)\n\n"
] |
[
[
"numpy.load",
"numpy.mean",
"numpy.std",
"numpy.savez",
"numpy.subtract"
]
] |
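Editorial note: the manual normalization in pca.py above produces NaN/inf columns whenever a feature is constant (std of zero); the in-place guard added above is one fix, and a scaler-plus-PCA pipeline is the more idiomatic one. A sketch assuming scikit-learn, with random data standing in for X_train:

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X_train = np.random.rand(100, 20)                   # stand-in data
pipe = make_pipeline(StandardScaler(), PCA(n_components=2))
x_pca = pipe.fit_transform(X_train)                 # scale, then project
assert x_pca.shape == (100, 2)

StandardScaler sets the scale of zero-variance columns to 1 internally, so no explicit divide-by-zero guard is needed.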
itanghiu/inaFaceGender
|
[
"849ecd9f6d483d6e274f48288825678b3e10da7b"
] |
[
"inaFaceGender/inaFaceGender.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License\n\n# Copyright (c) 2019 Ina (Zohra Rezgui & David Doukhan - http://www.ina.fr/)\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport dlib, cv2\nimport numpy as np\nimport pandas as pd\nimport os\nimport csv\nfrom .face_utils import extract_left_eye_center, extract_right_eye_center, get_rotation_matrix, crop_image\n#import sklearn.externals.joblib as extjoblib\nimport joblib as jblib\nfrom keras_vggface.vggface import VGGFace\nfrom keras_vggface import utils\nfrom keras.preprocessing import image \n\n\ndef write_to_video(frames_list, file_name, fps):\n \"\"\" \n Writes a list of frames into a video using MP4V encoding. \n \n Parameters: \n frames_list (list): List of the frames to write\n file_name (string): video output path\n fps (int) : Number of frames per second used in output video\n \"\"\"\n frame_width = frames_list[0].shape[0]\n frame_height = frames_list[0].shape[1]\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n out = cv2.VideoWriter(file_name,fourcc,\n fps, (frame_height,frame_width))\n\n for frame in frames_list:\n \n out.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n out.release()\n\n \ndef _get_bbox_pts(detections, face_idx, frame_width, frame_height):\n \n \n x1 = int(detections[0, 0, face_idx, 3] * frame_width)\n y1 = int(detections[0, 0, face_idx, 4] * frame_height)\n x2 = int(detections[0, 0, face_idx, 5] * frame_width)\n y2 = int(detections[0, 0, face_idx, 6] * frame_height)\n\n width = x2 - x1\n height = y2 - y1\n max_size = max(width, height)\n x1, x2 = max(0, (x1 + x2) // 2 - max_size // 2), min(frame_width, (x1 + x2) // 2 + max_size // 2)\n y1, y2 = max(0, (y1 + y2) // 2 - max_size // 2), min(frame_height, (y1 + y2) // 2 + max_size // 2)\n\n return x1, y1, x2, y2\n\n \n\ndef info2csv(df, csv_path):\n \"\"\" \n Write df into a csv. \n \n \n Parameters: \n df (DataFrame): Dataframe to be written to csv. \n csv_path (string): CSV output path. 
\n \n \"\"\"\n df.to_csv(csv_path, index=False)\n \ndef _label_decision_fun(x):\n\n if x>0:\n return 'm'\n else:\n return 'f'\n \ndef _smooth_labels(df):\n if len(df) == 0:\n df['smoothed_decision'] = []\n df['smoothed_label'] = []\n return df\n \n byfaceid = pd.DataFrame(df.groupby('faceid')['decision'].mean())\n byfaceid.rename(columns = {'decision':'smoothed_decision'}, inplace=True)\n new_df = df.merge(byfaceid, on= 'faceid')\n new_df['smoothed_label'] = new_df['smoothed_decision'].map(_label_decision_fun)\n\n return new_df\n\n\ndef _match_bbox_tracker(bbox, tracker):\n # bbox info\n x = bbox.left()\n y = bbox.top()\n width = bbox.width()\n height = bbox.height()\n\n x_center = x + 0.5 * width\n y_center = y + 0.5 * height\n\n # tracker info\n tracked_position = tracker.get_position()\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n \n t_x_center = t_x + 0.5 * t_w\n t_y_center = t_y + 0.5 * t_h\n \n return ( ( t_x <= x_center <= (t_x + t_w)) and \n ( t_y <= y_center <= (t_y + t_h)) and \n ( x <= t_x_center <= (x + width)) and \n ( y <= t_y_center <= (y + height)))\n\n \ndef is_tracker_pos_in_frame(tracker, frame):\n fheight, fwidth, _ = frame.shape\n pos = tracker.get_position()\n #print('tracker pos in frame', pos.right(), pos.left(), pos.top(), pos.bottom())\n return (pos.right() > 0) and (pos.left() < fwidth) and (pos.top() < fheight) and (pos.bottom() > 0)\n\n \n \nclass GenderVideo:\n \"\"\" \n This is a class regrouping all phases of a pipeline designed for gender classification from video.\n \n Attributes: \n face_detector: Face detection model. \n align_predictor: Face alignment model.\n gender_svm: Gender SVM classifier model.\n vgg_feature_extractor: VGGFace neural model used for feature extraction.\n threshold: quality of face detection considered acceptable, value between 0 and 1.\n \"\"\"\n def __init__(self, threshold = 0.65, verbose = False):\n\n \"\"\" \n The constructor for GenderVideo class. \n \n Parameters: \n threshold (float): quality of face detection considered acceptable, value between 0 and 1. \n \"\"\"\n p = os.path.dirname(os.path.realpath(__file__)) + '/models/'\n self.face_detector = cv2.dnn.readNetFromTensorflow(p + \"opencv_face_detector_uint8.pb\",\n p + \"opencv_face_detector.pbtxt\")\n self.align_predictor = dlib.shape_predictor(p +'shape_predictor_68_face_landmarks.dat')\n self.gender_svm = jblib.load(p + 'svm_classifier.joblib')\n self.vgg_feature_extractor = VGGFace(include_top = False, input_shape = (224, 224, 3), pooling ='avg')\n self.threshold = threshold\n self.verbose = verbose\n\n\n def _gender_from_face(self, img):\n \"\"\"\n Face is supposed to be aligned and cropped and resized to 224*224\n it is for regulard detection __call__\n we should check if it is done in the tracking implementation\n \"\"\"\n img = image.img_to_array(img)\n img = utils.preprocess_input(img, version=1)\n img = np.expand_dims(img, axis=0)\n features = self.vgg_feature_extractor.predict(img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n return label, decision_value\n\n \n def _process_tracked_face(self, cur_tracker, frame):\n \n ## There is no rotation in this function... 
results may be suspicious\n\n tracked_position = cur_tracker.get_position()\n #print('tracked position', tracked_position)\n #print('frame_shape', frame.shape)\n# print('cur_tracker', cur_tracker)\n\n t_x = int(tracked_position.left())\n t_y = int(tracked_position.top())\n t_w = int(tracked_position.width())\n t_h = int(tracked_position.height())\n \n# print('tracked face: id, x, y, w, h', face_id, t_x, t_y, t_w, t_h)\n\n copy_img = frame[max(0, t_y):(t_y + t_h), max(0, t_x):(t_x + t_w)]\n\n \n #print('simage shape', copy_img.shape)\n copy_img = cv2.resize(copy_img, (224,224))\n\n label, decision_value = self._gender_from_face(copy_img)\n \n return (t_x, t_y, t_w, t_h, label, decision_value)\n\n \n def align_and_crop_face(self, img, rect_list, desired_width, desired_height):\n \"\"\" \n Aligns and resizes face to desired shape.\n \n Parameters: \n img : Image to be aligned and resized.\n rect_list: Bounding box coordinates tuples.\n desired_width: output image width.\n desired_height: output image height.\n \n \n Returns: \n cropped_img: Image aligned and resized.\n left_eye: left eye position coordinates.\n right_eye: right eye position coordinates.\n \"\"\"\n \n for j, det in enumerate(rect_list):\n shape = self.align_predictor(img, det)\n left_eye = extract_left_eye_center(shape)\n right_eye = extract_right_eye_center(shape)\n M = get_rotation_matrix(left_eye, right_eye)\n\n rotated_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)\n cropped = crop_image(rotated_img, det)\n try:\n \n cropped_res = cv2.resize(cropped, (desired_width, desired_height))\n except:\n print('except in align_and_crop_faces', det)\n print(img.shape)\n cropped_res = cv2.resize(rotated_img,(desired_width, desired_height))\n cropped_img = cropped_res[:, :, ::-1]\n\n return cropped_img, left_eye, right_eye\n \n def detect_faces_from_image(self, img, desired_width,\n desired_height, bbox_scaling=1.1):\n \"\"\" \n Detect faces from an image\n \n Parameters: \n img (array): Image to detect faces from.\n desired_width (int): desired output width of the image.\n desired_height (int): desired output height of the image.\n bbox_scaling (float): scaling factor to the bounding box around the face.\n \n Returns: \n faces_data (list) : List containing :\n - the bounding box after scaling\n - image cropped around the face and resized\n - left eye coordinates\n - right eye coordinates\n - index of the face in the image \n - face detection confidence score\n \"\"\"\n \n n_face = 0\n faces_data = []\n\n frame_height = img.shape[0]\n frame_width = img.shape[1]\n blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), [104, 117, 123], True, False)\n self.face_detector.setInput(blob)\n detections = self.face_detector.forward()\n \n \n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > self.threshold:\n n_face += 1\n bbox = _get_bbox_pts(detections, i, frame_width, frame_height)\n x1, y1 = [int(i * abs(bbox_scaling//1 - bbox_scaling%1)) for i in bbox[:2]]\n x2, y2 = [int(i*bbox_scaling) for i in bbox[2:]]\n if x1 < x2 and y1 < y2:\n dets = [dlib.rectangle(x1, y1, x2, y2)]\n else:\n dets = [dlib.rectangle(0, 0, frame_width, frame_height)]\n\n \n face_img, left_eye, right_eye = self.align_and_crop_face(img, dets, desired_width,\n desired_height)\n \n face_data = [dets, face_img, left_eye, right_eye,\n 'face_%d' % n_face, confidence]\n faces_data.append(face_data)\n\n return faces_data\n \n\n\n def detect_with_tracking(self, video_path, k_frames, subsamp_coeff = 1, 
offset = -1):\n \"\"\"\n Pipeline for gender classification from videos using correlation filters based tracking (dlib's).\n \n Parameters: \n video_path (string): Path for input video.\n k_frames (int) : Number of frames for which continue tracking the faces without renewing face detection.\n subsamp_coeff (int) : only 1/subsamp_coeff frames will be processed\n offset (float) : Time in milliseconds to skip at the beginning of the video.\n \n Returns: \n info (DataFrame): A Dataframe with frame and face information (coordinates, decision function, smoothed and non smoothed labels)\n \"\"\"\n\n assert (k_frames % subsamp_coeff) == 0\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n \n current_face_id = 0\n \n face_trackers = {}\n confidence = {}\n\n info = []\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n \n while cap.isOpened() :\n ret, frame = cap.read()\n if not ret:\n break\n\n # skip frames until offset is reached or for subsampling reasons\n if (cap.get(cv2.CAP_PROP_POS_MSEC) < offset) or (cap.get(cv2.CAP_PROP_POS_FRAMES) % subsamp_coeff != 0):\n continue\n\n #if ((cap.get(cv2.CAP_PROP_POS_FRAMES)) % 1000 == 0) or True:\n # print(cap.get(cv2.CAP_PROP_POS_FRAMES))\n # print('dface trackers before update', face_trackers)\n \n \n # track faces in current frame\n face_ids_to_delete = []\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n for fid in face_trackers:\n tracking_quality = face_trackers[fid].update(frame)\n\n if (tracking_quality < 7) or (not is_tracker_pos_in_frame(face_trackers[fid], frame)):\n face_ids_to_delete.append(fid)\n\n for fid in face_ids_to_delete:\n face_trackers.pop(fid)\n #print('dface trackers after update', face_trackers)\n\n # detect faces every k frames\n if (cap.get(cv2.CAP_PROP_POS_FRAMES) % k_frames)==0:\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n bbox = element[0][0]\n confidence[ current_face_id ] = round(element[5], 3)\n\n matched_fid = None\n\n # match detected face to previously tracked faces\n for fid in face_trackers:\n ## TODO/BUG: several elements may match using this condition\n ## This loop should be debugged to use the closest match found,\n ## instead of the last match found\n if _match_bbox_tracker(bbox, face_trackers[fid]):\n matched_fid = fid\n\n # if detected face is not corresponding to previously tracked faces\n # create a new face id and a new face tracker\n # BUG: in the current implementation, the newly detected face bounding box\n # is not used to update the tracker bounding box\n if matched_fid is None:\n\n tracker = dlib.correlation_tracker()\n tracker.start_track(frame, bbox)\n\n face_trackers[ current_face_id ] = tracker\n current_face_id += 1\n #print('dface trackers after face detection ', face_trackers)\n\n # delete invalide face positions\n face_ids_to_delete = []\n for fid in face_trackers:\n if not is_tracker_pos_in_frame(face_trackers[fid], frame):\n face_ids_to_delete.append(fid)\n for fid in face_ids_to_delete:\n face_trackers.pop(fid) \n \n \n \n # process faces based on position found in trackers\n for fid in face_trackers:\n t_x, t_y, t_w, t_h, label, decision_value = self._process_tracked_face(face_trackers[fid], frame)\n t_bbox = dlib.rectangle(t_x, t_y, t_x+t_w, t_y+t_h)\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), fid, t_bbox, (t_w, t_h), label,\n decision_value, confidence[fid]\n ])\n\n\n\n cap.release()\n track_res = 
pd.DataFrame.from_records(info, columns = ['frame', 'faceid', 'bb', 'size','label', 'decision', 'conf'])\n info = _smooth_labels(track_res)\n \n return info\n\n\n def __call__(self, video_path, subsamp_coeff = 1 , offset = -1):\n\n \"\"\"\n Pipeline function for gender classification from videos without tracking.\n \n Parameters: \n video_path (string): Path for input video. \n subsamp_coeff (int) : only 1/subsamp_coeff frames will be processed\n offset (float) : Time in milliseconds to skip at the beginning of the video.\n \n \n Returns: \n info: A Dataframe with frame and face information (coordinates, decision function,labels..)\n \"\"\"\n \n cap = cv2.VideoCapture(video_path)\n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n \n info = []\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n \n # skip frames until offset is reached or for subsampling reasons\n if (cap.get(cv2.CAP_PROP_POS_MSEC) < offset) or (cap.get(cv2.CAP_PROP_POS_FRAMES) % subsamp_coeff != 0):\n continue\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n label, decision_value = self._gender_from_face(element[1])\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info\n \n\n\n"
] |
[
[
"pandas.DataFrame.from_records",
"numpy.expand_dims"
]
] |
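Editorial note: the inaFaceGender source above flags its own bug ("several elements may match using this condition ... use the closest match found, instead of the last match found"). A hedged sketch of the closest-match selection the TODO asks for; closest_matching_tracker is a hypothetical helper, not part of the library:

def closest_matching_tracker(bbox, face_trackers, match_fn):
    """Among trackers passing match_fn, return the fid whose tracked
    box center is nearest to the detected bbox center (or None)."""
    bx = bbox.left() + 0.5 * bbox.width()
    by = bbox.top() + 0.5 * bbox.height()
    best_fid, best_d2 = None, float('inf')
    for fid, tracker in face_trackers.items():
        if not match_fn(bbox, tracker):
            continue
        pos = tracker.get_position()
        tx = pos.left() + 0.5 * pos.width()
        ty = pos.top() + 0.5 * pos.height()
        d2 = (tx - bx) ** 2 + (ty - by) ** 2
        if d2 < best_d2:
            best_fid, best_d2 = fid, d2
    return best_fid

Inside detect_with_tracking this would replace the matching loop with: matched_fid = closest_matching_tracker(bbox, face_trackers, _match_bbox_tracker).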
OpenSourceBrain/BlueBrainProjectShowcase
|
[
"2cd8aa77342576c1a871f5d500d791f927444913"
] |
[
"NMC/parser/L23_PC_cADpyr229_1/run_RmpRiTau.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"Python script to run cell model\"\"\"\n\n\n\"\"\"\n/* Copyright (c) 2015 EPFL-BBP, All rights reserved.\n\nTHIS SOFTWARE IS PROVIDED BY THE BLUE BRAIN PROJECT ``AS IS''\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE BLUE BRAIN PROJECT\nBE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\nWHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\nOR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\nIF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThis work is licensed under a\nCreative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.\nTo view a copy of this license, visit\nhttp://creativecommons.org/licenses/by-nc-sa/4.0/legalcode or send a letter to\nCreative Commons, 171 Second Street, Suite 300,\nSan Francisco, California, 94105, USA.\n\"\"\"\n\n\"\"\"\n * @file run.py\n * @brief Run simulation using pyneuron\n * @author Werner Van Geit @ BBP\n * @date 2015\n\"\"\"\n\n# pylint: disable=C0325, W0212, F0401, W0612, F0401\n\nimport os\nimport neuron\nimport numpy\nimport sys\n\n\ndef create_cell():\n \"\"\"Create the cell model\"\"\"\n # Load morphology\n neuron.h.load_file(\"morphology.hoc\")\n # Load biophysics\n neuron.h.load_file(\"biophysics.hoc\")\n # Load main cell template\n neuron.h.load_file(\"template.hoc\")\n\n # Instantiate the cell from the template\n\n print(\"Loading cell cADpyr229_L23_PC_5ecbf9b163\")\n cell = neuron.h.cADpyr229_L23_PC_5ecbf9b163(0)\n return cell\n\n\ndef create_stimuli(cell, stim_start, stim_end, current_amplitude):\n \"\"\"Create the stimuli\"\"\"\n\n print('Attaching stimulus electrodes')\n\n stimuli = []\n\n iclamp = neuron.h.IClamp(0.5, sec=cell.soma[0])\n iclamp.delay = stim_start\n iclamp.dur = stim_end - stim_start\n iclamp.amp = current_amplitude\n print('Setting up step current clamp: '\n 'amp=%f nA, delay=%f ms, duration=%f ms' %\n (iclamp.amp, iclamp.delay, iclamp.dur))\n\n stimuli.append(iclamp)\n\n return stimuli\n\n\ndef create_recordings(cell):\n \"\"\"Create the recordings\"\"\"\n print('Attaching recording electrodes')\n\n recordings = {}\n\n recordings['time'] = neuron.h.Vector()\n recordings['soma(0.5)'] = neuron.h.Vector()\n\n recordings['time'].record(neuron.h._ref_t, 0.1)\n recordings['soma(0.5)'].record(cell.soma[0](0.5)._ref_v, 0.1)\n\n return recordings\n\n\ndef run_RmpRiTau_step(\n stim_start,\n stim_end,\n current_amplitude,\n plot_traces=None):\n \"\"\"Run \"\"\"\n\n cell = create_cell()\n stimuli = create_stimuli(cell, stim_start, stim_end, current_amplitude) # noqa\n recordings = create_recordings(cell)\n\n # Overriding default 30s simulation,\n neuron.h.tstop = stim_end + stim_start\n print(\n 'Setting simulation time to %.6g ms for the step current' %\n neuron.h.tstop)\n\n print('Setting initial voltage to -70 mV')\n neuron.h.v_init = -70\n\n neuron.h.stdinit()\n neuron.h.dt = 1000\n neuron.h.t = -1e9\n for _ in range(10):\n neuron.h.fadvance()\n\n neuron.h.t = 0\n neuron.h.dt = 0.025\n neuron.h.frecord_init()\n\n neuron.h.continuerun(3000)\n\n time = numpy.array(recordings['time'])\n soma_voltage = numpy.array(recordings['soma(0.5)'])\n\n recordings_dir = 'python_recordings'\n\n 
soma_voltage_filename = os.path.join(\n recordings_dir,\n 'soma_voltage_RmpRiTau_step.dat')\n numpy.savetxt(soma_voltage_filename, zip(time, soma_voltage))\n\n print('Soma voltage for RmpRiTau trace saved to: %s'\n % (soma_voltage_filename))\n\n if plot_traces:\n import pylab\n pylab.figure(facecolor='white')\n pylab.plot(recordings['time'], recordings['soma(0.5)'])\n pylab.xlabel('time (ms)')\n pylab.ylabel('Vm (mV)')\n pylab.gcf().canvas.set_window_title('RmpRiTau trace')\n\n return time, soma_voltage, stim_start, stim_end\n\n\ndef init_simulation():\n \"\"\"Initialise simulation environment\"\"\"\n\n neuron.h.load_file(\"stdrun.hoc\")\n neuron.h.load_file(\"import3d.hoc\")\n\n print('Loading constants')\n neuron.h.load_file('constants.hoc')\n\n\ndef analyse_RmpRiTau_trace(\n time,\n soma_voltage,\n stim_start,\n stim_end,\n current_amplitude):\n \"\"\"Analyse the output of the RmpRiTau protocol\"\"\"\n\n # Import the eFeature Extraction Library\n import efel\n\n # Prepare the trace data\n trace = {}\n trace['T'] = time\n trace['V'] = soma_voltage\n trace['stim_start'] = [stim_start]\n trace['stim_end'] = [stim_end]\n\n # Calculate the necessary eFeatures\n efel_results = efel.getFeatureValues(\n [trace],\n ['voltage_base', 'steady_state_voltage_stimend',\n 'decay_time_constant_after_stim'])\n\n voltage_base = efel_results[0]['voltage_base'][0]\n ss_voltage = efel_results[0]['steady_state_voltage_stimend'][0]\n dct = efel_results[0]['decay_time_constant_after_stim'][0]\n\n # Calculate input resistance\n input_resistance = float(ss_voltage - voltage_base) / current_amplitude\n\n rmpritau_dict = {}\n\n rmpritau_dict['Rmp'] = '%.6g' % voltage_base\n rmpritau_dict['Rmp_Units'] = 'mV'\n rmpritau_dict['Rin'] = '%.6g' % input_resistance\n rmpritau_dict['Rin_Units'] = 'MOhm'\n rmpritau_dict['Tau'] = '%.6g' % dct\n rmpritau_dict['Tau_Units'] = 'ms'\n\n print('Resting membrane potential is %s %s' %\n (rmpritau_dict['Rmp'], rmpritau_dict['Rmp_Units']))\n print('Input resistance is %s %s' %\n (rmpritau_dict['Rin'], rmpritau_dict['Rin_Units']))\n print('Time constant is %s %s' %\n (rmpritau_dict['Tau'], rmpritau_dict['Tau_Units']))\n\n import json\n\n with open('rmp_ri_tau.json', 'w') as rmpritau_json_file:\n json.dump(rmpritau_dict, rmpritau_json_file,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n\n\ndef main(plot_traces=False):\n \"\"\"Main\"\"\"\n\n # Import matplotlib to plot the traces\n if plot_traces:\n import matplotlib\n matplotlib.rcParams['path.simplify'] = False\n\n init_simulation()\n\n current_amplitude = -0.01\n stim_start = 1000\n stim_end = 2000\n\n time, soma_voltage, stim_start, stim_end = run_RmpRiTau_step(\n stim_start, stim_end, current_amplitude, plot_traces=plot_traces)\n\n analyse_RmpRiTau_trace(\n time,\n soma_voltage,\n stim_start,\n stim_end,\n current_amplitude)\n\n if plot_traces:\n import pylab\n pylab.show()\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n main(plot_traces=True)\n elif len(sys.argv) == 2 and sys.argv[1] == '--no-plots':\n main(plot_traces=False)\n else:\n raise Exception(\n \"Script only accepts one argument: --no-plots, not %s\" %\n str(sys.argv))\n"
] |
[
[
"numpy.array"
]
] |
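Editorial note: in run_RmpRiTau.py above, numpy.savetxt(soma_voltage_filename, zip(time, soma_voltage)) breaks under Python 3, where zip returns an iterator that numpy cannot coerce to a 2-D array. Stacking the columns first is the usual fix (a sketch with stand-in arrays, not a patch to the repository):

import numpy
time = numpy.linspace(0.0, 3000.0, 5)                # stand-ins for the
soma_voltage = numpy.full(5, -70.0)                  # recorded vectors
data = numpy.column_stack((time, soma_voltage))      # shape (N, 2)
numpy.savetxt('soma_voltage_RmpRiTau_step.dat', data)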
brett-daley/fast-dqn
|
[
"acf21e8bb193e52d73aa8e2d4e355957095bbd36"
] |
[
"run_fast_dqn.py"
] |
[
"from argparse import ArgumentParser\nfrom distutils.util import strtobool\nimport itertools\nimport os\nos.environ['TF_DETERMINISTIC_OPS'] = '1'\nfrom threading import Thread\nfrom queue import Queue\n\nimport numpy as np\n\nfrom run_dqn import DQNAgent, main, make_parser\nfrom fast_dqn.worker import Worker\n\n\nclass FastDQNAgent(DQNAgent):\n def __init__(self, make_env_fn, workers=8, concurrent=True, synchronize=True, **kwargs):\n assert workers >= 1\n if synchronize:\n assert workers != 1\n\n envs = tuple(make_env_fn(i) for i in range(workers))\n\n self.shared_states = np.empty([workers, *envs[0].observation_space.shape], dtype=np.float32)\n self.shared_qvalues = np.empty([workers, envs[0].action_space.n], dtype=np.float32)\n\n self._workers = tuple(Worker(i, env=envs[i], agent=self) for i in range(workers))\n\n super().__init__(make_env_fn, **kwargs)\n self._env = env = self._workers[0]._env\n\n if synchronize:\n # Target update frequency must be divisible by number of workers to\n # ensure workers use the correct network parameters when synchronized\n assert self._target_update_freq % workers == 0\n\n assert self._target_update_freq % self._train_freq == 0\n self._minibatches_per_epoch = self._target_update_freq // self._train_freq\n\n self._concurrent_training = concurrent\n self._synchronize = synchronize\n self._train_queue = Queue()\n Thread(target=self._train_loop, daemon=True).start()\n\n def run(self, duration):\n self._prepopulate_replay_memory()\n self._sync_everything()\n\n for t in itertools.count(start=1):\n if self._evaluate > 0 and t % self._evaluate == 1:\n self._sync_everything()\n mean_perf, std_perf = self.benchmark(epsilon=0.05, episodes=30)\n print(\"Benchmark (t={}): mean={}, std={}\".format(t - 1, mean_perf, std_perf))\n\n if t > duration:\n self._sync_everything()\n return\n\n if t % self._target_update_freq == 1:\n self._sync_everything()\n self._dqn.update_target_net()\n\n if self._concurrent_training:\n for _ in range(self._minibatches_per_epoch):\n self._train_queue.put_nowait(None)\n\n if not self._concurrent_training:\n if t % self._train_freq == 1:\n self._sync_workers()\n self._train_queue.put_nowait(None)\n self._train_queue.join()\n\n i = t % len(self._workers)\n if i == 1 and self._synchronize:\n self._sync_workers()\n # Compute the Q-values in a single minibatch\n # We use the target network here so we can train the main network in parallel\n self.shared_qvalues[:] = self._dqn.predict_target(self.shared_states).numpy()\n self._workers[i].update(t)\n\n def _train_loop(self):\n while True:\n self._train_queue.get()\n minibatch = self._replay_memory.sample(self._batch_size)\n self._dqn.train(*minibatch)\n self._train_queue.task_done()\n\n def _sync_workers(self):\n for w in self._workers:\n w.join()\n\n def _flush_workers(self):\n self._env.flush_monitor()\n for w in self._workers:\n for transition in w.flush():\n self._replay_memory.save(*transition)\n\n def _sync_everything(self):\n self._train_queue.join()\n self._sync_workers()\n self._flush_workers()\n\n def _step(self, epsilon):\n return self._workers[0]._step(epsilon)\n\n\nif __name__ == '__main__':\n parser = make_parser()\n parser.add_argument('--concurrent', type=strtobool, default=True)\n parser.add_argument('--workers', type=int, default=8)\n parser.add_argument('--synchronize', type=strtobool, default=True)\n kwargs = vars(parser.parse_args())\n main(FastDQNAgent, kwargs)\n"
] |
[
[
"numpy.empty"
]
] |
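Editorial note: FastDQNAgent above uses a Queue plus join() as a barrier between the acting thread and the background training thread. The synchronization idiom in isolation (a sketch, not the repository's code):

from queue import Queue
from threading import Thread

q = Queue()

def train_worker():
    while True:
        q.get()            # block until a training "ticket" arrives
        # one minibatch update would run here
        q.task_done()      # required for q.join() to unblock

Thread(target=train_worker, daemon=True).start()
for _ in range(4):         # enqueue a batch of updates
    q.put_nowait(None)
q.join()                   # returns only after all queued updates finish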
SKhan97/BigMart-Data-Analysis
|
[
"a0adf270ae1213ed6a53913b5e2cf7bd562d97c9"
] |
[
"model.py"
] |
[
"from sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_absolute_error\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ntrain_df = pd.read_csv('data/train_modified.csv')\ntest_df = pd.read_csv('data/test_modified.csv')\n\ntarget = 'Item_Outlet_Sales'\nIDcol = ['Item_Identifier','Outlet_Identifier']\n\n\ndef modelfit(alg, dtrain, dtest, predictors, target, IDcol, filename):\n # Fit the algorithm on the data\n alg.fit(dtrain[predictors], dtrain[target])\n\n # Predict training set:\n dtrain_predictions = alg.predict(dtrain[predictors])\n\n # Perform cross-validation:\n cv_score = cross_val_score(alg, dtrain[predictors], (dtrain[target]), cv=20,\n scoring='neg_mean_squared_error')\n cv_score = np.sqrt(np.abs(cv_score))\n print(\"\\nModel Report\")\n print(\"RMSE : %.4g\" % np.sqrt(metrics.mean_squared_error((dtrain[target]).values, dtrain_predictions)))\n print(\"CV Score : Mean - %.4g | Std - %.4g | Min - %.4g | Max - %.4g\" % (\n np.mean(cv_score), np.std(cv_score), np.min(cv_score), np.max(cv_score)))\n dtest[target] = alg.predict(dtest[predictors])\n\n\n#Linear regression model\nLR = LinearRegression(normalize=True)\n\npredictors = train_df.columns.drop(['Item_Outlet_Sales','Item_Identifier','Outlet_Identifier'])\nmodelfit(LR, train_df, test_df, predictors, target, IDcol, 'LR.csv')\n\ncoef1 = pd.Series(LR.coef_, predictors).sort_values()\ncoef1.plot(kind='bar', title='Model Coefficients')\nplt.show()\n\n#Decision tree for feature importance\nDT = DecisionTreeRegressor(max_depth=15, min_samples_leaf=100)\nmodelfit(DT, train_df, test_df, predictors, target, IDcol, 'DT.csv')\n\ncoef2 = pd.Series(DT.feature_importances_, predictors).sort_values(ascending=False)\ncoef2.plot(kind='bar', title='Feature Importances')\nplt.show()\n\n#XGBoost model\nmy_model = XGBRegressor(n_estimators=1000, learning_rate=0.05)\nmy_model.fit(train_df[predictors], train_df[target], early_stopping_rounds=5,\n eval_set=[(test_df[predictors], test_df[target])], verbose=False)\n\ntrain_df_predictions = my_model.predict(train_df[predictors])\npredictions = my_model.predict(test_df[predictors])\n\nprint(\"Mean Absolute Error : \" + str(mean_absolute_error(predictions, test_df[target])))\nprint(\"RMSE : %.4g\" % np.sqrt(metrics.mean_squared_error((train_df[target]).values, train_df_predictions)))\n"
] |
[
[
"numpy.max",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression",
"numpy.min",
"numpy.mean",
"numpy.std",
"sklearn.metrics.mean_absolute_error",
"numpy.abs",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.show",
"pandas.read_csv",
"pandas.Series",
"sklearn.model_selection.cross_val_score"
]
] |
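Editorial note: LinearRegression(normalize=True) in model.py above was deprecated in scikit-learn 1.0 and removed in 1.2. The replacement sklearn's deprecation notice points to is a scaler in a pipeline (not numerically identical, since the old flag used l2-norm scaling); a sketch:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression

LR = make_pipeline(StandardScaler(), LinearRegression())

With a pipeline, the fitted coefficients move to the final step, so the coefficient plot would read them as LR[-1].coef_ instead of LR.coef_.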
LuoYR/phono3py
|
[
"38d22f134be1cfd8891ecafe1196478668ba0490"
] |
[
"test/phonon3/test_imag_self_energy.py"
] |
[
"import numpy as np\n\ngammas = [\n 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0005412, 0.0005412, 0.0008843, 0.0191694, 0.0206316, 0.0206316,\n 0.0019424, 0.0019424, 0.0067566, 0.0548967, 0.0506115, 0.0506115,\n 0.0062204, 0.0062204, 0.0088148, 0.0426150, 0.0417223, 0.0417223,\n 0.0016263, 0.0016263, 0.0017293, 0.0279509, 0.0289259, 0.0289259,\n 0.0097926, 0.0097926, 0.0170092, 0.0438828, 0.0523105, 0.0523105,\n 0.0035542, 0.0035542, 0.0135109, 0.0623533, 0.0343746, 0.0343746,\n 0.0073140, 0.0073140, 0.0289659, 0.5006760, 0.5077932, 0.5077932,\n 0.0016144, 0.0016144, 0.0126326, 0.2731933, 0.2791702, 0.2791702,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0023304, 0.0026469, 0.0052513, 0.0209641, 0.0220092, 0.0234752,\n 0.0035532, 0.0038158, 0.0087882, 0.0276654, 0.0315055, 0.0286975,\n 0.0345193, 0.0277533, 0.0495734, 0.0511798, 0.0465938, 0.0436605,\n 0.0071705, 0.0081615, 0.0139063, 0.0204058, 0.0307320, 0.0237855,\n 0.0202095, 0.0197716, 0.0316074, 0.0402461, 0.0438103, 0.0394924,\n 0.0171448, 0.0176446, 0.0567310, 0.0930479, 0.0570520, 0.0622142,\n 0.0292639, 0.0328821, 0.0667957, 0.2541887, 0.4592188, 0.4234131,\n 0.0104887, 0.0179753, 0.0827533, 0.2659557, 0.3242633, 0.3189804,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000, 0.0000000]\ngammas_sigma = [\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00046029, 0.00046029, 0.00071545, 0.02242054, 0.01975435, 0.01975435,\n 0.00143860, 0.00143860, 0.00715263, 0.05481156, 0.04396936, 0.04396936,\n 0.00826301, 0.00826301, 0.00950813, 0.04304817, 0.04400210, 0.04400210,\n 0.00203560, 0.00203560, 0.00207048, 0.02226551, 0.03531839, 0.03531839,\n 0.00746195, 0.00746195, 0.01268396, 0.02380441, 0.03074892, 0.03074892,\n 0.00389360, 0.00389360, 0.01154058, 0.05602348, 0.04034627, 0.04034627,\n 0.00642767, 0.00642767, 0.02338437, 0.43710790, 0.48306584, 0.48306584,\n 0.00291728, 0.00291728, 0.11718631, 0.84620157, 0.80881708, 0.80881708,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00224835, 0.00288498, 0.00554574, 0.02261273, 0.02349047, 0.02647988,\n 0.00330612, 0.00430468, 0.00975355, 0.02954525, 0.03242621, 0.03052183,\n 0.03210358, 0.02583317, 0.04906091, 0.04609366, 0.04064508, 0.04250035,\n 0.00888799, 0.00936948, 0.01541312, 0.02079095, 0.03001210, 0.02721119,\n 0.02593986, 0.02559304, 0.04760672, 0.04958274, 0.04942973, 0.03703768,\n 0.01005313, 0.01125217, 0.05423798, 0.10135670, 0.06021902, 0.09005459,\n 0.02358822, 0.03737522, 0.06633807, 0.22190369, 0.41562743, 0.32601504,\n 0.01240071, 0.02372173, 0.20217767, 0.49239981, 0.52883866, 0.50769018,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000]\ngammas_class1 = [\n 0.00000000, 0.00000000, 0.00000000, -0.00000000, 0.00000000, 0.00000000,\n 0.00053387, 0.00053387, 0.00086230, 0.01894313, 0.02034210, 0.02034210,\n 0.00155506, 0.00155506, 0.00260125, 0.01821681, 0.01820381, 0.01820381,\n 0.00571765, 0.00571765, 0.00544460, 0.01325570, 0.01118428, 0.01118428,\n 0.00016153, 0.00016153, 0.00032679, 0.00020002, 0.00020927, 0.00020927,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 
0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00233036, 0.00264690, 0.00525130, 0.02096414, 0.02200915, 0.02347515,\n 0.00297698, 0.00348529, 0.00638118, 0.01776255, 0.02740917, 0.02217207,\n 0.03234423, 0.02580162, 0.03682891, 0.03904463, 0.01942315, 0.02072384,\n 0.00004097, 0.00005101, 0.00007457, 0.00003508, 0.00004210, 0.00003803,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000]\ngammas_class2 = [\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000728, 0.00000728, 0.00002201, 0.00022624, 0.00028946, 0.00028946,\n 0.00038736, 0.00038736, 0.00415534, 0.03667993, 0.03240766, 0.03240766,\n 0.00050274, 0.00050274, 0.00337024, 0.02935928, 0.03053801, 0.03053801,\n 0.00146473, 0.00146473, 0.00140248, 0.02775086, 0.02871662, 0.02871662,\n 0.00979262, 0.00979262, 0.01700920, 0.04388280, 0.05231049, 0.05231049,\n 0.00355424, 0.00355424, 0.01351094, 0.06235333, 0.03437465, 0.03437465,\n 0.00731397, 0.00731397, 0.02896588, 0.50067605, 0.50779324, 0.50779324,\n 0.00161440, 0.00161440, 0.01263256, 0.27319333, 0.27917018, 0.27917018,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00057618, 0.00033051, 0.00240702, 0.00990280, 0.00409632, 0.00652547,\n 0.00217505, 0.00195163, 0.01274449, 0.01213516, 0.02717067, 0.02293662,\n 0.00712953, 0.00811051, 0.01383178, 0.02037067, 0.03068992, 0.02374747,\n 0.02020952, 0.01977157, 0.03160744, 0.04024612, 0.04381027, 0.03949241,\n 0.01714475, 0.01764459, 0.05673104, 0.09304789, 0.05705200, 0.06221421,\n 0.02926385, 0.03288210, 0.06679574, 0.25418868, 0.45921877, 0.42341309,\n 0.01048868, 0.01797532, 0.08275328, 0.26595568, 0.32426329, 0.31898043,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000]\nfreq_points = [0.0, 3.41024688, 6.82049376, 10.23074063, 13.64098751,\n 17.05123439, 20.46148127, 23.87172814, 27.28197502, 30.6922219]\nfreq_points_sigma = [\n 0.0, 3.45491354, 6.90982709, 10.36474063, 13.81965418,\n 17.27456772, 20.72948127, 24.18439481, 27.63930835, 31.09422190]\n\ndetailed_gamma = [0.00000000, 0.00653193, 0.02492913, 0.01682092, 0.01001680,\n 0.02181888, 0.01858641, 0.16208762, 0.09598706, 0.00000000]\n\ngammas_nacl = [\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.03396688, 0.03396688, 0.00687452, 0.21001764, 0.21001764, 0.12310439,\n 0.00297385, 0.00297385, 0.00227915, 0.10673763, 0.10673763, 0.06918881,\n 0.01003326, 0.01003326, 0.00996780, 0.03414868, 0.03414868, 0.02258494,\n 0.04027592, 0.04027592, 0.03603612, 0.57995646, 0.57995646, 0.39737731,\n 0.12705253, 0.12705253, 0.09246595, 0.88750309, 0.88750309, 0.60334780,\n 0.29968747, 0.29968747, 0.14257862, 0.22134950, 0.22134950, 0.09606896,\n 0.03941985, 0.03941985, 0.01632766, 0.00222574, 0.00222574, 0.00627294,\n 0.00240808, 0.00240808, 0.00688951, 0.00008074, 0.00008074, 0.00003641,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 
0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.02850846, 0.09000833, 0.19582553, 0.13715943, 0.19892888, 0.14203618,\n 0.00861856, 0.02747203, 0.05000735, 0.04441740, 0.11080545, 0.04172184,\n 0.00738182, 0.01722875, 0.03273830, 0.04517923, 0.02441539, 0.03277688,\n 0.03233818, 0.08459289, 0.19264167, 0.11281266, 0.45667245, 0.18491212,\n 0.10846241, 0.47768641, 1.04554356, 0.64678566, 0.83834225, 0.61795504,\n 0.19485590, 0.43708391, 0.24896003, 0.35882984, 0.30654914, 0.22471014,\n 0.03624311, 0.13350831, 0.12479592, 0.06750776, 0.02503182, 0.04543786,\n 0.00155614, 0.01088453, 0.00064712, 0.00392933, 0.00058749, 0.00022448,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000]\nfreq_points_nacl = [\n 0.0, 1.63223063, 3.26446125, 4.89669188, 6.5289225,\n 8.16115313, 9.79338375, 11.42561438, 13.057845, 14.69007563]\ngammas_nacl_nac = [\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.20104463, 0.20104463, 0.12311129,\n 0.00000000, 0.00000000, 0.00000000, 0.10448465, 0.10448465, 0.06445738,\n 0.00000000, 0.00000000, 0.00000000, 0.03814089, 0.03814089, 0.02351398,\n 0.00000000, 0.00000000, 0.00000000, 0.79562828, 0.79562828, 0.49265042,\n 0.00000000, 0.00000000, 0.00000000, 0.71487838, 0.71487838, 0.44811019,\n 0.00000000, 0.00000000, 0.00000000, 0.29194862, 0.29194862, 0.18098946,\n 0.00000000, 0.00000000, 0.00000000, 0.00006218, 0.00006218, 0.00004024,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,\n 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000]\nfreq_points_nacl_nac = [\n 0.0, 1.65531064, 3.31062129, 4.96593193, 6.62124257, 8.27655322,\n 9.93186386, 11.58717451, 13.24248515, 14.89779579]\n\n\ndef test_imag_self_energy_at_bands(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n frequency_points_at_bands=True)\n gammas_ref = [\n 0.00021553, 0.00021553, 0.00084329, 0.04693498, 0.04388354, 0.04388354,\n 0.00383646, 0.00494357, 0.02741665, 0.01407101, 0.04133322, 0.03013125]\n np.testing.assert_allclose(_gammas.ravel(), gammas_ref, atol=1e-2)\n\n\ndef test_imag_self_energy_at_bands_detailed(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas, _detailed_gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n frequency_points_at_bands=True,\n keep_gamma_detail=True)\n\n weights_1 = [2, 2, 2, 2, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n 6, 12, 12, 12, 12, 6, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,\n 12, 12, 12, 12, 12, 12, 12, 12, 6, 12, 12, 12, 12, 12, 12, 12,\n 12, 6, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 6]\n weights_103 = [2, ] * 364 + [1, ]\n\n gammas_1_ref = _gammas[:, :, 0].ravel()\n gammas_103_ref = _gammas[:, :, 1].ravel()\n gammas_1 = np.dot(weights_1,\n _detailed_gammas[0][0, 0].sum(axis=-1).sum(axis=-1))\n gammas_103 = np.dot(weights_103,\n _detailed_gammas[1][0, 0].sum(axis=-1).sum(axis=-1))\n np.testing.assert_allclose(gammas_1[:2].sum(), gammas_1_ref[:2].sum(),\n atol=1e-2)\n np.testing.assert_allclose(gammas_1[-2:].sum(), gammas_1_ref[-2:].sum(),\n atol=1e-2)\n np.testing.assert_allclose(gammas_1[2:4], gammas_1_ref[2:4], atol=1e-2)\n np.testing.assert_allclose(gammas_103, gammas_103_ref, atol=1e-2)\n\n\ndef test_imag_self_energy_npoints(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n 
si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n num_frequency_points=10)\n np.testing.assert_allclose(\n gammas, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n np.testing.assert_allclose(\n freq_points, _fpoints.ravel(), atol=1e-5)\n\n\ndef test_imag_self_energy_npoints_with_sigma(si_pbesol):\n si_pbesol.sigmas = [0.1, ]\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n num_frequency_points=10)\n # for _g_line in np.swapaxes(_gammas, -1, -2).reshape(-1, 6):\n # print(\"\".join([\"%.8f, \" % g for g in _g_line]))\n # print(\"\".join([\"%.8f, \" % f for f in _fpoints]))\n np.testing.assert_allclose(\n gammas_sigma, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n np.testing.assert_allclose(\n freq_points_sigma, _fpoints.ravel(), atol=1e-5)\n si_pbesol.sigmas = None\n\n\ndef test_imag_self_energy_freq_points(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n frequency_points=freq_points)\n np.testing.assert_allclose(\n gammas, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n np.testing.assert_allclose(\n freq_points, _fpoints.ravel(), atol=1e-5)\n\n\ndef test_imag_self_energy_detailed(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas, _detailed_gammas = si_pbesol.run_imag_self_energy(\n [1, ],\n [300, ],\n frequency_points=freq_points,\n keep_gamma_detail=True)\n np.testing.assert_allclose(\n detailed_gamma,\n _detailed_gammas[0][0, 0].sum(axis=(1, 2, 3, 4)),\n atol=1e-2)\n\n\ndef test_imag_self_energy_scat_class1(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n frequency_points=freq_points,\n scattering_event_class=1)\n # for line in si_pbesol.gammas.reshape(-1, 6):\n # print((\"%10.8f, \" * 6) % tuple(line))\n np.testing.assert_allclose(\n gammas_class1, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n\n\ndef test_imag_self_energy_scat_class2(si_pbesol):\n si_pbesol.mesh_numbers = [9, 9, 9]\n si_pbesol.init_phph_interaction()\n _fpoints, _gammas = si_pbesol.run_imag_self_energy(\n [1, 103],\n [300, ],\n frequency_points=freq_points,\n scattering_event_class=2)\n # for line in si_pbesol.gammas.reshape(-1, 6):\n # print((\"%10.8f, \" * 6) % tuple(line))\n np.testing.assert_allclose(\n gammas_class2, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n\n\ndef test_imag_self_energy_nacl_npoints(nacl_pbe):\n nacl_pbe.mesh_numbers = [9, 9, 9]\n nacl_pbe.init_phph_interaction()\n _fpoints, _gammas = nacl_pbe.run_imag_self_energy(\n [1, 103],\n [300, ],\n num_frequency_points=10)\n # for line in np.swapaxes(_gammas, -1, -2).ravel().reshape(-1, 6):\n # print((\"%10.8f, \" * 6) % tuple(line))\n # print(_fpoints.ravel())\n np.testing.assert_allclose(\n gammas_nacl, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n np.testing.assert_allclose(\n freq_points_nacl, _fpoints.ravel(), atol=1e-5)\n\n\ndef test_imag_self_energy_nacl_nac_npoints(nacl_pbe):\n nacl_pbe.mesh_numbers = [9, 9, 9]\n nacl_pbe.init_phph_interaction(nac_q_direction=[1, 0, 0])\n _fpoints, _gammas = nacl_pbe.run_imag_self_energy(\n [0, ],\n [300, ],\n num_frequency_points=10)\n # for line in np.swapaxes(_gammas, -1, -2).ravel().reshape(-1, 6):\n # print((\"%10.8f, \" * 6) % 
tuple(line))\n # print(_fpoints.ravel())\n np.testing.assert_allclose(\n gammas_nacl_nac, np.swapaxes(_gammas, -1, -2).ravel(), atol=1e-2)\n np.testing.assert_allclose(\n freq_points_nacl_nac, _fpoints.ravel(), atol=1e-5)\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.swapaxes"
]
] |
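Editorial note: the phono3py tests above compare against the reference lists via np.swapaxes(_gammas, -1, -2).ravel(), which flattens frequency-point-major so that each row of six values in the literals is one frequency point across the six bands. The reshaping in miniature:

import numpy as np
g = np.arange(2 * 6 * 10).reshape(2, 6, 10)    # (q-points, bands, freq points)
flat = np.swapaxes(g, -1, -2).ravel()          # freq-point-major flattening
assert flat[:6].tolist() == [0, 10, 20, 30, 40, 50]  # band values at f0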
ryaninhust/pyxclib
|
[
"6e23615742f0bb263313f2899f46bb027ea68007"
] |
[
"xclib/embeddings/gen_embeddings.py"
] |
[
"# Run using python gen_embeddings.py <embed> <embedding_dim> <vocabulary> <tokens> <out_file>\nimport numpy as np\nimport sys\nimport word_embedding\n\n__author__='KD'\n\ndef main():\n init = 'gaussian'\n embed = sys.argv[1]\n embedding_dim = int(sys.argv[2])\n vocab_file = sys.argv[3]\n tokens = sys.argv[4]\n out_file = sys.argv[5]\n with open(vocab_file, 'r') as f:\n temp = f.readlines()\n dataset_vocab = [item.rstrip(\"\\n\") for item in temp] #list of all words in given dataset\n del temp\n\n word_embeddings = word_embedding.load_embeddings(DATA_DIR='/home/kd/XC/data/word_embeddings', embed=embed, tokens=tokens, embedding_dim=embedding_dim)\n dataset_vocab_embeddings = np.zeros((len(dataset_vocab), embedding_dim))\n not_found_count = 0\n\n for i in range(len(dataset_vocab)):\n try:\n dataset_vocab_embeddings[i, :] = word_embeddings[dataset_vocab[i]]\n except KeyError:\n if init == 'gaussian':\n dataset_vocab_embeddings[i, :] = np.random.randn(embedding_dim, )*0.01\n not_found_count+=1\n print(\"#Words with no word embeddings\", not_found_count)\n np.save(out_file, dataset_vocab_embeddings)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.randn",
"numpy.save"
]
] |
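Editorial note: gen_embeddings.py above falls back to small Gaussian noise for out-of-vocabulary words via try/except KeyError. The same pattern with dict.get, on a toy table (names illustrative):

import numpy as np
rng = np.random.default_rng(0)
word_embeddings = {'cat': np.ones(4)}      # toy pretrained table
vocab = ['cat', 'dog']                     # 'dog' has no pretrained vector
dim = 4
out = np.zeros((len(vocab), dim))
for i, w in enumerate(vocab):
    # dict.get evaluates the default eagerly; harmless here
    out[i] = word_embeddings.get(w, rng.standard_normal(dim) * 0.01)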
yujialuo/Deep-reinforcement-learning
|
[
"689085251805b1a157e21b5d74c6a478eb8a21c8"
] |
[
"hw1/dagger.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"\nCode to load an expert policy and generate roll-out data for behavioral cloning.\nExample usage:\n python dagger.py experts/Walker2d-v2.pkl Walker2d-v2 --render --num_rollouts 40\n\nAuthor of this script and included expert policies: Jonathan Ho ([email protected])\n\"\"\"\n\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport tf_util\nimport gym\nimport load_policy\nimport pickle\nfrom sklearn.utils import shuffle\n\n\nDATA_PATH = 'expert_data/'\nMODEL_PATH = 'models/'\nNUM_LAYER_UNITS = 64\nTRAIN_ITER = 3000\nBATCH_SIZE = 2048\nNUM_TRAIN_BATCH_ITERS = 10\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n parser.add_argument('envname', type=str)\n parser.add_argument('--render', action='store_true')\n parser.add_argument(\"--max_timesteps\", type=int)\n parser.add_argument('--num_rollouts', type=int, default=20,\n help='Number of expert roll outs')\n args = parser.parse_args()\n\n print('loading and building expert policy')\n policy_fn = load_policy.load_policy(args.expert_policy_file)\n print('loaded and built')\n\n # Get Data\n data_file = os.path.join(DATA_PATH, \"{}.pkl\".format(args.envname))\n with open(data_file, \"rb\") as f:\n data = pickle.loads(f.read())\n expert_obs = data['observations']\n expert_acts = data['actions']\n obs_dim = expert_obs.shape[1]\n act_dim = expert_acts.shape[2]\n expert_acts = expert_acts.reshape(-1, act_dim)\n expert_obs, expert_acts = shuffle(expert_obs, expert_acts, random_state=0)\n\n # Build Model\n x = tf.placeholder(tf.float32, [None, obs_dim])\n y = tf.placeholder(tf.float32, [None, act_dim])\n w = tf.Variable(tf.truncated_normal([obs_dim, NUM_LAYER_UNITS]))\n b = tf.Variable(tf.truncated_normal([NUM_LAYER_UNITS]))\n h = tf.tanh(tf.matmul(x, w) + b)\n w2 = tf.Variable(tf.truncated_normal([NUM_LAYER_UNITS, NUM_LAYER_UNITS]))\n b2 = tf.Variable(tf.truncated_normal([NUM_LAYER_UNITS]))\n h2 = tf.tanh(tf.matmul(h, w2) + b2)\n w3 = tf.Variable(tf.truncated_normal([NUM_LAYER_UNITS, act_dim]))\n b3 = tf.Variable(tf.truncated_normal([act_dim]))\n y_hat = tf.matmul(h2, w3) + b3\n loss = tf.reduce_sum(tf.losses.mean_squared_error(y_hat, y))\n train_target = tf.train.AdamOptimizer().minimize(loss)\n\n # Train and Deploy Model\n train_i = 0\n total_mean = []\n total_std = []\n model_path = MODEL_PATH + \"cloned_model_{}\".format(args.envname)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n tf_util.initialize()\n env = gym.make(args.envname)\n max_steps = args.max_timesteps or env.spec.timestep_limit\n\n for i in range(NUM_TRAIN_BATCH_ITERS):\n # Train Model\n for j in range(TRAIN_ITER):\n random_i = np.random.choice(len(expert_obs), BATCH_SIZE)\n obatch, abatch = expert_obs[random_i], expert_acts[random_i]\n sess.run(train_target, feed_dict={x: obatch, y: abatch})\n train_i += TRAIN_ITER\n\n # Deploy Model\n print('train iter', train_i)\n print(sess.run(loss, feed_dict={x: obatch, y: abatch}))\n\n returns = []\n observations = []\n actions = []\n\n for i in range(args.num_rollouts):\n # print('iter', i)\n obs = env.reset()\n done = False\n totalr = 0.\n steps = 0\n while not done:\n observations.append(obs)\n action_exp = policy_fn(obs[None, :])\n actions.append(action_exp)\n action = sess.run(y_hat, feed_dict={x: np.array([obs]), y: np.array([expert_acts[0]])})\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n if args.render:\n env.render()\n if steps >= max_steps:\n break\n returns.append(totalr)\n\n # Augment 
Data\n            # np.append returns a new array; assign it back, otherwise\n            # the DAgger aggregation step silently does nothing\n            expert_obs = np.append(expert_obs, np.array(observations), axis=0)\n            actions = np.array(actions).reshape(-1, expert_acts.shape[1])\n            expert_acts = np.append(expert_acts, actions, axis=0)\n\n            print('mean return', np.mean(returns))\n            print('std of return', np.std(returns))\n            total_mean.append(np.mean(returns))\n            total_std.append(np.std(returns))\n\n            if train_i % 10000 == 0:\n                saver.save(sess, model_path, global_step=train_i)\n\n        print(\"total_mean\", total_mean)\n        print(\"total_std\", total_std)\n\n\nif __name__ == '__main__':\n    main()\n"
] |
[
[
"numpy.array",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"tensorflow.train.Saver",
"tensorflow.Session",
"numpy.mean",
"tensorflow.truncated_normal",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.losses.mean_squared_error",
"numpy.append",
"sklearn.utils.shuffle"
]
] |
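Editorial note: the aggregation fix above hinges on np.append being non-mutating; a minimal demonstration:

import numpy as np
a = np.zeros((2, 3))
b = np.ones((1, 3))
np.append(a, b, axis=0)         # returns a new array...
assert a.shape == (2, 3)        # ...and leaves `a` untouched
a = np.append(a, b, axis=0)     # so the result must be assigned back
assert a.shape == (3, 3)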
zhu913104/snake
|
[
"8fa5608db9568078fe11c9df84e58fea5c173cd9"
] |
[
"champion_genetic_algorithm.py"
] |
[
"import numpy as np\n\n\nclass GA(object):\n def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size ,pop):\n self.DNA_size = DNA_size\n self.cross_rate = cross_rate\n self.mutate_rate = mutation_rate\n self.pop_size = pop_size\n self.max_distance=0\n if pop.any():\n self.pop=pop\n else:\n self.pop = np.random.rand(self.pop_size,self.DNA_size)*2-1\n self.champion=self.pop[0]\n\n def select(self, fitness):\n idx = np.random.choice(np.arange(self.pop_size), size=self.pop_size, replace=True, p=fitness/fitness.sum())\n return self.pop[idx]\n\n def crossover(self, parent, pop):\n if np.random.rand() < self.cross_rate:\n i_ = np.random.randint(0, self.pop_size, size=1) # select another individual from pop\n cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool) # choose crossover points\n parent[cross_points]=pop[i_,cross_points]\n return parent\n\n def mutate(self, child):\n for point in range(self.DNA_size):\n if np.random.rand() < self.mutate_rate:\n child[point]=child[point]+(np.random.rand()*2-1)\n return child\n\n def evolve(self, fitness):\n if self.max_distance<np.max(fitness):\n self.champion=self.pop[np.argmax(fitness)]\n self.max_distance=np.max(fitness)\n pop = self.select(fitness)\n pop_copy = pop.copy()\n for parent in pop: # for every parent\n child = self.crossover(parent, pop_copy)\n child = self.mutate(child)\n parent[:] = child\n pop[0]=self.champion\n self.pop = pop\n # def get_fitness(self,distance):\n # total_distance = np.empty((line_x.shape[0],line_x.shape[1]), dtype=np.float64)\n # fitness = np.exp(total_distance)\n # return fitness\n\n# ga = GA(DNA_size=N_CITIES, cross_rate=CROSS_RATE, mutation_rate=MUTATE_RATE, pop_size=POP_SIZE)\n\n# for generation in range(N_GENERATIONS):\n# t = time.time()\n# ga.evolve(fitness)\n# best_idx = np.argmax(fitness)\n# best_sub = best_idx//POP_SIZE\n# best_idx_one = best_idx-best_idx//POP_SIZE*POP_SIZE\n# env.plotting(lx[best_sub][best_idx_one], ly[best_sub][best_idx_one], total_distance[best_sub][best_idx_one])\n# if generation%migration_time==0:\n# ga.migration(fitness)\n# print(\"MIGRATION!!!!!!!!!!!!!!!!!!!!!\")\n# print(ga.pop[best_sub][best_idx_one])\n# print('Gen:', generation,'|best sub: ', (best_sub),'| best fit: %.3f' % fitness[best_sub][best_idx_one],\"| time:\",time.time()-t )\n#\n#\n# plt.ioff()\n# plt.show()"
] |
[
[
"numpy.max",
"numpy.random.rand",
"numpy.random.randint",
"numpy.arange",
"numpy.argmax"
]
] |
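Editorial note: astype(np.bool) in the crossover above fails on NumPy >= 1.24, where the deprecated np.bool alias was removed; the builtin bool works on every version. A sketch of the masked-crossover line:

import numpy as np
DNA_size = 10
parent = np.random.rand(DNA_size) * 2 - 1
other = np.random.rand(DNA_size) * 2 - 1
cross_points = np.random.randint(0, 2, DNA_size).astype(bool)  # not np.bool
parent[cross_points] = other[cross_points]    # boolean-mask crossover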
lazappi/scanpy
|
[
"9018e16cae6f3199f914f58841b00a00790cd494"
] |
[
"scanpy/tests/test_get.py"
] |
[
"from functools import partial\nfrom itertools import repeat, chain\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom anndata import AnnData\nfrom scipy import sparse\n\nimport scanpy as sc\nfrom scanpy.datasets._utils import filter_oldformatwarning\nfrom scanpy.tests._data._cached_datasets import pbmc68k_reduced\n\nTRANSPOSE_PARAMS = pytest.mark.parametrize(\n \"dim,transform,func\",\n [\n (\n \"obs\",\n lambda x: x,\n sc.get.obs_df,\n ),\n (\n \"var\",\n lambda x: x.T,\n sc.get.var_df,\n ),\n ],\n ids=[\"obs_df\", \"var_df\"],\n)\n\n\[email protected]\ndef adata():\n \"\"\"\n adata.X is np.ones((2, 2))\n adata.layers['double'] is sparse np.ones((2,2)) * 2 to also test sparse matrices\n \"\"\"\n return AnnData(\n X=np.ones((2, 2)),\n obs=pd.DataFrame(\n {\"obs1\": [0, 1], \"obs2\": [\"a\", \"b\"]}, index=[\"cell1\", \"cell2\"]\n ),\n var=pd.DataFrame(\n {\"gene_symbols\": [\"genesymbol1\", \"genesymbol2\"]}, index=[\"gene1\", \"gene2\"]\n ),\n layers={\"double\": sparse.csr_matrix(np.ones((2, 2)), dtype=int) * 2},\n dtype=int,\n )\n\n\n########################\n# obs_df, var_df tests #\n########################\n\n\ndef test_obs_df(adata):\n adata.obsm[\"eye\"] = np.eye(2, dtype=int)\n adata.obsm[\"sparse\"] = sparse.csr_matrix(np.eye(2), dtype='float64')\n\n # make raw with different genes than adata\n adata.raw = AnnData(\n X=np.array([[1, 2, 3], [2, 4, 6]]),\n var=pd.DataFrame(\n {\"gene_symbols\": [\"raw1\", \"raw2\", 'raw3']},\n index=[\"gene2\", \"gene3\", \"gene4\"],\n ),\n dtype='float64',\n )\n pd.testing.assert_frame_equal(\n sc.get.obs_df(\n adata, keys=[\"gene2\", \"obs1\"], obsm_keys=[(\"eye\", 0), (\"sparse\", 1)]\n ),\n pd.DataFrame(\n {\"gene2\": [1, 1], \"obs1\": [0, 1], \"eye-0\": [1, 0], \"sparse-1\": [0.0, 1.0]},\n index=adata.obs_names,\n ),\n )\n pd.testing.assert_frame_equal(\n sc.get.obs_df(\n adata,\n keys=[\"genesymbol2\", \"obs1\"],\n obsm_keys=[(\"eye\", 0), (\"sparse\", 1)],\n gene_symbols=\"gene_symbols\",\n ),\n pd.DataFrame(\n {\n \"genesymbol2\": [1, 1],\n \"obs1\": [0, 1],\n \"eye-0\": [1, 0],\n \"sparse-1\": [0.0, 1.0],\n },\n index=adata.obs_names,\n ),\n )\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=[\"gene2\", \"obs1\"], layer=\"double\"),\n pd.DataFrame({\"gene2\": [2, 2], \"obs1\": [0, 1]}, index=adata.obs_names),\n )\n\n pd.testing.assert_frame_equal(\n sc.get.obs_df(\n adata,\n keys=[\"raw2\", \"raw3\", \"obs1\"],\n gene_symbols=\"gene_symbols\",\n use_raw=True,\n ),\n pd.DataFrame(\n {\"raw2\": [2.0, 4.0], \"raw3\": [3.0, 6.0], \"obs1\": [0, 1]},\n index=adata.obs_names,\n ),\n )\n # test only obs\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=[\"obs1\", \"obs2\"]),\n pd.DataFrame({\"obs1\": [0, 1], \"obs2\": [\"a\", \"b\"]}, index=[\"cell1\", \"cell2\"]),\n )\n # test only var\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=[\"gene1\", \"gene2\"]),\n pd.DataFrame({\"gene1\": [1, 1], \"gene2\": [1, 1]}, index=adata.obs_names),\n )\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=[\"gene1\", \"gene2\"]),\n pd.DataFrame({\"gene1\": [1, 1], \"gene2\": [1, 1]}, index=adata.obs_names),\n )\n # test handling of duplicated keys (in this case repeated gene names)\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=[\"gene1\", \"gene2\", \"gene1\", \"gene1\"]),\n pd.DataFrame(\n {\"gene1\": [1, 1], \"gene2\": [1, 1]},\n index=adata.obs_names,\n )[[\"gene1\", \"gene2\", \"gene1\", \"gene1\"]],\n )\n\n badkeys = [\"badkey1\", \"badkey2\"]\n with pytest.raises(KeyError) as 
badkey_err:\n sc.get.obs_df(adata, keys=badkeys)\n with pytest.raises(AssertionError):\n sc.get.obs_df(adata, keys=[\"gene1\"], use_raw=True, layer=\"double\")\n assert all(badkey_err.match(k) for k in badkeys)\n\n # test non unique index\n adata = sc.AnnData(\n np.arange(16).reshape(4, 4),\n obs=pd.DataFrame(index=[\"a\", \"a\", \"b\", \"c\"]),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(4)]),\n )\n df = sc.get.obs_df(adata, [\"gene1\"])\n pd.testing.assert_index_equal(df.index, adata.obs_names)\n\n\ndef test_repeated_gene_symbols():\n \"\"\"\n Gene symbols column allows repeats, but we can't unambiguously get data for these values.\n \"\"\"\n gene_symbols = [f\"symbol_{i}\" for i in [\"a\", \"b\", \"b\", \"c\"]]\n var_names = pd.Index([f\"id_{i}\" for i in [\"a\", \"b.1\", \"b.2\", \"c\"]])\n adata = sc.AnnData(\n np.arange(3 * 4).reshape((3, 4)),\n var=pd.DataFrame({\"gene_symbols\": gene_symbols}, index=var_names),\n )\n\n with pytest.raises(KeyError, match=\"symbol_b\"):\n sc.get.obs_df(adata, [\"symbol_b\"], gene_symbols=\"gene_symbols\")\n\n expected = pd.DataFrame(\n np.arange(3 * 4).reshape((3, 4))[:, [0, 3]].astype(np.float32),\n index=adata.obs_names,\n columns=[\"symbol_a\", \"symbol_c\"],\n )\n result = sc.get.obs_df(adata, [\"symbol_a\", \"symbol_c\"], gene_symbols=\"gene_symbols\")\n\n pd.testing.assert_frame_equal(expected, result)\n\n\n@filter_oldformatwarning\ndef test_backed_vs_memory():\n \"compares backed vs. memory\"\n from pathlib import Path\n\n # get location test h5ad file in datasets\n HERE = Path(sc.__file__).parent\n adata_file = HERE / \"datasets/10x_pbmc68k_reduced.h5ad\"\n adata_backed = sc.read(adata_file, backed='r')\n adata = sc.read_h5ad(adata_file)\n\n # use non-sequential list of genes\n genes = list(adata.var_names[20::-2])\n obs_names = ['bulk_labels', 'n_genes']\n pd.testing.assert_frame_equal(\n sc.get.obs_df(adata, keys=genes + obs_names),\n sc.get.obs_df(adata_backed, keys=genes + obs_names),\n )\n\n # use non-sequential list of cell indices\n cell_indices = list(adata.obs_names[30::-2])\n pd.testing.assert_frame_equal(\n sc.get.var_df(adata, keys=cell_indices + [\"highly_variable\"]),\n sc.get.var_df(adata_backed, keys=cell_indices + [\"highly_variable\"]),\n )\n\n\ndef test_column_content():\n \"uses a larger dataset to test column order and content\"\n adata = pbmc68k_reduced()\n\n # test that columns content is correct for obs_df\n query = ['CST3', 'NKG7', 'GNLY', 'louvain', 'n_counts', 'n_genes']\n df = sc.get.obs_df(adata, query)\n for col in query:\n assert col in df\n np.testing.assert_array_equal(query, df.columns)\n np.testing.assert_array_equal(df[col].values, adata.obs_vector(col))\n\n # test that columns content is correct for var_df\n cell_ids = list(adata.obs.sample(5).index)\n query = cell_ids + ['highly_variable', 'dispersions_norm', 'dispersions']\n df = sc.get.var_df(adata, query)\n np.testing.assert_array_equal(query, df.columns)\n for col in query:\n np.testing.assert_array_equal(df[col].values, adata.var_vector(col))\n\n\ndef test_var_df(adata):\n adata.varm[\"eye\"] = np.eye(2, dtype=int)\n adata.varm[\"sparse\"] = sparse.csr_matrix(np.eye(2), dtype='float64')\n\n pd.testing.assert_frame_equal(\n sc.get.var_df(\n adata,\n keys=[\"cell2\", \"gene_symbols\"],\n varm_keys=[(\"eye\", 0), (\"sparse\", 1)],\n ),\n pd.DataFrame(\n {\n \"cell2\": [1, 1],\n \"gene_symbols\": [\"genesymbol1\", \"genesymbol2\"],\n \"eye-0\": [1, 0],\n \"sparse-1\": [0.0, 1.0],\n },\n index=adata.var_names,\n ),\n )\n 
pd.testing.assert_frame_equal(\n sc.get.var_df(adata, keys=[\"cell1\", \"gene_symbols\"], layer=\"double\"),\n pd.DataFrame(\n {\"cell1\": [2, 2], \"gene_symbols\": [\"genesymbol1\", \"genesymbol2\"]},\n index=adata.var_names,\n ),\n )\n # test only cells\n pd.testing.assert_frame_equal(\n sc.get.var_df(adata, keys=[\"cell1\", \"cell2\"]),\n pd.DataFrame(\n {\"cell1\": [1, 1], \"cell2\": [1, 1]},\n index=adata.var_names,\n ),\n )\n # test only var columns\n pd.testing.assert_frame_equal(\n sc.get.var_df(adata, keys=[\"gene_symbols\"]),\n pd.DataFrame(\n {\"gene_symbols\": [\"genesymbol1\", \"genesymbol2\"]},\n index=adata.var_names,\n ),\n )\n\n # test handling of duplicated keys (in this case repeated cell names)\n pd.testing.assert_frame_equal(\n sc.get.var_df(adata, keys=[\"cell1\", \"cell2\", \"cell2\", \"cell1\"]),\n pd.DataFrame(\n {\"cell1\": [1, 1], \"cell2\": [1, 1]},\n index=adata.var_names,\n )[[\"cell1\", \"cell2\", \"cell2\", \"cell1\"]],\n )\n\n badkeys = [\"badkey1\", \"badkey2\"]\n with pytest.raises(KeyError) as badkey_err:\n sc.get.var_df(adata, keys=badkeys)\n assert all(badkey_err.match(k) for k in badkeys)\n\n\n@TRANSPOSE_PARAMS\ndef test_just_mapping_keys(dim, transform, func):\n # https://github.com/scverse/scanpy/issues/1634\n # Test for error where just passing obsm_keys, but not keys, would cause error.\n mapping_attr = f\"{dim}m\"\n kwargs = {f\"{mapping_attr}_keys\": [(\"array\", 0), (\"array\", 1)]}\n\n adata = transform(\n sc.AnnData(\n X=np.zeros((5, 5)),\n obsm={\n \"array\": np.arange(10).reshape((5, 2)),\n },\n )\n )\n\n expected = pd.DataFrame(\n np.arange(10).reshape((5, 2)),\n index=getattr(adata, f\"{dim}_names\"),\n columns=[\"array-0\", \"array-1\"],\n )\n result = func(adata, **kwargs)\n\n pd.testing.assert_frame_equal(expected, result)\n\n\n##################################\n# Test errors for obs_df, var_df #\n##################################\n\n\ndef test_non_unique_cols_value_error():\n M, N = 5, 3\n adata = sc.AnnData(\n X=np.zeros((M, N)),\n obs=pd.DataFrame(\n np.arange(M * 2).reshape((M, 2)),\n columns=[\"repeated_col\", \"repeated_col\"],\n index=[f\"cell_{i}\" for i in range(M)],\n ),\n var=pd.DataFrame(\n index=[f\"gene_{i}\" for i in range(N)],\n ),\n )\n with pytest.raises(ValueError):\n sc.get.obs_df(adata, [\"repeated_col\"])\n\n\ndef test_non_unique_var_index_value_error():\n adata = sc.AnnData(\n X=np.ones((2, 3)),\n obs=pd.DataFrame(index=[\"cell-0\", \"cell-1\"]),\n var=pd.DataFrame(index=[\"gene-0\", \"gene-0\", \"gene-1\"]),\n )\n with pytest.raises(ValueError):\n sc.get.obs_df(adata, [\"gene-0\"])\n\n\ndef test_keys_in_both_obs_and_var_index_value_error():\n M, N = 5, 3\n adata = sc.AnnData(\n X=np.zeros((M, N)),\n obs=pd.DataFrame(\n np.arange(M),\n columns=[\"var_id\"],\n index=[f\"cell_{i}\" for i in range(M)],\n ),\n var=pd.DataFrame(\n index=[\"var_id\"] + [f\"gene_{i}\" for i in range(N - 1)],\n ),\n )\n with pytest.raises(KeyError, match=\"var_id\"):\n sc.get.obs_df(adata, [\"var_id\"])\n\n\n@TRANSPOSE_PARAMS\ndef test_repeated_cols(dim, transform, func):\n adata = transform(\n sc.AnnData(\n np.ones((5, 10)),\n obs=pd.DataFrame(\n np.ones((5, 2)), columns=[\"a_column_name\", \"a_column_name\"]\n ),\n var=pd.DataFrame(index=[f\"gene-{i}\" for i in range(10)]),\n )\n )\n # (?s) is inline re.DOTALL\n with pytest.raises(ValueError, match=rf\"(?s)^adata\\.{dim}.*a_column_name.*$\"):\n func(adata, [\"gene_5\"])\n\n\n@TRANSPOSE_PARAMS\ndef test_repeated_index_vals(dim, transform, func):\n # THis one could be reverted, see:\n 
# https://github.com/scverse/scanpy/pull/1583#issuecomment-770641710\n alt_dim = [\"obs\", \"var\"][dim == \"obs\"]\n adata = transform(\n sc.AnnData(\n np.ones((5, 10)),\n var=pd.DataFrame(\n index=[\"repeated_id\"] * 2 + [f\"gene-{i}\" for i in range(8)]\n ),\n )\n )\n\n with pytest.raises(\n ValueError,\n match=rf\"(?s)adata\\.{alt_dim}_names.*{alt_dim}_names_make_unique\",\n ):\n func(adata, \"gene_5\")\n\n\[email protected](\n params=[\n \"obs_df\",\n \"var_df\",\n \"obs_df:use_raw\",\n \"obs_df:gene_symbols\",\n \"obs_df:gene_symbols,use_raw\",\n ]\n)\ndef shared_key_adata(request):\n kind = request.param\n adata = sc.AnnData(\n np.arange(50).reshape((5, 10)),\n obs=pd.DataFrame(np.zeros((5, 1)), columns=[\"var_id\"]),\n var=pd.DataFrame(index=[\"var_id\"] + [f\"gene_{i}\" for i in range(1, 10)]),\n )\n if kind == \"obs_df\":\n return (\n adata,\n sc.get.obs_df,\n r\"'var_id'.* adata\\.obs .* adata.var_names\",\n )\n elif kind == \"var_df\":\n return (\n adata.T,\n sc.get.var_df,\n r\"'var_id'.* adata\\.var .* adata.obs_names\",\n )\n elif kind == \"obs_df:use_raw\":\n adata.raw = adata\n adata.var_names = [f\"gene_{i}\" for i in range(10)]\n return (\n adata,\n partial(sc.get.obs_df, use_raw=True),\n r\"'var_id'.* adata\\.obs .* adata\\.raw\\.var_names\",\n )\n elif kind == \"obs_df:gene_symbols\":\n adata.var[\"gene_symbols\"] = adata.var_names\n adata.var_names = [f\"gene_{i}\" for i in range(10)]\n return (\n adata,\n partial(sc.get.obs_df, gene_symbols=\"gene_symbols\"),\n r\"'var_id'.* adata\\.obs .* adata\\.var\\['gene_symbols'\\]\",\n )\n elif kind == \"obs_df:gene_symbols,use_raw\":\n base = adata.copy()\n adata.var[\"gene_symbols\"] = adata.var_names\n adata.var_names = [f\"gene_{i}\" for i in range(10)]\n base.raw = adata\n return (\n base,\n partial(\n sc.get.obs_df,\n gene_symbols=\"gene_symbols\",\n use_raw=True,\n ),\n r\"'var_id'.* adata\\.obs .* adata\\.raw\\.var\\['gene_symbols'\\]\",\n )\n else:\n assert False\n\n\ndef test_shared_key_errors(shared_key_adata):\n adata, func, regex = shared_key_adata\n\n # This should error\n with pytest.raises(KeyError, match=regex):\n func(adata, keys=[\"var_id\"])\n\n # This shouldn't error\n _ = func(adata, keys=[\"gene_2\"])\n\n\n##############################\n# rank_genes_groups_df tests #\n##############################\n\n\ndef test_rank_genes_groups_df():\n a = np.zeros((20, 3))\n a[:10, 0] = 5\n adata = AnnData(\n a,\n obs=pd.DataFrame(\n {\"celltype\": list(chain(repeat(\"a\", 10), repeat(\"b\", 10)))},\n index=[f\"cell{i}\" for i in range(a.shape[0])],\n ),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(a.shape[1])]),\n )\n sc.tl.rank_genes_groups(adata, groupby=\"celltype\", method=\"wilcoxon\", pts=True)\n dedf = sc.get.rank_genes_groups_df(adata, \"a\")\n assert dedf[\"pvals\"].value_counts()[1.0] == 2\n assert sc.get.rank_genes_groups_df(adata, \"a\", log2fc_max=0.1).shape[0] == 2\n assert sc.get.rank_genes_groups_df(adata, \"a\", log2fc_min=0.1).shape[0] == 1\n assert sc.get.rank_genes_groups_df(adata, \"a\", pval_cutoff=0.9).shape[0] == 1\n del adata.uns[\"rank_genes_groups\"]\n sc.tl.rank_genes_groups(\n adata,\n groupby=\"celltype\",\n method=\"wilcoxon\",\n key_added=\"different_key\",\n pts=True,\n )\n with pytest.raises(KeyError):\n sc.get.rank_genes_groups_df(adata, \"a\")\n dedf2 = sc.get.rank_genes_groups_df(adata, \"a\", key=\"different_key\")\n pd.testing.assert_frame_equal(dedf, dedf2)\n assert 'pct_nz_group' in dedf2.columns\n assert 'pct_nz_reference' in dedf2.columns\n\n # get all groups\n 
dedf3 = sc.get.rank_genes_groups_df(adata, group=None, key=\"different_key\")\n assert 'a' in dedf3['group'].unique()\n assert 'b' in dedf3['group'].unique()\n adata.var_names.name = 'pr1388'\n sc.get.rank_genes_groups_df(adata, group=None, key=\"different_key\")\n"
] |
[
[
"pandas.testing.assert_frame_equal",
"pandas.Index",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.eye",
"numpy.arange",
"pandas.testing.assert_index_equal"
]
] |
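The behavior these tests pin down is easiest to see in isolation. Below is a minimal sketch (assuming `scanpy` and `anndata` are installed; the toy data mirrors the fixture above, not a real dataset) of how `sc.get.obs_df` gathers expression columns, `.obs` columns, and `.obsm` slices into one `DataFrame`:

```python
import numpy as np
import pandas as pd
from anndata import AnnData
import scanpy as sc

adata = AnnData(
    X=np.ones((2, 2)),
    obs=pd.DataFrame({"obs1": [0, 1]}, index=["cell1", "cell2"]),
    var=pd.DataFrame(index=["gene1", "gene2"]),
)
adata.obsm["eye"] = np.eye(2)

# One frame with columns drawn from three places:
# "gene1" from X, "obs1" from .obs, "eye-0" from .obsm["eye"][:, 0]
df = sc.get.obs_df(adata, keys=["gene1", "obs1"], obsm_keys=[("eye", 0)])
print(df)
```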
cadoman/map-parser
|
[
"8568429682a14577629a8d126b6ab894b5d66da3"
] |
[
"map_extractor/PolygonGroup.py"
] |
[
"from shapely.geometry import Polygon\nfrom pprint import pprint\nimport numpy as np\nimport shapely.wkt\nimport matplotlib.pyplot as plt\nimport json\n\n\nclass PolygonGroup:\n def __init__(self, polygons: list, name=\"\"):\n assert isinstance(polygons, list)\n for pol in polygons:\n assert isinstance(pol, Polygon)\n self.polygon_list = polygons\n self.name = name\n\n def __iter__(self):\n return iter(self.polygon_list)\n\n def __len__(self):\n return len(self.polygon_list)\n\n def filter_polygons(self, min_area: int):\n '''\n Filter out the polygons which are too small and those which are contained in other polygons\n\n Parameter\n min_area (int) : The minimum area for a polygon to be considered\n '''\n big_polygons = [\n pol for pol in self.polygon_list if pol.area >= min_area]\n\n not_contained = []\n for i, pol in enumerate(big_polygons):\n if not is_contained(pol, np.concatenate((big_polygons[:i], big_polygons[i+1:]))):\n not_contained.append(pol)\n return PolygonGroup(not_contained, self.name)\n\n def to_dict(self):\n polygon_json = [list(pol.exterior.coords) for pol in self.polygon_list]\n return{\n \"name\": self.name,\n \"polygon_list\": polygon_json\n }\n\n @staticmethod\n def from_dict(dict_pg: dict):\n polygons = [Polygon(points_array)\n for points_array in dict_pg['polygon_list']]\n return PolygonGroup(polygons, dict_pg['name'])\n\n def without_polygons_contained_in_other_group(self, other_group):\n '''\n Filter out the polygon of the polygongroup which are contained in the polygons of another polygon group\n\n Parameters\n other_group (PolygonGroup) : The group representing potential containers\n\n Returns\n PolygonGroup : A new group created from self, stripped from contained polygons\n '''\n not_contained = [pol for pol in self.polygon_list if not is_contained(\n pol, other_group.polygon_list)]\n return PolygonGroup(not_contained, self.name)\n\n def display(self):\n points = np.concatenate(\n ([pol.exterior.coords for pol in self.polygon_list])).astype(int)\n maxx, maxy = points.max(axis=0) + 1\n image = np.zeros((maxy, maxx))\n rr, cc = points[:, 1], points[:, 0]\n image[rr, cc] = 1\n plt.imshow(image, cmap='gray')\n plt.title('Name : '+str(self.name))\n plt.show()\n\ndef is_contained(polygon: Polygon, other_polygons:list):\n '''\n Determine if a polygon lies in one the polygons provided in other_polygons\n\n Parameters\n polygon (Polygon) : The potentially contained polygon\n other_polygons (list) : List of potential container polygons\n\n Returns\n boolean : True if contained, False otherwise\n '''\n for potential_container in other_polygons:\n if polygon.within(potential_container):\n return True\n return False\n"
] |
[
[
"numpy.concatenate",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.imshow"
]
] |
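A minimal usage sketch for `PolygonGroup.filter_polygons` above (assuming `shapely` is installed and the class is importable from `map_extractor.PolygonGroup`, the file path recorded for this row): the inner square is dropped because it lies within the outer one, and the triangle is dropped by the area threshold.

```python
from shapely.geometry import Polygon
from map_extractor.PolygonGroup import PolygonGroup  # assumed import path

outer = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
inner = Polygon([(2, 2), (4, 2), (4, 4), (2, 4)])  # contained in outer
tiny = Polygon([(0, 0), (1, 0), (0, 1)])           # area 0.5, below threshold

group = PolygonGroup([outer, inner, tiny], name="demo")
filtered = group.filter_polygons(min_area=1)
print(len(filtered))  # 1 -> only `outer` survives
```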
nicksum107/thesiswork
|
[
"5d175d0e110b08b7da2926fc64287086f503e086"
] |
[
"patch_attack_imagenet.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torchvision import datasets, transforms\n\nimport nets.bagnet\nimport nets.resnet\n\nimport os \nimport joblib\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np \n\nfrom PatchAttacker import PatchAttacker\nimport os \n\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--dump_dir\",default='patch_adv',type=str,help=\"directory to save attack results\")\nparser.add_argument(\"--model_dir\",default='checkpoints',type=str,help=\"path to checkpoints\")\nparser.add_argument('--data_dir', default='./data/imagenette', type=str,help=\"path to data\")\n# parser.add_argument('--data_dir', default='./data/imagenet',type=str)\nparser.add_argument(\"--model\",default='bagnet17',type=str,help=\"model name\")\nparser.add_argument(\"--clip\",default=-1,type=int,help=\"clipping value; do clipping when this argument is set to positive\")\nparser.add_argument(\"--aggr\",default='mean',type=str,help=\"aggregation methods. one of mean, median, cbn\")\nparser.add_argument(\"--skip\",default=1,type=int,help=\"number of example to skip\")\nparser.add_argument(\"--patch_size\",default=31,type=int,help=\"size of the adversarial patch\")\n\nargs = parser.parse_args()\n\nMODEL_DIR=os.path.join('.',args.model_dir)\nDATA_DIR=os.path.join(args.data_dir)\nDUMP_DIR=os.path.join('dump',args.dump_dir+'_{}'.format(args.model))\nif not os.path.exists('dump'):\n\tos.mkdir('dump')\nif not os.path.exists(DUMP_DIR):\n\tos.mkdir(DUMP_DIR)\n\n#prepare data\nmean_vec = [0.485, 0.456, 0.406]\nstd_vec = [0.229, 0.224, 0.225]\nval_dir=os.path.join(DATA_DIR,'val')\nval_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean_vec,std_vec)\n ])\n\nval_dataset_ = datasets.ImageFolder(val_dir,val_transforms)\nclass_names = val_dataset_.classes\nskips = list(range(0, len(val_dataset_), args.skip))\n\nval_dataset = torch.utils.data.Subset(val_dataset_, skips)\n\nval_loader = torch.utils.data.DataLoader(val_dataset, batch_size=8,shuffle=False)\n\n#build and initialize model\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\nif args.clip > 0:\n\tclip_range = [0,args.clip]\nelse:\n\tclip_range = None\n \nif 'bagnet17' in args.model:\n model = nets.bagnet.bagnet17(pretrained=True,clip_range=clip_range,aggregation=args.aggr)\nelif 'bagnet33' in args.model:\n model = nets.bagnet.bagnet33(pretrained=True,clip_range=clip_range,aggregation=args.aggr)\nelif 'bagnet9' in args.model:\n model = nets.bagnet.bagnet9(pretrained=True,clip_range=clip_range,aggregation=args.aggr)\nelif 'resnet50' in args.model:\n model = nets.resnet.resnet50(pretrained=True,clip_range=clip_range,aggregation=args.aggr)\n\n\nif 'imagenette' in args.data_dir:\n\tnum_ftrs = model.fc.in_features\n\tmodel.fc = nn.Linear(num_ftrs, len(class_names))\n\tmodel = torch.nn.DataParallel(model)\n\tprint('restoring model from checkpoint...')\n\tcheckpoint = torch.load(os.path.join(MODEL_DIR,args.model+'.pth'))\n\tmodel.load_state_dict(checkpoint['model_state_dict'])\n\nmodel = torch.nn.DataParallel(model)\nmodel = model.to(device)\nmodel.eval()\ncudnn.benchmark = True\n\nmodel.eval()\n\nattacker = PatchAttacker(model, mean_vec, std_vec,patch_size=args.patch_size,step_size=0.05,steps=500)\n\nadv_list=[]\nerror_list=[]\naccuracy_list=[]\npatch_loc_list=[]\n\nfor data,labels in 
tqdm(val_loader):\n\t\n\tdata,labels=data.to(device),labels.to(device)\n\tdata_adv,patch_loc = attacker.perturb(data, labels)\n\n\toutput_adv = model(data_adv)\n\terror_adv=torch.sum(torch.argmax(output_adv, dim=1) != labels).cpu().detach().numpy()\n\toutput_clean = model(data)\n\tacc_clean=torch.sum(torch.argmax(output_clean, dim=1) == labels).cpu().detach().numpy()\n\n\tdata_adv=data_adv.cpu().detach().numpy()\n\tpatch_loc=patch_loc.cpu().detach().numpy()\n\n\tpatch_loc_list.append(patch_loc)\n\tadv_list.append(data_adv)\n\terror_list.append(error_adv)\n\taccuracy_list.append(acc_clean)\n\n\nadv_list = np.concatenate(adv_list)\npatch_loc_list = np.concatenate(patch_loc_list)\njoblib.dump(adv_list,os.path.join(DUMP_DIR,'patch_adv_list_{}.z'.format(args.patch_size)))\njoblib.dump(patch_loc_list,os.path.join(DUMP_DIR,'patch_loc_list_{}.z'.format(args.patch_size)))\nprint(\"Attack success rate:\",np.sum(error_list)/len(val_dataset))\nprint(\"Clean accuracy:\",np.sum(accuracy_list)/len(val_dataset))\n\t\n"
] |
[
[
"numpy.concatenate",
"numpy.sum",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.Subset",
"torch.argmax",
"torch.nn.DataParallel"
]
] |
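The script above delegates the attack itself to `PatchAttacker.perturb`, whose implementation is not shown in this row. The following is a purely hypothetical sketch of the masked-PGD step such an attacker typically performs; the function name, the `x0`/`y0` window arguments, and the loop structure are illustrative assumptions, not the repository's actual API (input-range clamping is omitted for brevity):

```python
import torch
import torch.nn.functional as F

def pgd_patch_step(model, x, y, x0, y0, size, step_size=0.05, steps=500):
    # restrict updates to a fixed square window: only patch pixels may change
    mask = torch.zeros_like(x)
    mask[:, :, y0:y0 + size, x0:x0 + size] = 1.0
    x_adv = x.clone().detach().requires_grad_(True)
    for _ in range(steps):
        loss = F.cross_entropy(model(x_adv), y)
        (grad,) = torch.autograd.grad(loss, x_adv)
        with torch.no_grad():
            # gradient *ascent* on the loss, masked to the patch region
            x_adv += step_size * grad.sign() * mask
    return x_adv.detach()
```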
daggertye/CS294_homework
|
[
"4905e2622e1c7e4d2bde343da139333b3dbecc93"
] |
[
"hw2/train_pg.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport gym\nimport logz\nimport scipy.signal\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\ndef build_mlp(\n input_placeholder, \n output_size,\n scope, \n n_layers=2, \n size=64, \n activation=tf.tanh,\n output_activation=None\n ):\n #========================================================================================#\n # ----------SECTION 3----------\n # Network building\n #\n # Your code should make a feedforward neural network (also called a multilayer perceptron)\n # with 'n_layers' hidden layers of size 'size' units. \n # \n # The output layer should have size 'output_size' and activation 'output_activation'.\n #\n # Hint: use tf.layers.dense\n #========================================================================================#\n\n with tf.variable_scope(scope):\n x = input_placeholder\n while n_layers > 0:\n x = tf.layers.dense(x, size)\n x = activation(x)\n n_layers-=1\n x = tf.layers.dense(x, output_size, activation = output_activation)\n return x\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef norm(values, mean, std):\n std_away = (values - np.mean(values))/(np.std(values) + 1e-8)\n return mean + std * std_away\n\n#============================================================================================#\n# Policy Gradient\n#============================================================================================#\n\ndef train_PG(exp_name='',\n env_name='CartPole-v0',\n n_iter=100, \n gamma=1.0, \n _lambda=1.0,\n min_timesteps_per_batch=1000, \n max_path_length=None,\n learning_rate=5e-3, \n reward_to_go=True, \n animate=True, \n logdir=None, \n normalize_advantages=True,\n nn_baseline=False, \n seed=0,\n # network arguments\n n_layers=1,\n size=32\n ):\n\n start = time.time()\n\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n\n # Log experimental parameters\n args = inspect.getargspec(train_PG)[0]\n locals_ = locals()\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n # Make the gym environment\n env = gym.make(env_name)\n \n # Is this env continuous, or discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n #========================================================================================#\n # Notes on notation:\n # \n # Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n # that are computed later in the function\n # \n # Prefixes and suffixes:\n # ob - observation \n # ac - action\n # _no - this tensor should have shape (batch size /n/, observation dim)\n # _na - this tensor should have shape (batch size /n/, action dim)\n # _n - this tensor should have shape (batch size /n/)\n # \n # Note: batch size /n/ is defined at runtime, and until then, the shape for that axis\n # is None\n #========================================================================================#\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n 
#========================================================================================#\n # ----------SECTION 4----------\n # Placeholders\n # \n # Need these for batch observations / actions / advantages in policy gradient loss function.\n #========================================================================================#\n\n sy_ob_no = tf.placeholder(shape=[None, ob_dim], name=\"ob\", dtype=tf.float32)\n if discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, ac_dim], name=\"ac\", dtype=tf.float32) \n\n # Define a placeholder for advantages\n sy_adv_n = tf.placeholder(shape = [None], name = \"adv\", dtype = tf.float32)\n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Networks\n # \n # Make symbolic operations for\n # 1. Policy network outputs which describe the policy distribution.\n # a. For the discrete case, just logits for each action.\n #\n # b. For the continuous case, the mean / log std of a Gaussian distribution over \n # actions.\n #\n # Hint: use the 'build_mlp' function you defined in utilities.\n #\n # Note: these ops should be functions of the placeholder 'sy_ob_no'\n #\n # 2. Producing samples stochastically from the policy distribution.\n # a. For the discrete case, an op that takes in logits and produces actions.\n #\n # Should have shape [None]\n #\n # b. For the continuous case, use the reparameterization trick:\n # The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n #\n # mu + sigma * z, z ~ N(0, I)\n #\n # This reduces the problem to just sampling z. (Hint: use tf.random_normal!)\n #\n # Should have shape [None, ac_dim]\n #\n # Note: these ops should be functions of the policy network output ops.\n #\n # 3. 
Computing the log probability of a set of actions that were actually taken, \n # according to the policy.\n #\n # Note: these ops should be functions of the placeholder 'sy_ac_na', and the \n # policy network output ops.\n # \n #========================================================================================#\n\n if discrete:\n # YOUR_CODE_HERE\n sy_logits_na = build_mlp(sy_ob_no, ac_dim, \"policy\", n_layers = n_layers, size = size)\n sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis = [1])\n sy_logprob_n = - tf.nn.sparse_softmax_cross_entropy_with_logits(labels = sy_ac_na, logits = sy_logits_na)\n\n else:\n # YOUR_CODE_HERE\n sy_mean = build_mlp(sy_ob_no, ac_dim, \"policy\", n_layers = n_layers, size = size)\n sy_logstd = tf.Variable(tf.zeros([1, ac_dim], name = 'logstd'))\n sy_std = tf.exp(sy_logstd)\n sy_z_sampled = tf.random_normal(tf.shape(sy_mean))\n sy_sampled_ac = sy_mean + sy_std * sy_z_sampled\n\n sy_z = (sy_ac_na - sy_mean)/sy_std\n sy_logprob_n = - 0.5 * tf.reduce_sum(tf.square(sy_z), axis = 1)\n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Loss Function and Training Operation\n #========================================================================================#\n\n loss = -tf.reduce_mean(tf.multiply(sy_logprob_n, sy_adv_n)) # Loss function that we'll differentiate to get the policy gradient.\n update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n #========================================================================================#\n # ----------SECTION 5----------\n # Optional Baseline\n #========================================================================================#\n\n if nn_baseline:\n baseline_prediction = tf.squeeze(build_mlp(\n sy_ob_no, \n 1, \n \"nn_baseline\",\n n_layers=n_layers,\n size=size))\n # Define placeholders for targets, a loss function and an update op for fitting a \n # neural network baseline. These will be used to fit the neural network baseline. 
\n # YOUR_CODE_HERE\n bl_n = tf.placeholder(shape = [None], name = 'bl_n', dtype = tf.float32)\n bl_loss = tf.nn.l2_loss(baseline_prediction - bl_n)\n baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(bl_loss)\n\n\n #========================================================================================#\n # Tensorflow Engineering: Config, Session, Variable initialization\n #========================================================================================#\n\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) \n\n sess = tf.Session(config=tf_config)\n sess.__enter__() # equivalent to `with sess:`\n tf.global_variables_initializer().run() #pylint: disable=E1101\n\n\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n paths = []\n while True:\n ob = env.reset()\n obs, acs, rewards = [], [], []\n animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.05)\n obs.append(ob)\n ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})\n ac = ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n rewards.append(rew)\n steps += 1\n if done or steps > max_path_length:\n break\n path = {\"observation\" : np.array(obs), \n \"reward\" : np.array(rewards), \n \"action\" : np.array(acs)}\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n if timesteps_this_batch > min_timesteps_per_batch:\n break\n total_timesteps += timesteps_this_batch\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Computing Q-values\n #\n # Your code should construct numpy arrays for Q-values which will be used to compute\n # advantages (which will in turn be fed to the placeholder you defined above). \n #\n # Recall that the expression for the policy gradient PG is\n #\n # PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]\n #\n # where \n #\n # tau=(s_0, a_0, ...) is a trajectory,\n # Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),\n # and b_t is a baseline which may depend on s_t. \n #\n # You will write code for two cases, controlled by the flag 'reward_to_go':\n #\n # Case 1: trajectory-based PG \n #\n # (reward_to_go = False)\n #\n # Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over \n # entire trajectory (regardless of which time step the Q-value should be for). \n #\n # For this case, the policy gradient estimator is\n #\n # E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]\n #\n # where\n #\n # Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.\n #\n # Thus, you should compute\n #\n # Q_t = Ret(tau)\n #\n # Case 2: reward-to-go PG \n #\n # (reward_to_go = True)\n #\n # Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting\n # from time step t. 
Thus, you should compute\n        #\n        #                           Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}\n        #\n        #\n        # Store the Q-values for all timesteps and all trajectories in a variable 'q_n',\n        # like the 'ob_no' and 'ac_na' above.\n        #\n        #====================================================================================#\n\n        # YOUR_CODE_HERE\n        if reward_to_go:\n            q_n = []\n            for path in paths:\n                q = np.zeros(pathlength(path))\n                q[-1] = path['reward'][-1]\n                for i in reversed(range(pathlength(path) - 1)):\n                    q[i] = path['reward'][i] + gamma * q[i+1]\n                q_n.extend(q)\n        else:\n            q_n = []\n            for path in paths:\n                ret_tau = 0\n                for i in range(pathlength(path)):\n                    ret_tau += (gamma ** i) * path['reward'][i]\n                q = np.ones(shape = [pathlength(path)]) * ret_tau\n                q_n.extend(q)\n\n        #====================================================================================#\n        #                           ----------SECTION 5----------\n        # Computing Baselines\n        #====================================================================================#\n\n        if nn_baseline:\n            # If nn_baseline is True, use your neural network to predict reward-to-go\n            # at each timestep for each trajectory, and save the result in a variable 'b_n'\n            # like 'ob_no', 'ac_na', and 'q_n'.\n            #\n            # Hint #bl1: rescale the output from the nn_baseline to match the statistics\n            # (mean and std) of the current or previous batch of Q-values. (Goes with Hint\n            # #bl2 below.)\n\n            b_n = norm(sess.run(baseline_prediction, feed_dict = {sy_ob_no: ob_no}), np.mean(q_n), np.std(q_n))\n\n            # Implementation of GAE. b_n is concatenated across all paths, so\n            # slice it per path rather than indexing with path-local positions.\n            adv_n = []\n            start = 0\n            for path in paths:\n                T = pathlength(path)\n                b_path = b_n[start:start + T]\n                adv = np.zeros(T)\n                adv[-1] = path['reward'][-1] - b_path[-1]\n                for i in reversed(range(T - 1)):\n                    delta = path['reward'][i] + gamma * b_path[i + 1] - b_path[i]\n                    adv[i] = delta + gamma * _lambda * adv[i+1]\n                if not reward_to_go:\n                    adv = np.ones(shape = [T]) * adv[0]\n                adv_n.extend(adv)\n                start += T\n            q_n = adv_n + b_n\n        else:\n            adv_n = q_n.copy()\n\n        #====================================================================================#\n        #                           ----------SECTION 4----------\n        # Advantage Normalization\n        #====================================================================================#\n\n        if normalize_advantages:\n            # On the next line, implement a trick which is known empirically to reduce variance\n            # in policy gradient methods: normalize adv_n to have mean zero and std=1.\n            # YOUR_CODE_HERE\n            adv_n = norm(adv_n, 0, 1)\n\n\n        #====================================================================================#\n        #                           ----------SECTION 5----------\n        # Optimizing Neural Network Baseline\n        #====================================================================================#\n        if nn_baseline:\n            # ----------SECTION 5----------\n            # If a neural network baseline is used, set up the targets and the inputs for the\n            # baseline.\n            #\n            # Fit it to the current batch in order to use for the next iteration. Use the\n            # baseline_update_op you defined earlier.\n            #\n            # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the\n            # targets to have mean zero and std=1. 
(Goes with Hint #bl1 above.)\n\n # YOUR_CODE_HERE\n bl_true = norm(q_n, 0, 1)\n _ = sess.run(baseline_update_op, feed_dict = {bl_n : bl_true, sy_ob_no : ob_no})\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Performing the Policy Update\n #====================================================================================#\n\n # Call the update operation necessary to perform the policy gradient update based on \n # the current batch of rollouts.\n # \n # For debug purposes, you may wish to save the value of the loss function before\n # and after an update, and then log them below. \n\n # YOUR_CODE_HERE\n _, after_loss = sess.run([update_op, loss],feed_dict = {sy_ob_no : ob_no, sy_ac_na : ac_na, sy_adv_n : adv_n})\n print(after_loss.shape)\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.log_tabular(\"After-Loss\", after_loss)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vpg')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--lambda_', type = float, default = 1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--reward_to_go', '-rtg', action='store_true')\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--nn_baseline', '-bl', action='store_true')\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=1)\n parser.add_argument('--size', '-s', type=int, default=32)\n args = parser.parse_args()\n\n if not(os.path.exists('data')):\n os.makedirs('data')\n logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join('data', logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n def train_func():\n train_PG(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n _lambda = args.lambda_,\n min_timesteps_per_batch=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n reward_to_go=args.reward_to_go,\n animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n 
nn_baseline=args.nn_baseline, \n seed=seed,\n n_layers=args.n_layers,\n size=args.size\n )\n # Awkward hacky process runs, because Tensorflow does not like\n # repeatedly calling train_PG in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n p.join()\n \n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"tensorflow.exp",
"tensorflow.multinomial",
"numpy.min",
"numpy.mean",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"numpy.concatenate",
"numpy.max",
"tensorflow.shape",
"tensorflow.ConfigProto",
"tensorflow.variable_scope",
"tensorflow.layers.dense",
"numpy.array",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.multiply",
"numpy.random.seed",
"tensorflow.square"
]
] |
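The reward-to-go loop in `train_PG` above can also be written without an explicit Python loop using `scipy.signal.lfilter`, which the script already imports; both forms compute Q_t = sum_{t'>=t} gamma^(t'-t) * r_{t'}. A small sketch (the helper name is illustrative):

```python
import numpy as np
import scipy.signal

def discounted_rewards_to_go(rewards, gamma):
    # lfilter([1], [1, -gamma], x) runs y[n] = x[n] + gamma * y[n-1];
    # applying it to the reversed rewards gives the backward recursion
    # q[t] = r[t] + gamma * q[t+1].
    return scipy.signal.lfilter([1], [1, -gamma], rewards[::-1])[::-1]

r = np.array([1.0, 1.0, 1.0])
print(discounted_rewards_to_go(r, 0.99))  # [2.9701 1.99   1.    ]
```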
fnx4/Screen-Translate
|
[
"f4564cc21787cbc87e9f9b97c1e23a6572eaf353"
] |
[
"screen_translate/Capture.py"
] |
[
"# Internal\nimport os\nfrom datetime import datetime\n# External\nimport numpy as np\nimport pyautogui\nimport pytesseract\nimport cv2\n\n# Lang Code\nfrom screen_translate.LangCode import *\n# Mbox\nfrom screen_translate.Mbox import Mbox\n\n# Settings to capture all screens\nfrom PIL import ImageGrab\nfrom functools import partial\nImageGrab.grab = partial(ImageGrab.grab, all_screens=True)\n\n# Public methods\nfrom screen_translate.Public import startfile\n\n# Get Path\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimg_captured_path = os.path.join(dir_path, '../img_captured')\n\ndef createPicDirIfGone():\n \"\"\"\n Create the directory if it does not exist\n \"\"\"\n # Will create the dir if not exists\n if not os.path.exists(img_captured_path):\n try:\n os.makedirs(img_captured_path)\n except Exception as e:\n print(\"Error: \" + str(e))\n Mbox(\"Error: \", str(e), 2)\n\ndef captureImg(coords, sourceLang, tesseract_Location, saveImg = False, enhance_WithCv2 = False, grayScale = False, background = None, debugmode = False):\n \"\"\"Capture Image and return text from it\n\n Args:\n coords (int): Coordinates and size of the screen to capture (x,y,w,h)\n sourceLang (string): The Language to be translated\n tesseract_Location (string): Tesseract .exe location\n cached (bool, optional): Cache/Save Image or not. Defaults to False.\n\n Returns:\n status, result: Success or Error, Result\n \"\"\"\n # Language Code\n try:\n langCode = tesseract_Lang[sourceLang]\n except KeyError as e:\n print(\"Error: Key Error\\n\" + str(e))\n Mbox(\"Key Error, On Assigning Language Code.\\n\" + str(e), \"Error: Key Error\", 2)\n return False, \"Error: Key Error\"\n\n is_Success = False\n wordsGet = \"\"\n try:\n # Capture the designated location\n captured = pyautogui.screenshot(region=(coords[0], coords[1], coords[2], coords[3]))\n \n # Set tesseract_Location\n pytesseract.pytesseract.tesseract_cmd = tesseract_Location\n\n # Enhance with cv2 if selected\n if enhance_WithCv2:\n # Convert captured img to cv2 format\n open_cv_image = np.array(captured) \n # Convert RGB to BGR \n open_cv_image = open_cv_image[:, :, ::-1].copy()\n # Convert the image to gray scale\n grayImg = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2GRAY)\n\n # debug\n if debugmode and grayScale: cv2.imshow(\"Grayscale Image\", grayImg)\n\n # Threshtype\n imgType = \"Thresh Image\"\n if background == \"Auto-Detect\":\n is_light = np.mean(open_cv_image) > 127\n imgType += \" (Auto - Light)\" if is_light else \" (Auto - Dark)\"\n print(\">> Image detected as light\" if is_light else \">> Image detected as dark\")\n threshType = cv2.THRESH_BINARY_INV if is_light else cv2.THRESH_BINARY\n else:\n threshType = cv2.THRESH_BINARY_INV if background == \"Light\" else cv2.THRESH_BINARY\n \n # Performing OTSU threshold\n ret, thresh = cv2.threshold(grayImg, 0, 255, cv2.THRESH_OTSU | threshType)\n\n # debug\n if debugmode: cv2.imshow(imgType, thresh)\n\n # Specify structure shape and kernel size. 
\n # Kernel size increases or decreases the area \n # of the rectangle to be detected.\n # A smaller value like (10, 10) will detect \n # each word instead of a sentence.\n rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))\n\n # Applying dilation on the threshold image\n dilation = cv2.dilate(thresh, rectKernel, iterations = 1)\n\n # debug\n if debugmode: cv2.imshow(\"Dilation Image\", dilation)\n\n # Finding contours in the image based on dilation\n contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Create a copy of captured image\n imgCopy = grayImg if grayScale else open_cv_image.copy()\n\n # Looping through the identified contours\n # Then rectangular part is cropped and passed on\n # to pytesseract for extracting text from it\n for cnt in contours[::-1]: # Reverse the array because it actually starts from the bottom\n x, y, w, h = cv2.boundingRect(cnt)\n # Drawing a rectangle on copied image\n if debugmode:\n rect = cv2.rectangle(imgCopy, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow(\"Rectangle drawn on image\", rect)\n\n # Cropping the text block for giving input to OCR\n cropped = imgCopy[y:y + h, x:x + w]\n \n # Apply OCR on the cropped image\n text = pytesseract.image_to_string(cropped, langCode)\n\n # Append the text into wordsarr\n wordsGet += text.strip() + \"\\n\"\n\n if saveImg:\n createPicDirIfGone()\n captured.save(os.path.join(img_captured_path, 'captured_' + datetime.now().strftime('%Y-%m-%d_%H%M%S') + '.png'))\n else:\n if grayScale:\n # Convert captured img to cv2 format\n open_cv_image = np.array(captured) \n # Convert RGB to BGR \n open_cv_image = open_cv_image[:, :, ::-1].copy()\n\n # Convert the image to gray scale\n grayImg = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2GRAY)\n\n if debugmode and grayScale: cv2.imshow(\"Grayscale Image\", grayImg)\n\n # Get the text from the image \n wordsGet = pytesseract.image_to_string(grayImg, langCode)\n\n if saveImg:\n createPicDirIfGone()\n captured.save(os.path.join(img_captured_path, 'captured_' + datetime.now().strftime('%Y-%m-%d_%H%M%S') + '.png'))\n \n is_Success = True\n except Exception as e:\n print(\"Error: \" + str(e))\n wordsGet = str(e)\n if \"is not installed or it's not in your PATH\" in str(e):\n Mbox(\"Error: Tesseract Could not be Found\", \"Invalid path location for tesseract.exe, please change it in the setting!\", 2)\n elif \"Failed loading language\" in str(e):\n Mbox(\"Warning: Failed Loading Language\", \"Language data not found! It could be that the language data is not installed! Please reinstall tesseract or download the language data and put it into Tesseract-OCR\\\\tessdata!\\n\\nThe official version that is used for this program is v5.0.0-alpha.20210811. You can download it from https://github.com/UB-Mannheim/tesseract/wiki or https://digi.bib.uni-mannheim.de/tesseract/\", 1)\n else:\n Mbox(\"Error\", str(e), 2)\n finally:\n return is_Success, wordsGet.strip()\n\ndef captureAll():\n \"\"\"Capture all screens and save the result\"\"\"\n # Capture all screens\n try:\n captured = pyautogui.screenshot()\n createPicDirIfGone()\n captured.save(os.path.join(img_captured_path, 'Monitor(s) Captured View'+ '.png'))\n startfile(dir_path + r\"\\..\\img_captured\\Monitor(s) Captured View.png\")\n except Exception as e:\n print(\"Error: \" + str(e))\n if \"Invalid argument\" in str(e):\n Mbox(\"Error image is still opened\", \"Please close the previous image first!\", 2)\n else:\n Mbox(\"Error\", str(e), 2)"
] |
[
[
"numpy.array",
"numpy.mean"
]
] |
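The light/dark auto-detection and OTSU thresholding inside `captureImg` boil down to a few lines. A standalone sketch (assuming `cv2` and `numpy`; the 127 mean-intensity cutoff and the threshold-type flip are taken directly from the code above, the helper name is illustrative):

```python
import cv2
import numpy as np

def binarize_for_ocr(bgr_image):
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    # mean pixel value above 127 marks a light background, which flips the
    # threshold polarity so text always ends up light-on-dark before OCR
    is_light = np.mean(bgr_image) > 127
    thresh_type = cv2.THRESH_BINARY_INV if is_light else cv2.THRESH_BINARY
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | thresh_type)
    return thresh
```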
zibinpan/DL_project
|
[
"df48eb36dad2f45bfd439cef2d50d729dbf4cee4"
] |
[
"MyProblem/ZDT3.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport geatpy as ea\n\n\nclass ZDT3(ea.Problem): # 继承Problem父类\n def __init__(self):\n name = 'ZDT3' # 初始化name(函数名称,可以随意设置)\n M = 2 # 初始化M(目标维数)\n maxormins = [1] * M # 初始化maxormins(目标最小最大化标记列表,1:最小化该目标;-1:最大化该目标)\n Dim = 30 # 初始化Dim(决策变量维数)\n varTypes = [0] * Dim # 初始化varTypes(决策变量的类型,0:实数;1:整数)\n lb = [0] * Dim # 决策变量下界\n ub = [1] * Dim # 决策变量上界\n lbin = [1] * Dim # 决策变量下边界(0表示不包含该变量的下边界,1表示包含)\n ubin = [1] * Dim # 决策变量上边界(0表示不包含该变量的上边界,1表示包含)\n # 调用父类构造方法完成实例化\n ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)\n\n def aimFunc(self, pop): # 目标函数\n Vars = pop.Phen # 得到决策变量矩阵\n ObjV1 = Vars[:, 0]\n gx = 1 + 9 * np.sum(Vars[:, 1:], 1) / (self.Dim - 1)\n hx = 1 - np.sqrt(np.abs(ObjV1) / gx) - (ObjV1 / gx) * np.sin(10 * np.pi * ObjV1) # 取绝对值是为了避免浮点数精度异常带来的影响\n ObjV2 = gx * hx\n pop.ObjV = np.array([ObjV1, ObjV2]).T # 把结果赋值给ObjV\n\n def calReferObjV(self): # 设定目标数参考值(本问题目标函数参考值设定为理论最优值,即“真实帕累托前沿点”)\n N = 10000 # 生成10000个参考点\n ObjV1 = np.linspace(0, 1, N)\n ObjV2 = 1 - ObjV1 ** 0.5 - ObjV1 * np.sin(10 * np.pi * ObjV1)\n f = np.array([ObjV1, ObjV2]).T\n levels, criLevel = ea.ndsortESS(f, None, 1)\n referenceObjV = f[np.where(levels == 1)[0]]\n return referenceObjV\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.sum",
"numpy.where",
"numpy.abs",
"numpy.linspace"
]
] |
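For reference, the ZDT3 objectives defined in `aimFunc` above can be evaluated without geatpy. A numpy-only sketch (the `zdt3` helper name is illustrative): f1 = x1, g = 1 + 9 * sum(x2..xD) / (D - 1), f2 = g * (1 - sqrt(f1/g) - (f1/g) * sin(10 * pi * f1)).

```python
import numpy as np

def zdt3(x):
    x = np.atleast_2d(x)
    f1 = x[:, 0]
    g = 1 + 9 * np.sum(x[:, 1:], axis=1) / (x.shape[1] - 1)
    h = 1 - np.sqrt(f1 / g) - (f1 / g) * np.sin(10 * np.pi * f1)
    return np.column_stack([f1, g * h])

print(zdt3(np.zeros(30)))  # g == 1 on the Pareto set: [[0. 1.]]
```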
tensorlayer/TensorLayerX
|
[
"4e3e6f13687309dda7787f0b86e35a62bb3adbad"
] |
[
"tensorlayerx/nn/core/common.py"
] |
[
"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport tensorlayerx as tlx\nfrom tensorlayerx.files import utils\nfrom tensorlayerx import logging\nimport numpy as np\nfrom queue import Queue\nfrom tensorlayerx.nn.initializers import *\n\nif tlx.BACKEND == 'mindspore':\n from mindspore.ops.operations import Assign\n from mindspore.nn import Cell\n from mindspore import Tensor\n import mindspore as ms\n\n_act_dict = {\n \"relu\": tlx.ops.ReLU,\n \"relu6\": tlx.ops.ReLU6,\n \"leaky_relu\": tlx.ops.LeakyReLU,\n \"lrelu\": tlx.ops.LeakyReLU,\n \"softplus\": tlx.ops.Softplus,\n \"tanh\": tlx.ops.Tanh,\n \"sigmoid\": tlx.ops.Sigmoid,\n \"softmax\": tlx.ops.Softmax\n}\n\n_initializers_dict = {\n \"ones\": ones(),\n \"zeros\": zeros(),\n \"constant\": constant(value=0.0),\n \"random_uniform\": random_uniform(minval=-1.0, maxval=1.0),\n \"random_normal\": random_normal(mean=0.0, stddev=0.05),\n \"truncated_normal\": truncated_normal(stddev=0.02),\n \"he_normal\": he_normal(),\n \"xavier_uniform\": XavierUniform(),\n \"xavier_normal\": XavierNormal()\n}\n\n\ndef check_parameter(parameter, dim='2d'):\n if dim == '2d':\n if isinstance(parameter, int):\n out = (parameter, parameter)\n else:\n out = parameter\n elif dim == '3d':\n if isinstance(parameter, int):\n out = (parameter, parameter, parameter)\n else:\n out = parameter\n else:\n raise (\"dim must be 2d or 3d.\")\n return out\n\n\ndef str2init(initializer):\n if isinstance(initializer, str):\n if initializer not in _initializers_dict.keys():\n raise Exception(\n \"Unsupported string initialization: {}\".format(initializer),\n \"String initialization supports these methods: {}\".format(_initializers_dict.keys())\n )\n return _initializers_dict[initializer]\n else:\n return initializer\n\n\ndef str2act(act):\n if len(act) > 5 and act[0:5] == \"lrelu\":\n try:\n alpha = float(act[5:])\n return tlx.ops.LeakyReLU(negative_slope=alpha)\n except Exception as e:\n raise Exception(\"{} can not be parsed as a float\".format(act[5:]))\n\n if len(act) > 10 and act[0:10] == \"leaky_relu\":\n try:\n alpha = float(act[10:])\n return tlx.ops.LeakyReLU(negative_slope=alpha)\n except Exception as e:\n raise Exception(\"{} can not be parsed as a float\".format(act[10:]))\n\n if act not in _act_dict.keys():\n raise Exception(\"Unsupported act: {}\".format(act))\n return _act_dict[act]\n\n\ndef processing_act(act):\n # Processing strings as input, activation functions without parameters。\n if isinstance(act, str):\n str_act = str2act(act)\n if act:\n # Processing strings as input, activation functions with parameters。\n if isinstance(act, str) and (len(act) > 5 and act[0:5] == \"lrelu\" or\n len(act) > 10 and act[0:10] == \"leaky_relu\"):\n out_act = str_act\n elif isinstance(act, str):\n out_act = str_act()\n else:\n # Processing classes or functions as input, activation functions without parameters\n try:\n out_act = act()\n # Processing class or function as input, activation function with parameters\n except:\n out_act = act\n else:\n # Processing act is None\n out_act = act\n return out_act\n\n\ndef _save_weights(net, file_path, format=None):\n \"\"\"Input file_path, save model weights into a file of given format.\n Use net.load_weights() to restore.\n\n Parameters\n ----------\n file_path : str\n Filename to which the model weights will be saved.\n format : str or None\n Saved file format.\n Value should be None, 'hdf5', 'npz', 'npz_dict' or 'ckpt'. 
Other format is not supported now.\n 1) If this is set to None, then the postfix of file_path will be used to decide saved format.\n If the postfix is not in ['h5', 'hdf5', 'npz', 'ckpt'], then file will be saved in hdf5 format by default.\n 2) 'hdf5' will save model weights name in a list and each layer has its weights stored in a group of\n the hdf5 file.\n 3) 'npz' will save model weights sequentially into a npz file.\n 4) 'npz_dict' will save model weights along with its name as a dict into a npz file.\n 5) 'ckpt' will save model weights into a tensorflow ckpt file.\n\n Default None.\n\n Examples\n --------\n 1) Save model weights in hdf5 format by default.\n >>> net = vgg16()\n >>> optimizer = tlx.optimizers.Adam(learning_rate=0.001)\n >>> metrics = tlx.metrics.Accuracy()\n >>> model = tlx.model.Model(network=net, loss_fn=tlx.losses.cross_entropy, optimizer=optimizer, metrics=metrics)\n >>> model.save_weights('./model.h5')\n ...\n >>> model.load_weights('./model.h5')\n\n 2) Save model weights in npz/npz_dict format\n >>> model.save_weights('./model.npz')\n >>> model.save_weights('./model.npz', format='npz_dict')\n\n \"\"\"\n\n if tlx.BACKEND != 'torch' and net.all_weights is None or len(net.all_weights) == 0:\n logging.warning(\"Model contains no weights or layers haven't been built, nothing will be saved\")\n return\n\n if format is None:\n postfix = file_path.split('.')[-1]\n if postfix in ['h5', 'hdf5', 'npz', 'ckpt']:\n format = postfix\n else:\n format = 'hdf5'\n\n if format == 'hdf5' or format == 'h5':\n raise NotImplementedError(\"hdf5 load/save is not supported now.\")\n # utils.save_weights_to_hdf5(file_path, net)\n elif format == 'npz':\n utils.save_npz(net.all_weights, file_path)\n elif format == 'npz_dict':\n if tlx.BACKEND == 'torch':\n utils.save_npz_dict(net.named_parameters(), file_path)\n else:\n utils.save_npz_dict(net.all_weights, file_path)\n elif format == 'ckpt':\n # TODO: enable this when tf save ckpt is enabled\n raise NotImplementedError(\"ckpt load/save is not supported now.\")\n else:\n raise ValueError(\n \"Save format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'.\"\n \"Other format is not supported now.\"\n )\n\n\ndef _load_weights(net, file_path, format=None, in_order=True, skip=False):\n \"\"\"Load model weights from a given file, which should be previously saved by net.save_weights().\n\n Parameters\n ----------\n file_path : str\n Filename from which the model weights will be loaded.\n format : str or None\n If not specified (None), the postfix of the file_path will be used to decide its format. If specified,\n value should be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. Other format is not supported now.\n In addition, it should be the same format when you saved the file using net.save_weights().\n Default is None.\n in_order : bool\n Allow loading weights into model in a sequential way or by name. Only useful when 'format' is 'hdf5'.\n If 'in_order' is True, weights from the file will be loaded into model in a sequential way.\n If 'in_order' is False, weights from the file will be loaded into model by matching the name\n with the weights of the model, particularly useful when trying to restore model in eager(graph) mode from\n a weights file which is saved in graph(eager) mode.\n Default is True.\n skip : bool\n Allow skipping weights whose name is mismatched between the file and model. Only useful when 'format' is\n 'hdf5' or 'npz_dict'. 
If 'skip' is True, 'in_order' argument will be ignored and those loaded weights\n whose name is not found in model weights (net.all_weights) will be skipped. If 'skip' is False, error will\n occur when mismatch is found.\n Default is False.\n\n Examples\n --------\n 1) load model from a hdf5 file.\n >>> net = vgg16()\n >>> optimizer = tlx.optimizers.Adam(learning_rate=0.001)\n >>> metrics = tlx.metrics.Accuracy()\n >>> model = tlx.model.Model(network=net, loss_fn=tlx.losses.cross_entropy, optimizer=optimizer, metrics=metrics)\n >>> model.load_weights('./model_graph.h5', in_order=False, skip=True) # load weights by name, skipping mismatch\n >>> model.load_weights('./model_eager.h5') # load sequentially\n\n 2) load model from a npz file\n >>> model.load_weights('./model.npz')\n\n 3) load model from a npz file, which is saved as npz_dict previously\n >>> model.load_weights('./model.npz', format='npz_dict')\n\n Notes\n -------\n 1) 'in_order' is only useful when 'format' is 'hdf5'. If you are trying to load a weights file which is\n saved in a different mode, it is recommended to set 'in_order' be True.\n 2) 'skip' is useful when 'format' is 'hdf5' or 'npz_dict'. If 'skip' is True,\n 'in_order' argument will be ignored.\n\n \"\"\"\n if not os.path.exists(file_path):\n raise FileNotFoundError(\"file {} doesn't exist.\".format(file_path))\n\n if format is None:\n format = file_path.split('.')[-1]\n\n if format == 'hdf5' or format == 'h5':\n raise NotImplementedError(\"hdf5 load/save is not supported now.\")\n # if skip ==True or in_order == False:\n # # load by weights name\n # utils.load_hdf5_to_weights(file_path, net, skip)\n # else:\n # # load in order\n # utils.load_hdf5_to_weights_in_order(file_path, net)\n elif format == 'npz':\n utils.load_and_assign_npz(file_path, net)\n elif format == 'npz_dict':\n utils.load_and_assign_npz_dict(file_path, net, skip)\n elif format == 'ckpt':\n # TODO: enable this when tf save ckpt is enabled\n raise NotImplementedError(\"ckpt load/save is not supported now.\")\n else:\n raise ValueError(\n \"File format must be 'hdf5', 'npz', 'npz_dict' or 'ckpt'. 
\"\n \"Other format is not supported now.\"\n )\n\n\ndef _save_standard_weights_dict(net, file_path):\n # Eliminate parameter naming differences between frameworks.\n if tlx.BACKEND == 'torch':\n save_standard_npz_dict(net.named_parameters(), file_path)\n else:\n save_standard_npz_dict(net.all_weights, file_path)\n\n\ndef encode_list_name(list_name):\n # TensorFlow weights format: conv1.weight:0, conv1.bias:0\n # Paddle weights format: conv1.weight, conv1.bias\n # PyTorch weights format: conv1.W, conv1.W\n # MindSpore weights format: conv1.weights, conv1.bias\n # standard weights format: conv1.weights, conv1.bias\n\n for i in range(len(list_name)):\n if tlx.BACKEND == 'tensorflow':\n list_name[i] = list_name[i][:-2]\n if tlx.BACKEND == 'torch':\n if list_name[i][-1] == 'W' and 'conv' not in list_name[i]:\n list_name[i] = list_name[i][:-2] + str('/weights')\n elif list_name[i][-1] == 'W' and 'conv' in list_name[i]:\n list_name[i] = list_name[i][:-2] + str('/filters')\n elif list_name[i][-1] == 'b':\n list_name[i] = list_name[i][:-2] + str('/biases')\n elif list_name[i].split('.')[-1] in ['beta', 'gamma', 'moving_mean', 'moving_var']:\n pass\n else:\n raise NotImplementedError('This weights cannot be converted.')\n return list_name\n\n\ndef decode_key_name(key_name):\n if tlx.BACKEND == 'tensorflow':\n key_name = key_name + str(':0')\n if tlx.BACKEND == 'torch':\n if key_name.split('/')[-1] in ['weights', 'filters']:\n key_name = key_name[:-8] + str('.W')\n elif key_name.split('/')[-1] == 'biases':\n key_name = key_name[:-7] + str('.b')\n else:\n raise NotImplementedError('This weights cannot be converted.')\n return key_name\n\n\ndef save_standard_npz_dict(save_list=None, name='model.npz'):\n \"\"\"Input parameters and the file name, save parameters as a dictionary into standard npz_dict file.\n\n Use ``tlx.files.load_and_assign_npz_dict()`` to restore.\n\n Parameters\n ----------\n save_list : list of parameters\n A list of parameters (tensor) to be saved.\n name : str\n The name of the `.npz` file.\n\n \"\"\"\n if save_list is None:\n save_list = []\n if tlx.BACKEND != 'torch':\n save_list_names = [tensor.name for tensor in save_list]\n\n if tlx.BACKEND == 'tensorflow':\n save_list_var = utils.tf_variables_to_numpy(save_list)\n elif tlx.BACKEND == 'mindspore':\n save_list_var = utils.ms_variables_to_numpy(save_list)\n elif tlx.BACKEND == 'paddle':\n save_list_var = utils.pd_variables_to_numpy(save_list)\n elif tlx.BACKEND == 'torch':\n save_list_names = []\n save_list_var = []\n for named, values in save_list:\n save_list_names.append(named)\n save_list_var.append(values.cpu().detach().numpy())\n else:\n raise NotImplementedError('Not implemented')\n\n save_list_names = encode_list_name(save_list_names)\n\n save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}\n np.savez(name, **save_var_dict)\n save_list_var = None\n save_var_dict = None\n del save_list_var\n del save_var_dict\n logging.info(\"[*] Model saved in npz_dict %s\" % name)\n\n\ndef _load_standard_weights_dict(net, file_path, skip=False, reshape=False, format='npz_dict'):\n if format == 'npz_dict':\n load_and_assign_standard_npz_dict(net, file_path, skip, reshape)\n elif format == 'npz':\n load_and_assign_standard_npz(file_path, net, reshape)\n\n\ndef load_and_assign_standard_npz_dict(net, file_path, skip=False, reshape=False):\n if not os.path.exists(file_path):\n logging.error(\"file {} doesn't exist.\".format(file_path))\n return False\n\n weights = np.load(file_path, allow_pickle=True)\n if 
len(weights.keys()) != len(set(weights.keys())):\n raise Exception(\"Duplication in model npz_dict %s\" % file_path)\n\n if tlx.BACKEND == 'torch':\n net_weights_name = [n for n, v in net.named_parameters()]\n torch_weights_dict = {n: v for n, v in net.named_parameters()}\n else:\n net_weights_name = [w.name for w in net.all_weights]\n\n for key in weights.keys():\n de_key = decode_key_name(key)\n if de_key not in net_weights_name:\n if skip:\n logging.warning(\"Weights named '%s' not found in network. Skip it.\" % key)\n else:\n raise RuntimeError(\n \"Weights named '%s' not found in network. Hint: set argument skip=Ture \"\n \"if you want to skip redundant or mismatch weights.\" % key\n )\n else:\n if tlx.BACKEND == 'tensorflow':\n reshape_weights = weight_reshape(weights[key], reshape)\n check_reshape(reshape_weights, net.all_weights[net_weights_name.index(de_key)])\n utils.assign_tf_variable(net.all_weights[net_weights_name.index(de_key)], reshape_weights)\n elif tlx.BACKEND == 'mindspore':\n reshape_weights = weight_reshape(weights[key], reshape)\n import mindspore as ms\n assign_param = ms.Tensor(reshape_weights, dtype=ms.float32)\n check_reshape(assign_param, net.all_weights[net_weights_name.index(de_key)])\n utils.assign_ms_variable(net.all_weights[net_weights_name.index(de_key)], assign_param)\n elif tlx.BACKEND == 'paddle':\n reshape_weights = weight_reshape(weights[key], reshape)\n check_reshape(reshape_weights, net.all_weights[net_weights_name.index(de_key)])\n utils.assign_pd_variable(net.all_weights[net_weights_name.index(de_key)], reshape_weights)\n elif tlx.BACKEND == 'torch':\n reshape_weights = weight_reshape(weights[key], reshape)\n check_reshape(reshape_weights, net.all_weights[net_weights_name.index(de_key)])\n utils.assign_th_variable(torch_weights_dict[de_key], reshape_weights)\n else:\n raise NotImplementedError('Not implemented')\n\n logging.info(\"[*] Model restored from npz_dict %s\" % file_path)\n\n\ndef load_and_assign_standard_npz(file_path=None, network=None, reshape=False):\n if network is None:\n raise ValueError(\"network is None.\")\n\n if not os.path.exists(file_path):\n logging.error(\"file {} doesn't exist.\".format(file_path))\n return False\n else:\n weights = utils.load_npz(name=file_path)\n ops = []\n if tlx.BACKEND == 'tensorflow':\n for idx, param in enumerate(weights):\n param = weight_reshape(param, reshape)\n check_reshape(param, network.all_weights[idx])\n ops.append(network.all_weights[idx].assign(param))\n\n elif tlx.BACKEND == 'mindspore':\n\n class Assign_net(Cell):\n\n def __init__(self, y):\n super(Assign_net, self).__init__()\n self.y = y\n\n def construct(self, x):\n Assign()(self.y, x)\n\n for idx, param in enumerate(weights):\n assign_param = Tensor(param, dtype=ms.float32)\n assign_param = weight_reshape(assign_param, reshape)\n check_reshape(assign_param, network.all_weights[idx])\n Assign()(network.all_weights[idx], assign_param)\n\n elif tlx.BACKEND == 'paddle':\n for idx, param in enumerate(weights):\n param = weight_reshape(param, reshape)\n check_reshape(param, network.all_weights[idx])\n utils.assign_pd_variable(network.all_weights[idx], param)\n\n elif tlx.BACKEND == 'torch':\n for idx, param in enumerate(weights):\n param = weight_reshape(param, reshape)\n check_reshape(param, network.all_weights[idx])\n utils.assign_th_variable(network.all_weights[idx], param)\n else:\n raise NotImplementedError(\"This backend is not supported\")\n return ops\n\n logging.info(\"[*] Load {} SUCCESS!\".format(file_path))\n\n\ndef 
check_reshape(weight, shape_weights):\n if len(weight.shape) >= 4 and weight.shape[::-1] == tuple(shape_weights.shape):\n if tlx.BACKEND == 'tensorflow':\n\n raise Warning(\n 'Set reshape to True only when importing weights from MindSpore/PyTorch/PaddlePaddle to TensorFlow.'\n )\n if tlx.BACKEND == 'torch':\n raise Warning('Set reshape to True only when importing weights from TensorFlow to PyTorch.')\n if tlx.BACKEND == 'paddle':\n raise Warning('Set reshape to True only when importing weights from TensorFlow to PaddlePaddle.')\n if tlx.BACKEND == 'mindspore':\n raise Warning('Set reshape to True only when importing weights from TensorFlow to MindSpore.')\n\n\ndef weight_reshape(weight, reshape=False):\n # TODO In this case only 2D convolution is considered. 3D convolution tests need to be supplemented.\n if reshape:\n if len(weight.shape) == 4:\n weight = np.moveaxis(weight, (2, 3), (1, 0))\n if len(weight.shape) == 5:\n weight = np.moveaxis(weight, (3, 4), (1, 0))\n return weight\n\ndef tolist(tensors):\n if isinstance(tensors, list) or isinstance(tensors, tuple):\n ntensors = list()\n for t in tensors:\n ntensors += tolist(t)\n return ntensors\n else:\n return [tensors]\n\ndef construct_graph(inputs, outputs):\n \"\"\"construct computation graph for model using ModuleNode object\"\"\"\n all_layers = []\n node_by_depth = []\n\n input_tensors_list = inputs if isinstance(inputs, list) else inputs\n\n queue_node = Queue()\n # BFS to visit all nodes that should be involved in the computation graph\n output_tensors_list = outputs if isinstance(outputs, list) else [outputs]\n output_nodes = [tensor._info[0] for tensor in output_tensors_list]\n\n visited_node_names = set()\n for out_node in output_nodes:\n if out_node.visited:\n continue\n queue_node.put(out_node)\n\n while not queue_node.empty():\n cur_node = queue_node.get()\n in_nodes = cur_node.in_nodes\n\n for node in in_nodes:\n node.out_nodes.append(cur_node)\n if not node.visited:\n queue_node.put(node)\n node.visited = True\n if node.node_name not in visited_node_names:\n visited_node_names.add(node.node_name)\n # else have multiple layers with the same name\n else:\n raise ValueError(\n 'Layer name \\'%s\\' has already been used by another layer. 
Please change the layer name.'\n % node.layer.name\n )\n\n # construct the computation graph in top-sort order\n cur_depth = [tensor._info[0] for tensor in input_tensors_list]\n next_depth = []\n indegrees = {}\n\n visited_layer_names = []\n while not len(cur_depth) == 0:\n node_by_depth.append(cur_depth)\n for node in cur_depth:\n if node.layer.name not in visited_layer_names:\n all_layers.append(node.layer)\n visited_layer_names.append(node.layer.name)\n for out_node in node.out_nodes:\n if out_node.node_name not in indegrees.keys():\n indegrees[out_node.node_name] = len(out_node.in_nodes)\n indegrees[out_node.node_name] -= 1\n if indegrees[out_node.node_name] == 0:\n next_depth.append(out_node)\n cur_depth = next_depth\n next_depth = []\n return node_by_depth, all_layers\n\n\ndef select_attrs(obj):\n attrs_dict = obj.__dict__\n attrs = {}\n _select_key = ['kernel_size', 'stride', 'act', 'padding', 'data_format', 'concat_dim', 'dilation', 'bias']\n for k in _select_key:\n if k in attrs_dict:\n if k == 'act':\n attrs[k] = attrs_dict[k].__class__.__name__\n else:\n attrs[k] = attrs_dict[k]\n return attrs\n\n\nclass ModuleNode(object):\n \"\"\"\n The class :class:`ModuleNode` class represents a conceptional node for a layer.\n\n ModuleNode is used for building topology and it is actually a light weighted\n wrapper over Layer.\n\n Parameters\n ----------\n layer : tl.layers.Layer\n A tl layer that wants to create a node.\n node_index : int\n Index of this node in layer._nodes.\n in_nodes :a list of ModuleNode\n Father nodes to this node.\n in_tensors : a list of tensors\n Input tensors to this node.\n out_tensors : a list of tensors\n Output tensors to this node.\n in_tensor_idxes : a list of int\n Indexes of each input tensor in its corresponding node's out_tensors.\n\n Methods\n ---------\n __init__()\n Initializing the ModuleNode.\n __call__()\n (1) Forwarding through the layer. (2) Update its input/output tensors.\n \"\"\"\n\n def __init__(self, layer, node_index, in_nodes, in_tensors, out_tensors, in_tensor_idxes, attr):\n self.layer = layer\n self.node_index = node_index\n self.in_nodes = in_nodes\n self.out_nodes = []\n self.in_tensors = in_tensors\n self.out_tensors = out_tensors\n self.node_name = layer.name + \"_node_{}\".format(node_index)\n\n self.in_tensors_idxes = in_tensor_idxes\n self.attr = attr\n self.visited = False\n\n def __call__(self, inputs, **kwargs):\n \"\"\"(1) Forwarding through the layer. (2) Update its input/output tensors.\"\"\"\n outputs = self.layer(inputs, **kwargs)\n self.in_tensors = tolist(inputs)\n self.out_tensors = tolist(outputs)\n return self.out_tensors"
] |
[
[
"numpy.moveaxis",
"numpy.savez",
"numpy.load"
]
] |
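
The entry above round-trips weights as a name-to-array dict through `np.savez` and `np.load`. A minimal numpy-only sketch of that pattern; the parameter names and shapes are invented for illustration:

```python
# Sketch of the npz_dict round trip used by save_standard_npz_dict /
# load_and_assign_standard_npz_dict above (names/shapes are made up).
import numpy as np

weights = {
    "conv1/filters": np.random.randn(3, 3, 3, 16).astype(np.float32),
    "conv1/biases": np.zeros(16, dtype=np.float32),
}

np.savez("model.npz", **weights)   # one archive keyed by parameter name

restored = np.load("model.npz")    # lazy NpzFile mapping names to arrays
for name in restored.files:
    assert restored[name].shape == weights[name].shape
print(sorted(restored.files))
```
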
kencan7749/mmsegmentation
|
[
"e0fe1cb56e5b91f85e33a3ecc3afbeaa31f647e8"
] |
[
"mmseg/datasets/pipelines/multi_transforms.py"
] |
[
"import mmcv\nimport numpy as np\nfrom mmcv.utils import deprecated_api_warning, is_tuple_of\nfrom numpy import random\nfrom .transforms import PhotoMetricDistortion\n\nfrom ..builder import PIPELINES\n\n\[email protected]_module()\nclass MultiNormalize(object):\n    \"\"\"Normalize the concatenated images.\n\n    Added key is \"img_norm_cfg\".\n\n    Args:\n        mean (sequence): Mean values of 3 channels.\n        std (sequence): Std values of 3 channels.\n        to_rgb (bool): Whether to convert the image from BGR to RGB.\n            Default is True.\n    \"\"\"\n\n    def __init__(self, mean, std, to_rgb=True):\n        self.mean = np.array(mean, dtype=np.float32)\n        self.std = np.array(std, dtype=np.float32)\n        self.to_rgb = to_rgb\n\n    def __call__(self, results):\n        \"\"\"Call function to normalize images.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Normalized results, 'img_norm_cfg' key is added into\n                result dict.\n        \"\"\"\n\n        imgs = results['img']\n        img_num = imgs.shape[-1] // 3  # should be 6//3, 9//3, 12//3...\n\n        for i in range(img_num):\n            # Extract one image\n            img = imgs[..., 3*i:3*(i+1)]\n            mean = self.mean[3*i:3*(i+1)]\n            std = self.std[3*i:3*(i+1)]\n            img = mmcv.imnormalize(img, mean, std,\n                                   self.to_rgb)\n            # concat img\n            if i == 0:\n                img_concat = img\n            else:\n                img_concat = np.concatenate([img_concat, img], axis=2)\n        results['img'] = img_concat\n        results['img_norm_cfg'] = dict(\n            mean=self.mean, std=self.std, to_rgb=self.to_rgb\n        )\n        return results\n\n\[email protected]_module()\nclass MultiPhotoMetricDistortion(object):\n    \"\"\"Apply photometric distortion to each image sequentially; every\n    transformation is applied with a probability of 0.5. The position of\n    random contrast is second or second to last.\n\n    1. random brightness\n    2. random contrast (mode 0)\n    3. convert color from BGR to HSV\n    4. random saturation\n    5. random hue\n    6. convert color from HSV to BGR\n    7. random contrast (mode 1)\n\n    Args:\n        brightness_delta (int): delta of brightness.\n        contrast_range (tuple): range of contrast.\n        saturation_range (tuple): range of saturation.\n        hue_delta (int): delta of hue.\n    \"\"\"\n\n    def __init__(self,\n                 brightness_delta=32,\n                 contrast_range=(0.5, 1.5),\n                 saturation_range=(0.5, 1.5),\n                 hue_delta=18):\n\n        self.converter = PhotoMetricDistortion(\n            brightness_delta,\n            contrast_range,\n            saturation_range,\n            hue_delta\n        )\n\n    def __call__(self, results):\n        \"\"\"Call function to perform photometric distortion on images.\n        Args:\n            results (dict): Result dict from loading pipeline.\n        Returns:\n            dict: Result dict with images distorted.\n        \"\"\"\n\n        imgs = results['img']\n        img_num = imgs.shape[-1] // 3  # should be 6//3, 9//3, 12//3...\n\n        for i in range(img_num):\n            # Extract one image\n            img = imgs[..., 3*i:3*(i+1)]\n            img = self.converter({'img': img})['img']\n            # concat img\n            if i == 0:\n                img_concat = img\n            else:\n                img_concat = np.concatenate([img_concat, img], axis=2)\n        results['img'] = img_concat\n\n        return results\n"
] |
[
[
"numpy.concatenate",
"numpy.array"
]
] |
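
`MultiNormalize` above walks a channel-stacked image three channels at a time and normalizes each slice with `mmcv.imnormalize`. A dependency-free sketch of the same slicing, with plain `(img - mean) / std` standing in for the mmcv call; sizes and statistics are invented:

```python
import numpy as np

imgs = np.random.rand(64, 64, 6).astype(np.float32)   # two RGB images stacked
mean = np.array([123.7, 116.3, 103.5] * 2, dtype=np.float32)
std = np.array([58.4, 57.1, 57.4] * 2, dtype=np.float32)

chunks = []
for i in range(imgs.shape[-1] // 3):                   # 6//3, 9//3, 12//3...
    sl = slice(3 * i, 3 * (i + 1))
    chunks.append((imgs[..., sl] - mean[sl]) / std[sl])

out = np.concatenate(chunks, axis=2)                   # same layout as input
assert out.shape == imgs.shape
```
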
JOTELLECHEA/neural_networks
|
[
"bced447e4f6d265fd23f5d9f0fcf0c21ce56d930"
] |
[
"hyperparameterRecord.py"
] |
[
"# Written By : Jonathan O. Tellechea\n# Adviser : Mike Hance, PhD\n# Research : Using a neural network to maximize the significance of tttHH production.\n# Description: This script keeps a record of trained NNs; it keeps track of time, AUC, length of NN, etc.\n# The filename for the saved weights is displayed to be used in loadNN.py to create plots.\n# Reference : http://cdsweb.cern.ch/record/2220969/files/ATL-PHYS-PUB-2016-023.pdf\n###########################################################################################################################\n# Imported packages.\nimport pandas as pd\nimport numpy as np\nimport argparse\n\n\n#### work in progress to automate script\n# parser = argparse.ArgumentParser(description=\"Plot 1D plots of sig/bac\")\n# parser.add_argument(\"--file\", type=str, help=\"Use '--file=' followed by a *.h5 file\")\n# args = parser.parse_args()\n# file = \"data/\" + str(args.file)\n# file = 'hyperparameterRecord_v3.csv'\n# file = 'fiveLayerDropout_2.csv'\n# file = 'fiveLayerDropout_3.csv'\n# modelParam = ['NN Archi.','#Br.','LR','Batch','AUC','Avg.P','Y/M/D @ H:M','ConfusionMatrix [TP FP] [FN TN]','Score','Max Signif','nsig','nbkg']\n# modelParam = ['NN Archi.','#Br.','LR','Batch','AUC','Avg.P','Run Time','ConfusionMatrix [TP FP] [FN TN]','Score','Max Signif','nsig','nbkg']\n\n# file = 'csv/0_10_jets.csv'\n# file = 'csv/jet.csv'\n# file = 'csv/tenlayers.csv'\n# file = 'csv/highlevelvariables.csv'\nfile = 'csv/aug.csv'\nmodelParam = [\n    'FileName',\n    \"ConfusionMatrix [TP FP] [FN TN]\",\n    \"Run Time\",\n    \"AUC\",\n    \"Avg.P\",\n    \"Score\",\n    \"Max Signif\",\n    \"nsig\",\n    \"nbkg\"\n    ]\ndata = pd.read_csv(file)\nprint(data.to_string(justify='right', columns=modelParam, header=True, index=True))"
] |
[
[
"pandas.read_csv"
]
] |
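
The record-keeping script above boils down to `pd.read_csv` plus `DataFrame.to_string` with an explicit column order. A self-contained sketch with an inline frame standing in for `csv/aug.csv` (values are invented):

```python
import pandas as pd

df = pd.DataFrame({
    "FileName": ["weights_2020_08_01.h5"],   # hypothetical record
    "AUC": [0.91],
    "Max Signif": [3.2],
})
print(df.to_string(justify="right",
                   columns=["FileName", "AUC", "Max Signif"],
                   header=True, index=True))
```
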
MachineLearningIsEasy/python_lesson_18
|
[
"454591e27eab15a80ddfc96294a6a55f4b090765"
] |
[
"postgresql_docker.py"
] |
[
"import psycopg2\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndf = pd.read_csv('wine.csv')\n\n\nconn_string = \"host='localhost' dbname='postgres' user='postgres'\"\n\nconn = psycopg2.connect(conn_string)\n\ncursor = conn.cursor()\n\nengine = create_engine('postgresql://postgres@localhost:5432/postgres')\n\ndf.to_sql('wine_test', engine)\n\ndf_pg = pd.read_sql_query('select * from wine_test',con=engine)\n\nprint(df_pg.head())\nprint(pd.read_sql_query('select count(*) from wine_test',con=engine))"
] |
[
[
"pandas.read_csv",
"pandas.read_sql_query"
]
] |
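
The script above pushes a CSV into Postgres with `DataFrame.to_sql` and reads it back with `pd.read_sql_query`. The same round trip sketched against an in-memory SQLite engine, so it runs without a database server (the columns stand in for `wine.csv`):

```python
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")   # no server needed
df = pd.DataFrame({"alcohol": [13.2, 12.8], "quality": [6, 5]})

df.to_sql("wine_test", engine, index=False)    # DataFrame -> SQL table
print(pd.read_sql_query("select * from wine_test", con=engine))
print(pd.read_sql_query("select count(*) as n from wine_test", con=engine))
```
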
ganyipeng/tr
|
[
"af854ffbeac79f9aa4fb66b8bd39b9e171c68c17"
] |
[
"ocr_util/ImageCommonProcess.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Time 2021/8/26 13:10\n\nfrom PIL import Image\nimport cv2\nimport numpy\n\nMAX_SIZE = 1600  # the image size should preferably not exceed 1600\n\n\ndef resize_image_from_img_path_to_array(img_path):\n    pil_img = resize_image_from_img_path_to_pil_img(img_path)\n    return pil_img_to_array(pil_img)\n\n\ndef resize_image_from_img_path_to_pil_img(img_path):\n    img_pil = Image.open(img_path)\n    return resize_pil_img(img_pil)\n\n\ndef resize_pil_img(img_pil):\n    scale = max(img_pil.height / MAX_SIZE, img_pil.width / MAX_SIZE)\n    new_width = int(img_pil.width / scale + 0.5)\n    new_height = int(img_pil.height / scale + 0.5)\n    img_pil = img_pil.resize((new_width, new_height), Image.ANTIALIAS)\n    return img_pil\n\n\ndef pil_img_to_array(pil_img):\n    return numpy.asarray(pil_img)\n\n\ndef resize_gray_bin_and_return_pil_img(img_path):\n    bin_array = resize_gray_bin_and_return_array(img_path)\n    bin_pil_img = Image.fromarray(bin_array.astype('uint8'))\n    return bin_pil_img\n\n\ndef resize_gray_bin_and_return_array(img_path):\n    array_of_img = resize_image_from_img_path_to_array(img_path)\n    gray_array = get_gray_array(array_of_img)\n    bin_array = get_bin_array(gray_array)\n    return bin_array\n\n\ndef gray_bin_and_return_pil_img(img_path):\n    bin_array = gray_bin_and_return_array(img_path)\n    bin_pil_img = Image.fromarray(bin_array.astype('uint8'))\n    return bin_pil_img\n\n\ndef gray_bin_and_return_array(img_path):\n    gray_array = get_gray_array(numpy.asarray(Image.open(img_path)))\n    bin_array = get_bin_array(gray_array)\n    return bin_array\n\n\n# gray process: ndarray -> ndarray\ndef get_gray_array(init_ndarray: 'numpy.ndarray'):\n    gray_ndarray = cv2.cvtColor(init_ndarray, cv2.COLOR_BGR2GRAY)\n    return gray_ndarray\n\n\n# binary process: ndarray -> ndarray\ndef get_bin_array(img: 'numpy.ndarray'):\n    ret, bin_image = cv2.threshold(img, 180, 255, cv2.THRESH_BINARY_INV)\n    return bin_image\n\n\n# show im('numpy.ndarray')\ndef show_image_by_ndarray(im_ndarray):\n    im = Image.fromarray(im_ndarray.astype('uint8'))\n    im.show()\n\n# img = numpy.asarray(im)  # init image\n# im_gray = gray_img(img)  # gray image\n# # show(im_gray)\n# im_bin = bin_img(im_gray)  # bin image\n\n\ndef test():\n    pil_img = resize_gray_bin_and_return_pil_img('bazirou.png')\n    pil_img.show()\n\n\nif __name__ == '__main__':\n    test()"
] |
[
[
"numpy.asarray"
]
] |
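
The OCR helpers above chain three steps: scale the longest side down to `MAX_SIZE`, convert to grayscale, then inverse-binarize at threshold 180. The same pipeline sketched with OpenCV on a synthetic array, so no input file is needed:

```python
import cv2
import numpy as np

MAX_SIZE = 1600
img = (np.random.rand(2000, 1000, 3) * 255).astype(np.uint8)

scale = max(img.shape[0] / MAX_SIZE, img.shape[1] / MAX_SIZE)
new_w = int(img.shape[1] / scale + 0.5)
new_h = int(img.shape[0] / scale + 0.5)
resized = cv2.resize(img, (new_w, new_h))     # cv2 takes (width, height)

gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY_INV)
print(resized.shape, binary.dtype, int(binary.max()))
```
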
dou3516/xView2_baseline
|
[
"f1a15c36aaa8f2d5f952b45dd3eddfe98cfde1d2"
] |
[
"utils/mask_polygons_class.py"
] |
[
"#####################################################################################################################################################################\n# xView2 #\n# Copyright 2019 Carnegie Mellon University. #\n# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO #\n# WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, # \n# EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, # \n# TRADEMARK, OR COPYRIGHT INFRINGEMENT. #\n# Released under a MIT (SEI)-style license, please see LICENSE.md or contact [email protected] for full terms. #\n# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use #\n# and distribution. #\n# This Software includes and/or makes use of the following Third-Party Software subject to its own license: #\n# 1. SpaceNet (https://github.com/motokimura/spacenet_building_detection/blob/master/LICENSE) Copyright 2017 Motoki Kimura. #\n# DM19-0988 #\n#####################################################################################################################################################################\n\n\nimport json\nfrom os import path, walk, makedirs\nfrom sys import exit, stderr\n\nfrom cv2 import fillPoly, imwrite\nimport numpy as np\nfrom shapely import wkt\nfrom shapely.geometry import mapping, Polygon\nfrom skimage.io import imread\nfrom tqdm import tqdm\nimport imantics \n\n# This removes the massive amount of scikit warnings of \"low contrast images\"\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\nfrom collections import defaultdict\ndamage_intensity_encoding = defaultdict(lambda: 0)\ndamage_intensity_encoding['destroyed'] = 4\ndamage_intensity_encoding['major-damage'] = 3\ndamage_intensity_encoding['minor-damage'] = 2\ndamage_intensity_encoding['no-damage'] = 1\n\n\ndef get_dimensions(file_path):\n \"\"\"\n :param file_path: The path of the file \n :return: returns (width,height,channels)\n \"\"\"\n # Open the image we are going to mask\n pil_img = imread(file_path)\n img = np.array(pil_img)\n w, h, c = img.shape\n return (w, h, c)\n\n\ndef mask_polygons_separately(size, shapes):\n \"\"\"\n :param size: A tuple of the (width,height,channels)\n :param shapes: A list of points in the polygon from get_feature_info\n :returns: a dict of masked polygons with the shapes filled in from cv2.fillPoly\n \"\"\"\n # For each WKT polygon, read the WKT format and fill the polygon as an image\n masked_polys = {}\n\n for u in shapes:\n sh = shapes[u]\n mask_img = np.zeros(size, np.uint8)\n i = fillPoly(mask_img, [sh], (255, 255, 255))\n masked_polys[u] = i\n\n return masked_polys\n\ndef mask_polygons_together(size, shapes, classids):\n \"\"\"\n :param size: A tuple of the (width,height,channels)\n :param shapes: A list of points in the polygon from get_feature_info\n :returns: A numpy array with the polygons filled 255s where there's a building and 0 where not \n \"\"\"\n # For each WKT polygon, read the WKT format and fill the polygon as an image\n num = len(shapes)\n mask_img = np.zeros((size[0], size[1], num), np.uint8)\n\n for i, u in enumerate(shapes):\n # blank = np.zeros(size, np.uint8)\n # poly 
= shapes[u]\n # fillPoly(blank, [poly], (1, 1, 1))\n blank = np.zeros(size[:2], np.uint8)\n poly = shapes[u]\n fillPoly(blank, [poly], classids[i])\n\n mask_img[:, :, i] = blank\n # mask_img += blank\n mask_img_max = np.max(mask_img, axis=-1)\n # Here we are taking the overlap (+=) and squashing it back to 0\n # mask_img[mask_img > 1] = 0\n\n # Finally we are taking all 1s and making it pure white (255)\n # mask_img[mask_img == 1] = 255\n\n return mask_img_max\n\ndef mask_polygons_together_with_border(size, shapes, border):\n \"\"\"\n :param size: A tuple of the (width,height,channels)\n :param shapes: A list of points in the polygon from get_feature_info\n :returns: a dict of masked polygons with the shapes filled in from cv2.fillPoly\n \"\"\"\n\n # For each WKT polygon, read the WKT format and fill the polygon as an image\n mask_img = np.zeros(size, np.uint8)\n\n for u in shapes:\n blank = np.zeros(size, np.uint8)\n # Each polygon stored in shapes is a np.ndarray\n poly = shapes[u]\n \n # Creating a shapely polygon object out of the numpy array \n polygon = Polygon(poly)\n\n # Getting the center points from the polygon and the polygon points\n (poly_center_x, poly_center_y) = polygon.centroid.coords[0]\n polygon_points = polygon.exterior.coords\n\n # Setting a new polygon with each X,Y manipulated based off the center point\n shrunk_polygon = []\n for (x,y) in polygon_points:\n if x < poly_center_x:\n x += border\n elif x > poly_center_x:\n x -= border\n\n if y < poly_center_y:\n y += border\n elif y > poly_center_y:\n y -= border\n\n shrunk_polygon.append([x,y])\n \n # Transforming the polygon back to a np.ndarray\n ns_poly = np.array(shrunk_polygon, np.int32)\n \n # Filling the shrunken polygon to add a border between close polygons\n fillPoly(blank, [ns_poly], (1, 1, 1))\n mask_img += blank\n \n mask_img[mask_img > 1] = 0\n mask_img[mask_img == 1] = 255\n return mask_img\n\ndef save_masks(masks, output_path, mask_file_name):\n \"\"\"\n :param masks: dictionary of UID:masked polygons from mask_polygons_separately()\n :param output_path: path to save the masks\n :param mask_file_name: the file name the masks should have \n \"\"\"\n # For each filled polygon, write out a separate file, increasing the name\n for m in masks:\n final_out = path.join(output_path,\n mask_file_name + '_{}.png'.format(m))\n imwrite(final_out, masks[m])\n\ndef save_one_mask(masks, output_path, mask_file_name):\n \"\"\"\n :param masks: list of masked polygons from the mask_polygons_separately function \n :param output_path: path to save the masks\n :param mask_file_name: the file name the masks should have \n \"\"\"\n # For each filled polygon, write the mask shape out to the file per image\n mask_file_name = path.join(output_path, mask_file_name + '.png')\n imwrite(mask_file_name, masks)\n \n\ndef read_json(json_path):\n \"\"\"\n :param json_path: path to load json from\n :returns: a python dictionary of json features\n \"\"\"\n annotations = json.load(open(json_path))\n return annotations\n\n\ndef get_feature_info(feature):\n \"\"\"\n :param feature: a python dictionary of json labels\n :returns: a list mapping of polygons contained in the image \n \"\"\"\n # Getting each polygon points from the json file and adding it to a dictionary of uid:polygons\n props = {}\n classids = []\n\n for feat in feature['features']['xy']:\n feat_shape = wkt.loads(feat['wkt'])\n coords = list(mapping(feat_shape)['coordinates'][0])\n props[feat['properties']['uid']] = (np.array(coords, np.int32))\n try:\n damage_type = 
feat['properties']['subtype']\n except: # pre-disaster damage is default no-damage\n damage_type = \"no-damage\"\n classids.append(damage_intensity_encoding[damage_type])\n\n return props, classids\n\n\ndef save_blank_mask(chip_size, output_path, mask_file_name):\n mask_file_name = path.join(output_path, mask_file_name + '.png')\n mask = np.zeros((chip_size[0], chip_size[1]), np.uint8)\n imwrite(mask_file_name, mask)\n\n\ndef mask_chips(json_path, images_directory, output_directory, single_file, border):\n \"\"\"\n :param json_path: path to find multiple json files for the chips\n :param images_directory: path to the directory containing the images to be masked\n :param output_directory: path to the directory where masks are to be saved\n :param single_file: a boolean value to see if masks should be saved a single file or multiple\n \"\"\"\n # For each feature in the json we will create a separate mask\n # Getting all files in the directory provided for jsons\n jsons = [j for j in next(walk(json_path))[2] if '_post' in j] # '_pre'\n\n # After removing non-json items in dir (if any)\n for j in tqdm([j for j in jsons if j.endswith('json')],\n unit='poly',\n leave=False):\n # Our chips start off in life as PNGs\n chip_image_id = path.splitext(j)[0] + '.png'\n mask_file = path.splitext(j)[0]\n\n # Loading the per chip json\n j_full_path = path.join(json_path, j)\n chip_json = read_json(j_full_path)\n\n # Getting the full chip path, and loading the size dimensions\n chip_file = path.join(images_directory, chip_image_id)\n chip_size = get_dimensions(chip_file)\n\n # Reading in the polygons from the json file\n polys, classids = get_feature_info(chip_json)\n\n # Getting a list of the polygons and saving masks as separate or single image files\n if len(polys) > 0:\n if single_file:\n if border > 0:\n masked_polys = mask_polygons_together_with_border(chip_size, polys, border)\n else:\n masked_polys = mask_polygons_together(chip_size, polys, classids)\n save_one_mask(masked_polys, output_directory, mask_file)\n else:\n masked_polys = mask_polygons_separately(chip_size, polys)\n save_masks(masked_polys, output_directory, mask_file)\n else:\n save_blank_mask(chip_size, output_directory, mask_file)\n\nif __name__ == \"__main__\":\n import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description=\n \"\"\"mask_polygons.py: Takes in xBD dataset and masks polygons in the image\\n\\n\n WARNING: This could lead to hundreds of output images per input\\n\"\"\")\n\n parser.add_argument('--input',\n required=True,\n metavar=\"/path/to/xBD/\",\n help='Path to parent dataset directory \"xBD\"')\n parser.add_argument('--single-file', \n action='store_true',\n help='use to save all masked polygon instances to a single file rather than one polygon per mask file')\n parser.add_argument('--border',\n default=0,\n type=int,\n metavar=\"positive integer for pixel border (e.g. 
1)\",\n help='Positive integer used to shrink the polygon by')\n\n args = parser.parse_args()\n\n # Getting the list of the disaster types under the xBD directory\n disasters = next(walk(args.input))[1]\n\n for disaster in tqdm(disasters, desc='Masking', unit='disaster'):\n # Create the full path to the images, labels, and mask output directories\n image_dir = path.join(args.input, disaster, 'images')\n json_dir = path.join(args.input, disaster, 'labels')\n output_dir = path.join(args.input, disaster, 'masks')\n\n if not path.isdir(image_dir):\n print(\n \"Error, could not find image files in {}.\\n\\n\"\n .format(image_dir),\n file=stderr)\n exit(2)\n\n if not path.isdir(json_dir):\n print(\n \"Error, could not find labels in {}.\\n\\n\"\n .format(json_dir),\n file=stderr)\n exit(3)\n\n if not path.isdir(output_dir):\n makedirs(output_dir)\n\n mask_chips(json_dir, image_dir, output_dir, args.single_file, args.border)\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] |
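
`mask_polygons_together` above rasterizes each building polygon into its own channel with `cv2.fillPoly`, filled with its damage-class id, then squashes the stack with a per-pixel max. A toy reconstruction with one hand-made triangle:

```python
import numpy as np
from cv2 import fillPoly

size = (128, 128)
polys = [np.array([[10, 10], [100, 20], [40, 90]], np.int32)]  # one triangle
class_ids = [3]                # e.g. 'major-damage' in the encoding above

layers = np.zeros((size[0], size[1], len(polys)), np.uint8)
for i, poly in enumerate(polys):
    blank = np.zeros(size, np.uint8)
    fillPoly(blank, [poly], class_ids[i])
    layers[:, :, i] = blank

mask = np.max(layers, axis=-1)  # overlaps resolve to the larger class id
print(int(mask.max()))          # 3
```
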
terenceylchow124/Meme-MultiModal
|
[
"114ff75e18685476aba2c67720124a348bc00dde"
] |
[
"utils/util_train.py"
] |
[
"from torchvision import models as modelsummary\nfrom .util import *\nimport torch\nfrom torch import nn\nimport numpy as np\nimport time\nimport sys\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1\"\n\nactivation = {}\ndef get_activation(name1, name2):\n def hook(model, input, output):\n activation[name1] = output.last_hidden_state.detach()\n activation[name2] = output.pooler_output.detach()\n return hook\n\ndef train(train_loader, hyp_params, model, bert, tokenizer, feature_extractor, optimizer, criterion, epoch):\n epoch_loss = 0\n model.train()\n num_batches = hyp_params.n_train // hyp_params.batch_size\n proc_loss, proc_size = 0, 0\n total_loss = 0.0\n losses = []\n results = []\n truths = []\n n_examples = hyp_params.n_train\n start_time = time.time()\n\n for i_batch, data_batch in enumerate(train_loader):\n\n input_ids = data_batch[\"input_ids\"]\n targets = data_batch[\"label\"]\n images = data_batch['image']\n attention_mask = data_batch['attention_mask']\n\n model.zero_grad()\n\n if hyp_params.use_cuda:\n with torch.cuda.device(0):\n input_ids = input_ids.cuda()\n attention_mask = attention_mask.cuda()\n targets = targets.cuda()\n images = images.cuda()\n\n if images.size()[0] != input_ids.size()[0]:\n continue\n\n feature_images = feature_extractor.features(images)\n feature_images = feature_extractor.avgpool(feature_images)\n feature_images = torch.flatten(feature_images, 1)\n feature_images = feature_extractor.classifier[0](feature_images)\n\n bert.bert.register_forward_hook(get_activation('last', 'pool'))\n outputs = bert(input_ids, attention_mask)\n\n outputs = model(\n last_hidden=activation['last'],\n pooled_output=activation['pool'],\n feature_images=feature_images\n )\n\n if hyp_params.dataset == 'memotion':\n _, preds = torch.max(outputs, dim=1)\n elif hyp_params.dataset == 'reddit':\n _, preds = torch.max(outputs, dim=1)\n else:\n preds = outputs\n\n preds_round = (preds > 0.5).float()\n loss = criterion(outputs, targets)\n losses.append(loss.item())\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), hyp_params.clip)\n optimizer.step()\n\n total_loss += loss.item() * hyp_params.batch_size\n results.append(preds)\n truths.append(targets)\n\n proc_loss += loss * hyp_params.batch_size\n proc_size += hyp_params.batch_size\n if i_batch % hyp_params.log_interval == 0 and i_batch > 0:\n train_acc, train_f1, train_f1_macro, train_precision, train_recall = metrics(preds_round, targets)\n avg_loss = proc_loss / proc_size\n elapsed_time = time.time() - start_time\n msg = 'Epoch {:2d} | Batch {:3d}/{:3d} | Time/Batch(ms) {:5.2f} | Train Loss {:5.4f} | Acc {:5.4f} | micro-score {:5.4f} | macro-score {:5.4f} | precision {:5.4f} | recall {:5.4f}'.format(epoch, i_batch, num_batches, elapsed_time * 1000 / hyp_params.log_interval, avg_loss, train_acc, train_f1, train_f1_macro, train_precision, train_recall)\n print(msg)\n write_log(msg)\n proc_loss, proc_size = 0, 0\n start_time = time.time()\n\n avg_loss = total_loss / hyp_params.n_train\n results = torch.cat(results)\n truths = torch.cat(truths)\n return results, truths, avg_loss\n\ndef evaluate(valid_loader, hyp_params, model, bert, tokenizer, feature_extractor, criterion, train=False, train_loader=None):\n model.eval()\n loader = train_loader if train else valid_loader\n total_loss = 0.0\n\n results = []\n truths = []\n correct_predictions = 0\n\n with torch.no_grad():\n for i_batch, data_batch in enumerate(loader):\n input_ids = data_batch[\"input_ids\"]\n 
targets = data_batch[\"label\"]\n images = data_batch['image']\n attention_mask = data_batch['attention_mask']\n\n if hyp_params.use_cuda:\n with torch.cuda.device(0):\n input_ids = input_ids.cuda()\n attention_mask = attention_mask.cuda()\n targets = targets.cuda()\n images = images.cuda()\n\n if images.size()[0] != input_ids.size()[0]:\n continue\n\n with torch.no_grad():\n feature_images = feature_extractor.features(images)\n feature_images = feature_extractor.avgpool(feature_images)\n feature_images = torch.flatten(feature_images, 1)\n feature_images = feature_extractor.classifier[0](feature_images)\n \n bert.bert.register_forward_hook(get_activation('last', 'pool'))\n outputs = bert(input_ids, attention_mask)\n\n outputs = model(\n last_hidden=activation['last'],\n pooled_output=activation['pool'],\n feature_images=feature_images\n )\n\n if hyp_params.dataset == 'memotion':\n _, preds = torch.max(outputs, dim=1)\n elif hyp_params.dataset == 'reddit':\n _, preds = torch.max(outputs, dim=1)\n else:\n preds = outputs\n\n total_loss += criterion(outputs, targets).item() * hyp_params.batch_size\n correct_predictions += torch.sum(preds == targets)\n\n # Collect the results into dictionary\n results.append(preds)\n truths.append(targets)\n\n avg_loss = total_loss / (hyp_params.n_train if train else hyp_params.n_valid)\n\n results = torch.cat(results)\n truths = torch.cat(truths)\n return results, truths, avg_loss\n"
] |
[
[
"torch.cat",
"torch.max",
"torch.no_grad",
"torch.cuda.device",
"torch.flatten",
"torch.sum"
]
] |
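
The training code above pulls BERT's intermediate outputs with `register_forward_hook`; note that it re-registers the hook on every batch, so hooks accumulate. A minimal sketch of the mechanism on a toy model, registered once and removed explicitly:

```python
import torch
from torch import nn

activation = {}
def hook(module, inputs, output):
    activation["feat"] = output.detach()       # stash the intermediate output

model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
handle = model[0].register_forward_hook(hook)  # register once, outside any loop

_ = model(torch.randn(5, 8))
print(activation["feat"].shape)                # torch.Size([5, 4])
handle.remove()                                # avoid stacking duplicate hooks
```
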
PedrofRodenas/anaglypher
|
[
"02edcb819382f8223f23224f6b590c7e1954948c"
] |
[
"anagliph.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 22 22:03:31 2018\n\n@author: PedrofRodenas\n\"\"\"\n\n# imports\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport utils\nimport hough\nimport matplotlib.patches as mpatches\n\n\ndef plot_CutPoints(img, cutP, VanishP):\n \n \n fig, ax = plt.subplots()\n ax.imshow(img)\n \n ax.axis('off')\n red_patch = mpatches.Patch(color='firebrick', label='Cut Points')\n blue_patch = mpatches.Patch(color='blue', label='Vanishing Point')\n \n for p in cutP:\n \n ax.plot(p[1],p[0], '+', linewidth=5, color='firebrick')\n \n # Plot intersection point in blue\n ax.plot(VanishP[1],VanishP[0], '+', linewidth=7, color='blue')\n ax.legend(handles=[red_patch, blue_patch])\n \n \n\n# xcale, yscale: image scale for less time computation\n# cannymin, cannymax: pixel values range to apply canny filter\n# nlines: number of lines to detect\n# threshold: minimum value in H space to be considered lines\n# nhood_size: minimum space between lines to be considered sigle lines. \ndef ConvertImageto3D(image, xscale, yscale, cannymin=100, cannymax=200,\n nlines=4,threshold=0.7, nhood_size=80):\n \n shapes = cv2.resize(image, (0,0), fx=xscale, fy=yscale)\n \n M, N, D = shapes.shape\n \n imagen = np.copy(shapes)\n \n shapes_grayscale = cv2.cvtColor(shapes, cv2.COLOR_RGB2GRAY)\n \n # blur image (this will help clean up noise for Canny Edge Detection)\n # see Chapter 2.0 for Guassian Blur or check OpenCV documentation\n shapes_blurred = cv2.GaussianBlur(shapes_grayscale, (5, 5), 1.5)\n\n # find Canny Edges and show resulting image\n canny_edges = cv2.Canny(shapes_blurred, cannymin, cannymax)\n plt.imshow(canny_edges, cmap='gray')\n plt.title(\"Canny Edges\")\n\n # run hough_lines_accumulator on the shapes canny_edges image\n \n H, rhos, thetas = hough.hough_lines_acc(canny_edges)\n indicies, H = hough.hough_peaks(H, nlines,threshold, nhood_size) # find peaks\n hough.plot_hough_acc(H) # plot hough space, brighter spots have higher votes\n hough.hough_lines_draw(shapes, indicies, rhos, thetas)\n \n plt.figure()\n plt.imshow(shapes)\n plt.title('Lines Detected')\n plt.axis('off')\n \n \n x, y = utils.hough_lines_coords(indicies, rhos, thetas)\n \n coefficients = utils.get_coefficients(x,y)\n \n # Get the intersection points\n CutPoints = utils.get_intersection(coefficients, M, N)\n \n \n # Selection of intersection point that is closer to everyone else\n VanishP = utils.less_distancePoint(CutPoints)\n \n # Plot cut points and vanishing point\n plot_CutPoints(shapes, CutPoints, VanishP)\n \n # Memory reservation for DephMap\n DepthMap = np.zeros((M,N))\n DepthMapL = np.zeros((M,N))\n DepthMapR = np.zeros((M,N))\n\n xvan = VanishP[0]\n yvan = VanishP[1]\n\n # DepthMap Synthesis at vertical axis\n for i in range(M):\n \n DepthMap[i,:] = (255/(M-xvan))*(i-xvan)\n \n # Convert to 0 negatives values\n DepthMap[DepthMap < 0] = 0\n \n # DepthMap Synthesis at horizontal axis\n for i in range(yvan):\n \n DepthMapL[xvan:,i] = -(255/yvan)*(i-yvan)\n \n for i in range(yvan,N):\n \n DepthMapR[xvan:,i] = (255/(N-yvan))*(i-yvan)\n \n DepthMapH = DepthMapL + DepthMapR\n \n \n # Maximum displacement of image\n Md = int(N / 95)\n\n # Parallax Matrix Vertical Shift\n parallax = (Md*((DepthMap/255))).astype(int)\n\n # Parallax Matrix Vertical Shift\n parallaxh = (Md*((DepthMapH/255))).astype(int)\n \n # Copy third channel (Red)\n imgR = imagen[:,:,2]\n \n # Image border extension\n img = cv2.copyMakeBorder(imagen,0,0,Md,Md,\n cv2.BORDER_CONSTANT,value=0)\n\n 
pp = (parallax + parallaxh)\n \n # Channel displacement\n for i in range(M):\n \n for j in range(N):\n \n img[i,j+pp[i,j],2] = imgR[i,j]\n\n\n Image3D = img[:,Md:-Md,:]\n \n return Image3D\n\n\n\n"
] |
[
[
"numpy.zeros",
"numpy.copy",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.patches.Patch",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
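
The anaglyph builder above synthesizes a depth map as linear ramps away from the vanishing point, then converts depth into an integer per-pixel parallax shift. The row-wise loop vectorized with numpy broadcasting (toy sizes and vanishing point):

```python
import numpy as np

M, N = 120, 480
xvan = 40                                     # assumed vanishing-point row

rows = np.arange(M, dtype=np.float32)
ramp = (255.0 / (M - xvan)) * (rows - xvan)   # same formula as the loop above
ramp = np.clip(ramp, 0, None)                 # negatives -> 0
depth = np.repeat(ramp[:, None], N, axis=1)   # broadcast across columns

Md = int(N / 95)                              # max displacement, as above
parallax = (Md * (depth / 255.0)).astype(int)
print(parallax.min(), parallax.max())         # 0 up to Md - 1
```
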
irvingzhang0512/mmpose
|
[
"17557522ce3e41f830973079c5b4321935c41439"
] |
[
"mmpose/datasets/datasets/top_down/topdown_posetrack18_dataset.py"
] |
[
"import os\nimport os.path as osp\nimport warnings\nfrom collections import OrderedDict, defaultdict\n\nimport json_tricks as json\nimport numpy as np\nfrom poseval import eval_helpers\nfrom poseval.evaluateAP import evaluateAP\nfrom xtcocotools.coco import COCO\n\nfrom ....core.post_processing import oks_nms, soft_oks_nms\nfrom ...registry import DATASETS\nfrom .topdown_coco_dataset import TopDownCocoDataset\n\n\[email protected]_module()\nclass TopDownPoseTrack18Dataset(TopDownCocoDataset):\n \"\"\"PoseTrack18 dataset for top-down pose estimation.\n\n `Posetrack: A benchmark for human pose estimation and tracking' CVPR'2018\n More details can be found in the `paper\n <https://arxiv.org/abs/1710.10000>`_ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n PoseTrack2018 keypoint indexes::\n 0: 'nose',\n 1: 'head_bottom',\n 2: 'head_top',\n 3: 'left_ear',\n 4: 'right_ear',\n 5: 'left_shoulder',\n 6: 'right_shoulder',\n 7: 'left_elbow',\n 8: 'right_elbow',\n 9: 'left_wrist',\n 10: 'right_wrist',\n 11: 'left_hip',\n 12: 'right_hip',\n 13: 'left_knee',\n 14: 'right_knee',\n 15: 'left_ankle',\n 16: 'right_ankle'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n super(TopDownCocoDataset, self).__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n self.use_gt_bbox = data_cfg['use_gt_bbox']\n self.bbox_file = data_cfg['bbox_file']\n self.det_bbox_thr = data_cfg.get('det_bbox_thr', 0.0)\n if 'image_thr' in data_cfg:\n warnings.warn(\n 'image_thr is deprecated, '\n 'please use det_bbox_thr instead', DeprecationWarning)\n self.det_bbox_thr = data_cfg['image_thr']\n self.use_nms = data_cfg.get('use_nms', True)\n self.soft_nms = data_cfg['soft_nms']\n self.nms_thr = data_cfg['nms_thr']\n self.oks_thr = data_cfg['oks_thr']\n self.vis_thr = data_cfg['vis_thr']\n\n self.ann_info['flip_pairs'] = [[3, 4], [5, 6], [7, 8], [9, 10],\n [11, 12], [13, 14], [15, 16]]\n\n self.ann_info['upper_body_ids'] = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n self.ann_info['lower_body_ids'] = (11, 12, 13, 14, 15, 16)\n\n self.ann_info['use_different_joint_weights'] = False\n self.ann_info['joint_weights'] = np.array(\n [\n 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2,\n 1.2, 1.5, 1.5\n ],\n dtype=np.float32).reshape((self.ann_info['num_joints'], 1))\n\n self.sigmas = np.array([\n .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,\n .87, .87, .89, .89\n ]) / 10.0\n\n self.coco = COCO(ann_file)\n\n cats = [\n cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())\n ]\n self.classes = ['__background__'] + cats\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:])\n self.img_ids = self.coco.getImgIds()\n self.num_images = len(self.img_ids)\n self.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)\n self.dataset_name = 'posetrack18'\n\n self.db = 
self._get_db()\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):\n \"\"\"Evaluate coco keypoint results. The pose prediction results will be\n saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n num_keypoints: K\n\n Args:\n outputs (list(preds, boxes, image_paths))\n :preds (np.ndarray[1,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[1,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['val/010016_mpii_test\n /000024.jpg']\n :heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n :bbox_id (list(int))\n res_folder (str): Path of directory to save the results.\n metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['mAP']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n pred_folder = osp.join(res_folder, 'preds')\n os.makedirs(pred_folder, exist_ok=True)\n gt_folder = osp.join(\n osp.dirname(self.annotations_path),\n osp.splitext(self.annotations_path.split('_')[-1])[0])\n\n kpts = defaultdict(list)\n\n for preds, boxes, image_paths, _, bbox_ids in outputs:\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n kpts[image_id].append({\n 'keypoints': preds[i],\n 'center': boxes[i][0:2],\n 'scale': boxes[i][2:4],\n 'area': boxes[i][4],\n 'score': boxes[i][5],\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n # rescoring and oks nms\n num_joints = self.ann_info['num_joints']\n vis_thr = self.vis_thr\n oks_thr = self.oks_thr\n valid_kpts = defaultdict(list)\n for image_id in kpts.keys():\n img_kpts = kpts[image_id]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > vis_thr:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.use_nms:\n nms = soft_oks_nms if self.soft_nms else oks_nms\n keep = nms(list(img_kpts), oks_thr, sigmas=self.sigmas)\n valid_kpts[image_id].append(\n [img_kpts[_keep] for _keep in keep])\n else:\n valid_kpts[image_id].append(img_kpts)\n\n self._write_posetrack18_keypoint_results(valid_kpts, gt_folder,\n pred_folder)\n\n info_str = self._do_python_keypoint_eval(gt_folder, pred_folder)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n @staticmethod\n def _write_posetrack18_keypoint_results(keypoint_results, gt_folder,\n pred_folder):\n \"\"\"Write results into a json file.\n\n Args:\n keypoint_results (dict): keypoint results organized by image_id.\n gt_folder (str): Path of directory for official gt files.\n pred_folder (str): Path of directory to save the results.\n \"\"\"\n categories = []\n\n cat = {}\n cat['supercategory'] = 'person'\n cat['id'] = 1\n cat['name'] = 'person'\n cat['keypoints'] = [\n 'nose', 'head_bottom', 'head_top', 'left_ear', 'right_ear',\n 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',\n 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',\n 'right_knee', 'left_ankle', 
'right_ankle'\n ]\n cat['skeleton'] = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13],\n [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10],\n [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5],\n [4, 6], [5, 7]]\n categories.append(cat)\n\n json_files = [\n pos for pos in os.listdir(gt_folder) if pos.endswith('.json')\n ]\n for json_file in json_files:\n\n with open(osp.join(gt_folder, json_file), 'r') as f:\n gt = json.load(f)\n\n annotations = []\n images = []\n\n for image in gt['images']:\n im = {}\n im['id'] = image['id']\n im['file_name'] = image['file_name']\n images.append(im)\n\n img_kpts = keypoint_results[im['id']]\n\n if len(img_kpts) == 0:\n continue\n for track_id, img_kpt in enumerate(img_kpts[0]):\n ann = {}\n ann['image_id'] = img_kpt['image_id']\n ann['keypoints'] = np.array(\n img_kpt['keypoints']).reshape(-1).tolist()\n ann['scores'] = np.array(ann['keypoints']).reshape(\n [-1, 3])[:, 2].tolist()\n ann['score'] = float(img_kpt['score'])\n ann['track_id'] = track_id\n annotations.append(ann)\n\n info = {}\n info['images'] = images\n info['categories'] = categories\n info['annotations'] = annotations\n\n with open(osp.join(pred_folder, json_file), 'w') as f:\n json.dump(info, f, sort_keys=True, indent=4)\n\n def _do_python_keypoint_eval(self, gt_folder, pred_folder):\n \"\"\"Keypoint evaluation using poseval.\"\"\"\n\n argv = ['', gt_folder + '/', pred_folder + '/']\n\n print('Loading data')\n gtFramesAll, prFramesAll = eval_helpers.load_data_dir(argv)\n\n print('# gt frames :', len(gtFramesAll))\n print('# pred frames:', len(prFramesAll))\n\n # evaluate per-frame multi-person pose estimation (AP)\n # compute AP\n print('Evaluation of per-frame multi-person pose estimation')\n apAll, _, _ = evaluateAP(gtFramesAll, prFramesAll, None, False, False)\n\n # print AP\n print('Average Precision (AP) metric:')\n eval_helpers.printTable(apAll)\n\n stats = eval_helpers.getCum(apAll)\n\n stats_names = [\n 'Head AP', 'Shou AP', 'Elb AP', 'Wri AP', 'Hip AP', 'Knee AP',\n 'Ankl AP', 'Total AP'\n ]\n\n info_str = list(zip(stats_names, stats))\n\n return info_str\n"
] |
[
[
"numpy.array"
]
] |
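
Before NMS, the PoseTrack evaluator above rescores each detection: keypoint confidences above `vis_thr` are averaged and multiplied by the box score. That arithmetic in isolation (numbers are invented):

```python
import numpy as np

keypoints = np.array([[10, 12, 0.9],          # (x, y, score) per joint
                      [14, 18, 0.2],
                      [20, 25, 0.7]])
box_score, vis_thr = 0.8, 0.3

visible = keypoints[:, 2] > vis_thr
kpt_score = keypoints[visible, 2].mean() if visible.any() else 0.0
print(round(kpt_score * box_score, 3))        # (0.9 + 0.7) / 2 * 0.8 = 0.64
```
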
lee15253/edl_bk
|
[
"6777f5803138e6a64dabb096fe18a495728aabe3"
] |
[
"agents/maze_agents/ant_maze/off_policy_hierarchical.py"
] |
[
"# Copyright (c) 2019, salesforce.com, inc.\n# All rights reserved.\n# SPDX-License-Identifier: MIT\n# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT\n\nimport torch\nimport numpy as np\nfrom torch.distributions import Normal\nfrom base.actors.base import BaseHierarchicalActor\nfrom base.learners.distance import BaseDistanceLearner, BaseSiblingRivalryLearner\nfrom base.learners.her import BaseHERLearner\nfrom agents.maze_agents.modules import Policy, Critic, StochasticPolicy, Value\nfrom agents.maze_agents.ant_maze.env import Env\n\n\nclass Agent(BaseHierarchicalActor):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n assert self.noise is not None\n assert self.epsilon is not None\n\n def _make_modules(self, policy):\n self.policy = policy\n\n @property\n def rollout(self):\n states = torch.stack([e['state'] for e in self.episode] + [self.episode[-1]['next_state']]).data.numpy()\n xs = states[:, 0]\n ys = states[:, 1]\n return [xs, ys]\n\n def step(self, do_eval=False):\n s = self.env.state\n g = self.env.goal\n a = self.policy(s.view(1, -1), g.view(1, -1)).view(-1)\n\n if not do_eval:\n if np.random.rand() < self.epsilon:\n a = np.random.uniform(\n low=-self.policy.a_range,\n high=self.policy.a_range,\n size=(self.policy.action_size,)\n )\n a = torch.from_numpy(a.astype(np.float32))\n else:\n z = Normal(torch.zeros_like(a), torch.ones_like(a) * self.noise * self.policy.a_range)\n a = a + z.sample()\n a = torch.clamp(a, -self.policy.a_range, self.policy.a_range)\n\n self.lo_rollout(goal_hi=a + self.env.achieved, do_eval=do_eval)\n\n complete = float(self.env.is_success) * torch.ones(1)\n terminal = float(self.env.is_done) * torch.ones(1)\n s_next = self.env.state\n r = -1 * torch.ones(1)\n\n self.episode.append({\n 'state': s,\n 'goal': g,\n 'achieved': self.env.achieved,\n 'action': a,\n 'next_state': s_next,\n 'terminal': terminal.view([]),\n 'complete': complete.view([]),\n 'reward': r.view([]),\n })\n\nclass DistanceLearner(BaseDistanceLearner):\n AGENT_TYPE = 'HierarchicalDistance'\n def __init__(self, *args,\n hi_skip=10, entropy_lambda_lo=0.02, n_lo_epochs=1,\n **kwargs):\n self._hierarchical_agent_kwargs = dict(\n hi_skip=hi_skip, entropy_lambda=entropy_lambda_lo, n_lo_epochs=n_lo_epochs\n )\n super().__init__(*args, **kwargs)\n\n self._lo_parameters = self.agent._lo_parameters\n\n def create_env(self):\n return Env(**self.env_params)\n\n def _make_agent_modules(self):\n self.policy = Policy(self._dummy_env, 128, a_range=5, action_size=2)\n self.p_target = Policy(self._dummy_env, 128, a_range=5, action_size=2)\n self.p_target.load_state_dict(self.policy.state_dict())\n\n self.q_module = Critic(self._dummy_env, 128, a_range=5, action_size=2)\n self.q_target = Critic(self._dummy_env, 128, a_range=5, action_size=2)\n self.q_target.load_state_dict(self.q_module.state_dict())\n\n self.policy_lo = StochasticPolicy(self._dummy_env, 256, goal_size=2)\n self.v_module_lo = Value(self._dummy_env, 256, goal_size=2, use_antigoal=False)\n\n def _make_agent(self):\n return Agent(env=self.create_env(), policy_lo=self.policy_lo, value_lo=self.v_module_lo,\n noise=self.noise, epsilon=self.epsilon,\n policy=self.policy, **self._hierarchical_agent_kwargs)\n\n def soft_update(self):\n module_pairs = [\n dict(source=self.q_module, target=self.q_target),\n dict(source=self.policy, target=self.p_target),\n ]\n for pair in module_pairs:\n for p, p_targ in zip(pair['source'].parameters(), pair['target'].parameters()):\n p_targ.data *= 
self.polyak\n p_targ.data += (1 - self.polyak) * p.data\n\n def get_next_qs(self, batch):\n next_policy_actions = self.p_target(batch['next_state'], batch['goal'])\n return self.q_target(batch['next_state'], next_policy_actions, batch['goal'], batch.get('antigoal', None))\n\n def get_action_qs(self, batch):\n return self.q_module(batch['state'], batch['action'], batch['goal'], batch.get('antigoal', None))\n\n def get_policy_loss_and_actions(self, batch):\n policy_actions = self.policy(batch['state'], batch['goal'])\n p_losses = -self.q_target.q_no_grad(batch['state'], policy_actions, batch['goal'], batch.get('antigoal', None))\n p_loss = p_losses.mean()\n return p_loss, policy_actions\n\nclass SiblingRivalryLearner(BaseSiblingRivalryLearner, DistanceLearner):\n AGENT_TYPE = 'HierarchicalSiblingRivalry'\n\n def _make_agent_modules(self):\n self.policy = Policy(self._dummy_env, 128, a_range=5, action_size=2)\n self.p_target = Policy(self._dummy_env, 128, a_range=5, action_size=2)\n self.p_target.load_state_dict(self.policy.state_dict())\n\n self.q_module = Critic(self._dummy_env, 128, a_range=5, action_size=2, use_antigoal=self.use_antigoal)\n self.q_target = Critic(self._dummy_env, 128, a_range=5, action_size=2, use_antigoal=self.use_antigoal)\n self.q_target.load_state_dict(self.q_module.state_dict())\n\n self.policy_lo = StochasticPolicy(self._dummy_env, 256, goal_size=2)\n self.v_module_lo = Value(self._dummy_env, 256, goal_size=2, use_antigoal=False)\n\nclass HERLearner(BaseHERLearner, DistanceLearner):\n AGENT_TYPE = 'HierarchicalHER'\n pass"
] |
[
[
"numpy.random.rand",
"torch.stack",
"torch.clamp",
"torch.ones",
"numpy.random.uniform",
"torch.ones_like",
"torch.zeros_like"
]
] |
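
`Agent.step` above explores with a two-part rule: with probability epsilon resample the action uniformly, otherwise add Gaussian noise scaled by the action range and clamp. That rule in isolation (constants are invented):

```python
import numpy as np
import torch
from torch.distributions import Normal

a_range, noise, epsilon = 5.0, 0.1, 0.2
a = torch.tensor([1.0, -2.0])                 # stand-in for the policy output

if np.random.rand() < epsilon:                # uniform exploration
    a = torch.from_numpy(
        np.random.uniform(-a_range, a_range, size=tuple(a.shape)).astype(np.float32))
else:                                         # Gaussian jitter, then clamp
    z = Normal(torch.zeros_like(a), torch.ones_like(a) * noise * a_range)
    a = torch.clamp(a + z.sample(), -a_range, a_range)
print(a)
```
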
huobanlqs/faster-rcnn-win-tf2
|
[
"63b5a61b3801f505a12f42b49529be8a5c337740"
] |
[
"lib/config/config.py"
] |
[
"import os\nimport os.path as osp\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nFLAGS = tf.app.flags.FLAGS\nFLAGS2 = {}\n\n######################\n# General Parameters #\n######################\nFLAGS2[\"pixel_means\"] = np.array([[[102.9801, 115.9465, 122.7717]]])\ntf.app.flags.DEFINE_integer('rng_seed', 3, \"Tensorflow seed for reproducibility\")\n\n######################\n# Network Parameters #\n######################\ntf.app.flags.DEFINE_string('network', \"vgg16\", \"The network to be used as backbone\")\n\n#######################\n# Training Parameters #\n#######################\ntf.app.flags.DEFINE_float('weight_decay', 0.0005, \"Weight decay, for regularization\")\ntf.app.flags.DEFINE_float('learning_rate', 0.001, \"Learning rate\")\ntf.app.flags.DEFINE_float('momentum', 0.9, \"Momentum\")\ntf.app.flags.DEFINE_float('gamma', 0.1, \"Factor for reducing the learning rate\")\n\ntf.app.flags.DEFINE_integer('batch_size', 256, \"Network batch size during training\")\ntf.app.flags.DEFINE_integer('max_iters', 40000, \"Max iteration\")\ntf.app.flags.DEFINE_integer('step_size', 30000, \"Step size for reducing the learning rate, currently only support one step\")\ntf.app.flags.DEFINE_integer('display', 10, \"Iteration intervals for showing the loss during training, on command line interface\")\n\ntf.app.flags.DEFINE_string('initializer', \"truncated\", \"Network initialization parameters\")\ntf.app.flags.DEFINE_string('pretrained_model', \"./data/imagenet_weights/vgg16.ckpt\", \"Pretrained network weights\")\n\ntf.app.flags.DEFINE_boolean('bias_decay', False, \"Whether to have weight decay on bias as well\")\ntf.app.flags.DEFINE_boolean('double_bias', True, \"Whether to double the learning rate for bias\")\ntf.app.flags.DEFINE_boolean('use_all_gt', True, \"Whether to use all ground truth bounding boxes for training, \"\n \"For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''\")\ntf.app.flags.DEFINE_integer('max_size', 1000, \"Max pixel size of the longest side of a scaled input image\")\ntf.app.flags.DEFINE_integer('test_max_size', 1000, \"Max pixel size of the longest side of a scaled input image\")\ntf.app.flags.DEFINE_integer('ims_per_batch', 1, \"Images to use per minibatch\")\ntf.app.flags.DEFINE_integer('snapshot_iterations', 5000, \"Iteration to take snapshot\")\n\nFLAGS2[\"scales\"] = (600,)\nFLAGS2[\"test_scales\"] = (600,)\n\n######################\n# Testing Parameters #\n######################\ntf.app.flags.DEFINE_string('test_mode', \"top\", \"Test mode for bbox proposal\") # nms, top\n\n##################\n# RPN Parameters #\n##################\ntf.app.flags.DEFINE_float('rpn_negative_overlap', 0.3, \"IOU < thresh: negative example\")\ntf.app.flags.DEFINE_float('rpn_positive_overlap', 0.7, \"IOU >= thresh: positive example\")\ntf.app.flags.DEFINE_float('rpn_fg_fraction', 0.5, \"Max number of foreground examples\")\ntf.app.flags.DEFINE_float('rpn_train_nms_thresh', 0.7, \"NMS threshold used on RPN proposals\")\ntf.app.flags.DEFINE_float('rpn_test_nms_thresh', 0.7, \"NMS threshold used on RPN proposals\")\n\ntf.app.flags.DEFINE_integer('rpn_train_pre_nms_top_n', 12000, \"Number of top scoring boxes to keep before apply NMS to RPN proposals\")\ntf.app.flags.DEFINE_integer('rpn_train_post_nms_top_n', 2000, \"Number of top scoring boxes to keep before apply NMS to RPN proposals\")\ntf.app.flags.DEFINE_integer('rpn_test_pre_nms_top_n', 6000, \"Number of top scoring boxes to keep before apply NMS to RPN 
proposals\")\ntf.app.flags.DEFINE_integer('rpn_test_post_nms_top_n', 300, \"Number of top scoring boxes to keep before apply NMS to RPN proposals\")\ntf.app.flags.DEFINE_integer('rpn_batchsize', 256, \"Total number of examples\")\ntf.app.flags.DEFINE_integer('rpn_positive_weight', -1,\n 'Give the positive RPN examples weight of p * 1 / {num positives} and give negatives a weight of (1 - p).'\n 'Set to -1.0 to use uniform example weighting')\ntf.app.flags.DEFINE_integer('rpn_top_n', 300, \"Only useful when TEST.MODE is 'top', specifies the number of top proposals to select\")\n\ntf.app.flags.DEFINE_boolean('rpn_clobber_positives', False, \"If an anchor satisfied by positive and negative conditions set to negative\")\n\n#######################\n# Proposal Parameters #\n#######################\ntf.app.flags.DEFINE_float('proposal_fg_fraction', 0.25, \"Fraction of minibatch that is labeled foreground (i.e. class > 0)\")\ntf.app.flags.DEFINE_boolean('proposal_use_gt', False, \"Whether to add ground truth boxes to the pool when sampling regions\")\n\n###########################\n# Bounding Box Parameters #\n###########################\ntf.app.flags.DEFINE_float('roi_fg_threshold', 0.5, \"Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\")\ntf.app.flags.DEFINE_float('roi_bg_threshold_high', 0.5, \"Overlap threshold for a ROI to be considered background (class = 0 if overlap in [LO, HI))\")\ntf.app.flags.DEFINE_float('roi_bg_threshold_low', 0.1, \"Overlap threshold for a ROI to be considered background (class = 0 if overlap in [LO, HI))\")\n\ntf.app.flags.DEFINE_boolean('bbox_normalize_targets_precomputed', True, \"# Normalize the targets using 'precomputed' (or made up) means and stdevs (BBOX_NORMALIZE_TARGETS must also be True)\")\ntf.app.flags.DEFINE_boolean('test_bbox_reg', True, \"Test using bounding-box regressors\")\n\nFLAGS2[\"bbox_inside_weights\"] = (1.0, 1.0, 1.0, 1.0)\nFLAGS2[\"bbox_normalize_means\"] = (0.0, 0.0, 0.0, 0.0)\nFLAGS2[\"bbox_normalize_stds\"] = (0.1, 0.1, 0.1, 0.1)\n\n##################\n# ROI Parameters #\n##################\ntf.app.flags.DEFINE_integer('roi_pooling_size', 7, \"Size of the pooled region after RoI pooling\")\n\n######################\n# Dataset Parameters #\n######################\nFLAGS2[\"root_dir\"] = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\nFLAGS2[\"data_dir\"] = osp.abspath(osp.join(FLAGS2[\"root_dir\"], 'data'))\n\n\ndef get_output_dir(imdb, weights_filename):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(FLAGS2[\"root_dir\"], FLAGS2[\"root_dir\"] , 'default', imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n"
] |
[
[
"numpy.array",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.app.flags.DEFINE_string",
"tensorflow.compat.v1.app.flags.DEFINE_float",
"tensorflow.compat.v1.app.flags.DEFINE_boolean",
"tensorflow.compat.v1.app.flags.DEFINE_integer"
]
] |
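
The config module above keeps two stores: `tf.app.flags` for scalar hyperparameters and a plain `FLAGS2` dict for values the flag system does not model, such as numpy arrays and tuples. A sketch of the dict half, runnable without TensorFlow:

```python
import numpy as np

FLAGS2 = {}
FLAGS2["pixel_means"] = np.array([[[102.9801, 115.9465, 122.7717]]])  # BGR means
FLAGS2["scales"] = (600,)
FLAGS2["bbox_inside_weights"] = (1.0, 1.0, 1.0, 1.0)

print(FLAGS2["pixel_means"].shape, FLAGS2["scales"])
```
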
etrejo23/web-scraping-challenge
|
[
"c1b3833dbf33043a9b54fb9b57b29b9f7a413321"
] |
[
"app/mars.py"
] |
[
"from splinter import Browser\nfrom bs4 import BeautifulSoup as BS\nimport pandas as pd\nimport requests\nimport selenium\nfrom selenium import webdriver \nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport time\n# from pymongo import MongoClient\n\n# def scrape_info():\n\n# driver = webdriver.Firefox(executable_path= \"geckodriver.exe\")\n# driver.maximize_window()\n\n\n\nfeature=\"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\nfacts=\"https://space-facts.com/mars/\"\n\n\n# Connect to MongoDB\n#connecting to the local host\n# client = MongoClient(\"mongodb://localhost:27017\")\n# #creating database\n# db = client['mars_database']\n\ndef mars_news():\n driver = webdriver.Firefox()\n news=\"https://mars.nasa.gov/news/\"\n driver.get(news)\n\n # Retrieve the latest news title\n title_elements = driver.find_elements_by_class_name(\"content_title\")\n title_htmls = [title_element.get_attribute(\"innerHTML\") for title_element in title_elements]\n title_html = title_htmls[1]\n news_soup = BS(title_html, 'lxml')\n news_title = news_soup.get_text()\n #news_title\n\n teaser_element = driver.find_element_by_class_name(\"article_teaser_body\")\n news_p = teaser_element.get_attribute(\"innerHTML\")\n #news_p\n # news_page_id=browser.find_by_id('site_body')\n news = {\n \"title\":news_title,\n \"summary\":news_p,\n }\n\n # timeout = 20\n # Find an ID on the page and wait before executing anything until found: \n # try:\n # WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, \"site_body\")))\n # except TimeoutException:\n driver.quit()\n return news\n\n\ndef featured_image_url():\n driver = webdriver.Firefox()\n driver.implicitly_wait(10)\n driver.get(feature)\n\n # timeout = 20\n # #Find an ID on the page and wait before executing anything until found: \n # try:\n # WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, \"full_image\")))\n # except TimeoutException:\n # # driver.quit()\n # print(\"Atribute Error\")\n #LOOK FOR AN ID FIRST, THEN FIND THE ATTRIBUTE\n feature_image = driver.find_element_by_id(\"full_image\")\n # featured_image = [image_element.get_attribute(\"data-link\") for image_element in feature_image]\n # image_link=feature+featured_image[0]\n # driver.get(image_link)\n feature_image.click()\n\n featured_image_next = driver.find_element_by_partial_link_text(\"more info\") #.get_attribute('href')\n featured_image_next.click()\n time.sleep(1)\n # featured_image_next = [image_element.get_attribute(\"data-link\") for image_element in feature_image_next]\n # image_link_forrealz=feature+featured_image_next[0]\n # image_link_forrealz\n # driver.get(image_link_forrealz)\n\n # featured_soup = BeautifulSoup(html, 'html.parser')\n\n #main_image= featured_soup.find('img', class_='main_image')\n # try:\n featured_image_url = driver.find_element_by_tag_name('img[class=\"main_image\"]').get_attribute('src')\n # except AttributeError:\n # print(\"atrribute error\")\n # driver.implicitly_wait(10)\n # large_image = driver.find_elements_by_class_name(\"main_image\")\n\n # feat_images=[]\n # for feat_image in large_image:\n # feat_images.append(feat_image.get_attribute(\"src\"))\n\n # featured_image_url=feat_images[0]\n # featured_image_url = feat_image\n # main_image\n driver.quit()\n return 
featured_image_url\n\n\ndef mars_table():\n tables = pd.read_html(facts)\n mars_table=tables[0].to_html()\n return mars_table\n\ndef hemispheres_images():\n driver = webdriver.Firefox()\n hemisphere=\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n driver.get(hemisphere)\n driver.implicitly_wait(10)\n hemisphere_image_urls = []\n hemisphere_links = driver.find_elements_by_tag_name('h3')\n for link in range(len(hemisphere_links)):\n hemisphere_data = {}\n # starting point\n hemisphere_links = driver.find_elements_by_tag_name('h3')\n # print(hemisphere_links[link].text) \n title = hemisphere_links[link].text\n hemisphere_data['title'] = title\n # navigate to the h3 tag and get href attribute for the sample image\n # append the href url to the dictionary\n hemisphere_links[link].click()\n sample = driver.find_element_by_link_text('Sample').get_attribute('href')\n #sample.click()\n hemisphere_data['img_url'] = sample\n # append the image title and img url to the empty list\n hemisphere_image_urls.append(hemisphere_data)\n \n # return to previous page to iterate through remaining images\n driver.back()\n #hemisphere_image_urls\n driver.quit()\n return hemisphere_image_urls\n\n \n\n# A dictionary that will become a MongoDB document\ndef scrape_all():\n mars_dictionary = {\n 'title': mars_news(),\n #'paragraph': news_p(),\n 'image': featured_image_url(),\n 'table': mars_table(),\n 'urls': hemispheres_images(),\n }\n return mars_dictionary\n \n # # Declare the collection\n # mars_collection = db.mars_scrape\n # # Insert document into collection\n # mars_collection.insert_one(mars_dictionary)\n\n\n"
] |
[
[
"pandas.read_html"
]
] |
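The only API tagged for this sample is pandas.read_html, which mars_table() uses to parse every <table> on the facts page into a list of DataFrames. A minimal sketch of that call in isolation, assuming the page is reachable and an HTML parser such as lxml is installed:

    import pandas as pd

    tables = pd.read_html("https://space-facts.com/mars/")  # one DataFrame per <table> on the page
    mars_table = tables[0].to_html()                        # re-serialize the first table as HTML
    print(mars_table[:80])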
chanind/hanzi-font-deconstructor
|
[
"ce41b2a5c0e66b8a83d6c734678446d1d32a18b7"
] |
[
"hanzi_font_deconstructor/model/pix2pix/Generator.py"
] |
[
"# from https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/GANs/Pix2Pix\n\nimport torch\nimport torch.nn as nn\n\n\nclass Block(nn.Module):\n def __init__(\n self, in_channels, out_channels, down=True, act=\"relu\", use_dropout=False\n ):\n super(Block, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(\n in_channels, out_channels, 4, 2, 1, bias=False, padding_mode=\"reflect\"\n )\n if down\n else nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU() if act == \"relu\" else nn.LeakyReLU(0.2),\n )\n\n self.use_dropout = use_dropout\n self.dropout = nn.Dropout(0.5)\n self.down = down\n\n def forward(self, x):\n x = self.conv(x)\n return self.dropout(x) if self.use_dropout else x\n\n\nclass Generator(nn.Module):\n def __init__(self, in_channels=3, features=64):\n super().__init__()\n self.initial_down = nn.Sequential(\n nn.Conv2d(in_channels, features, 4, 2, 1, padding_mode=\"reflect\"),\n nn.LeakyReLU(0.2),\n )\n self.down1 = Block(\n features, features * 2, down=True, act=\"leaky\", use_dropout=False\n )\n self.down2 = Block(\n features * 2, features * 4, down=True, act=\"leaky\", use_dropout=False\n )\n self.down3 = Block(\n features * 4, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down4 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down5 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down6 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.bottleneck = nn.Sequential(\n nn.Conv2d(features * 8, features * 8, 4, 2, 1), nn.ReLU()\n )\n\n self.up1 = Block(\n features * 8, features * 8, down=False, act=\"relu\", use_dropout=True\n )\n self.up2 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=True\n )\n self.up3 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=True\n )\n self.up4 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=False\n )\n self.up5 = Block(\n features * 8 * 2, features * 4, down=False, act=\"relu\", use_dropout=False\n )\n self.up6 = Block(\n features * 4 * 2, features * 2, down=False, act=\"relu\", use_dropout=False\n )\n self.up7 = Block(\n features * 2 * 2, features, down=False, act=\"relu\", use_dropout=False\n )\n self.final_up = nn.Sequential(\n nn.ConvTranspose2d(\n features * 2, in_channels, kernel_size=4, stride=2, padding=1\n ),\n nn.Tanh(),\n )\n\n def forward(self, x):\n d1 = self.initial_down(x)\n d2 = self.down1(d1)\n d3 = self.down2(d2)\n d4 = self.down3(d3)\n d5 = self.down4(d4)\n d6 = self.down5(d5)\n d7 = self.down6(d6)\n bottleneck = self.bottleneck(d7)\n up1 = self.up1(bottleneck)\n up2 = self.up2(torch.cat([up1, d7], 1))\n up3 = self.up3(torch.cat([up2, d6], 1))\n up4 = self.up4(torch.cat([up3, d5], 1))\n up5 = self.up5(torch.cat([up4, d4], 1))\n up6 = self.up6(torch.cat([up5, d3], 1))\n up7 = self.up7(torch.cat([up6, d2], 1))\n return self.final_up(torch.cat([up7, d1], 1))\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
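The Generator above is a U-Net with eight stride-2 downsampling stages, so its input must be 256 pixels per side for the bottleneck to reach 1x1. A smoke test under that assumption, using the import path listed as this row's file_path:

    import torch
    from hanzi_font_deconstructor.model.pix2pix.Generator import Generator

    gen = Generator(in_channels=3, features=64)
    x = torch.randn(1, 3, 256, 256)   # 2**8 = 256: one spatial halving per stage
    with torch.no_grad():
        y = gen(x)
    assert y.shape == x.shape         # Tanh output, same spatial size as the input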
Tobigs-team/EmoGET_tobigticon
|
[
"4b13d6a780bbe269a9c285cc603b16b09d459edf"
] |
[
"sol2/models.py"
] |
[
"\"\"\"\nCopyright (C) 2017 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-ND 4.0 license (https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode).\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nimport math\nfrom torch.nn.modules.utils import _triple\n\nimport numpy as np\n\nif torch.cuda.is_available():\n T = torch.cuda\nelse:\n T = torch\n\n\nclass Noise(nn.Module):\n def __init__(self, use_noise, sigma=0.2):\n super(Noise, self).__init__()\n self.use_noise = use_noise\n self.sigma = sigma\n\n def forward(self, x):\n if self.use_noise:\n return x + self.sigma * Variable(T.FloatTensor(x.size()).normal_(), requires_grad=False)\n return x\n\n\nclass ImageDiscriminator(nn.Module):\n def __init__(self, n_channels, ndf=64, use_noise=False, noise_sigma=None):\n super(ImageDiscriminator, self).__init__()\n\n self.use_noise = use_noise\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(n_channels, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n )\n\n def forward(self, input):\n h = self.main(input).squeeze()\n return h, None\n\n\nclass PatchImageDiscriminator(nn.Module):\n def __init__(self, n_channels, ndf=64, use_noise=False, noise_sigma=None):\n super(PatchImageDiscriminator, self).__init__()\n\n self.use_noise = use_noise\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(n_channels, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv2d(ndf * 4, 1, 4, 2, 1, bias=False),\n )\n\n def forward(self, input):\n h = self.main(input).squeeze()\n return h, None\n\nclass PatchVideoDiscriminator(nn.Module):\n def __init__(self, n_channels, n_output_neurons=1, bn_use_gamma=True, use_noise=False, noise_sigma=None, ndf=64):\n super(PatchVideoDiscriminator, self).__init__()\n\n self.n_channels = n_channels\n self.n_output_neurons = n_output_neurons\n self.use_noise = use_noise\n self.bn_use_gamma = bn_use_gamma\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(n_channels, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.BatchNorm3d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n nn.Conv3d(ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.BatchNorm3d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv3d(ndf * 4, 1, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n )\n\n def 
forward(self, input):\n h = self.main(input).squeeze()\n\n return h, None\n\n\n\nclass SpatioTemporalConv(nn.Module):\n \n #12.20 Relu->LeakyRelU\n r\"\"\"Applies a factored 3D convolution over an input signal composed of several input \n planes with distinct spatial and time axes, by performing a 2D convolution over the \n spatial axes to an intermediate subspace, followed by a 1D convolution over the time \n axis to produce the final output.\n Args:\n in_channels (int): Number of channels in the input tensor\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to the sides of the input during their respective convolutions. Default: 0\n bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):\n super(SpatioTemporalConv, self).__init__()\n\n # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]\n kernel_size = _triple(kernel_size)\n stride = _triple(stride)\n padding = _triple(padding)\n\n # decomposing the parameters into spatial and temporal components by\n # masking out the values with the defaults on the axis that\n # won't be convolved over. This is necessary to avoid unintentional\n # behavior such as padding being added twice\n spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]\n spatial_stride = [1, stride[1], stride[2]]\n spatial_padding = [0, padding[1], padding[2]]\n\n temporal_kernel_size = [kernel_size[0], 1, 1]\n temporal_stride = [stride[0], 1, 1]\n temporal_padding = [padding[0], 0, 0]\n\n # compute the number of intermediary channels (M) using formula \n # from the paper section 3.5\n intermed_channels = int(math.floor((kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels)/ \\\n (kernel_size[1]* kernel_size[2] * in_channels + kernel_size[0] * out_channels)))\n\n # the spatial conv is effectively a 2D conv due to the \n # spatial_kernel_size, followed by batch_norm and ReLU\n self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,\n stride=spatial_stride, padding=spatial_padding, bias=bias)\n self.bn = nn.BatchNorm3d(intermed_channels)\n self.leakyrelu = nn.LeakyReLU()\n\n # the temporal conv is effectively a 1D conv, but has batch norm \n # and ReLU added inside the model constructor, not here. 
This is an \n # intentional design choice, to allow this module to externally act \n # identical to a standard Conv3D, so it can be reused easily in any \n # other codebase\n self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, \n stride=temporal_stride, padding=temporal_padding, bias=bias)\n\n def forward(self, x):\n x = self.leakyrelu(self.bn(self.spatial_conv(x)))\n x = self.temporal_conv(x)\n return x\n\n\nclass VideoDiscriminator(nn.Module):\n def __init__(self, n_channels, n_output_neurons=1, bn_use_gamma=True, use_noise=False, noise_sigma=None, ndf=64):\n super(VideoDiscriminator, self).__init__()\n\n self.n_channels = n_channels\n self.n_output_neurons = n_output_neurons\n self.use_noise = use_noise\n self.bn_use_gamma = bn_use_gamma\n #self.SpatioTemporalConv = SpatioTemporalConv()???\n\n self.main = nn.Sequential(\n Noise(use_noise, sigma=noise_sigma),\n SpatioTemporalConv(n_channels, ndf, 4, stride=[1, 2, 2], padding=[0, 1, 1], bias=False),\n #nn.Conv3d(n_channels, ndf, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n SpatioTemporalConv(ndf, ndf * 2, 4, stride=[1, 2, 2], padding=[0, 1, 1], bias=False),\n #nn.Conv3d(ndf, ndf * 2, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.BatchNorm3d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n SpatioTemporalConv(ndf * 2,ndf * 4, 4, stride=[1, 2, 2], padding=[0, 1, 1], bias=False),\n #nn.Conv3d(ndf * 2, ndf * 4, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.BatchNorm3d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n Noise(use_noise, sigma=noise_sigma),\n SpatioTemporalConv(ndf * 4, ndf * 8, 4, stride=[1, 2, 2], padding=[0, 1, 1], bias=False),\n #nn.Conv3d(ndf * 4, ndf * 8, 4, stride=(1, 2, 2), padding=(0, 1, 1), bias=False),\n nn.BatchNorm3d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n SpatioTemporalConv(ndf * 8, n_output_neurons, 4, stride=1, padding=0, bias=False),\n #nn.Conv3d(ndf * 8, n_output_neurons, 4, 1, 0, bias=False),\n )\n\n def forward(self, input):\n h = self.main(input).squeeze()\n\n return h, None\n\n\nclass CategoricalVideoDiscriminator(VideoDiscriminator): # subclasses VideoDiscriminator and reuses it\n def __init__(self, n_channels, dim_categorical, n_output_neurons=1, use_noise=False, noise_sigma=None):\n super(CategoricalVideoDiscriminator, self).__init__(n_channels=n_channels,\n n_output_neurons=n_output_neurons + dim_categorical,\n use_noise=use_noise,\n noise_sigma=noise_sigma)\n\n self.dim_categorical = dim_categorical\n\n def split(self, input):\n return input[:, :input.size(1) - self.dim_categorical], input[:, input.size(1) - self.dim_categorical:]\n\n def forward(self, input):\n h, _ = super(CategoricalVideoDiscriminator, self).forward(input)\n labels, categ = self.split(h)\n return labels, categ\n\nclass embedding_f(nn.Module): \n # Embedding class used by the U-Net: 1D vector -> 2D embedding vector\n def __init__(self,n_class, x):\n super().__init__()\n self.n_class = n_class\n self.categ_embed = nn.Embedding(self.n_class, x)\n\n def forward(self,z_category):\n categ_embedding = self.categ_embed(z_category.long())\n return categ_embedding\n\n\nclass UNet(nn.Module): \n \"\"\"\n Takes z_motion and z_category (one-hot) as inputs.\n The image is embedded by passing it through the encoder.\n The image embedding vector is concatenated with z_motion and z_category and passed through the decoder.\n Each decoder feature map is concatenated with the encoder feature map and the z_motion/z_category embedding values.\n \"\"\"\n def __init__(self, n_class, n_channels, z_motion, batch_size,video_length):\n super().__init__()\n\n self.n_class = n_class\n self.z_noise = torch.from_numpy(np.random.normal(0, 1, (batch_size, 100)).astype(np.float32))\n self.z_motion = z_motion\n #self.z_category = z_category_labels\n self.n_channels = n_channels\n self.video_length = video_length\n\n self.embedding_c1 = embedding_f(self.n_class,16)\n #self.embedding_m1 = embedding_f(int(torch.max(self.z_motion).item()),16)\n\n self.embedding_c2 = embedding_f(self.n_class,64)\n #self.embedding_m2 = embedding_f(int(torch.max(self.z_motion).item()),64)\n\n self.embedding_c3 = embedding_f(self.n_class,256)\n #self.embedding_m3 = embedding_f(int(torch.max(self.z_motion).item()),256)\n\n self.embedding_c4 = embedding_f(self.n_class,1024)\n #self.embedding_m4 = embedding_f(int(torch.max(self.z_motion).item()),1024)\n \n # input 3x64x64 \n self.conv_down1 = nn.utils.spectral_norm(nn.Conv2d(3, 16, 4, stride =2,padding=1)) # 32x32x16; if input is 1024 -> output = 512x512x16 / stack more conv layers\n self.conv_down_acf1 = nn.LeakyReLU(inplace=True)\n\n self.conv_down2 = nn.utils.spectral_norm(nn.Conv2d(16, 32, 4, stride =2,padding=1)) # 16x16x32 \n self.conv_down_acf2 = nn.LeakyReLU(inplace=True)\n\n self.conv_down3 = nn.utils.spectral_norm(nn.Conv2d(32, 64, 4, stride =2,padding=1)) # 8x8x64\n self.conv_down_acf3 = nn.LeakyReLU(inplace=True)\n\n self.conv_down4 = nn.utils.spectral_norm(nn.Conv2d(64, 128, 4, stride =2,padding=1)) #4x4x128\n self.conv_down_acf4 = nn.LeakyReLU(inplace=True) \n \n self.flatten = nn.Flatten() \n self.linear = nn.Linear(2048,200) #image embedding dim = 200\n # encoder for the image embedding up to here; decoder from here on\n\n self.conv_up4 = nn.utils.spectral_norm(nn.ConvTranspose2d(316, 128, 4, 1, padding=0)) #output feature map W,H = 4\n self.conv_up_acf4 = nn.LeakyReLU(inplace=True)\n\n self.conv_up3 = nn.utils.spectral_norm(nn.ConvTranspose2d(259, 64, 4, 2, padding=1)) #output feature map W,H = 8\n self.conv_up_acf3 = nn.LeakyReLU(inplace=True)\n\n self.conv_up2 = nn.utils.spectral_norm(nn.ConvTranspose2d(131, 32, 4, 2, padding=1)) #output feature map W,H = 16\n self.conv_up_acf2 = nn.LeakyReLU(inplace=True)\n \n self.conv_up1 = nn.utils.spectral_norm(nn.ConvTranspose2d(67, 16, 4, 2, padding=1))#output feature map W,H = 32\n self.conv_up_acf1 = nn.LeakyReLU(inplace=True)\n\n self.conv_last = nn.ConvTranspose2d(35, self.n_channels, 4, 2, padding=1) #output feature map W,H = 64\n \n \n def forward(self,image,z_category):\n conv1 = self.conv_down1(image)\n conv1 = self.conv_down_acf1(conv1)\n\n conv2 = self.conv_down2(conv1)\n conv2 = self.conv_down_acf2(conv2)\n \n conv3 = self.conv_down3(conv2)\n conv3 = self.conv_down_acf3(conv3)\n \n conv4 = self.conv_down4(conv3)\n conv4 = self.conv_down_acf4(conv4)\n\n x = self.flatten(conv4)\n x = self.linear(x)\n\n if torch.cuda.is_available():\n z_category = z_category.cuda()\n self.z_motion = self.z_motion.cuda()\n x = x.cuda()\n self.z_noise = self.z_noise.cuda()\n\n x = x.repeat(self.video_length,1)\n z_noise_1 = 
self.z_noise.repeat(self.video_length,1)\n\n p = torch.cat([z_category, self.z_motion, x, z_noise_1], dim=1)\n # x : 200, noise : 10, z_motion: 13, categ_embedding : 3\n p = p.view(p.size(0),p.size(1),1,1)#[b,316,1,1]\n\n u_conv4 = self.conv_up4(p)\n x = self.conv_up_acf4(u_conv4)#c=128\n conv4 = conv4.repeat(self.video_length,1,1,1)\n categ_embedding_1 = self.embedding_c1(z_category)\n categ_embedding_1 = categ_embedding_1.reshape(categ_embedding_1.size(0),categ_embedding_1.size(1),x.size(2),x.size(3))\n x = torch.cat([x, conv4, categ_embedding_1], dim=1) \n \n u_conv3 = self.conv_up3(x)\n x = self.conv_up_acf3(u_conv3)\n conv3 = conv3.repeat(self.video_length,1,1,1)\n categ_embedding_2 = self.embedding_c2(z_category)\n categ_embedding_2 = categ_embedding_2.reshape(categ_embedding_2.size(0),categ_embedding_2.size(1),x.size(2),x.size(3))\n x = torch.cat([x, conv3, categ_embedding_2 ], dim=1)\n \n u_conv2 = self.conv_up2(x)\n x = self.conv_up_acf2(u_conv2)\n conv2 = conv2.repeat(self.video_length,1,1,1)\n categ_embedding_3 = self.embedding_c3(z_category)\n categ_embedding_3 = categ_embedding_3.reshape(categ_embedding_3.size(0),categ_embedding_3.size(1),x.size(2),x.size(3))\n x = torch.cat([x, conv2, categ_embedding_3 ], dim=1)\n\n u_conv1 = self.conv_up1(x)\n x = self.conv_up_acf1(u_conv1)\n conv1 = conv1.repeat(self.video_length,1,1,1)\n \n categ_embedding_4 = self.embedding_c4(z_category)\n categ_embedding_4 = categ_embedding_4.reshape(categ_embedding_4.size(0),categ_embedding_4.size(1),x.size(2),x.size(3))\n x = torch.cat([x, conv1, categ_embedding_4], dim=1)\n\n out = self.conv_last(x)\n\n return out\n\n\nclass VideoGenerator(nn.Module):\n def __init__(self, n_class,n_channels, dim_z_category, dim_z_motion,\n video_length, ngf=64):\n super(VideoGenerator, self).__init__()\n self.n_class = n_class\n\n self.n_channels = n_channels\n self.dim_z_category = dim_z_category\n self.dim_z_motion = dim_z_motion\n self.video_length = video_length\n\n dim_z = dim_z_motion + dim_z_category\n\n self.recurrent = nn.GRUCell(dim_z_motion, dim_z_motion)\n\n def sample_z_m(self, num_samples, video_len=None): # build motion vectors by passing noise through the GRU\n video_len = video_len if video_len is not None else self.video_length\n\n h_t = [self.get_gru_initial_state(num_samples)]\n\n for frame_num in range(video_len):\n e_t = self.get_iteration_noise(num_samples)\n h_t.append(self.recurrent(e_t, h_t[-1]))\n\n z_m_t = [h_k.view(-1, 1, self.dim_z_motion) for h_k in h_t]\n z_m = torch.cat(z_m_t[1:], dim=1).view(-1, self.dim_z_motion)\n\n return z_m\n\n def sample_z_categ(self, num_samples, video_len): # build the category one-hot vector and z_category_labels (used for the categorical classification loss)\n video_len = video_len if video_len is not None else self.video_length\n\n if self.dim_z_category <= 0:\n return None, np.zeros(num_samples)\n\n classes_to_generate = np.random.randint(self.dim_z_category, size=num_samples)\n one_hot = np.zeros((num_samples, self.dim_z_category), dtype=np.float32)\n one_hot[np.arange(num_samples), classes_to_generate] = 1\n one_hot_video = np.repeat(one_hot, video_len, axis=0)\n\n one_hot_video = torch.from_numpy(one_hot_video)\n\n if torch.cuda.is_available():\n one_hot_video = one_hot_video.cuda()\n\n return Variable(one_hot_video), torch.from_numpy(classes_to_generate)\n\n\n def sample_z_video(self, num_samples, video_len=None): \n # build z; returns z (used for generation), the one-hot z_category (used for generation), and z_category_labels (used for the categorical classification loss) \n z_category, z_category_labels = self.sample_z_categ(num_samples, video_len)\n 
z_motion = self.sample_z_m(num_samples, video_len)\n\n if z_category is not None:\n z = torch.cat([z_category, z_motion], dim=1)\n else:\n z = z_motion\n return z, z_category, z_category_labels\n\n def sample_videos(self, image, num_samples, target_class=None, video_len=None): # generate a video with the main network (UNet)\n video_len = video_len if video_len is not None else self.video_length\n z, z_category, z_category_labels = self.sample_z_video(num_samples, video_len)\n if target_class is not None : #inference\n print(\"inference\")\n z_category = target_class\n\n\n main = UNet(self.n_class, self.n_channels, z, num_samples,video_len)\n if torch.cuda.is_available():\n main.cuda()\n h = main(image,z_category)\n h = h.view(int(h.size(0) / video_len), int(video_len), self.n_channels, h.size(3), h.size(3))\n h = h.permute(0, 2, 1, 3, 4)\n return h, Variable(z_category_labels, requires_grad=False)\n \n def sample_images(self, image, num_samples, video_len=None):\n video_len = video_len if video_len is not None else self.video_length\n\n z, z_category, z_category_labels = self.sample_z_video(num_samples, video_len)\n\n h, _ = self.sample_videos(image, num_samples, None, video_len)\n h_result =[]\n for i in range(num_samples):\n j = np.random.randint(video_len)\n img_frame = h[i,:,j,:,:]\n h_result.append(img_frame)\n h_result = torch.stack(h_result)\n\n return h_result, None\n\n\n def get_gru_initial_state(self, num_samples): # initial hidden state for the recurrent (GRU cell) network that builds z_motion\n return Variable(T.FloatTensor(num_samples, self.dim_z_motion).normal_())\n\n def get_iteration_noise(self, num_samples): # per-step noise input for the recurrent (GRU cell) network that builds z_motion\n return Variable(T.FloatTensor(num_samples, self.dim_z_motion).normal_())\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.BatchNorm3d",
"torch.nn.modules.utils._triple",
"numpy.random.normal",
"torch.autograd.Variable",
"torch.nn.ConvTranspose2d",
"numpy.random.randint",
"torch.nn.Conv3d",
"numpy.arange",
"torch.nn.Flatten",
"numpy.zeros",
"torch.nn.Conv2d",
"torch.nn.GRUCell",
"torch.from_numpy",
"numpy.repeat",
"torch.nn.Embedding"
]
] |
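SpatioTemporalConv above factors a 3D convolution into a 2D spatial convolution followed by a 1D temporal convolution, sizing the intermediate channels so the parameter count roughly matches a full Conv3d. A shape check, assuming sol2.models is importable as listed in this row's file_path and run on CPU:

    import torch
    from sol2.models import SpatioTemporalConv

    conv = SpatioTemporalConv(3, 64, kernel_size=3, stride=1, padding=1)
    clip = torch.randn(2, 3, 16, 64, 64)     # (batch, channels, frames, height, width)
    out = conv(clip)                          # spatial (1,3,3) conv, then temporal (3,1,1) conv
    assert out.shape == (2, 64, 16, 64, 64)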
ngocson2vn/data-complexity
|
[
"6c7b9e3e7cdbea4d5c2f1af894dc45c59c54db3c"
] |
[
"examples/creditcard_C12.py"
] |
[
"import pandas as pd\nfrom dcm import dcm\nimport datasets\n\ndataset_file = 'examples/datasets/creditcard.csv'\ndatasets.download_creditcard(dataset_file)\n\nprint(\"Loading dataset ... \", end=\"\", flush=True)\ndataset = pd.read_csv(dataset_file)\nX = dataset.drop(columns=['Class']).values\ny = dataset['Class'].values\nprint(\"DONE\")\n\nprint(\"Calculating C1 and C2 ... \", end=\"\", flush=True)\nC1, C2 = dcm.C12(X, y)\nprint(\"DONE\")\nprint(\"C1 = {}\".format(C1))\nprint(\"C2 = {}\".format(C2))\n"
] |
[
[
"pandas.read_csv"
]
] |
akhildraju/lambdata-akhildraju
|
[
"b490068460c3e379725f0e117fa323863f880c10"
] |
[
"akhilutils/dsutils.py"
] |
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nclass DatasetUtil:\n \"\"\" This class provides untilities for splitting your dataset \n into train, test and validate sets \"\"\"\n\n dataframe = None\n\n def __init__(self, df):\n self.dataframe = df\n\n def train_test_validate_split(self, test_size=0.25, random_state=42):\n\n if test_size > 0.25:\n test_size = 0.25\n\n totalrows = len(df)\n test_rows = totalrows * test_size\n remaining_rows = totalrows - test_rows\n val_size = test_rows/remaining_rows\n\n train, test = train_test_split(self.dataframe, test_size=test_size,\n random_state=random_state)\n train, validate = train_test_split(train,\n test_size=val_size,\n random_state=random_state)\n\n return train, test, validate\n\nif __name__ == \"__main__\":\n\n df = pd.read_csv(\"http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\")\n du = DatasetUtil(df)\n\n tn, tt, v = du.train_test_validate_split(test_size=0.25,\n random_state=42)\n\n print('Test Size=', len(tt))\n print('Validate Size=', len(v))\n print('Train Size=', len(tn))\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
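The val_size arithmetic in train_test_validate_split makes the validation set the same absolute size as the test set: with test_size=0.25 on 1000 rows, test_rows = 250, remaining_rows = 750, and val_size = 250/750 = 1/3 of the remaining train rows. The same two-stage split written directly against sklearn, on a hypothetical toy frame:

    import pandas as pd
    from sklearn.model_selection import train_test_split

    df = pd.DataFrame({"x": range(1000)})
    train, test = train_test_split(df, test_size=0.25, random_state=42)              # 750 / 250
    train, validate = train_test_split(train, test_size=250 / 750, random_state=42)  # 500 / 250
    print(len(train), len(test), len(validate))  # 500 250 250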
AndreySheka/catalyst
|
[
"28dd7696fb4a1331c85dbeecc147bed6b46b411c"
] |
[
"catalyst/contrib/models/cv/segmentation/blocks/unet.py"
] |
[
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..abn import ABN\nfrom .core import _get_block, _upsample, DecoderBlock, EncoderBlock\n\n\nclass EncoderDownsampleBlock(EncoderBlock):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n in_strides: int = None,\n abn_block: nn.Module = ABN,\n activation: str = \"ReLU\",\n first_stride: int = 2,\n second_stride: int = 1,\n **kwargs\n ):\n super().__init__(in_channels, out_channels, in_strides)\n self._out_strides = (\n in_strides * first_stride * second_stride\n if in_strides is not None\n else None\n )\n self._block = _get_block(\n in_channels=in_channels,\n out_channels=out_channels,\n abn_block=abn_block,\n activation=activation,\n first_stride=first_stride,\n second_stride=second_stride,\n **kwargs\n )\n\n @property\n def out_strides(self) -> int:\n return self._out_strides\n\n @property\n def block(self):\n return self._block\n\n\nclass EncoderUpsampleBlock(EncoderBlock):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n in_strides: int = None,\n abn_block: nn.Module = ABN,\n activation: str = \"ReLU\",\n first_stride: int = 1,\n second_stride: int = 1,\n pool_first: bool = False,\n upsample_scale: int = 2,\n interpolation_mode: str = \"nearest\",\n align_corners: bool = None,\n **kwargs\n ):\n super().__init__(in_channels, out_channels, in_strides)\n if in_strides is None:\n self._out_strides = None\n elif pool_first:\n self._out_strides = (\n in_strides * first_stride * second_stride * 2 // upsample_scale\n )\n else:\n self._out_strides = (\n in_strides * first_stride * second_stride // upsample_scale\n )\n self.pool_first = pool_first\n self.upsample_scale = upsample_scale\n self.interpolation_mode = interpolation_mode\n self.align_corners = align_corners\n self._block = _get_block(\n in_channels=in_channels,\n out_channels=out_channels,\n abn_block=abn_block,\n activation=activation,\n first_stride=first_stride,\n second_stride=second_stride,\n **kwargs\n )\n\n @property\n def out_strides(self) -> int:\n return self._out_strides\n\n @property\n def block(self):\n return self._block\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.pool_first:\n x = F.max_pool2d(\n x, kernel_size=self.upsample_scale, stride=self.upsample_scale\n )\n x = F.interpolate(\n x,\n scale_factor=self.upsample_scale,\n mode=self.interpolation_mode,\n align_corners=self.align_corners,\n )\n return self.block(x)\n\n\nclass DecoderConcatBlock(DecoderBlock):\n def __init__(\n self,\n in_channels: int,\n enc_channels: int,\n out_channels: int,\n in_strides: int = None,\n abn_block: nn.Module = ABN,\n activation: str = \"ReLU\",\n pre_dropout_rate: float = 0.0,\n post_dropout_rate: float = 0.0,\n upsample_scale: int = None,\n interpolation_mode: str = \"bilinear\",\n align_corners: bool = True,\n aggregate_first: bool = False,\n **kwargs\n ):\n\n self.upsample_scale = upsample_scale\n self.interpolation_mode = interpolation_mode\n self.align_corners = align_corners\n self.aggregate_first = aggregate_first\n\n super().__init__(\n in_channels,\n enc_channels,\n out_channels,\n in_strides,\n abn_block=abn_block,\n activation=activation,\n pre_dropout_rate=pre_dropout_rate,\n post_dropout_rate=post_dropout_rate,\n **kwargs\n )\n\n def _get_block(\n self,\n abn_block: nn.Module = ABN,\n activation: str = \"ReLU\",\n pre_dropout_rate: float = 0.0,\n post_dropout_rate: float = 0.0,\n **kwargs\n ):\n layers = []\n if pre_dropout_rate > 0:\n layers.append(nn.Dropout2d(pre_dropout_rate, 
inplace=True))\n layers.append(\n _get_block(\n in_channels=self.in_channels + self.enc_channels,\n out_channels=self.out_channels,\n abn_block=abn_block,\n activation=activation,\n first_stride=1,\n second_stride=1,\n **kwargs\n )\n )\n if post_dropout_rate > 0:\n layers.append(nn.Dropout2d(post_dropout_rate, inplace=True))\n\n block = nn.Sequential(*layers)\n return block\n\n def forward(self, bottom: torch.Tensor, left: torch.Tensor) -> torch.Tensor:\n\n if self.aggregate_first:\n x = torch.cat([bottom, left], 1)\n x = _upsample(\n x,\n scale=self.upsample_scale,\n interpolation_mode=self.interpolation_mode,\n align_corners=self.align_corners,\n )\n else:\n x = _upsample(\n bottom,\n scale=self.upsample_scale,\n size=left.shape[2:],\n interpolation_mode=self.interpolation_mode,\n align_corners=self.align_corners,\n )\n x = torch.cat([x, left], 1)\n\n return self.block(x)\n\n\nclass DecoderSumBlock(DecoderConcatBlock):\n def __init__(self, enc_channels: int, **kwargs):\n super().__init__(enc_channels=0, **kwargs)\n\n def forward(self, bottom: torch.Tensor, left: torch.Tensor) -> torch.Tensor:\n\n if self.aggregate_first:\n x = bottom + left\n x = _upsample(\n x,\n scale=self.upsample_scale,\n interpolation_mode=self.interpolation_mode,\n align_corners=self.align_corners,\n )\n x = self.block(x)\n else:\n x = _upsample(\n bottom,\n scale=self.upsample_scale,\n size=left.shape[2:],\n interpolation_mode=self.interpolation_mode,\n align_corners=self.align_corners,\n )\n x = self.block(x)\n x = x + left\n\n return x\n"
] |
[
[
"torch.cat",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d",
"torch.nn.Dropout2d"
]
] |
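DecoderConcatBlock's default path (aggregate_first=False) upsamples the bottom decoder features to the encoder skip's spatial size, then concatenates along the channel axis before the conv block. The core pattern in plain PyTorch, with illustrative shapes:

    import torch
    import torch.nn.functional as F

    bottom = torch.randn(1, 256, 16, 16)   # coarse decoder features
    left = torch.randn(1, 128, 32, 32)     # encoder skip connection
    up = F.interpolate(bottom, size=left.shape[2:], mode="bilinear", align_corners=True)
    x = torch.cat([up, left], dim=1)       # channels add: 256 + 128
    assert x.shape == (1, 384, 32, 32)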
cclauss/mindsdb
|
[
"d59b9273dcfaf7f137dd38ab6c6e544b3114669f"
] |
[
"mindsdb/libs/phases/model_predictor/model_predictor.py"
] |
[
"\"\"\"\n*******************************************************\n * Copyright (C) 2017 MindsDB Inc. <[email protected]>\n *\n * This file is part of MindsDB Server.\n *\n * MindsDB Server can not be copied and/or distributed without the express\n * permission of MindsDB Inc\n *******************************************************\n\"\"\"\n\nimport mindsdb.config as CONFIG\nfrom mindsdb.libs.constants.mindsdb import *\nfrom mindsdb.libs.phases.base_module import BaseModule\nfrom mindsdb.libs.workers.predict import PredictWorker\nimport numpy as np\nimport time\nimport random\nclass ModelPredictor(BaseModule):\n\n phase_name = PHASE_PREDICTION\n\n def run(self):\n\n model_name = self.transaction.persistent_model_metadata.model_name\n self.train_start_time = time.time()\n self.session.logging.info('Predict: model {model_name}, epoch 0'.format(model_name=model_name))\n\n self.last_time = time.time()\n\n # We moved everything to a worker so we can run many of these in parallel\n # Todo: use Ray https://github.com/ray-project/tutorial\n\n ret_diffs = PredictWorker.start(model_name=model_name, data=self.transaction.model_data)\n\n\n confusion_matrices = self.transaction.persistent_ml_model_info.confussion_matrices\n\n self.transaction.output_data.columns = self.transaction.input_data.columns\n # TODO: This may be inneficient, try to work on same pointer\n self.transaction.output_data.data_array = self.transaction.input_data.data_array\n self.transaction.output_data.predicted_columns=self.transaction.metadata.model_predict_columns\n for diff in ret_diffs:\n for col in diff['ret_dict']:\n confusion_matrix = confusion_matrices[col]\n col_index = self.transaction.input_data.columns.index(col)\n self.transaction.output_data.columns.insert(col_index+1,KEY_CONFIDENCE)\n offset = diff['start_pointer']\n group_pointer = diff['group_pointer']\n column_pointer = diff['column_pointer']\n for j, cell in enumerate(diff['ret_dict'][col]):\n #TODO: This may be calculated just as j+offset\n if not cell:\n continue\n actual_row = self.transaction.model_data.predict_set_map[group_pointer][j+offset]\n if not self.transaction.output_data.data_array[actual_row][col_index] or self.transaction.output_data.data_array[actual_row][col_index] == '':\n\n if self.transaction.persistent_model_metadata.column_stats[col][KEYS.DATA_TYPE] == DATA_TYPES.NUMERIC:\n target_val = np.format_float_positional(cell, precision=2)\n else:\n target_val = cell\n self.transaction.output_data.data_array[actual_row][col_index] = target_val\n confidence = self.getConfidence(cell,confusion_matrix)\n #randConfidence = random.uniform(0.85, 0.93)\n\n self.transaction.output_data.data_array[actual_row].insert(col_index + 1, confidence)\n else:\n self.transaction.output_data.data_array[actual_row].insert(col_index+1,1.0)\n\n\n total_time = time.time() - self.train_start_time\n self.session.logging.info(\n 'Predict: model {model_name} [OK], TOTAL TIME: {total_time:.2f} seconds'.format(model_name=model_name,\n total_time=total_time))\n\n pass\n\n def getConfidence(self,value,confusion_matrix):\n labels = confusion_matrix['labels']\n index = 0\n for i,label in enumerate(labels):\n if value < label:\n index = i\n break\n\n transposed = np.transpose(confusion_matrix['real_x_predicted'])\n confidence = transposed[index][index]\n if confidence >=1:\n confidence = 0.99\n return \"{0:.2f}\".format(confidence)\n\ndef test():\n\n from mindsdb.libs.controllers.mindsdb_controller import MindsDBController as MindsDB\n import logging\n\n mdb = MindsDB()\n ret = 
mdb.predict(predict='position', when={'max_time_rec': 700}, model_name='mdsb_model')\n logging.info(ret)\n\n\n# only run the test if this file is called from debugger\nif __name__ == \"__main__\":\n test()\n\n"
] |
[
[
"numpy.transpose",
"numpy.format_float_positional"
]
] |
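getConfidence buckets the predicted value against the confusion matrix's label boundaries, then reads the matching diagonal entry of the transposed real_x_predicted matrix and caps it at 0.99. The same lookup in isolation, with hypothetical numbers:

    import numpy as np

    confusion_matrix = {
        'labels': [10, 20, 30],                  # hypothetical bucket boundaries
        'real_x_predicted': np.array([[0.9, 0.1, 0.0],
                                      [0.2, 0.7, 0.1],
                                      [0.0, 0.2, 0.8]]),
    }
    value = 15
    index = next((i for i, label in enumerate(confusion_matrix['labels']) if value < label), 0)
    transposed = np.transpose(confusion_matrix['real_x_predicted'])
    confidence = min(transposed[index][index], 0.99)
    print('{0:.2f}'.format(confidence))          # 0.70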
parasdahal/tinyml
|
[
"cf2fcc021ae65df19d420e3142e4a38d20ca87e0"
] |
[
"src/MultiLayerPerceptron.py"
] |
[
"import numpy as np\nimport random\nimport math\nimport datetime\n\nclass CrossEntropyCost:\n \"\"\"\n Cross Entropy class with cost function and error \n \"\"\"\n\n @staticmethod\n def fn(a,y):\n return np.sum( np.nan_to_num( -y * np.log(a) - (1-y) * np.log(1-a) ) )\n \n @staticmethod\n def delta(a,y):\n return (a-y)\n\nclass MultiLayerPerceptron:\n \"\"\"\n A fully connected neural network with stochastic gradient descent and \n various diagnostic visualizations.\n \"\"\"\n\n def __init__(self, sizes, cost=CrossEntropyCost):\n \"\"\"Initializes the network parameters\n\n Parameters\n ----------\n sizes : List\n A list with number of neurons per each layer\n cost : object\n Cost object to use for cost calculation\n \"\"\"\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.initialize_weights()\n self.cost = cost\n \n def initialize_weights(self):\n \"\"\"Initializing weights as Gaussian random variables with mean\n 0 and standard deviation 1/sqrt(n) where n is the number\n of weights connecting to the same neuron.\n\n \"\"\"\n self.biases = [np.random.randn(y,1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y,x)/np.sqrt(x) for x,y in zip(self.sizes[:-1],self.sizes[1:])]\n\n def feed_forward(self,a):\n \"\"\"Carry out a forward pass through the network and return\n the activation value of the last layer\n \n \"\"\"\n \n for b,w in zip(self.biases,self.weights):\n a = self.sigmoid(np.dot(w,a)+b)\n return a\n \n def backprop(self,x,y):\n \"\"\"Perform backward pass using backpropagation on a single\n item of dataset and return the weights and biases \n \n \"\"\"\n # biases and weights calculated by backprop\n b = [np.zeros(bias.shape) for bias in self.biases]\n w = [np.zeros(weight.shape) for weight in self.weights]\n \n # forward pass\n activation = x\n activations = [x]\n zs = []\n for bias,weight in zip(self.biases,self.weights):\n z = np.dot(weight, activation) + bias\n zs.append(z)\n activation = self.sigmoid(z)\n activations.append(activation)\n # output error\n delta = (self.cost).delta(activations[-1],y)\n b[-1] = delta\n w[-1] = np.dot(delta,activations[-2].transpose())\n\n # backpropagate\n for l in xrange(2,self.num_layers):\n z = zs[-l]\n sp = self.sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(),delta) * sp\n # store the derrivative terms in the bias and weight list\n b[-l] = delta\n w[-l] = np.dot(delta,activations[-l-1].transpose())\n \n return (b,w)\n \n def gd_mini_batch(self,mini_batch,alpha,lmbda,n):\n \"\"\"Update the weights and biases of the netwrok by applying\n gradient descent on each mini batch. 
Mini batch is a list\n of tuple (x,y)\n\n \"\"\"\n biases = [np.zeros(b.shape) for b in self.biases]\n weights = [np.zeros(w.shape) for w in self.weights]\n \n for x, y in mini_batch:\n # get derrivative terms using backprop\n delta_b, delta_w = self.backprop(x,y)\n # accumulate the weights and biases\n biases = [nb + db for nb, db in zip(biases,delta_b)]\n weights = [nw + dw for nw, dw in zip(weights,delta_w)]\n \n # update network using gradient descent update rule\n self.biases = [b - (alpha/len(mini_batch))*nb \n for b, nb in zip(self.biases, biases)]\n self.weights = [(1 - (alpha*lmbda/n))*w - (alpha/len(mini_batch))*nw\n for w,nw in zip(self.weights, weights)]\n \n def SGD(self,training_data,epochs,mini_batch_size,alpha,lmbda,evaluation_data):\n \"\"\"Train the network using mini-batch stochastic gradient descent\n\n Parameters\n ----------\n training_data : ndarray\n Numpy array of training data\n epochs : int\n Number of epochs to train the network\n mini_batch_size : int\n The size of each mini batch to use for SGD\n alpha : float\n Learning Rate \n lmbda : float\n Regularization parameter\n evaluation_data : ndarray\n Validation or test dataset similar to training_data\n\n \"\"\"\n n = len(training_data)\n n_data = len(evaluation_data)\n\n evaluation_cost = []\n evaluation_accuracy = []\n training_cost = []\n training_accuracy = []\n for i in xrange(epochs):\n random.shuffle(training_data)\n mini_batches = [training_data[k:k+mini_batch_size]\n for k in xrange(0,n,mini_batch_size)]\n for mini_batch in mini_batches:\n self.gd_mini_batch(mini_batch,alpha,lmbda,n)\n print(\"Epoch \"+ str(i) +\" training complete\")\n # training cost and accuracy\n cost = self.total_cost(training_data,lmbda)\n training_cost.append(cost)\n print(\"Cost on training data: \"+str(cost))\n accuracy = self.accuracy(training_data)\n training_accuracy.append(accuracy)\n print(\"Accuracy on training data: \"+str(accuracy)+\"/\"+str(n))\n # evaluation cost and accuracy\n cost = self.total_cost(evaluation_data,lmbda)\n print(\"Cost on evaluation data: \"+str(cost))\n evaluation_cost.append(cost)\n accuracy = self.accuracy(evaluation_data)\n evaluation_accuracy.append(accuracy)\n print(\"Accuracy on evaluation data: \"+str(accuracy)+\"/\"+str(n_data))\n \n return evaluation_cost,evaluation_accuracy,training_cost,training_accuracy\n\n def accuracy(self,data):\n \"\"\"Returns the number of input in data for which neural network \n outputs the correct result.\n \"\"\"\n results = [(np.argmax(self.feed_forward(x)),np.argmax(y)) for(x, y) in data]\n return sum( int(x == y) for(x,y) in results)\n\n def total_cost(self,data,lmbda):\n \"\"\"Return the total cost of the network for dataset\n \"\"\"\n cost = 0.0\n for x, y in data:\n a = self.feed_forward(x)\n cost += self.cost.fn(a,y)/len(data)\n # add regularization\n cost += 0.5*(lmbda/len(data))*sum( np.linalg.norm(w)**2 for w in self.weights )\n return cost\n\n def one_hot_encoded_result(self,j):\n \"\"\"Convert output value into one hot encoded output vector\n \"\"\"\n vec = np.zeros((self.sizes[-1],1))\n vec[j] = 1.0\n return vec\n \n def sigmoid(self,z):\n return 1.0/(1.0+np.exp(-z))\n \n def sigmoid_prime(self,z):\n return self.sigmoid(z)*(1-self.sigmoid(z))\n\n def plot(self,evaluation_cost,evaluation_accuracy,training_cost,training_accuracy):\n \"\"\"Visualize the cost and accuracy on training and evaluation data\n\n Parameters\n ----------\n evaluation_cost : list\n List of cost on evaluation data for each epoch\n evaluation_accuracy : list\n List of accuracy 
on evaluation data for each epoch\n training_cost : list\n List of cost on training data for each epoch\n training_accuracy : list\n List of accuracy on training data for each epoch\n \n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n\n train_cost,eval_cost = [],[]\n train_acc,eval_acc = [],[]\n for i,cost in enumerate(training_cost):\n train_cost.append((cost,i))\n for i,cost in enumerate(evaluation_cost):\n eval_cost.append((cost,i))\n for i,acc in enumerate(training_accuracy):\n train_acc.append((acc,i))\n for i,acc in enumerate(evaluation_accuracy):\n eval_acc.append((acc,i))\n \n np_train_cost = np.asarray(train_cost)\n np_eval_cost = np.asarray(eval_cost)\n np_train_acc = np.asarray(train_acc)\n np_eval_acc = np.asarray(eval_acc)\n\n plt.subplot(221)\n plt.plot(np_train_cost[:,1],np_train_cost[:,0],linewidth=2)\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.title(\"Cost on training data\")\n plt.xlabel(\"No of epochs\")\n plt.ylabel(\"Cost\")\n plt.subplot(222)\n plt.plot(np_eval_cost[:,1],np_eval_cost[:,0],linewidth=2)\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.title(\"Cost on evaluation data\")\n plt.xlabel(\"No of epochs\")\n plt.ylabel(\"Cost\")\n plt.subplot(223)\n plt.plot(np_train_acc[:,1],np_train_acc[:,0],linewidth=2)\n plt.title(\"Accuracy on training data\")\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylim([80,100])\n plt.xlabel(\"No of epochs\")\n plt.ylabel(\"Accuracy\")\n plt.subplot(224)\n plt.plot(np_eval_acc[:,1],np_eval_acc[:,0],linewidth=2)\n plt.title(\"Accuracy on evaluation data\")\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_ylim([80,100])\n plt.xlabel(\"No of epochs\")\n plt.ylabel(\"Accuracy\")\n plt.tight_layout()\n plt.show()"
] |
[
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.zeros",
"matplotlib.ticker.MaxNLocator",
"numpy.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"numpy.random.randn",
"numpy.exp",
"numpy.argmax",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplot"
]
] |
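initialize_weights draws each weight from a Gaussian with standard deviation 1/sqrt(n_in), where n_in is the fan-in of the receiving neuron; this keeps the pre-activation variance roughly independent of layer width. The initialization in isolation, for a hypothetical 784-30-10 network:

    import numpy as np

    sizes = [784, 30, 10]
    biases = [np.random.randn(y, 1) for y in sizes[1:]]
    weights = [np.random.randn(y, x) / np.sqrt(x)    # std = 1/sqrt(fan-in)
               for x, y in zip(sizes[:-1], sizes[1:])]
    print([w.shape for w in weights])                # [(30, 784), (10, 30)]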
AllenCellModeling/CVAE_testbed
|
[
"bc9be2cd9725b9cddfca69b450dd4a2b93da6858",
"bc9be2cd9725b9cddfca69b450dd4a2b93da6858"
] |
[
"CVAE_testbed/datasets/synthetic_projected.py",
"CVAE_testbed/datasets/dataloader.py"
] |
[
"import torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.distributions import MultivariateNormal\nimport numpy as np\nimport sklearn\nfrom sklearn import preprocessing\nfrom scipy import stats\n\n\nclass ProjectedSyntheticDataset(Dataset):\n def __init__(\n self,\n num_batches,\n BATCH_SIZE,\n model_kwargs,\n shuffle=True,\n corr=False,\n train=True,\n P=None,\n mask=False\n ):\n \"\"\"\n Args: \n num_batches: Number of batches of synthetic data\n BATCH_SIZE: batchsize of synthetic data\n model_kwargs: dictionary containing \"x_dim\" which indicates input data size\n shuffle: True sets condition vector in input data to 0 for all possible permutations\n corr: True sets dependent input dimensions via a correlation matrix \n \"\"\"\n self.num_batches = num_batches\n self.BATCH_SIZE = BATCH_SIZE\n self.corr = corr\n self.shuffle = shuffle\n self.model_kwargs = model_kwargs\n self.train = train\n\n if train is True:\n self._P = self.generate_projection_matrix()\n else:\n self._P = P\n\n Batches_X, Batches_C, Batches_conds = torch.empty([0]), torch.empty([0]), torch.empty([0])\n\n for j, i in enumerate(range(self.num_batches)):\n if self.corr is False:\n m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']), torch.eye(self.model_kwargs['x_dim']))\n else:\n if j == 0:\n corr_matrix = self.random_corr_mat(D=self.model_kwargs['x_dim'])\n corr_matrix = torch.from_numpy(corr_matrix)\n m = MultivariateNormal(torch.zeros(self.model_kwargs['x_dim']).float(), corr_matrix.float())\n\n X = m.sample((self.BATCH_SIZE,))\n X = torch.cat([X, torch.zeros((self.BATCH_SIZE, self.model_kwargs['projection_dim'] - self.model_kwargs['x_dim']))], 1)\n X = X.t()\n X = torch.mm(self._P, X).cuda()\n\n # std_scaler = preprocessing.StandardScaler()\n # X = torch.from_numpy(std_scaler.fit_transform(X.cpu().numpy())).cuda()\n # X = torch.from_numpy(sklearn.preprocessing.normalize(X.cpu().numpy(), axis=0)).cuda()\n X = X.t()\n\n # TRY ZSCOREING\n # X = torch.from_numpy(stats.zscore(X.cpu().numpy())).cuda()\n\n C = X.clone().cuda()\n count = 0\n if self.shuffle is True:\n while count == 0:\n C_mask = torch.zeros(C.shape).bernoulli_(0.5)\n count = 1\n else:\n C_mask = torch.zeros(C.shape).bernoulli_(0)\n\n C[C_mask.byte()] = 0\n C_indicator = C_mask == 0\n\n C = torch.cat([C.float(), C_indicator.float().cuda()], 1)\n\n if mask is True:\n # This will mask 20% of the elements in X\n print('MASK IS TRUE')\n X_mask = torch.cuda.FloatTensor(X.size()[0], X.size()[1]).uniform_() > 1 - model_kwargs['mask_percentage']\n X[X_mask] = 0\n\n X = X.view([1, -1, X.size()[-1]])\n C = C.view([1, -1, C.size()[-1]])\n\n # Sum up\n conds = C[:, :, int(C.size()[-1]/2):].sum(2)\n\n Batches_X = torch.cat([Batches_X.cuda(), X], 0)\n Batches_C = torch.cat([Batches_C.cuda(), C], 0)\n Batches_conds = torch.cat([Batches_conds.cuda(), conds.cuda()], 0)\n\n self._batches_x = Batches_X\n self._batches_c = Batches_C\n self._batches_conds = Batches_conds\n\n def __len__(self):\n return len(self._batches_x)\n\n def __getitem__(self, idx):\n \"\"\"\n Returns a tuple. (X, C, sum(C[mid:end])). \n X is the input, \n C is the condition, \n sum(C[mid:end]) is the sum of the indicators in C. 
It tells us how many of the condition\n columns remain unmasked (the indicator is 1 for kept columns)\n \"\"\"\n return self._batches_x[idx], self._batches_c[idx], self._batches_conds[idx]\n\n def get_all_items(self):\n if self.train is True:\n return self._batches_x, self._batches_c, self._batches_conds, self._P\n else:\n return self._batches_x, self._batches_c, self._batches_conds\n\n def get_projection_matrix(self):\n return self._P\n\n def get_color(self):\n return None\n\n def generate_projection_matrix(self):\n P = torch.zeros([self.model_kwargs['projection_dim'], self.model_kwargs['projection_dim']])\n col = 0\n for row in range(P.size()[0]):\n # col = torch.randint(0,self.model_kwargs['x_dim'],(1,)).item()\n # P[row][col] = torch.randn(1).item()\n P[row][col] = 1 + torch.randn(1).item()/100\n # P[row][col] = 1\n if col != self.model_kwargs['x_dim'] - 1:\n col += 1\n else:\n col = 0\n # P[row][col] = 1\n # print(P)\n return P\n\n def random_corr_mat(self, D=10, beta=1):\n \"\"\"Generate random valid correlation matrix of dimension D.\n Smaller beta gives larger off diagonal correlations (beta > 0).\"\"\"\n\n P = np.zeros([D, D])\n S = np.eye(D)\n\n for k in range(0, D - 1):\n for i in range(k + 1, D):\n P[k, i] = 2 * np.random.beta(beta, beta) - 1\n p = P[k, i]\n for l in reversed(range(k)):\n p = (\n p * np.sqrt((1 - P[l, i] ** 2) * (1 - P[l, k] ** 2)) + P[l, i] * P[l, k]\n )\n S[k, i] = S[i, k] = p\n\n p = np.random.permutation(D)\n for i in range(D):\n S[:, i] = S[p, i]\n for i in range(D):\n S[i, :] = S[i, p]\n return S\n",
"import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nimport torchvision\nfrom torch.distributions import MultivariateNormal\nimport numpy as np\n\n\ndef load_mnist_data(BATCH_SIZE, model_kwargs):\n from torchvision import transforms\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n transforms = transforms.Compose([transforms.ToTensor()])\n\n train_dataset = datasets.MNIST(\n './datasets/mnist_data',\n train=True,\n download=True,\n transform=transforms\n )\n\n test_dataset = datasets.MNIST(\n './datasets/mnist_data',\n train=False,\n download=True,\n transform=transforms\n )\n\n train_iterator = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n test_iterator = DataLoader(test_dataset, batch_size=BATCH_SIZE)\n\n return train_iterator, test_iterator\n"
] |
[
[
"torch.zeros",
"numpy.zeros",
"numpy.random.permutation",
"numpy.eye",
"torch.from_numpy",
"torch.mm",
"numpy.random.beta",
"torch.eye",
"numpy.sqrt",
"torch.empty",
"torch.randn"
],
[
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
]
] |
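ProjectedSyntheticDataset draws each batch from a MultivariateNormal whose covariance is either the identity or a random correlation matrix from random_corr_mat. The sampling core in isolation, CPU-only for illustration:

    import torch
    from torch.distributions import MultivariateNormal

    x_dim, batch_size = 4, 8
    m = MultivariateNormal(torch.zeros(x_dim), torch.eye(x_dim))  # the corr=False case
    X = m.sample((batch_size,))                                   # shape (8, 4)
    print(X.shape)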
n3011/deepchem
|
[
"c316d998c462ce01032f0dae883856b400ea4765"
] |
[
"deepchem/models/tensorgraph/symmetry_functions.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 6 20:43:23 2017\n\n@author: zqwu\n\"\"\"\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\nfrom deepchem.models.tensorgraph import activations\nfrom deepchem.models.tensorgraph import initializations\nfrom deepchem.models.tensorgraph import model_ops\nfrom deepchem.models.tensorgraph.layers import Layer\nfrom deepchem.models.tensorgraph.layers import convert_to_layers\nfrom deepchem.metrics import to_one_hot\n\n\nclass DistanceMatrix(Layer):\n\n def __init__(self, max_atoms, **kwargs):\n \"\"\"\n Parameters\n ----------\n max_atoms: int\n Maximum number of atoms in the dataset\n \"\"\"\n self.max_atoms = max_atoms\n super(DistanceMatrix, self).__init__(**kwargs)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate distance matrix for BPSymmetryFunction with trainable cutoff \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n max_atoms = self.max_atoms\n atom_coordinates = in_layers[0].out_tensor\n atom_flags = in_layers[1].out_tensor\n tensor1 = tf.tile(\n tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1))\n tensor2 = tf.tile(\n tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))\n # Calculate pairwise distance\n d = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=3))\n # Masking for valid atom index\n self.out_tensor = d * tf.to_float(atom_flags)\n\n\nclass DistanceCutoff(Layer):\n\n def __init__(self, max_atoms, cutoff=6 / 0.52917721092, **kwargs):\n \"\"\"\n Parameters\n ----------\n cutoff: float, optional\n cutoff threshold for distance, in Bohr(0.53Angstrom)\n \"\"\"\n self.max_atoms = max_atoms\n self.cutoff = cutoff\n super(DistanceCutoff, self).__init__(**kwargs)\n\n def build(self):\n self.Rc = tf.Variable(tf.constant(self.cutoff))\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate distance matrix for BPSymmetryFunction with trainable cutoff \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n self.build()\n d = in_layers[0].out_tensor\n d_flag = in_layers[1].out_tensor\n # Cutoff with threshold Rc\n d_flag = d_flag * tf.nn.relu(tf.sign(self.Rc - d))\n d = 0.5 * (tf.cos(np.pi * d / self.Rc) + 1)\n out_tensor = d * d_flag\n out_tensor = out_tensor * tf.expand_dims((1 - tf.eye(self.max_atoms)), 0)\n self.out_tensor = out_tensor\n\n\nclass RadialSymmetry(Layer):\n \"\"\" Radial Symmetry Function \"\"\"\n\n def __init__(self,\n max_atoms,\n Rs_init=None,\n ita_init=None,\n atomic_number_differentiated=False,\n atom_numbers=[1, 6, 7, 8],\n **kwargs):\n self.max_atoms = max_atoms\n self.atomic_number_differentiated = atomic_number_differentiated\n self.atom_number_cases = atom_numbers\n if Rs_init is None:\n self.Rs_init = np.array([0.5, 1.17, 1.83, 2.5, 3.17, 3.83, 4.5])\n self.Rs_init = self.Rs_init / 0.52917721092\n else:\n self.Rs_init = np.array(Rs_init)\n if ita_init is None:\n self.ita_init = np.array([1.12])\n else:\n self.ita_init = np.array(ita_init)\n\n super(RadialSymmetry, self).__init__(**kwargs)\n\n def build(self):\n \"\"\" Parameters for the Gaussian \"\"\"\n len_Rs = len(self.Rs_init)\n len_ita = len(self.ita_init)\n self.length = len_Rs * len_ita\n Rs_init, ita_init = np.meshgrid(self.Rs_init, self.ita_init)\n self.Rs = tf.constant(Rs_init.flatten(), dtype=tf.float32)\n self.ita = 
tf.constant(ita_init.flatten(), dtype=tf.float32)\n self.atom_number_embedding = tf.eye(max(self.atom_number_cases) + 1)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate Radial Symmetry Function \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n self.build()\n d_cutoff = in_layers[0].out_tensor\n d = in_layers[1].out_tensor\n if self.atomic_number_differentiated:\n atom_numbers = in_layers[2].out_tensor\n atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,\n atom_numbers)\n d_cutoff = tf.stack([d_cutoff] * self.length, axis=3)\n d = tf.stack([d] * self.length, axis=3)\n Rs = tf.reshape(self.Rs, (1, 1, 1, -1))\n ita = tf.reshape(self.ita, (1, 1, 1, -1))\n out_tensor = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff\n if self.atomic_number_differentiated:\n out_tensors = []\n for atom_type in self.atom_number_cases:\n selected_atoms = tf.expand_dims(\n tf.expand_dims(atom_number_embedded[:, :, atom_type], axis=1),\n axis=3)\n out_tensors.append(tf.reduce_sum(out_tensor * selected_atoms, axis=2))\n self.out_tensor = tf.concat(out_tensors, axis=2)\n else:\n self.out_tensor = tf.reduce_sum(out_tensor, axis=2)\n\n\nclass AngularSymmetry(Layer):\n \"\"\" Angular Symmetry Function \"\"\"\n\n def __init__(self,\n max_atoms,\n lambd_init=None,\n ita_init=None,\n zeta_init=None,\n **kwargs):\n self.max_atoms = max_atoms\n if lambd_init is None:\n self.lambd_init = np.array([1., -1.])\n else:\n self.lambd_init = np.array(lambd_init)\n\n if ita_init is None:\n self.ita_init = np.array([4.])\n else:\n self.ita_init = np.array(ita_init)\n\n if zeta_init is None:\n self.zeta_init = np.array([2., 4., 8.])\n else:\n self.zeta_init = np.array(zeta_init)\n\n super(AngularSymmetry, self).__init__(**kwargs)\n\n def build(self):\n len_lambd = len(self.lambd_init)\n len_ita = len(self.ita_init)\n len_zeta = len(self.zeta_init)\n self.length = len_lambd * len_ita * len_zeta\n\n lambd_init, ita_init, zeta_init = np.meshgrid(self.lambd_init,\n self.ita_init, self.zeta_init)\n self.lambd = tf.constant(lambd_init.flatten(), dtype=tf.float32)\n self.ita = tf.constant(ita_init.flatten(), dtype=tf.float32)\n self.zeta = tf.constant(zeta_init.flatten(), dtype=tf.float32)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate Angular Symmetry Function \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n self.build()\n max_atoms = self.max_atoms\n d_cutoff = in_layers[0].out_tensor\n d = in_layers[1].out_tensor\n atom_coordinates = in_layers[2].out_tensor\n vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \\\n tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))\n R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))\n R_ik = tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))\n R_jk = tf.tile(tf.expand_dims(d, axis=1), (1, max_atoms, 1, 1))\n f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))\n f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))\n f_R_jk = tf.tile(tf.expand_dims(d_cutoff, axis=1), (1, max_atoms, 1, 1))\n\n # Define angle theta = R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance)\n theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \\\n tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)\n\n theta = 
tf.div(theta, R_ij * R_ik + 1e-5)\n\n R_ij = tf.stack([R_ij] * self.length, axis=4)\n R_ik = tf.stack([R_ik] * self.length, axis=4)\n R_jk = tf.stack([R_jk] * self.length, axis=4)\n f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)\n f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)\n f_R_jk = tf.stack([f_R_jk] * self.length, axis=4)\n\n theta = tf.stack([theta] * self.length, axis=4)\n lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))\n zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))\n ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))\n\n out_tensor = tf.pow(1 + lambd * tf.cos(theta), zeta) * \\\n tf.exp(-ita * (tf.square(R_ij) + tf.square(R_ik) + tf.square(R_jk))) * \\\n f_R_ij * f_R_ik * f_R_jk\n self.out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3]) * \\\n tf.pow(tf.constant(2.), 1 - tf.reshape(self.zeta, (1, 1, -1)))\n\n\nclass AngularSymmetryMod(Layer):\n \"\"\" Angular Symmetry Function \"\"\"\n\n def __init__(self,\n max_atoms,\n lambd_init=None,\n ita_init=None,\n zeta_init=None,\n Rs_init=None,\n thetas_init=None,\n atomic_number_differentiated=False,\n atom_numbers=[1, 6, 7, 8],\n **kwargs):\n self.max_atoms = max_atoms\n self.atomic_number_differentiated = atomic_number_differentiated\n self.atom_number_cases = atom_numbers\n if lambd_init is None:\n self.lambd_init = np.array([1., -1.])\n else:\n self.lambd_init = np.array(lambd_init)\n\n if ita_init is None:\n self.ita_init = np.array([1.12])\n else:\n self.ita_init = np.array(ita_init)\n\n if zeta_init is None:\n self.zeta_init = np.array([4.])\n else:\n self.zeta_init = np.array(zeta_init)\n\n if Rs_init is None:\n self.Rs_init = np.array([0.5, 1.17, 1.83, 2.5, 3.17])\n self.Rs_init = self.Rs_init / 0.52917721092\n else:\n self.Rs_init = np.array(Rs_init)\n\n if thetas_init is None:\n self.thetas_init = np.array([0., 1.57, 3.14, 4.71])\n else:\n self.thetas_init = np.array(thetas_init)\n super(AngularSymmetryMod, self).__init__(**kwargs)\n\n def build(self):\n len_lambd = len(self.lambd_init)\n len_ita = len(self.ita_init)\n len_zeta = len(self.zeta_init)\n len_Rs = len(self.Rs_init)\n len_thetas = len(self.thetas_init)\n self.length = len_lambd * len_ita * len_zeta * len_Rs * len_thetas\n\n lambd_init, ita_init, zeta_init, Rs_init, thetas_init = \\\n np.meshgrid(self.lambd_init, self.ita_init, self.zeta_init, self.Rs_init, self.thetas_init)\n self.lambd = tf.constant(lambd_init.flatten(), dtype=tf.float32)\n self.ita = tf.constant(ita_init.flatten(), dtype=tf.float32)\n self.zeta = tf.constant(zeta_init.flatten(), dtype=tf.float32)\n self.Rs = tf.constant(Rs_init.flatten(), dtype=tf.float32)\n self.thetas = tf.constant(thetas_init.flatten(), dtype=tf.float32)\n self.atom_number_embedding = tf.eye(max(self.atom_number_cases) + 1)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate Angular Symmetry Function \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n self.build()\n max_atoms = self.max_atoms\n d_cutoff = in_layers[0].out_tensor\n d = in_layers[1].out_tensor\n atom_coordinates = in_layers[2].out_tensor\n if self.atomic_number_differentiated:\n atom_numbers = in_layers[3].out_tensor\n atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,\n atom_numbers)\n\n vector_distances = tf.tile(tf.expand_dims(atom_coordinates, axis=2), (1, 1, max_atoms, 1)) - \\\n tf.tile(tf.expand_dims(atom_coordinates, axis=1), (1, max_atoms, 1, 1))\n R_ij = tf.tile(tf.expand_dims(d, axis=3), (1, 1, 1, max_atoms))\n R_ik = 
tf.tile(tf.expand_dims(d, axis=2), (1, 1, max_atoms, 1))\n f_R_ij = tf.tile(tf.expand_dims(d_cutoff, axis=3), (1, 1, 1, max_atoms))\n f_R_ik = tf.tile(tf.expand_dims(d_cutoff, axis=2), (1, 1, max_atoms, 1))\n\n # Define angle theta = R_ij(Vector) dot R_ik(Vector)/R_ij(distance)/R_ik(distance)\n theta = tf.reduce_sum(tf.tile(tf.expand_dims(vector_distances, axis=3), (1, 1, 1, max_atoms, 1)) * \\\n tf.tile(tf.expand_dims(vector_distances, axis=2), (1, 1, max_atoms, 1, 1)), axis=4)\n\n theta = tf.div(theta, R_ij * R_ik + 1e-5)\n\n R_ij = tf.stack([R_ij] * self.length, axis=4)\n R_ik = tf.stack([R_ik] * self.length, axis=4)\n f_R_ij = tf.stack([f_R_ij] * self.length, axis=4)\n f_R_ik = tf.stack([f_R_ik] * self.length, axis=4)\n\n theta = tf.stack([theta] * self.length, axis=4)\n lambd = tf.reshape(self.lambd, (1, 1, 1, 1, -1))\n zeta = tf.reshape(self.zeta, (1, 1, 1, 1, -1))\n ita = tf.reshape(self.ita, (1, 1, 1, 1, -1))\n Rs = tf.reshape(self.Rs, (1, 1, 1, 1, -1))\n thetas = tf.reshape(self.thetas, (1, 1, 1, 1, -1))\n\n out_tensor = tf.pow(1 + lambd * tf.cos(theta - thetas), zeta) * \\\n tf.exp(-ita * tf.square((R_ij + R_ik) / 2 - Rs)) * \\\n f_R_ij * f_R_ik * tf.pow(tf.constant(2.), 1 - zeta)\n if self.atomic_number_differentiated:\n out_tensors = []\n for atom_type_j in self.atom_number_cases:\n for atom_type_k in self.atom_number_cases:\n selected_atoms = tf.stack([atom_number_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \\\n tf.stack([atom_number_embedded[:, :, atom_type_k]] * max_atoms, axis=1)\n selected_atoms = tf.expand_dims(\n tf.expand_dims(selected_atoms, axis=1), axis=4)\n out_tensors.append(\n tf.reduce_sum(out_tensor * selected_atoms, axis=[2, 3]))\n self.out_tensor = tf.concat(out_tensors, axis=2)\n else:\n self.out_tensor = tf.reduce_sum(out_tensor, axis=[2, 3])\n\n\nclass BPFeatureMerge(Layer):\n\n def __init__(self, max_atoms, **kwargs):\n self.max_atoms = max_atoms\n super(BPFeatureMerge, self).__init__(**kwargs)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Merge features together \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n atom_embedding = in_layers[0].out_tensor\n radial_symmetry = in_layers[1].out_tensor\n angular_symmetry = in_layers[2].out_tensor\n atom_flags = in_layers[3].out_tensor\n\n out_tensor = tf.concat(\n [atom_embedding, radial_symmetry, angular_symmetry], axis=2)\n self.out_tensor = out_tensor * atom_flags[:, :, 0:1]\n\n\nclass BPGather(Layer):\n\n def __init__(self, max_atoms, **kwargs):\n self.max_atoms = max_atoms\n super(BPGather, self).__init__(**kwargs)\n\n def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Merge features together \"\"\"\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n out_tensor = in_layers[0].out_tensor\n flags = in_layers[1].out_tensor\n\n out_tensor = tf.reduce_sum(out_tensor * flags[:, :, 0:1], axis=1)\n self.out_tensor = out_tensor\n\n\nclass AtomicDifferentiatedDense(Layer):\n \"\"\" Separate Dense module for different atoms \"\"\"\n\n def __init__(self,\n max_atoms,\n out_channels,\n atom_number_cases=[1, 6, 7, 8],\n init='glorot_uniform',\n activation='relu',\n **kwargs):\n self.init = init # Set weight initialization\n self.activation = activation # Get activations\n self.max_atoms = max_atoms\n self.out_channels = out_channels\n self.atom_number_cases = atom_number_cases\n\n super(AtomicDifferentiatedDense, self).__init__(**kwargs)\n\n def 
create_tensor(self, in_layers=None, set_tensors=True, **kwargs):\n \"\"\" Generate Radial Symmetry Function \"\"\"\n init_fn = initializations.get(self.init) # Set weight initialization\n activation_fn = activations.get(self.activation)\n if in_layers is None:\n in_layers = self.in_layers\n in_layers = convert_to_layers(in_layers)\n\n inputs = in_layers[0].out_tensor\n atom_numbers = in_layers[1].out_tensor\n in_channels = inputs.get_shape().as_list()[-1]\n self.W = init_fn(\n [len(self.atom_number_cases), in_channels, self.out_channels])\n\n self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))\n outputs = []\n for i, atom_case in enumerate(self.atom_number_cases):\n # optimization to allow for tensorcontraction/broadcasted mmul\n # using a reshape trick. Note that the np and tf matmul behavior\n # differs when dealing with broadcasts\n\n a = inputs # (i,j,k)\n b = self.W[i, :, :] # (k, l)\n\n ai = tf.shape(a)[0]\n aj = tf.shape(a)[1]\n ak = tf.shape(a)[2]\n bl = tf.shape(b)[1]\n\n output = activation_fn(\n tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b), [ai, aj, bl]) +\n self.b[i, :])\n\n mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))\n output = tf.reshape(output * tf.expand_dims(mask, 2),\n (-1, self.max_atoms, self.out_channels))\n outputs.append(output)\n self.out_tensor = tf.add_n(outputs)\n\n def none_tensors(self):\n w, b, out_tensor = self.W, self.b, self.out_tensor\n self.W, self.b, self.out_tensor = None, None, None\n return w, b, out_tensor\n\n def set_tensors(self, tensor):\n self.W, self.b, self.out_tensor = tensor\n"
] |
[
[
"tensorflow.square",
"numpy.array",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.eye",
"tensorflow.expand_dims",
"tensorflow.cos",
"tensorflow.reshape",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.sign",
"tensorflow.div",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.to_float",
"tensorflow.nn.embedding_lookup",
"numpy.meshgrid",
"tensorflow.cast"
]
] |
nmiles2718/PACMan_dist
|
[
"62f495f8d506919146a16750072a523aaafb3c53"
] |
[
"utils/analyzer.py"
] |
[
"#!/usr/bin/env python\n\nimport os\n\nfrom matplotlib.ticker import AutoMinorLocator\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\nimport pandas as pd\n\n\nclass PACManAnalyze():\n def __init__(self):\n \"\"\" This class provides functionality for analyzing the model results\n \"\"\"\n self._cycle = None\n self._computed_accuracy = None\n self._model_results = None\n\n @property\n def model_results(self):\n \"\"\"PACMan classification results\"\"\"\n return self._model_results\n\n @model_results.setter\n def model_results(self, value):\n self._model_results = value\n\n @property\n def computed_accuracy(self):\n \"\"\"Computed accuracy of the classifier\"\"\"\n return self._computed_accuracy\n\n @computed_accuracy.setter\n def computed_accuracy(self, value):\n self._computed_accuracy = value\n\n def load_model_results(self, fname=None, training=False):\n if training:\n fname = os.path.join(\n self.results_dir,\n 'training',\n fname\n )\n else:\n fname = os.path.join(\n self.results_dir,\n 'production',\n fname\n )\n return pd.read_csv(fname, header=0)\n\n def compute_accuracy_measurements(self, df=None, normalize=False):\n \"\"\"Compute the classification accuracies\n\n Compute the standard accuracy and a custom accuracy. The standard\n accuracy is the number of proposals where the correct category\n is the most probable. The custom accuracy is the number\n of proposals where the correct category is in the top two most probable\n classes.\n\n Parameters\n ----------\n df\n encoder\n normalie\n\n Returns\n -------\n\n \"\"\"\n\n custom_accuracy = 0\n custom_accuracy_dict = {}\n if df is None:\n # df = self.\n pass\n # Get the total number of proposals per category\n proposal_numbers = df['hand_classification'].value_counts()\n # Generate a nested dictionary to store the results\n for c in self.encoder.classes_:\n custom_accuracy_dict[c] = {}\n\n for key in custom_accuracy_dict.keys():\n custom_accuracy_dict[key]['top'] = []\n custom_accuracy_dict[key]['top_two'] = []\n custom_accuracy_dict[key]['misclassified'] = []\n\n for num, row in df.iterrows():\n hand_classification = row['hand_classification']\n # print(hand_classification)\n prob_flag = row.index.str.contains('prob')\n top_two = row[prob_flag].sort_values(ascending=False)[:2]\n categories = list(top_two.index)\n categories = [val.replace('_prob', '').replace('_', ' ') for val in\n categories]\n\n if hand_classification == categories[0]:\n custom_accuracy_dict[hand_classification]['top'].append(1)\n custom_accuracy += 1\n elif hand_classification in categories:\n custom_accuracy_dict[hand_classification]['top_two'].append(1)\n custom_accuracy += 1\n else:\n custom_accuracy_dict[hand_classification]['misclassified'].append(\n 1\n )\n # Reformat the results so we can generate a dataframe for plotting\n computed_results = {'misclassified': [], 'top_two': [], 'top': []}\n index = []\n for cat in custom_accuracy_dict.keys():\n index.append(cat)\n for key in custom_accuracy_dict[cat].keys():\n num_per_key = sum(custom_accuracy_dict[cat][key])\n if normalize:\n frac_of_dataset = num_per_key / proposal_numbers[cat]\n else:\n frac_of_dataset = num_per_key\n computed_results[key].append(frac_of_dataset)\n print(\n f\"Total number of {cat} proposals in {key}: \"\n f\"{num_per_key / proposal_numbers[cat]:.2f}\"\n )\n print(\"-\"*60)\n\n self.computed_accuracy = pd.DataFrame(computed_results, index=index)\n\n def plot_barh(self, df, fout=None):\n \"\"\"\n\n Parameters\n ----------\n df\n\n Returns\n -------\n\n \"\"\"\n if fout is 
None:\n fout = os.path.join(\n self.base,\n f\"cy{self.cycle}_accuracy.png\"\n )\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 5))\n ax = df.plot.barh(\n stacked=True,\n color=['g', 'y', 'r'],\n ax=ax,\n legend=False\n )\n handles, labels = ax.get_legend_handles_labels()\n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n ax.legend(handles, labels, bbox_to_anchor=(1., 1.01), edgecolor='k')\n ax.set_xlabel('Percentage of Proposals')\n ax.set_title(f'Cycle {self.cycle} Classification Results')\n fig.savefig(fout,\n format='png',\n dpi=250,\n bbox_inches='tight')\n plt.show()"
] |
[
[
"matplotlib.ticker.AutoMinorLocator",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
lv-cha/zipline-chinese
|
[
"86904cac4b6e928271f640910aa83675ce945b8b"
] |
[
"zipline/protocol.py"
] |
[
"#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom copy import copy\n\nfrom six import iteritems, iterkeys\nimport pandas as pd\nimport numpy as np\n\nfrom .utils.enum import enum\nfrom .utils.math_utils import nanstd, nanmean, nansum\n\nfrom zipline.utils.algo_instance import get_algo_instance\nfrom zipline.utils.serialization_utils import (\n VERSION_LABEL\n)\n\n# Datasource type should completely determine the other fields of a\n# message with its type.\nDATASOURCE_TYPE = enum(\n 'AS_TRADED_EQUITY',\n 'MERGER',\n 'SPLIT',\n 'DIVIDEND',\n 'TRADE',\n 'TRANSACTION',\n 'ORDER',\n 'EMPTY',\n 'DONE',\n 'CUSTOM',\n 'BENCHMARK',\n 'COMMISSION',\n 'CLOSE_POSITION'\n)\n\n# Expected fields/index values for a dividend Series.\nDIVIDEND_FIELDS = [\n 'declared_date',\n 'ex_date',\n 'gross_amount',\n 'net_amount',\n 'pay_date',\n 'payment_sid',\n 'ratio',\n 'sid',\n]\n# Expected fields/index values for a dividend payment Series.\nDIVIDEND_PAYMENT_FIELDS = [\n 'id',\n 'payment_sid',\n 'cash_amount',\n 'share_count',\n]\n\n\ndef dividend_payment(data=None):\n \"\"\"\n Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a\n series representing the payment of a dividend.\n\n Ids are assigned to each historical dividend in\n PerformanceTracker.update_dividends. They are guaranteed to be unique\n integers with the context of a single simulation. If @data is non-empty, a\n id is required to identify the historical dividend associated with this\n payment.\n\n Additionally, if @data is non-empty, either data['cash_amount'] should be\n nonzero or data['payment_sid'] should be an asset identifier and\n data['share_count'] should be nonzero.\n\n The returned Series is given its id value as a name so that concatenating\n payments results in a DataFrame indexed by id. (Note, however, that the\n name value is not used to construct an index when this series is returned\n by function passed to `DataFrame.apply`. 
In such a case, pandas preserves\n the index of the DataFrame on which `apply` is being called.)\n \"\"\"\n return pd.Series(\n data=data,\n name=data['id'] if data is not None else None,\n index=DIVIDEND_PAYMENT_FIELDS,\n dtype=object,\n )\n\n\nclass Event(object):\n\n def __init__(self, initial_values=None):\n if initial_values:\n self.__dict__ = initial_values\n\n def __getitem__(self, name):\n return getattr(self, name)\n\n def __setitem__(self, name, value):\n setattr(self, name, value)\n\n def __delitem__(self, name):\n delattr(self, name)\n\n def keys(self):\n return self.__dict__.keys()\n\n def __eq__(self, other):\n return hasattr(other, '__dict__') and self.__dict__ == other.__dict__\n\n def __contains__(self, name):\n return name in self.__dict__\n\n def __repr__(self):\n return \"Event({0})\".format(self.__dict__)\n\n def to_series(self, index=None):\n return pd.Series(self.__dict__, index=index)\n\n\nclass Order(Event):\n pass\n\n\nclass Portfolio(object):\n\n def __init__(self):\n self.capital_used = 0.0\n self.starting_cash = 0.0\n self.portfolio_value = 0.0\n self.pnl = 0.0\n self.returns = 0.0\n self.cash = 0.0\n self.positions = Positions()\n self.start_date = None\n self.positions_value = 0.0\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __repr__(self):\n return \"Portfolio({0})\".format(self.__dict__)\n\n def __getstate__(self):\n\n state_dict = copy(self.__dict__)\n\n # Have to convert to primitive dict\n state_dict['positions'] = dict(self.positions)\n\n STATE_VERSION = 1\n state_dict[VERSION_LABEL] = STATE_VERSION\n\n return state_dict\n\n def __setstate__(self, state):\n\n OLDEST_SUPPORTED_STATE = 1\n version = state.pop(VERSION_LABEL)\n\n if version < OLDEST_SUPPORTED_STATE:\n raise BaseException(\"Portfolio saved state is too old.\")\n\n self.positions = Positions()\n self.positions.update(state.pop('positions'))\n\n self.__dict__.update(state)\n\n\nclass Account(object):\n '''\n The account object tracks information about the trading account. 
The\n values are updated as the algorithm runs and its keys remain unchanged.\n If connected to a broker, one can update these values with the trading\n account values as reported by the broker.\n '''\n\n def __init__(self):\n self.settled_cash = 0.0\n self.accrued_interest = 0.0\n self.buying_power = float('inf')\n self.equity_with_loan = 0.0\n self.total_positions_value = 0.0\n self.regt_equity = 0.0\n self.regt_margin = float('inf')\n self.initial_margin_requirement = 0.0\n self.maintenance_margin_requirement = 0.0\n self.available_funds = 0.0\n self.excess_liquidity = 0.0\n self.cushion = 0.0\n self.day_trades_remaining = float('inf')\n self.leverage = 0.0\n self.net_leverage = 0.0\n self.net_liquidation = 0.0\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __repr__(self):\n return \"Account({0})\".format(self.__dict__)\n\n def __getstate__(self):\n\n state_dict = copy(self.__dict__)\n\n STATE_VERSION = 1\n state_dict[VERSION_LABEL] = STATE_VERSION\n\n return state_dict\n\n def __setstate__(self, state):\n\n OLDEST_SUPPORTED_STATE = 1\n version = state.pop(VERSION_LABEL)\n\n if version < OLDEST_SUPPORTED_STATE:\n raise BaseException(\"Account saved state is too old.\")\n\n self.__dict__.update(state)\n\n\nclass Position(object):\n\n def __init__(self, sid):\n self.sid = sid\n self.amount = 0\n self.cost_basis = 0.0 # per share\n self.last_sale_price = 0.0\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __repr__(self):\n return \"Position({0})\".format(self.__dict__)\n\n def __getstate__(self):\n\n state_dict = copy(self.__dict__)\n\n STATE_VERSION = 1\n state_dict[VERSION_LABEL] = STATE_VERSION\n\n return state_dict\n\n def __setstate__(self, state):\n\n OLDEST_SUPPORTED_STATE = 1\n version = state.pop(VERSION_LABEL)\n\n if version < OLDEST_SUPPORTED_STATE:\n raise BaseException(\"Protocol Position saved state is too old.\")\n\n self.__dict__.update(state)\n\n\nclass Positions(dict):\n\n def __missing__(self, key):\n pos = Position(key)\n self[key] = pos\n return pos\n\n\nclass SIDData(object):\n # Cache some data on the class so that this is shared for all instances of\n # siddata.\n\n # The dt where we cached the history.\n _history_cache_dt = None\n # _history_cache is a a dict mapping fields to pd.DataFrames. This is the\n # most data we have for a given field for the _history_cache_dt.\n _history_cache = {}\n\n # This is the cache that is used for returns. This will have a different\n # structure than the other history cache as this is always daily.\n _returns_cache_dt = None\n _returns_cache = None\n\n # The last dt that we needed to cache the number of minutes.\n _minute_bar_cache_dt = None\n # If we are in minute mode, there is some cost associated with computing\n # the number of minutes that we need to pass to the bar count of history.\n # This will remain constant for a given bar and day count.\n # This maps days to number of minutes.\n _minute_bar_cache = {}\n\n def __init__(self, sid, initial_values=None):\n self._sid = sid\n self._freqstr = None\n\n # To check if we have data, we use the __len__ which depends on the\n # __dict__. 
Because we are foward defining the attributes needed, we\n # need to account for their entrys in the __dict__.\n # We will add 1 because we need to account for the _initial_len entry\n # itself.\n self._initial_len = len(self.__dict__) + 1\n\n if initial_values:\n self.__dict__.update(initial_values)\n\n @property\n def datetime(self):\n \"\"\"\n Provides an alias from data['foo'].datetime -> data['foo'].dt\n\n `datetime` was previously provided by adding a seperate `datetime`\n member of the SIDData object via a generator that wrapped the incoming\n data feed and added the field to each equity event.\n\n This alias is intended to be temporary, to provide backwards\n compatibility with existing algorithms, but should be considered\n deprecated, and may be removed in the future.\n \"\"\"\n return self.dt\n\n def get(self, name, default=None):\n return self.__dict__.get(name, default)\n\n def __getitem__(self, name):\n return self.__dict__[name]\n\n def __setitem__(self, name, value):\n self.__dict__[name] = value\n\n def __len__(self):\n return len(self.__dict__) - self._initial_len\n\n def __contains__(self, name):\n return name in self.__dict__\n\n def __repr__(self):\n return \"SIDData({0})\".format(self.__dict__)\n\n def _get_buffer(self, bars, field='price', raw=False):\n \"\"\"\n Gets the result of history for the given number of bars and field.\n\n This will cache the results internally.\n \"\"\"\n cls = self.__class__\n algo = get_algo_instance()\n\n now = algo.datetime\n if now != cls._history_cache_dt:\n # For a given dt, the history call for this field will not change.\n # We have a new dt, so we should reset the cache.\n cls._history_cache_dt = now\n cls._history_cache = {}\n\n if field not in self._history_cache \\\n or bars > len(cls._history_cache[field][0].index):\n # If we have never cached this field OR the amount of bars that we\n # need for this field is greater than the amount we have cached,\n # then we need to get more history.\n hst = algo.history(\n bars, self._freqstr, field, ffill=True,\n )\n # Assert that the column holds ints, not security objects.\n if not isinstance(self._sid, str):\n hst.columns = hst.columns.astype(int)\n self._history_cache[field] = (hst, hst.values, hst.columns)\n\n # Slice of only the bars needed. This is because we strore the LARGEST\n # amount of history for the field, and we might request less than the\n # largest from the cache.\n buffer_, values, columns = cls._history_cache[field]\n if raw:\n sid_index = columns.get_loc(self._sid)\n return values[-bars:, sid_index]\n else:\n return buffer_[self._sid][-bars:]\n\n def _cache_daily_minutely(self, days, fn):\n \"\"\"\n Gets the number of bars needed for the current number of days.\n\n Figures this out based on the algo datafrequency and caches the result.\n This caches the result by replacing this function on the object.\n This means that after the first call to _get_bars, this method will\n point to a new function object.\n\n \"\"\"\n def daily_get_max_bars(days):\n return days\n\n def minute_get_max_bars(days):\n # max number of minute. 
regardless of current days or short\n # sessions\n return days * 390\n\n def daily_get_bars(days):\n return days\n\n def minute_get_bars(days):\n cls = self.__class__\n\n now = get_algo_instance().datetime\n if now != cls._minute_bar_cache_dt:\n cls._minute_bar_cache_dt = now\n cls._minute_bar_cache = {}\n\n if days not in cls._minute_bar_cache:\n # Cache this calculation to happen once per bar, even if we\n # use another transform with the same number of days.\n env = get_algo_instance().trading_environment\n prev = env.previous_trading_day(now)\n ds = env.days_in_range(\n env.add_trading_days(-days + 2, prev),\n prev,\n )\n # compute the number of minutes in the (days - 1) days before\n # today.\n # 210 minutes in a an early close and 390 in a full day.\n ms = sum(210 if d in env.early_closes else 390 for d in ds)\n # Add the number of minutes for today.\n ms += int(\n (now - env.get_open_and_close(now)[0]).total_seconds() / 60\n )\n\n cls._minute_bar_cache[days] = ms + 1 # Account for this minute\n\n return cls._minute_bar_cache[days]\n\n if get_algo_instance().sim_params.data_frequency == 'daily':\n self._freqstr = '1d'\n # update this method to point to the daily variant.\n self._get_bars = daily_get_bars\n self._get_max_bars = daily_get_max_bars\n else:\n self._freqstr = '1m'\n # update this method to point to the minute variant.\n self._get_bars = minute_get_bars\n self._get_max_bars = minute_get_max_bars\n\n # NOTE: This silently adds these two entries to the `__dict__`\n # without affecting the `__len__` of the object. This is important\n # because we use the `len` of the `SIDData` object to see if we have\n # data for this asset.\n self._initial_len += 2\n\n # Not actually recursive because we have already cached the new method.\n return getattr(self, fn)(days)\n\n def _get_bars(self, bars):\n return self._cache_daily_minutely(bars, fn='_get_bars')\n\n def _get_max_bars(self, bars):\n return self._cache_daily_minutely(bars, fn='_get_max_bars')\n\n def mavg(self, days):\n bars = self._get_bars(days)\n max_bars = self._get_max_bars(days)\n prices = self._get_buffer(max_bars, raw=True)[-bars:]\n return nanmean(prices)\n\n def stddev(self, days):\n bars = self._get_bars(days)\n max_bars = self._get_max_bars(days)\n prices = self._get_buffer(max_bars, raw=True)[-bars:]\n return nanstd(prices, ddof=1)\n\n def vwap(self, days):\n bars = self._get_bars(days)\n max_bars = self._get_max_bars(days)\n prices = self._get_buffer(max_bars, raw=True)[-bars:]\n vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]\n\n vol_sum = nansum(vols)\n try:\n ret = nansum(prices * vols) / vol_sum\n except ZeroDivisionError:\n ret = np.nan\n\n return ret\n\n def returns(self):\n algo = get_algo_instance()\n\n now = algo.datetime\n if now != self._returns_cache_dt:\n self._returns_cache_dt = now\n self._returns_cache = algo.history(2, '1d', 'price', ffill=True)\n\n hst = self._returns_cache[self._sid]\n return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]\n\n\nclass BarData(object):\n \"\"\"\n Holds the event data for all sids for a given dt.\n\n This is what is passed as `data` to the `handle_data` function.\n\n Note: Many methods are analogues of dictionary because of historical\n usage of what this replaced as a dictionary subclass.\n \"\"\"\n\n def __init__(self, data=None):\n self._data = data if data is not None else {}\n self._contains_override = None\n\n def __contains__(self, name):\n if self._contains_override:\n if self._contains_override(name):\n return name in self._data\n else:\n 
return False\n else:\n return name in self._data\n\n def has_key(self, name):\n \"\"\"\n DEPRECATED: __contains__ is preferred, but this method is for\n compatibility with existing algorithms.\n \"\"\"\n return name in self\n\n def __setitem__(self, name, value):\n self._data[name] = value\n\n def __getitem__(self, name):\n return self._data[name]\n\n def __delitem__(self, name):\n del self._data[name]\n\n def __iter__(self):\n for sid, data in iteritems(self._data):\n # Allow contains override to filter out sids.\n if sid in self:\n if len(data):\n yield sid\n\n def iterkeys(self):\n # Allow contains override to filter out sids.\n return (sid for sid in iterkeys(self._data) if sid in self)\n\n def keys(self):\n # Allow contains override to filter out sids.\n return list(self.iterkeys())\n\n def itervalues(self):\n return (value for _sid, value in self.iteritems())\n\n def values(self):\n return list(self.itervalues())\n\n def iteritems(self):\n return ((sid, value) for sid, value\n in iteritems(self._data)\n if sid in self)\n\n def items(self):\n return list(self.iteritems())\n\n def __len__(self):\n return len(self.keys())\n\n def __repr__(self):\n return '{0}({1})'.format(self.__class__.__name__, self._data)\n"
] |
[
[
"pandas.Series"
]
] |
sgzqc/wechat
|
[
"6589915c46b8f51d28dba61c6da9702821f5b47c"
] |
[
"20211112/20211112.py"
] |
[
"import os\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nimport numpy as np\r\n\r\nplt.rcParams['figure.figsize'] = (16, 8)\r\n\r\n\r\ndef test1():\r\n # left data\r\n M, N = 16, 4\r\n dadosEmp = np.random.random((N, M)) * 0.9 + 0.1\r\n empilha = 100 * dadosEmp / np.sum(dadosEmp, axis=0)\r\n # right data\r\n folhas = 64\r\n area = np.random.random(folhas) * 3 + 1\r\n area = np.round_(area, decimals=2)\r\n cores = np.random.random(folhas)\r\n lado = area.sum() ** 0.5\r\n # param\r\n cmapArvore = cm.get_cmap('rainbow')\r\n cores = cmapArvore(cores)\r\n from squarify import squarify\r\n partes = squarify(area, 0, 0, lado, lado)\r\n x = [parte['x'] for parte in partes]\r\n y = [parte['y'] for parte in partes]\r\n dx = [parte['dx'] for parte in partes]\r\n dy = [parte['dy'] for parte in partes]\r\n fig, (axA, axB) = plt.subplots(1, 2)\r\n # draw left\r\n axA.stackplot(np.arange(M), empilha, baseline='zero')\r\n axA.set_title('stack_plot')\r\n axA.set_ylabel('ratio')\r\n axA.set_xticks(np.arange(M))\r\n axA.set_yticks(np.linspace(0, 100, M))\r\n axA.set_xticklabels([chr(i + ord('a')) for i in range(M)])\r\n axA.legend(['G{}'.format(i + 1) for i in range(N)])\r\n axA.grid(alpha=0.75, linestyle=':')\r\n # draw right\r\n axB.bar(x, dy, width=dx, bottom=y, color=cores, align='edge')\r\n for p, a in zip(partes, area):\r\n x, y, dx, dy = p['x'], p['y'], p['dx'], p['dy']\r\n axB.text(x + dx * 0.5, y + dy * 0.5, a, va='center', ha='center')\r\n axB.set_title('squarify')\r\n plt.show()\r\n\r\n\r\ndef test2():\r\n # 统计数据\r\n entrev_dia = 1000\r\n dias = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri']\r\n ndias = len(dias)\r\n mu = 4 + np.random.random(ndias) * 2\r\n sigma = 0.5 + np.random.random(ndias) * 2\r\n horas = np.random.normal(mu, sigma, (entrev_dia, ndias))\r\n horas += np.random.random((entrev_dia, ndias)) * 2 - 1\r\n # 显示参数\r\n cmapStat = cm.get_cmap('cool')\r\n posicao = np.arange(ndias) * 1.5\r\n fig, (axA, axB) = plt.subplots(1, 2)\r\n # 箱图和小提琴图\r\n bplots = axA.boxplot(horas, positions=posicao - 0.25,\r\n vert=True, widths=0.25,\r\n patch_artist=True, notch=True)\r\n violins = axA.violinplot(horas, positions=posicao + 0.25,\r\n widths=0.25, showmeans=True)\r\n for i, (box, violin) in enumerate(zip(bplots['boxes'], violins['bodies'])):\r\n cor = cmapStat(i / ndias)\r\n box.set_facecolor(cor)\r\n violin.set_facecolor(cor)\r\n violin.set_edgecolor('black')\r\n violin.set_alpha(0.75)\r\n axA.set_title('box_violin')\r\n axA.set_ylabel('sleep time')\r\n axA.set_xticks(posicao)\r\n axA.set_yticks(range(1, 10))\r\n axA.set_xticklabels(dias)\r\n axA.set_xlim((-0.5, 6.5))\r\n axA.grid(alpha=0.75, linestyle=':')\r\n\r\n # Histogram\r\n n, bins, patches = axB.hist(horas, bins=50, stacked=True)\r\n for i, patchList in enumerate(patches):\r\n for patch in patchList:\r\n patch.set_facecolor(cmapStat(i / ndias))\r\n axB.set_title('Histograma')\r\n axB.set_xlabel('sleep time')\r\n axB.set_ylabel('count of people')\r\n axB.legend(dias)\r\n plt.show()\r\n\r\n pass\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test1()\r\n test2()"
] |
[
[
"numpy.random.normal",
"numpy.round_",
"matplotlib.cm.get_cmap",
"numpy.sum",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.arange",
"numpy.random.random",
"numpy.linspace"
]
] |
aess14/Cursos-Uniandes
|
[
"be016b25f2f49788235fbe91ec577fd16b9ad613"
] |
[
"Metodos Computacionales Uniandes/Code/ejercicio_11.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef histograma_compara(n_personas):\n n_gripas_normal = np.random.poisson(3.0, size=n_personas) \n n_gripas_medicamento = np.random.poisson(2.0, size=n_personas)\n\n n_sin_resultados = int(n_personas*0.25)\n n_gripas_medicamento[:n_sin_resultados] = n_gripas_normal[:n_sin_resultados]\n \n plt.figure()\n plt.hist(n_gripas_normal+0.25, bins=np.arange(0,np.max(n_gripas_normal)+1, 0.25),\n rwidth=0.75, label='Sin medicamento')\n plt.hist(n_gripas_medicamento, bins=np.arange(0,np.max(n_gripas_normal)+1, 0.25),\n rwidth=0.75, label='Con medicamento')\n plt.xlabel(\"N gripas\")\n plt.ylabel(\"N personas \")\n plt.legend()\n plt.savefig(\"gripas.png\")\n\n#histograma_compara(10000000)\n\n"
] |
[
[
"numpy.max",
"matplotlib.pyplot.xlabel",
"numpy.random.poisson",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel"
]
] |
wxwisgood/keras
|
[
"ef082c1c026e63b4705dae47a7ef463527438d1a"
] |
[
"Human_Pose_Estimation.py"
] |
[
"import cv2 as cv\nimport numpy as np\n\n\ndataset = 'COCO'\nprotoc = \"pose_deploy_linevec.prototxt\"\nmodel = \"pose_iter_440000.caffemodel\"\n# dataset = 'MPI'\n# protoc = \"pose_deploy_linevec_faster_4_stages.prototxt\"\n# model = \"pose_iter_160000.caffemodel\"\ncap = cv.VideoCapture('D:\\WXW\\Video_Data\\\\video.avi')\n\nif dataset == 'COCO':\n BODY_PARTS = { \"Nose\": 0, \"Neck\": 1, \"RShoulder\": 2, \"RElbow\": 3, \"RWrist\": 4,\n \"LShoulder\": 5, \"LElbow\": 6, \"LWrist\": 7, \"RHip\": 8, \"RKnee\": 9,\n \"RAnkle\": 10, \"LHip\": 11, \"LKnee\": 12, \"LAnkle\": 13, \"REye\": 14,\n \"LEye\": 15, \"REar\": 16, \"LEar\": 17, \"Background\": 18 }\n\n POSE_PAIRS = [ [\"Neck\", \"RShoulder\"], [\"Neck\", \"LShoulder\"], [\"RShoulder\", \"RElbow\"],\n [\"RElbow\", \"RWrist\"], [\"LShoulder\", \"LElbow\"], [\"LElbow\", \"LWrist\"],\n [\"Neck\", \"RHip\"], [\"RHip\", \"RKnee\"], [\"RKnee\", \"RAnkle\"], [\"Neck\", \"LHip\"],\n [\"LHip\", \"LKnee\"], [\"LKnee\", \"LAnkle\"], [\"Neck\", \"Nose\"], [\"Nose\", \"REye\"],\n [\"REye\", \"REar\"], [\"Nose\", \"LEye\"], [\"LEye\", \"LEar\"] ]\nelse:\n assert(dataset == 'MPI')\n BODY_PARTS = { \"Head\": 0, \"Neck\": 1, \"RShoulder\": 2, \"RElbow\": 3, \"RWrist\": 4,\n \"LShoulder\": 5, \"LElbow\": 6, \"LWrist\": 7, \"RHip\": 8, \"RKnee\": 9,\n \"RAnkle\": 10, \"LHip\": 11, \"LKnee\": 12, \"LAnkle\": 13, \"Chest\": 14,\n \"Background\": 15 }\n\n POSE_PAIRS = [ [\"Head\", \"Neck\"], [\"Neck\", \"RShoulder\"], [\"RShoulder\", \"RElbow\"],\n [\"RElbow\", \"RWrist\"], [\"Neck\", \"LShoulder\"], [\"LShoulder\", \"LElbow\"],\n [\"LElbow\", \"LWrist\"], [\"Neck\", \"Chest\"], [\"Chest\", \"RHip\"], [\"RHip\", \"RKnee\"],\n [\"RKnee\", \"RAnkle\"], [\"Chest\", \"LHip\"], [\"LHip\", \"LKnee\"], [\"LKnee\", \"LAnkle\"] ]\n\ninWidth = 368\ninHeight = 368\nthr = 0.1\n\nnet = cv.dnn.readNetFromCaffe(protoc, model)\n\n\nheight = cap.get(cv.CAP_PROP_FRAME_HEIGHT)\nwidth = cap.get(cv.CAP_PROP_FRAME_WIDTH)\nvideo_writer = cv.VideoWriter(\"D:/pose_estimation_demo.mp4\", cv.VideoWriter_fourcc('D', 'I', 'V', 'X'), 15, (640, 480), True)\nwhile cv.waitKey(1) < 0:\n hasFrame, frame = cap.read()\n if not hasFrame:\n cv.waitKey()\n break\n\n frameWidth = frame.shape[1]\n frameHeight = frame.shape[0]\n inp = cv.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inp)\n out = net.forward()\n\n print(len(BODY_PARTS), out.shape[0])\n # assert(len(BODY_PARTS) == out.shape[1])\n\n points = []\n for i in range(len(BODY_PARTS)):\n # Slice heatmap of corresponging body's part.\n heatMap = out[0, i, :, :]\n\n # Originally, we try to find all the local maximums. To simplify a sample\n # we just find a global one. 
However only a single pose at the same time\n # could be detected this way.\n _, conf, _, point = cv.minMaxLoc(heatMap)\n x = (frameWidth * point[0]) / out.shape[3]\n y = (frameHeight * point[1]) / out.shape[2]\n\n # Add a point if it's confidence is higher than threshold.\n points.append((x, y) if conf > thr else None)\n\n for pair in POSE_PAIRS:\n partFrom = pair[0]\n partTo = pair[1]\n assert(partFrom in BODY_PARTS)\n assert(partTo in BODY_PARTS)\n\n idFrom = BODY_PARTS[partFrom]\n idTo = BODY_PARTS[partTo]\n if points[idFrom] and points[idTo]:\n x1, y1 = points[idFrom]\n x2, y2 = points[idTo]\n cv.line(frame, (np.int32(x1), np.int32(y1)), (np.int32(x2), np.int32(y2)), (0, 255, 0), 3)\n cv.ellipse(frame, (np.int32(x1), np.int32(y1)), (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)\n cv.ellipse(frame, (np.int32(x2), np.int32(y2)), (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)\n\n t, _ = net.getPerfProfile()\n freq = cv.getTickFrequency() / 1000\n cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n # video_writer.write(frame);\n # cv.imwrite(\"D:/pose.png\", frame)\n cv.imshow('OpenPose using OpenCV', frame)"
] |
[
[
"numpy.int32"
]
] |
Ecoent/biosteam
|
[
"f1371386d089df3aa8ce041175f210c0318c1fe0"
] |
[
"build/lib/biosteam/units/_fermentation.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 22:45:47 2018\n\n@author: yoelr\n\"\"\"\nimport numpy as np\nfrom ._hx import HXutility\nfrom .. import Unit\nfrom scipy.integrate import odeint\nfrom .decorators import cost\nfrom ._tank import MixTank\nfrom .design_tools import size_batch\nfrom thermosteam.reaction import Reaction\n\n__all__ = ('Fermentation',)\n\n@cost('Reactor volume', 'Cleaning in place', CE=521.9,\n cost=421e3, S=3785, n=0.6, BM=1.8, N='N')\n@cost('Reactor volume', 'Agitators', CE=521.9, cost=52500,\n S=3785, n=0.5, kW=22.371, BM=1.5, N='N')\n@cost('Reactor volume', 'Reactors', CE=521.9, cost=844000,\n S=3785, n=0.5, BM=1.5, N='N')\nclass Fermentation(Unit):\n \"\"\"\n Create a Fermentation object which models large-scale batch fermentation\n for the production of 1st generation ethanol using yeast\n [1]_ [2]_ [3]_ [4]_. A compound with CAS 'Yeast' must be present.\n Only sucrose and glucose are taken into account for conversion.\n Conversion is based on reaction time, `tau`. Cleaning and unloading time,\n `tau_0`, fraction of working volume, `V_wf`, and number of reactors,\n `N_reactors`, are attributes that can be changed. Cost of a reactor\n is based on the NREL batch fermentation tank cost assuming volumetric\n scaling with a 6/10th exponent [5]_. \n \n Parameters\n ----------\n ins : streams\n Inlet fluids to be mixed into the fermentor.\n outs : stream sequence\n * [0] Vent\n * [1] Effluent\n tau : float\n Reaction time.\n efficiency=0.9 : float, optional\n User enforced efficiency.\n iskinetic=False: bool, optional\n If True, `Fermenation.kinetic_model` will be used.\n N : int\n Number of batch reactors\n \n Examples\n --------\n Simulate a Fermentation object which models batch fermentation for the\n production of 1st generation ethanol using yeast.\n \n >>> from biorefineries.lipidcane.chemicals import ethanol_chemicals \n >>> from biosteam.units import Fermentation\n >>> from thermosteam import Stream, settings\n >>> settings.set_thermo(ethanol_chemicals)\n >>> feed = Stream('feed',\n ... Water=1.20e+05,\n ... Glucose=1.89e+03,\n ... Sucrose=2.14e+04,\n ... DryYeast=1.03e+04,\n ... units='kg/hr',\n ... T=32+273.15)\n >>> F1 = Fermentation('F1',\n ... ins=feed, outs=('CO2', 'product'),\n ... tau=8, efficiency=0.90, N=8)\n >>> F1.simulate()\n >>> F1.show()\n Fermentation: F1\n ins...\n [0] feed\n phase: 'l', T: 305.15 K, P: 101325 Pa\n flow (kmol/hr): Water 6.66e+03\n Glucose 10.5\n Sucrose 62.5\n DryYeast 1.03e+04\n [1] missing stream\n outs...\n [0] CO2\n phase: 'g', T: 305.15 K, P: 101325 Pa\n flow (kmol/hr): Water 2.5\n CO2 244\n Ethanol 0.582\n [1] product\n phase: 'l', T: 305.15 K, P: 101325 Pa\n flow (kmol/hr): Water 6.6e+03\n Ethanol 243\n Glucose 13.6\n DryYeast 1.03e+04\n >>> F1.results()\n Fermentation Units F1\n Power Rate kW 11.5\n Cost USD/hr 0.899\n Chilled water Duty kJ/hr -1.35e+07\n Flow kmol/hr 9.02e+03\n Cost USD/hr 67.3\n Design Reactor volume m3 243\n Cycle time hr 12.6\n Loading time hr 1.57\n Cleaning and unloading time hr 3\n Working volume fraction 0.9\n Number of reactors 8\n Purchase cost Coolers USD 1.67e+05\n Reactors USD 1.86e+06\n Agitators USD 1.16e+05\n Cleaning in place USD 7.05e+05\n Total purchase cost USD 2.85e+06\n Utility cost USD/hr 68.2\n \n \n References\n ----------\n .. [1] Oliveira, Samuel C., et al. \"Discrimination between ethanol inhibition models in a continuous alcoholic fermentation process using flocculating yeast.\" Applied biochemistry and biotechnology 74.3 (1998): 161-172.\n \n .. 
[2] Oliveira, Samuel C., et al. \"Continuous ethanol fermentation in a tower reactor with flocculating yeast recycle: scale-up effects on process performance, kinetic parameters and model predictions.\" Bioprocess Engineering 20.6 (1999): 525-530.\n \n .. [3] Oliveira, Samuel C., et al. \"Mathematical modeling of a continuous alcoholic fermentation process in a two-stage tower reactor cascade with flocculating yeast recycle.\" Bioprocess and biosystems engineering 38.3 (2015): 469-479.\n \n .. [4] Oliveira, Samuel C., et al. \"Kinetic Modeling of 1‐G Ethanol Fermentations.\" Fermentation Processes. InTech, 2017.\n \n .. [5] D. Humbird, R. Davis, L. Tao, C. Kinchin, D. Hsu, and A. Aden National. Renewable Energy Laboratory Golden, Colorado. P. Schoen, J. Lukas, B. Olthof, M. Worley, D. Sexton, and D. Dudgeon. Harris Group Inc. Seattle, Washington and Atlanta, Georgia. Process Design and Economics for Biochemical Conversion of Lignocellulosic Biomass to Ethanol Dilute-Acid Pretreatment and Enzymatic Hydrolysis of Corn Stover. May 2011. Technical Report NREL/TP-5100-47764\n \n \"\"\"\n _units = {'Reactor volume': 'm3',\n 'Cycle time': 'hr',\n 'Loading time': 'hr',\n 'Total dead time': 'hr'}\n _N_ins = _N_outs = 2\n _N_heat_utilities = 0\n _has_power_utility = True\n line = 'Fermentation' \n \n #: [bool] If True, number of reactors (N) is chosen as to minimize installation cost in every simulation. Otherwise, N remains constant.\n autoselect_N = False\n \n #: [float] Cleaning and unloading time (hr)\n tau_0 = 3\n \n #: [float] Fraction of filled tank to total tank volume\n V_wf = 0.9\n \n #: tuple[float] Kinetic parameters for the kinetic model. Default constants are fitted for Oliveria's model (mu_m1, mu_m2, Ks1, Ks2, Pm1, Pm2, Xm, Y_PS, a)\n kinetic_constants = (0.31, # mu_m1\n 1.01, # mu_m2\n 1.88, # Ks1\n 2.81, # Ks2\n 82.8, # Pm1\n 108.2, # Pm2\n 113.4, # Xm\n 0.45, # Y_PS\n 0.18) # a\n \n def _get_design_specs(self):\n return (('Cleaning and unloading time', self.tau_0, 'hr'),\n ('Working volume fraction', self.V_wf, ''),\n ('Number of reactors', self.N, ''))\n \n def __init__(self, ID='', ins=None, outs=(), *, \n tau, N, efficiency=0.9, iskinetic=False):\n Unit.__init__(self, ID, ins, outs)\n self.hydrolysis = Reaction('Sucrose + Water -> 2Glucose', 'Sucrose', 1.00)\n self.fermentation = Reaction('Glucose -> 2Ethanol + 2CO2', 'Glucose', efficiency)\n self.iskinetic = iskinetic\n self.efficiency = efficiency\n self.tau = tau\n self.N = N\n self.cooler = hx = HXutility(None)\n self.heat_utilities = hx.heat_utilities\n hx._ins = hx._outs\n vent, effluent = self.outs\n hx._outs[0].T = effluent.T = vent.T = 305.15\n vent.phase = 'g'\n\n def _calc_efficiency(self, feed, tau):\n # Get initial concentrations\n y, e, s, w = feed.indices(['Yeast',\n '64-17-5',\n '492-61-5',\n '7732-18-5'])\n mass = feed.mass\n F_vol = feed.F_vol\n concentration_in = mass/F_vol\n X0, P0, S0 = (concentration_in[i] for i in (y, e, s))\n \n # Integrate to get final concentration\n t = np.linspace(0, tau, 1000)\n C_t = odeint(self.kinetic_model, (X0, P0, S0), t,\n args=self.kinetic_constants)\n # Cache data\n self._X = C_t[:, 0]\n self._P = C_t[:, 1]\n self._S = S = C_t[:, 2]\n \n # Calculate efficiency\n Sf = S[-1]\n Sf = Sf if Sf > 0 else 0\n Y_PS = self.kinetic_constants[-2]\n eff = (S0 - Sf)/S0 * Y_PS/0.511\n return eff\n \n @staticmethod\n def kinetic_model(z, t, *kinetic_constants):\n \"\"\"Return change of yeast, ethanol, and substrate concentration in kg/m3.\n \n Parameters\n ----------\n z : Iterable with (X, 
E, S) [-]:\n * X: Yeast concentration (kg/m3)\n * P: Ethanol concentration (kg/m3)\n * S: Substrate concentration (kg/m3)\n \n t : float\n Time point\n \n *kinetic_constants\n * mu_m1: Maximum specific growth rate (1/hr)\n * mu_m2: Maximum specific ethanol production rate (g-product/g-cell-hr)\n * Ks1: Sugar saturation constant for growth (g/L)\n * Ks2: Sugar saturation constant for product (g/L)\n * Pm1: Maximum product concentration at zero growth [mu_m1=0] (g/L)\n * Pm2: Maximum product concentration [mu_m2=0] (g/L)\n * Xm: Maximum cell concentration [mu_m1=0] (g/L)\n * Y_PS: Ethanol yield based on sugar consumed\n * a: Toxic power\n \n \"\"\"\n mu_m1, mu_m2, Ks1, Ks2, Pm1, Pm2, Xm, Y_PS, a = kinetic_constants\n \n # Current yeast, ethanol, and glucose concentration (kg/m3)\n X, P, S = z\n \n # Compute coefficients\n mu_X = mu_m1 * (S/(Ks1 + S)) * (1 - P/Pm1)**a*((1-X/Xm))\n mu_P = mu_m2 * (S/(Ks2 + S)) * (1 - P/Pm2)\n mu_S = mu_P/0.45\n \n # Compute derivatives\n dXdt = mu_X * X\n dPdt = (mu_P * X)\n dSdt = - mu_S * X\n return (dXdt, dPdt, dSdt)\n \n @property\n def N(self):\n \"\"\"[int] Number of reactors\"\"\"\n return self._N\n @N.setter\n def N(self, N):\n if N <= 1:\n raise ValueError(f\"number of reactors must be greater than 1, value {N} is infeasible\")\n self._N = N\n\n @property\n def efficiency(self):\n return self.fermentation.X\n @efficiency.setter\n def efficiency(self, efficiency):\n self.fermentation.X = efficiency\n\n @property\n def tau(self):\n return self._tau\n @tau.setter\n def tau(self, tau):\n self._tau = tau\n\n def _run(self):\n vent, effluent = self.outs\n effluent.mix_from(self.ins)\n effluent_mol = effluent.mol\n self.hydrolysis(effluent_mol)\n if self.iskinetic:\n self.fermentation.X = self._calc_efficiency(effluent, self._tau)\n self.fermentation(effluent_mol)\n vent.recieve_vent(effluent)\n \n @property\n def N_at_minimum_capital_cost(self):\n cost_old = np.inf\n self._N, N = 2, self._N\n cost_new = self.purchase_cost\n self._summary()\n while cost_new < cost_old:\n self._N += 1\n self._summary()\n cost_old = cost_new\n cost_new = self.purchase_cost\n self._N, N = N, self._N\n return N - 1\n \n def _design(self):\n v_0 = self.outs[1].F_vol\n tau = self._tau\n tau_0 = self.tau_0\n Design = self.design_results\n if self.autoselect_N:\n self.autoselect_N = False\n self._N = self.N_at_minimum_capital_cost\n self.autoselect_N = True\n N = self._N\n Design.update(size_batch(v_0, tau, tau_0, N, self.V_wf))\n hx = self.cooler\n hx.outs[0].mol[:] = self.outs[0].mol/N \n hu = hx.heat_utilities[0]\n hu(self.Hnet/N, self.outs[0].T)\n hx._design(hu.duty)\n hx._cost()\n hu.duty *= N\n hu.cost *= N\n hu.flow *= N\n self.purchase_costs['Coolers'] = self.cooler.purchase_costs['Heat exchanger'] * N\n \n "
] |
[
[
"numpy.linspace",
"scipy.integrate.odeint"
]
] |
abondar24/deepLearnPython
|
[
"9325cd18458f66f9d90ebd044fb4c8b2c6d8abc0"
] |
[
"tensorflow/tf_multivariate_linear_regression.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.learn as skflow\nimport pandas as pd\n\nfrom sklearn.utils import shuffle\n\ndf = pd.read_csv('../data/boston.csv', header=0)\nprint(df.describe())\n\nf, ax1 = plt.subplots()\nplt.figure()\ny = df['MEDV']\n\nfor i in range(1, 8):\n number = 420 + i\n ax1.locator_params(nbins=3)\n ax1 = plt.subplot(number)\n plt.title(list(df)[i])\n # print a scatter draw of datapoints\n ax1.scatter(df[df.columns[i]], y)\n\nplt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\nplt.show()\n\nX = tf.placeholder(\"float\", name=\"X\")\nY = tf.placeholder(\"float\", name=\"Y\")\n\nwith tf.name_scope(\"Model\"):\n w = tf.Variable(tf.random_normal([2], stddev=0.01), name=\"b0\")\n b = tf.Variable(tf.random_normal([2], stddev=0.01), name=\"b1\")\n\n\n def model(x, w, b):\n return tf.add(x, w) + b\n\n\n y_model = model(X, w, b)\n\nwith tf.name_scope(\"CostFunction\"):\n # use square error for cost func\n cost = tf.reduce_mean(tf.pow(Y - y_model, 2))\n\ntrain_op = tf.train.AdamOptimizer(0.001).minimize(cost)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\n# tf.summary.FileWriter(sess.graph, './graphs', 'graph.pbtxt')\ncost_op = tf.summary.scalar(\"loss\", cost)\nmerged = tf.summary.merge_all()\nsess.run(init)\n# writer = tf.summary.FileWriter('./graphs', sess.graph)\n\nx_vals = df[[df.columns[2], df.columns[4]]].values.astype(float)\ny_vals = df[df.columns[12]].values.astype(float)\n\nb0_temp = b.eval(session=sess)\nb1_temp = w.eval(session=sess)\n\nfor a in range(1, 10):\n cost_1 = 0.0\n for i, j in zip(x_vals, y_vals):\n sess.run(train_op, feed_dict={X: i, Y: j})\n cost_1 += sess.run(cost, feed_dict={X: i, Y: j})/506.00\n x_vals, y_vals = shuffle(x_vals,y_vals)\n print(cost_1)\n b0_temp = b.eval(session=sess)\n b1_temp = w.eval(session=sess)\n print(b0_temp)\n print(b1_temp)\n"
] |
[
[
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Session",
"sklearn.utils.shuffle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"tensorflow.placeholder",
"matplotlib.pyplot.tight_layout",
"tensorflow.name_scope",
"tensorflow.pow",
"tensorflow.summary.merge_all",
"matplotlib.pyplot.show",
"pandas.read_csv",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.random_normal",
"matplotlib.pyplot.subplot"
]
] |
toxtli/BlurbGenreCollection-HMC
|
[
"c9d9e7cb31889422d84180a0a90c904f619be12e"
] |
[
"code/capsulelayers.py"
] |
[
"\"\"\"\nAuthor: Xifeng Guo, E-mail: `[email protected]`, Github: `https://github.com/XifengGuo/CapsNet-Keras`\n\"\"\"\n\nimport keras.backend as K\nimport tensorflow as tf\nfrom keras.constraints import max_norm\nfrom keras import initializers, layers\ntf_session = K.get_session()\nimport sys\n\n\nclass Length(layers.Layer):\n \"\"\"\n Compute the length of vectors. This is used to compute a Tensor that has the same shape with y_true in margin_loss.\n Using this layer as model's output can directly predict labels by using `y_pred = np.argmax(model.predict(x), 1)`\n inputs: shape=[None, num_vectors, dim_vector]\n output: shape=[None, num_vectors]\n \"\"\"\n def call(self, inputs, **kwargs):\n return K.sqrt(K.sum(K.square(inputs), -1))\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n def get_config(self):\n config = super(Length, self).get_config()\n return config\n\n\ndef squash(vectors, axis=-1):\n \"\"\"\n The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and small vector to 0\n :param vectors: some vectors to be squashed, N-dim tensor\n :param axis: the axis to squash\n :return: a Tensor with same shape as input vectors\n \"\"\"\n s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)\n scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())\n return scale * vectors\n\n\nclass CapsuleLayer(layers.Layer):\n \"\"\"\n The capsule layer. It is similar to Dense layer. Dense layer has `in_num` inputs, each is a scalar, the output of the\n neuron from the former layer, and it has `out_num` output neurons. CapsuleLayer just expand the output of the neuron\n from scalar to vector. So its input shape = [None, input_num_capsule, input_dim_capsule] and output shape = \\\n [None, num_capsule, dim_capsule]. 
For Dense Layer, input_dim_capsule = dim_capsule = 1.\n\n :param num_capsule: number of capsules in this layer\n :param dim_capsule: dimension of the output vectors of the capsules in this layer\n :param routings: number of iterations for the routing algorithm\n \"\"\"\n def __init__(self, num_capsule, dim_capsule, routings=3,\n kernel_initializer='glorot_uniform',\n **kwargs):\n super(CapsuleLayer, self).__init__(**kwargs)\n self.num_capsule = num_capsule\n self.dim_capsule = dim_capsule\n self.routings = routings\n self.kernel_initializer = initializers.get(kernel_initializer)\n\n def build(self, input_shape):\n assert len(input_shape) >= 3, \"The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]\"\n self.input_num_capsule = input_shape[1]\n self.input_dim_capsule = input_shape[2]\n\n # Transform matrix\n self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,\n self.dim_capsule, self.input_dim_capsule],\n initializer=self.kernel_initializer,\n name='W')\n\n print(K.int_shape(self.W))\n print(self.W)\n print(self.W[0][0][0][0])\n self.built = True\n \n def call(self, inputs, training=None):\n # inputs.shape=[None, input_num_capsule, input_dim_capsule]\n # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]\n inputs_expand = K.expand_dims(inputs, 1)\n\n # Replicate num_capsule dimension to prepare being multiplied by W\n # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule](?,12,2805,8)\n inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])\n\n # Compute `inputs * W` by scanning inputs_tiled on dimension 0.\n # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]\n # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]\n # Regard the first two dimensions as `batch` dimension,\n # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].\n # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule](?,12,?,2805,16)\n\n\n #inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)\n inputs_hat = K.map_fn(lambda x: tf.reshape(tf.matmul(self.W,K.expand_dims(x,3)),[self.num_capsule,self.input_num_capsule,self.dim_capsule]), elems=inputs_tiled)\n\n # Begin: Routing algorithm ---------------------------------------------------------------------#\n # The prior for coupling coefficient, initialized as zeros.\n # b.shape = [None, self.num_capsule, self.input_num_capsule]\n\n b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])\n\n assert self.routings > 0, 'The routings should be > 0.'\n for i in range(self.routings):\n # c.shape=[batch_size, num_capsule, input_num_capsule]\n c = tf.nn.softmax(b, axis=1)\n\n # c.shape = [batch_size, num_capsule, input_num_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].\n # outputs.shape=[None, num_capsule, dim_capsule]\n\n #outputs = squash(K.batch_dot(c, inputs_hat, [2, 2])) # [None, 10, 16]\n c = tf.expand_dims(c,2)\n z = tf.matmul(c,inputs_hat)\n outputs = squash(tf.reshape(z,[-1,self.num_capsule,self.dim_capsule]))\n\n if i < self.routings - 1:\n # outputs.shape = [None, num_capsule, dim_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [dim_capsule] x 
[input_num_capsule, dim_capsule]^T -> [input_num_capsule].\n # b.shape=[batch_size, num_capsule, input_num_capsule]\n #b += K.batch_dot(outputs, inputs_hat, [2, 3])\n outputs1 = tf.expand_dims(outputs,3)\n x = tf.matmul(inputs_hat,outputs1)\n x = tf.reshape(x,[-1,self.num_capsule,self.input_num_capsule])\n b += x\n\n # End: Routing algorithm -----------------------------------------------------------------------#\n\n return outputs\n\n def compute_output_shape(self, input_shape):\n return tuple([None, self.num_capsule, self.dim_capsule])\n\n def get_config(self):\n config = {\n 'num_capsule': self.num_capsule,\n 'dim_capsule': self.dim_capsule,\n 'routings': self.routings\n }\n base_config = super(CapsuleLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding, name):\n # shape = K.int_shape(inputs)\n # output = layers.Reshape(target_shape = [shape[1],shape[3],shape[2]], name = 'conv_output')(inputs)\n output = layers.Conv1D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding, kernel_initializer='normal', kernel_constraint=max_norm(3), activation='relu',\n name=name)(inputs)\n #dropout = layers.Dropout(0.2)(output)\n outputs = layers.Reshape(target_shape=[-1, dim_capsule], name=name + '_reshape')(output)\n return layers.Lambda(squash, name= name + '_squash')(outputs)\n"
] |
[
[
"tensorflow.nn.softmax",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.expand_dims"
]
] |