repo_name: string (length 6-130)
hexsha: list
file_path: list
code: list
apis: list
peleiden/daluke
[ "d2c85ba6b80021b2959b369381c447d18b058576" ]
[ "tests/pretrain/test_pretrain_data.py" ]
[ "import os\n\nimport torch\nfrom pelutils import MainTest\nfrom transformers import AutoTokenizer\n\nfrom daluke import daBERT\nfrom daluke.data import BatchedExamples\nfrom daluke.pretrain.data import load_entity_vocab, DataLoader, calculate_spans\nfrom daluke.pretrain.data.build import DatasetBuilder\n\n\nclass TestData(MainTest):\n def test_entity_loader(self):\n path = os.path.join(self.test_dir, \"entity.jsonl\")\n with open(path, \"w\") as ev:\n ev.write(\"\\n\".join([\n '{\"id\": 1271, \"entities\": [[\"27. oktober\", \"da\"]], \"count\": 529}',\n '{\"id\": 1272, \"entities\": [[\"Computerprogram\", \"da\"]], \"count\": 528}',\n '{\"id\": 1273, \"entities\": [[\"Kancelli\", \"da\"]], \"count\": 527}',\n '{\"id\": 1274, \"entities\": [[\"2. marts\", \"da\"]], \"count\": 527}',\n '{\"id\": 1275, \"entities\": [[\"Guvern\\u00f8r\", \"da\"]], \"count\": 527}',\n ]))\n ev = load_entity_vocab(path)\n assert ev == {\n \"27. oktober\": { \"id\": 1271, \"count\": 529 },\n \"Computerprogram\": { \"id\": 1272, \"count\": 528 },\n \"Kancelli\": { \"id\": 1273, \"count\": 527 },\n \"2. marts\": { \"id\": 1274, \"count\": 527 },\n \"Guvern\\u00f8r\": { \"id\": 1275, \"count\": 527 },\n }\n\n def test_dataloader(self):\n path = os.path.join(self.test_dir, DatasetBuilder.data_file)\n with open(path, \"w\") as f:\n f.write(\"\\n\".join([\n '{ \"word_ids\": [32, 59, 3], \"word_spans\": [[0, 2], [2, 3], [5, 7]], \"entity_ids\": [5], \"entity_spans\": [[0, 3]] }',\n '{ \"word_ids\": [42, 11], \"word_spans\": [[0, 1], [1, 2]], \"entity_ids\": [], \"entity_spans\": [], \"is_validation\": true }',\n ]))\n metadata = {\n \"number-of-items\": 2,\n \"number-of-words\": 5,\n \"number-of-word-tokens\": 5,\n \"max-seq-length\": 512,\n \"max-entities\": 128,\n \"max-entity-span\": 30,\n \"min-sentence-length\": 5,\n \"base-model\": daBERT,\n \"tokenizer_class\": \"BertTokenizerFast\",\n \"language\": \"da\",\n }\n dl = DataLoader(\n self.test_dir,\n metadata,\n entity_vocab = {\"[MASK]\": dict(id=2)},\n device = torch.device(\"cpu\"),\n word_mask_prob = 0.1,\n word_unmask_prob = 0.1,\n word_randword_prob = 0.1,\n ent_mask_prob = 0.1,\n )\n assert len(dl) == 2\n assert len(dl.train_examples) == 1\n assert len(dl.val_examples) == 1\n assert torch.all(dl.val_examples[0].entities.ids == 0)\n train_loader = dl.get_dataloader(1, torch.utils.data.RandomSampler(dl.train_examples))\n i = 0\n for batch in train_loader:\n i += 1\n assert isinstance(batch, BatchedExamples)\n assert i == 1\n val_loader = dl.get_dataloader(1, torch.utils.data.RandomSampler(dl.train_examples), validation=True)\n i = 0\n for batch in val_loader:\n i += 1\n assert isinstance(batch, BatchedExamples)\n assert i == 1\n\n\n def test_word_spans(self):\n tokens = [\"jeg\", \"hed\", \"##der\", \"kaj\", \"ii\", \"d\", \".\", \"Sto\", \"##re\"]\n word_spans = [(0, 1), (1, 3), (3, 4), (4, 5), (5, 6), (7, 9)]\n assert calculate_spans(tokens, AutoTokenizer.from_pretrained(\"Maltehb/danish-bert-botxo\")) == word_spans\n" ]
[ [ "torch.device", "torch.all", "torch.utils.data.RandomSampler" ] ]
aperrin66/DAPPER
[ "d9d09ed87ca58d59972296e317bfeea50ba6cdd0" ]
[ "dapper/mods/Lorenz96/anderson2009.py" ]
[ "\"\"\"A land-ocean setup from `bib.anderson2009spatially`.\"\"\"\n\nimport numpy as np\n\nimport dapper.mods as modelling\nfrom dapper.mods.Lorenz96.sakov2008 import X0, Dyn, LPs, Nx, Tplot\nfrom dapper.tools.localization import localization_setup, pairwise_distances\nfrom dapper.tools.viz import xtrema\n\nt = modelling.Chronology(0.05, dtObs=0.05, KObs=4000, Tplot=Tplot, BurnIn=2000*0.05)\n\n# Define obs sites\nobs_sites = 0.395 + 0.01*np.arange(1, 21)\nobs_sites *= 40\n# Surrounding inds\nii_below = obs_sites.astype(int)\nii_above = ii_below + 1\n# Linear-interpolation weights\nw_above = obs_sites - ii_below\nw_below = 1 - w_above\n# Define obs matrix\nH = np.zeros((20, 40))\nH[np.arange(20), ii_below] = w_below\nH[np.arange(20), ii_above] = w_above\n# Measure obs-state distances\ny2x_dists = pairwise_distances(obs_sites[:, None], np.arange(Nx)[:, None], domain=(Nx,))\nbatches = np.arange(40)[:, None]\n# Define operator\nObs = {\n 'M': len(H),\n 'model': lambda E, t: E @ H.T,\n 'linear': lambda E, t: H,\n 'noise': 1,\n 'localizer': localization_setup(lambda t: y2x_dists, batches),\n}\n\nHMM = modelling.HiddenMarkovModel(\n Dyn, Obs, t, X0, LP=LPs(),\n sectors={'land': np.arange(*xtrema(obs_sites)).astype(int)})\n\n####################\n# Suggested tuning\n####################\n\n# Reproduce Anderson Figure 2\n# -----------------------------------------------------------------------------------\n# xp = SL_EAKF(N=6, infl=sqrt(1.1), loc_rad=0.2/1.82*40)\n# for lbl in ['err','std']:\n# stat = getattr(xp.stats,lbl).f[HMM.t.maskObs_BI]\n# plt.plot(sqrt(np.mean(stat**2, axis=0)),label=lbl)\n#\n# Note: for this xp, one must to be lucky with the random seed to avoid\n# blow up in the ocean sector (which is not constrained by obs) due to infl.\n# Instead, I recommend lowering dt (as in Miyoshi 2011) to stabilize integration.\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
gyger/PICwriter
[ "a94d0f468a17256523086c2ce1d024a84c5952e3" ]
[ "picwriter/components/spiral.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport gdspy\nimport picwriter.toolkit as tk\nfrom picwriter.components.waveguide import Waveguide\nfrom picwriter.components.sbend import SBend\n\n\nclass Spiral(tk.Component):\n \"\"\" Spiral Waveguide Cell class. The desired length of the spiral is first set, along with the spacing between input and output (the 'width' paramter). Then, the corresponding height of the spiral is automatically set.\n\n Args:\n * **wgt** (WaveguideTemplate): WaveguideTemplate object\n * **width** (float): width of the spiral (i.e. distance between input/output ports)\n * **length** (float): desired length of the waveguide\n\n Keyword Args:\n * **spacing** (float): distance between parallel waveguides\n * **parity** (int): If 1 spiral on right side, if -1 spiral on left side (mirror flip)\n * **port** (tuple): Cartesian coordinate of the input port\n * **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)\n\n Members:\n * **portlist** (dict): Dictionary with the relevant port information\n\n Portlist format:\n * portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}\n * portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}\n\n Where in the above (x1,y1) are the first elements of the spiral trace, (x2, y2) are the last elements of the spiral trace, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.\n 'Direction' points *towards* the waveguide that will connect to it.\n\n \"\"\"\n\n def __init__(\n self, wgt, width, length, spacing=None, parity=1, port=(0, 0), direction=\"NORTH\"\n ):\n tk.Component.__init__(self, \"Spiral\", locals())\n\n self.portlist = {}\n\n self.width = width\n self.length = length\n self.parity = parity\n self.port = port\n self.spacing = 3 * wgt.clad_width if spacing == None else spacing\n\n self.wgt = wgt\n if self.wgt.euler == True:\n self.bend_radius = wgt.effective_bend_radius\n self.corner_dl = 2 * wgt.effective_bend_radius - wgt.bend_length_90\n else:\n self.bend_radius = wgt.bend_radius\n self.corner_dl = 2 * wgt.bend_radius - (0.5 * np.pi * wgt.bend_radius)\n\n self.direction = direction\n\n if width < self.spacing + 5 * self.bend_radius:\n print(\"width = \" + str(width))\n print(\"spacing = \" + str(self.spacing))\n print(\"bend_radius = \" + str(self.bend_radius))\n raise ValueError(\n \"Warning! Given the WaveguideTemplate 'bend radius' and 'spacing' specified, no spiral can be fit within the requested 'width'. 
Please increase the 'width'.\"\n )\n\n self.nmax = int(\n (self.width - self.spacing - 5 * self.bend_radius) / (2 * self.spacing)\n )\n\n self.__build_cell()\n self.__build_ports()\n\n \"\"\" Translate & rotate the ports corresponding to this specific component object\n \"\"\"\n self._auto_transform_()\n\n def __fixed_len(self, h):\n w = self.width\n s = self.spacing\n br = self.bend_radius\n wcent = (w - s - br) / 2.0\n return 2 * wcent + (h - s) + wcent + br + h + (w - br) + (h - s) + wcent\n\n def __spiral_len(self, h, n):\n if n == 0:\n return 0\n else:\n w = self.width\n s = self.spacing\n br = self.bend_radius\n wcent = (w - s - br) / 2.0\n return 2 * ((2 * (wcent - n * s)) + (h - s - 2 * n * s))\n\n def __middle_len(self, h, n):\n return (h - 2 * self.spacing) - 2 * n * self.spacing\n\n def get_length(self, h, n):\n # Return the length of the spiral given the height and number of wraps, \"n\"\n num_points = 10 + 4 * n\n\n length = self.__fixed_len(h)\n length += sum([self.__spiral_len(h, i + 1) for i in range(n)])\n length += self.__middle_len(h, n)\n length -= (num_points - 2) * self.corner_dl\n return length\n\n def __get_hmin(self, n):\n # Determine the minimum height corresponding to the spiral parameters and # of spiral turns, 'n'\n br = self.bend_radius\n s = self.spacing\n return 2 * br + 2 * s + 2 * n * s\n\n def get_spiral_length(self):\n # Returns the true length of the spiral, including length from the turns\n return self.actual_length\n\n def __get_number_of_spirals(self):\n # Find the ideal number of loops required to make the spiral such that the\n # spiral is wound as tightly as possible. This means that the required height\n # of the spiral should be minimized appropriately.\n length_goal = self.length\n\n n = 0\n hmin = self.__get_hmin(n)\n length_min = self.get_length(hmin, n)\n while (length_min < length_goal) and n < self.nmax:\n n += 1\n hmin = self.__get_hmin(n)\n length_min = self.get_length(hmin, n)\n\n if n == 0:\n if length_min > length_goal:\n return None\n else:\n return n\n else:\n return n - 1\n\n def __get_spiral_height(self, n):\n # n is the number of spirals\n # Returns the appropriate height ( > hmin) such that\n num_wg_segments = 4 + 2 * n\n\n hmin = self.__get_hmin(n)\n delta_length = self.length - self.get_length(hmin, n)\n hnew = hmin + (delta_length / num_wg_segments)\n\n return hnew\n\n def __build_cell(self):\n # Determine the correct set of waypoints, then feed this over to a\n # Waveguide() class.\n # This is just one way of doing it... ¯\\_(ツ)_/¯\n\n # Determine the number of spiral wraps\n skip_length_check = False\n n = self.__get_number_of_spirals()\n\n if n != None:\n \"\"\" Determine the corresponding spiral height\n \"\"\"\n h = self.__get_spiral_height(n)\n\n w = self.width\n length = self.length\n br = self.bend_radius\n s = self.spacing\n\n \"\"\" Double check all parameters\n \"\"\"\n if abs(length - self.get_length(h, n)) > 1e-6:\n raise ValueError(\n \"Warning! 
The computed length and desired length are not equal!\"\n )\n\n \"\"\" Now that the parameters are all determined, build the corresponding\n waypoints \"\"\"\n wcent = (w - s - br) / 2.0\n\n p = self.parity\n x0, y0 = 0, 0\n\n \"\"\" Start/end points corresponding to 'fixed_len' unit \"\"\"\n start_points = [\n (x0, y0),\n (x0 + 2 * wcent, y0),\n (x0 + 2 * wcent, y0 - p * (h - s)),\n ]\n end_points = [\n (x0, y0 - p * s),\n (x0, y0 - p * h),\n (x0 + w - br, y0 - p * h),\n (x0 + w - br, y0),\n (x0 + w, y0),\n ]\n\n \"\"\" Generate the spiral going inwards \"\"\"\n spiral_in_pts = []\n\n x_left_start, x_right_start = x0 + s, x0 + 2 * wcent - 2 * s\n y_top_start, y_bot_start = y0 - p * 2 * s, y0 - p * (h - s)\n\n for j in range(n):\n i = j + 1\n if i % 2 == 1: # ODD, so add a segment on the LEFT\n left_segment_index = (i - 1) / 2\n spiral_in_pts.append(\n (\n x_left_start + 2 * s * left_segment_index,\n y_bot_start + p * (2 * s * left_segment_index),\n )\n )\n spiral_in_pts.append(\n (\n x_left_start + 2 * s * left_segment_index,\n y_top_start - p * (2 * s * left_segment_index),\n )\n )\n if j + 1 == n: # This is the last one! Add the middle point now\n spiral_in_pts.append(\n (x0 + wcent, y_top_start - p * (2 * s * left_segment_index))\n )\n if i % 2 == 0: # EVEN, so add a segment on the RIGHT\n right_segment_index = (i - 2) / 2\n spiral_in_pts.append(\n (\n x_right_start - (2 * s * right_segment_index),\n y_top_start - p * (2 * s * right_segment_index),\n )\n )\n spiral_in_pts.append(\n (\n x_right_start - (2 * s * right_segment_index),\n y_bot_start + p * (2 * s * right_segment_index + 2 * s),\n )\n )\n if j + 1 == n: # This is the last one! Add the middle point now\n spiral_in_pts.append(\n (\n x0 + wcent,\n y_bot_start + p * (2 * s * right_segment_index + 2 * s),\n )\n )\n\n if n == 0:\n spiral_in_pts.append((x0 + wcent, y_bot_start))\n\n \"\"\" Generate the spiral going outwards \"\"\"\n spiral_out_pts = []\n\n x_left_start, x_right_start = x0 + 2 * s, x0 + 2 * wcent - s\n y_top_start, y_bot_start = y0 - p * s, y0 - p * (h - 2 * s)\n\n for j in range(n):\n i = j + 1\n if i % 2 == 1: # ODD, so add a segment on the RIGHT\n right_segment_index = (i - 1) / 2\n spiral_out_pts.append(\n (\n x_right_start - 2 * s * right_segment_index,\n y_top_start - p * 2 * s * right_segment_index,\n )\n )\n spiral_out_pts.append(\n (\n x_right_start - 2 * s * right_segment_index,\n y_bot_start + p * (2 * s * right_segment_index),\n )\n )\n if j + 1 == n: # This is the last one! Add the middle point now\n spiral_out_pts.append(\n (x0 + wcent, y_bot_start + p * 2 * s * right_segment_index)\n )\n\n elif i % 2 == 0: # EVEN, add a segment on the LEFT\n left_segment_index = (i - 2) / 2\n spiral_out_pts.append(\n (\n x_left_start + 2 * s * left_segment_index,\n y_bot_start + p * 2 * s * left_segment_index,\n )\n )\n spiral_out_pts.append(\n (\n x_left_start + 2 * s * left_segment_index,\n y_top_start - p * (2 * s * left_segment_index + 2 * s),\n )\n )\n if j + 1 == n: # This is the last one! 
Add the middle point now\n spiral_out_pts.append(\n (\n x0 + wcent,\n y_top_start - p * (2 * s * left_segment_index + 2 * s),\n )\n )\n\n if n == 0:\n spiral_out_pts.append((x0 + wcent, y_top_start))\n\n spiral_out_pts.reverse() # reverse order\n\n waypoints = start_points + spiral_in_pts + spiral_out_pts + end_points\n\n else:\n \"\"\" Make the waveguide waypoints just a U-bend, since the waveguide length is not long enough to spiral in on itself \"\"\"\n\n length = self.length\n w = self.width\n br = self.bend_radius\n dl = self.corner_dl\n\n if length < w + 4 * br - 4 * dl:\n \"\"\" Route a sinusoidal s-bend waveguide with the desired length \"\"\"\n # Goal: Find the height of the s-bend\n\n from scipy.optimize import fsolve\n from scipy.special import ellipeinc\n\n # The equation below is the arc length of a sine curve, for a given height and width\n func = lambda s_height: length - ellipeinc(\n 2 * np.pi, 1 - 1 / (1 + (s_height ** 2 * np.pi ** 2 / w ** 2))\n ) / (\n (2 * np.pi / w) / np.sqrt(1 + (s_height ** 2 * np.pi ** 2 / w ** 2))\n )\n\n h_guess = np.sqrt((length / 2.0) ** 2 - (w / 2) ** 2)\n\n h_solution = fsolve(func, h_guess)\n h = -self.parity * h_solution[0]\n\n sbend1 = SBend(self.wgt, w / 2.0, h, port=(0, 0), direction=\"EAST\")\n self.add(sbend1)\n\n sbend2 = SBend(\n self.wgt, w / 2.0, -h, port=(w / 2.0, h), direction=\"EAST\"\n )\n self.add(sbend2)\n\n # print(\"Added an SBend\")\n # print(\"h = \"+str(h))\n # print(\"w = \"+str(w))\n # print(\"length = \"+str(length))\n\n self.actual_length = ellipeinc(\n 2 * np.pi, 1 - 1 / (1 + (h ** 2 * np.pi ** 2 / w ** 2))\n ) / ((2 * np.pi / w) / np.sqrt(1 + (h ** 2 * np.pi ** 2 / w ** 2)))\n\n skip_length_check = True\n\n else:\n p = self.parity\n x0, y0 = 0, 0\n\n extra_height = (length - (w + 4 * br - 4 * dl)) / 2.0\n\n max_turns = (w - 4 * br) // (\n 4 * br\n ) # one 'turn' is a turn segment added to the waveguide \"U\" (to get the length required without making the bend very tall)\n extra_length_per_turn = (\n 8 * br - 4 * dl - 4 * br\n ) # Extra length incurred by adding a turn (compared to a straight section)\n\n waypoints = [(x0, y0), (x0 + br, y0)]\n\n number_of_turns = (\n extra_height // extra_length_per_turn\n ) # Max number of turns that could be formed from the extra_height\n\n if number_of_turns > max_turns:\n \"\"\" Add *all* of the turns, plus some extra for the height, else add only the smaller number of turns. 
\"\"\"\n number_of_turns = max_turns\n\n dh = (\n length\n - (w + 4 * br - 4 * dl)\n - number_of_turns * extra_length_per_turn\n ) / (number_of_turns * 2 + 2)\n\n waypoints.append((x0 + br, y0 - p * (2 * br + dh)))\n for i in range(int(number_of_turns)):\n waypoints.append((x0 + 3 * br + i * br * 4, y0 - p * (2 * br + dh)))\n waypoints.append((x0 + 3 * br + i * br * 4, y0))\n waypoints.append((x0 + 5 * br + i * br * 4, y0))\n waypoints.append((x0 + 5 * br + i * br * 4, y0 - p * (2 * br + dh)))\n\n waypoints.append((x0 + w - br, y0 - p * (2 * br + dh)))\n waypoints.append((x0 + w - br, y0))\n waypoints.append((x0 + w, y0))\n\n \"\"\" Independently verify that the length of the spiral structure generated is correct\n \"\"\"\n if not skip_length_check:\n l = 0\n for i in range(len(waypoints) - 1):\n dx, dy = (\n waypoints[i + 1][0] - waypoints[i][0],\n waypoints[i + 1][1] - waypoints[i][1],\n )\n l += np.sqrt(dx ** 2 + dy ** 2)\n num_corners = len(waypoints) - 2\n l -= num_corners * self.corner_dl\n\n self.actual_length = l\n\n if abs(l - self.length) > 1e-6:\n print(\"Actual computed length = \" + str(l))\n print(\"Expected length = \" + str(self.length))\n raise ValueError(\n \"Warning! Spiral generated is significantly different from what is expected.\"\n )\n\n \"\"\" Generate the waveguide \"\"\"\n wg = Waveguide(waypoints, self.wgt)\n\n self.add(wg)\n\n self.portlist_input = (0, 0)\n self.portlist_output = (self.width, 0)\n\n def __build_ports(self):\n # Portlist format:\n # example: {'port':(x_position, y_position), 'direction': 'NORTH'}\n\n self.portlist[\"input\"] = {\"port\": self.portlist_input, \"direction\": \"WEST\"}\n self.portlist[\"output\"] = {\"port\": self.portlist_output, \"direction\": \"EAST\"}\n\n\nif __name__ == \"__main__\":\n from picwriter.components.waveguide import WaveguideTemplate\n\n gdspy.current_library = gdspy.GdsLibrary()\n top = gdspy.Cell(\"top\")\n wgt = WaveguideTemplate(\n bend_radius=50, wg_width=1.0, clad_width=10.0, euler_bend=True\n )\n\n sp1 = Spiral(\n wgt,\n width=2700.0,\n length=2900.0,\n spacing=20.0,\n parity=1,\n port=(0, 0),\n direction=\"EAST\",\n )\n tk.add(top, sp1)\n\n print(\"length is \" + str(sp1.get_spiral_length()))\n print(\"portlist = \" + str(sp1.portlist))\n\n gdspy.LayoutViewer(cells=\"top\")\n # gdspy.write_gds('spiral.gds', unit=1.0e-6, precision=1.0e-9)\n" ]
[ [ "scipy.special.ellipeinc", "scipy.optimize.fsolve", "numpy.sqrt" ] ]
gnicks007/data-driven-discretization-1d
[ "c5466d094d27dd85af932f00070893180d2f9918" ]
[ "pde_superresolution/layers_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Sanity tests for layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import absltest # pylint: disable=g-bad-import-order\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom pde_superresolution import layers # pylint: disable=g-bad-import-order\n\n\ndef pad_periodic_1d(inputs, padding, center=False):\n padded_inputs = inputs[tf.newaxis, :, tf.newaxis]\n padded_outputs = layers.pad_periodic(padded_inputs, padding, center)\n return tf.squeeze(padded_outputs, axis=(0, 2))\n\n\nclass LayersTest(parameterized.TestCase):\n\n def test_static_or_dynamic_size(self):\n with tf.Graph().as_default():\n with tf.Session():\n self.assertEqual(layers.static_or_dynamic_size(tf.range(5), axis=0), 5)\n\n feed_size = tf.placeholder(tf.int32, ())\n size = layers.static_or_dynamic_size(tf.range(feed_size), axis=0)\n self.assertEqual(size.eval(feed_dict={feed_size: 5}), 5)\n\n with self.assertRaisesRegexp(ValueError, 'out of bounds'):\n layers.static_or_dynamic_size(tf.range(5), axis=1)\n\n @parameterized.parameters(\n dict(padding=0, center=True, expected=[0, 1, 2]),\n dict(padding=1, center=True, expected=[2, 0, 1, 2]),\n dict(padding=2, center=True, expected=[2, 0, 1, 2, 0]),\n dict(padding=3, center=True, expected=[1, 2, 0, 1, 2, 0]),\n dict(padding=4, center=True, expected=[1, 2, 0, 1, 2, 0, 1]),\n dict(padding=6, center=True, expected=[0, 1, 2, 0, 1, 2, 0, 1, 2]),\n dict(padding=7, center=True, expected=[2, 0, 1, 2, 0, 1, 2, 0, 1, 2]),\n dict(padding=0, center=False, expected=[0, 1, 2]),\n dict(padding=1, center=False, expected=[0, 1, 2, 0]),\n dict(padding=2, center=False, expected=[0, 1, 2, 0, 1]),\n dict(padding=3, center=False, expected=[0, 1, 2, 0, 1, 2]),\n dict(padding=5, center=False, expected=[0, 1, 2, 0, 1, 2, 0, 1]),\n )\n def test_pad_periodic(self, padding, expected, center):\n with tf.Graph().as_default():\n with tf.Session():\n inputs = pad_periodic_1d(tf.range(3), padding=padding, center=center)\n np.testing.assert_equal(inputs.eval(), expected)\n\n def test_nn_conv1d_periodic(self):\n with tf.Graph().as_default():\n with tf.Session():\n inputs = tf.range(5.0)[tf.newaxis, :, tf.newaxis]\n\n filters = tf.constant([0.0, 1.0, 0.0])[:, tf.newaxis, tf.newaxis]\n actual = layers.nn_conv1d_periodic(inputs, filters, center=True)\n np.testing.assert_allclose(inputs.eval(), actual.eval())\n\n filters = tf.constant([0.0, 1.0])[:, tf.newaxis, tf.newaxis]\n actual = layers.nn_conv1d_periodic(inputs, filters, center=True)\n np.testing.assert_allclose(inputs.eval(), actual.eval())\n\n filters = tf.constant([0.5, 0.5])[:, tf.newaxis, tf.newaxis]\n expected = tf.constant(\n [2.0, 0.5, 1.5, 2.5, 3.5])[tf.newaxis, :, tf.newaxis]\n actual = layers.nn_conv1d_periodic(inputs, filters, center=True)\n 
np.testing.assert_allclose(expected.eval(), actual.eval())\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.range", "tensorflow.Graph", "tensorflow.Session", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.placeholder" ] ]
anetczuk/stock-monitor
[ "5852980837649ad3b8f76d413b61271f208a2399" ]
[ "src/stockmonitor/gui/widget/mpl/baseintradaychart.py" ]
[ "# MIT License\n#\n# Copyright (c) 2020 Arkadiusz Netczuk <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport logging\nimport datetime\n\nimport pandas\n\nfrom .mplcanvas import matplotlib, MplCanvas\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass BaseIntradayChart( MplCanvas ):\n\n def __init__(self, parentWidget=None):\n super().__init__(parentWidget, 10, 10, 80)\n\n self.mouseIndicators = dict()\n self.figure.canvas.mpl_connect('motion_notify_event', self._onPlotUpdateMouseIndicators )\n self.figure.canvas.mpl_connect('figure_leave_event', self._onPlotHideMouseIndicators )\n\n def clearPlot(self):\n# if self.figure.get_visible() is True:\n# self.figure.set_visible( False )\n\n allaxes = self.figure.get_axes()\n for ax in allaxes:\n ax.cla()\n self._removeMouseIndicators()\n\n def _onPlotUpdateMouseIndicators( self, event ):\n plot = event.inaxes\n if plot is None:\n self._removeMouseIndicators()\n return\n if len(plot.lines) < 1:\n self._removeMouseIndicators()\n return\n if event.xdata is None:\n self._removeMouseIndicators()\n return\n\n self._removeMouseIndicators( plot )\n\n firstLine = plot.lines[0]\n xdata = firstLine.get_xdata()\n ydata = firstLine.get_ydata()\n xindex = get_index_float( xdata, event.xdata )\n yvalue = ydata[ xindex ]\n\n indicators = self.mouseIndicators.get( plot, None )\n if indicators is None:\n indicators = [ plot.axhline( y=yvalue, color=\"y\", linestyle=\"--\" ),\n plot.axvline( x=event.xdata, color=\"y\", linestyle=\"--\" ) ]\n self.mouseIndicators[ plot ] = indicators\n else:\n indicators[0].set_data( [0, 1], [yvalue, yvalue] )\n indicators[1].set_data( [event.xdata, event.xdata], [0, 1] )\n\n self.draw_idle()\n\n def _onPlotHideMouseIndicators( self, _ ):\n# def _onPlotHideMouseIndicators( self, event ):\n self._removeMouseIndicators()\n\n def _removeMouseIndicators(self, preserve=None):\n keysList = set( self.mouseIndicators.keys() )\n for key in keysList:\n if key == preserve:\n continue\n lineList = self.mouseIndicators[ key ]\n for line in lineList:\n line.remove()\n del self.mouseIndicators[ key ]\n self.draw_idle()\n\n\ndef _configure_plot( plot, ylabel ):\n plot.set_xlabel( 'Time', fontsize=14 )\n plot.set_ylabel( ylabel, fontsize=14 )\n\n plot.margins( y=0.2 )\n plot.set_xmargin(0.0) ## prevents empty space between first tick and y axis\n\n\ndef _update_plot(xdata, plot ):\n ticks = _generate_ticks(xdata, 12)\n plot.set_xticks( ticks )\n\n setLongFormat = False\n if len(ticks) > 1:\n timeSpan = 
ticks[-1] - ticks[0]\n if timeSpan > datetime.timedelta( days=2 ):\n setLongFormat = True\n\n if setLongFormat is True:\n formatter = matplotlib.dates.DateFormatter('%d-%m-%Y')\n plot.xaxis.set_major_formatter( formatter )\n else:\n formatter = matplotlib.dates.DateFormatter('%H:%M:%S')\n plot.xaxis.set_major_formatter( formatter )\n\n ### hide first and last major tick (next to plot edges)\n# xticks = plot.xaxis.get_major_ticks()\n# xticks[0].label1.set_visible(False)\n ##xticks[-1].label1.set_visible(False)\n\n plot.relim(True)\n plot.autoscale_view()\n\n\ndef _generate_ticks(xdata, number):\n if number < 1:\n return list()\n start = xdata[0].timestamp()\n tzoffset = start - pandas.Timestamp( start, unit=\"s\" ).timestamp()\n if number < 2:\n middle = (start + xdata[-1].timestamp()) / 2 + tzoffset\n ts = pandas.Timestamp( middle, unit=\"s\" )\n ticks = [ts]\n return ticks\n# print(\"data:\", self.xdata, type(self.xdata))\n delta = (xdata[-1].timestamp() - start) / (number - 1)\n ticks = list()\n ticks.append( xdata[0] )\n currTs = start + tzoffset\n for _ in range(1, number):\n currTs += delta\n ts = pandas.Timestamp( currTs, unit=\"s\" )\n ticks.append( ts )\n return ticks\n\n\ndef get_index_float( xdata, xvalue ):\n valueDate = matplotlib.dates.num2date( xvalue )\n valueDate = valueDate.replace( tzinfo=None ) ## remove timezone info\n dataSize = len( xdata )\n for i in range(0, dataSize):\n currData = xdata[ i ]\n if valueDate < currData:\n return i - 1\n return dataSize - 1\n\n\ndef set_ref_format_coord( plot, refValue=None ):\n firstLine = plot.lines[0]\n xdata = firstLine.get_xdata()\n ydata = firstLine.get_ydata()\n xformatter = plot.xaxis.get_major_formatter()\n\n def format_coord(x, _):\n# def format_coord(x, y):\n xindex = get_index_float( xdata, x )\n yvalue = ydata[ xindex ]\n if refValue is not None:\n change = ( yvalue / refValue - 1 ) * 100\n return 'x=' + xformatter.format_data(x) + ' y=%1.4f ch=%1.2f%%' % ( yvalue, change )\n return 'x=' + xformatter.format_data(x) + ' y=%1.4f' % ( yvalue )\n\n plot.format_coord = format_coord\n\n\ndef set_int_format_coord( plot ):\n firstLine = plot.lines[0]\n xdata = firstLine.get_xdata()\n ydata = firstLine.get_ydata()\n xformatter = plot.xaxis.get_major_formatter()\n\n def format_coord(x, _):\n# def format_coord(x, y):\n xindex = get_index_float( xdata, x )\n yvalue = ydata[ xindex ]\n return 'x=' + xformatter.format_data(x) + ' y=%i' % yvalue\n\n plot.format_coord = format_coord\n" ]
[ [ "pandas.Timestamp" ] ]
MBasting/Sudoku-solver
[ "7a5d9e070bc3a3cc59f8b2afc179163dad1a2d69" ]
[ "python/solver.py" ]
[ "import numpy as np\nimport copy\nimport random\nimport time\n\n\ndef solveSudoku(sudokuOriginal):\n blocks, rows, columns = fillPossibilities(sudokuOriginal)\n counter = 0\n start = time.time()\n res, sudoku1 = trySudoku(copy.deepcopy(sudokuOriginal), copy.deepcopy(blocks),\n copy.deepcopy(rows), copy.deepcopy(columns))\n counter += 1\n if res:\n print(sudoku1)\n print(\"Number of attempts: \", counter)\n end = time.time()\n print(\"Duration: \", end - start)\n return sudoku1\n\n\ndef trySudoku(sudoku, blocks, rows, columns):\n sudoku1 = []\n for i in range(len(sudoku)):\n for j in range(len(sudoku[i])):\n if sudoku[i][j] == 0:\n res, sudokus, mblocks, mrows, mcolumns = solveEntry(sudoku, blocks, rows, columns, [i, j])\n if not res:\n return False, sudoku1\n # Try all possible valid numbers, and continue with those numbers.\n for sud in range(len(sudokus)):\n curS = sudokus[sud]\n curB = mblocks[sud]\n curRow = mrows[sud]\n curCol = mcolumns[sud]\n res, temp = trySudoku(curS, curB, curRow, curCol)\n if not res:\n continue\n return True, sudoku1\n\n\ndef solveEntry(sudoku, blocks, rows, columns, pos):\n posb, block = getPossibilitiesSquare(blocks, rows, columns, pos)\n if len(posb) == 0:\n return False, sudoku, blocks, rows, columns\n sudokus = []\n mblocks = []\n mrows = []\n mcols = []\n for number in posb:\n sudoku1 = copy.deepcopy(sudoku)\n sudoku1[pos[0]][pos[1]] = number\n sudokus.append(sudoku1)\n mblocks.append(updateBlock(copy.deepcopy(blocks), block, number))\n mrows.append(updateRowColumn(copy.deepcopy(rows), pos[0], number))\n mcols.append(updateRowColumn(copy.deepcopy(columns), pos[1], number))\n return True, sudokus, mblocks, mrows, mcols\n\n\ndef getPossibilitiesSquare(blocks, rows, columns, pos):\n block = (int(np.floor(pos[0] / 3)), int(np.floor(pos[1] / 3)))\n curblock = blocks[block[0]][block[1]]\n currow = rows[pos[0]]\n curcol = columns[pos[1]]\n possibilities = list(set(curcol).intersection(set(curblock).intersection(currow)))\n return possibilities, block\n\n\ndef updateBlock(blocks, block, entry):\n cur = blocks[block[0]][block[1]]\n blocks[block[0]][block[1]] = [a for a in cur if a != entry]\n return blocks\n\n\ndef updateRowColumn(rows, i, entry):\n rows[i] = [a for a in rows[i] if a != entry]\n return rows\n\n\ndef fillPossibilities(sudoku):\n blocks = np.zeros((3, 3, 9), dtype=int)\n for i in range(0, 3):\n for j in range(0, 3):\n blocks[i, j] = np.arange(1, 10)\n blocks = blocks.tolist()\n rows = np.zeros((9, 9), dtype=int)\n for i in range(0, 9):\n rows[i] = np.arange(1, 10)\n rows = rows.tolist()\n columns = rows.copy()\n for i in range(len(sudoku)):\n for j in range(len(sudoku[i])):\n if sudoku[i][j] != 0:\n entry = sudoku[i][j]\n block = (int(np.floor(i / 3)), int(np.floor(j / 3)))\n updateBlock(blocks, block, entry)\n updateRowColumn(rows, i, entry)\n updateRowColumn(columns, j, entry)\n return blocks, rows, columns\n\n\n# testsudoku = [[0, 0, 0, 4, 0, 0, 0, 3, 0],\n# [7, 0, 4, 8, 0, 0, 1, 0, 2],\n# [0, 0, 0, 2, 3, 0, 4, 0, 9],\n# [0, 4, 0, 5, 0, 9, 0, 8, 0],\n# [5, 0, 0, 0, 0, 0, 9, 1, 3],\n# [1, 0, 0, 0, 8, 0, 2, 0, 4],\n# [0, 0, 0, 0, 0, 0, 3, 4, 5],\n# [0, 5, 1, 9, 4, 0, 7, 2, 0],\n# [4, 7, 3, 0, 5, 0, 0, 9, 1]]\n#\n#\n# difficult = [[0, 0, 0, 0, 0, 0, 3, 0, 7],\n# [9, 0, 0, 0, 0, 0, 5, 1, 4],\n# [3, 0, 4, 0, 1, 6, 0, 2, 0],\n# [0, 0, 6, 0, 0, 0, 0, 5, 0],\n# [2, 0, 0, 0, 0, 4, 0, 0, 0],\n# [0, 0, 0, 0, 0, 9, 4, 0, 0],\n# [0, 0, 1, 9, 0, 0, 0, 7, 6],\n# [0, 0, 0, 0, 0, 0, 0, 3, 0],\n# [0, 0, 7, 6, 0, 0, 0, 0, 5]]\n# solveSudoku(testsudoku)\n\n" ]
[ [ "numpy.arange", "numpy.zeros", "numpy.floor" ] ]
xphongvn/dcase2020
[ "7b4cee3cdef252f80e75ffe51450308dbd589fec" ]
[ "05_SVDD.py" ]
[ "import torch\nimport logging\nimport random\nimport numpy as np\nimport common as com\nimport os\nimport ipdb\nfrom sklearn import metrics\nfrom tqdm import tqdm\nfrom SVDD.datasets.dcase import DCASE_Dataset\nfrom SVDD.utils.config import Config\nfrom SVDD.deepSVDD import DeepSVDD\n\n########################################################################\n# load parameter.yaml\n########################################################################\nparam = com.yaml_load()\n\n\n# Configuration hard coded\n# TODO: move to config file\nload_config = None\nnet_name = \"dcase\"\nxp_path = param[\"result_directory_SVDD\"]\ndata_path = \"\"\nload_con2fig = None\nload_model = None\nobjective = \"one-class\"\nnu = 0.1\ndevice = \"cuda\"\nseed = 0\noptimizer_name = \"adam\"\nlr = 0.0001\nn_epochs = 10\nlr_milestone = ([5])\nbatch_size = 512\nweight_decay = 0.5e-6\npretrain = True\nae_optimizer_name = \"adam\"\nae_lr = 0.001\nae_n_epochs = 10\nae_lr_milestone = ([5])\nae_batch_size = 512\nae_weight_decay = 0.5e-3\nn_jobs_dataloader = 2\nnormal_class = 0\n\nif __name__ == '__main__':\n \"\"\"\n Deep SVDD, a fully deep method for anomaly detection.\n\n :arg DATASET_NAME: Name of the dataset to load.\n :arg NET_NAME: Name of the neural network to use.\n :arg XP_PATH: Export path for logging the experiment.\n :arg DATA_PATH: Root path of data.\n \"\"\"\n\n # Set up logging\n log_file = xp_path + '/log.txt'\n logging.basicConfig(level=logging.DEBUG, filename=log_file)\n logger = logging.getLogger()\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n file_handler = logging.StreamHandler()\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n # Get configuration\n cfg = Config(locals().copy())\n\n # Print arguments\n logger.info('Log file is %s.' % log_file)\n logger.info('Data path is %s.' % data_path)\n logger.info('Export path is %s.' % xp_path)\n\n logger.info('Normal class: %d' % normal_class)\n logger.info('Network: %s' % net_name)\n\n # If specified, load experiment config from JSON-file\n if load_config:\n cfg.load_config(import_json=load_config)\n logger.info('Loaded configuration from %s.' % load_config)\n\n # Print configuration\n logger.info('Deep SVDD objective: %s' % cfg.settings['objective'])\n logger.info('Nu-paramerter: %.2f' % cfg.settings['nu'])\n\n # Set seed\n if cfg.settings['seed'] != -1:\n random.seed(cfg.settings['seed'])\n np.random.seed(cfg.settings['seed'])\n torch.manual_seed(cfg.settings['seed'])\n logger.info('Set seed to %d.' 
% cfg.settings['seed'])\n\n # Default device to 'cpu' if cuda is not available\n if not torch.cuda.is_available():\n device = 'cpu'\n logger.info('Computation device: %s' % device)\n logger.info('Number of dataloader workers: %d' % n_jobs_dataloader)\n\n # load base_directory list\n dirs = com.select_dirs(param=param, mode=True)\n\n # loop of the base directory\n for idx, target_dir in enumerate(dirs):\n # set path\n machine_type = os.path.split(target_dir)[1]\n\n # Load train data\n files = com.file_list_generator(target_dir)\n train_data = com.list_to_vector_array(files,\n msg=\"generate train_dataset\",\n n_mels=param[\"feature\"][\"n_mels\"],\n frames=param[\"feature\"][\"frames\"],\n n_fft=param[\"feature\"][\"n_fft\"],\n hop_length=param[\"feature\"][\"hop_length\"],\n power=param[\"feature\"][\"power\"],\n extra_features=param[\"feature\"][\"extra\"])\n # Get labels into train_data\n train_labels = np.full(train_data.shape[0], 0)\n if train_data.shape[0] != len(train_labels):\n raise(\"Train data and labels do not have the same size\")\n\n # Load test data\n test_files, test_files_label = com.test_file_list_generator(target_dir, id_name=\"\", mode=True)\n test_data = com.list_to_vector_array(test_files,\n msg=\"generate test_dataset\",\n n_mels=param[\"feature\"][\"n_mels\"],\n frames=param[\"feature\"][\"frames\"],\n n_fft=param[\"feature\"][\"n_fft\"],\n hop_length=param[\"feature\"][\"hop_length\"],\n power=param[\"feature\"][\"power\"],\n extra_features=param[\"feature\"][\"extra\"])\n\n # Get labels into train_data\n n_row = int(test_data.shape[0] / len(test_files_label))\n test_labels = []\n for i in range(len(test_files_label)):\n test_labels.extend([test_files_label[i] for k in range(n_row)]) # Duplicate label into n_row times\n test_labels = np.array(test_labels)\n\n if test_data.shape[0] != len(test_labels):\n raise(\"Test data and labels do not have the same size\")\n\n dataset = DCASE_Dataset(train_data=train_data, train_labels=train_labels,\n test_data=test_data, test_labels=test_labels,\n normal_class=0)\n\n # Initialize DeepSVDD model and set neural network \\phi\n deep_SVDD = DeepSVDD(cfg.settings['objective'], cfg.settings['nu'])\n inputDim = train_data.shape[1]\n deep_SVDD.set_network(net_name, inputDim)\n\n # If specified, load Deep SVDD model (radius R, center c, network weights, and possibly autoencoder weights)\n if load_model:\n deep_SVDD.load_model(model_path=load_model, load_ae=True)\n logger.info('Loading model from %s.' 
% load_model)\n\n logger.info('Pretraining: %s' % pretrain)\n if pretrain:\n # Log pretraining details\n logger.info('Pretraining optimizer: %s' % cfg.settings['ae_optimizer_name'])\n logger.info('Pretraining learning rate: %g' % cfg.settings['ae_lr'])\n logger.info('Pretraining epochs: %d' % cfg.settings['ae_n_epochs'])\n logger.info('Pretraining learning rate scheduler milestones: %s' % (cfg.settings['ae_lr_milestone'],))\n logger.info('Pretraining batch size: %d' % cfg.settings['ae_batch_size'])\n logger.info('Pretraining weight decay: %g' % cfg.settings['ae_weight_decay'])\n\n # Pretrain model on dataset (via autoencoder)\n deep_SVDD.pretrain(dataset,\n optimizer_name=cfg.settings['ae_optimizer_name'],\n lr=cfg.settings['ae_lr'],\n n_epochs=cfg.settings['ae_n_epochs'],\n lr_milestones=(cfg.settings['ae_lr_milestone']),\n batch_size=cfg.settings['ae_batch_size'],\n weight_decay=cfg.settings['ae_weight_decay'],\n device=device,\n n_jobs_dataloader=n_jobs_dataloader,\n inputDim = inputDim)\n\n # Log training details\n logger.info('Training optimizer: %s' % cfg.settings['optimizer_name'])\n logger.info('Training learning rate: %g' % cfg.settings['lr'])\n logger.info('Training epochs: %d' % cfg.settings['n_epochs'])\n logger.info('Training learning rate scheduler milestones: %s' % (cfg.settings['lr_milestone'],))\n logger.info('Training batch size: %d' % cfg.settings['batch_size'])\n logger.info('Training weight decay: %g' % cfg.settings['weight_decay'])\n\n # Train model on dataset\n deep_SVDD.train(dataset,\n optimizer_name=cfg.settings['optimizer_name'],\n lr=cfg.settings['lr'],\n n_epochs=cfg.settings['n_epochs'],\n lr_milestones=(cfg.settings['lr_milestone']),\n batch_size=cfg.settings['batch_size'],\n weight_decay=cfg.settings['weight_decay'],\n device=device,\n n_jobs_dataloader=n_jobs_dataloader)\n\n # Test model\n deep_SVDD.test(dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)\n\n # Plot most anomalous and most normal (within-class) test samples\n labels, scores = zip(*deep_SVDD.results['test_scores'])\n labels, scores = np.array(labels), np.array(scores)\n\n # Reshape score for errors to match each file\n scores_file = scores.reshape(len(test_files_label), n_row)\n # Calculate score of each file\n errors = np.mean(scores_file, axis=1)\n auc = metrics.roc_auc_score(test_files_label,errors)\n print(\"AUC is: {}\".format(auc))\n p_auc = metrics.roc_auc_score(test_files_label, errors, max_fpr=param[\"max_fpr\"])\n print(\"P_AUC is: {}\".format(p_auc))\n\n anomaly_score_list = []\n for file_idx, file_path in tqdm(enumerate(test_files), total=len(test_files)):\n anomaly_score_list.append([os.path.basename(file_path), errors[file_idx]])\n\n os.makedirs(param[\"result_directory_SVDD\"], exist_ok=True)\n anomaly_score_csv = \"{result}/anomaly_score_{machine_type}.csv\".format(result=param[\"result_directory_SVDD\"],\n machine_type=machine_type)\n\n com.save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)\n com.logger.info(\"anomaly score result -> {}\".format(anomaly_score_csv))\n\n #idx_sorted = indices[labels == 0][np.argsort(scores[labels == 0])] # sorted from lowest to highest anomaly score\n\n # Save results, model, and configuration\n deep_SVDD.save_results(export_json=xp_path + '/results.json')\n deep_SVDD.save_model(export_model=xp_path + '/model.tar')\n #cfg.save_config(export_json=xp_path + '/config.json')\n" ]
[ [ "numpy.full", "numpy.array", "numpy.random.seed", "numpy.mean", "torch.manual_seed", "torch.cuda.is_available", "sklearn.metrics.roc_auc_score" ] ]
s-tian/robosuite
[ "56c20db6231e03ec5910869f7227819943fc24b2" ]
[ "robosuite/utils/camera_utils.py" ]
[ "\"\"\"\nThis module includes:\n\n- Utility classes for modifying sim cameras\n\n- Utility functions for performing common camera operations such as retrieving\ncamera matrices and transforming from world to camera frame or vice-versa.\n\"\"\"\nimport json\nimport xml.etree.ElementTree as ET\n\nimport h5py\nimport numpy as np\n\nimport robosuite\nimport robosuite.utils.transform_utils as T\nfrom robosuite.utils.mjcf_utils import postprocess_model_xml\nfrom robosuite.wrappers import DomainRandomizationWrapper, VisualizationWrapper\n\n\ndef get_camera_intrinsic_matrix(sim, camera_name, camera_height, camera_width):\n \"\"\"\n Obtains camera intrinsic matrix.\n\n Args:\n sim (MjSim): simulator instance\n camera_name (str): name of camera\n camera_height (int): height of camera images in pixels\n camera_width (int): width of camera images in pixels\n Return:\n K (np.array): 3x3 camera matrix\n \"\"\"\n cam_id = sim.model.camera_name2id(camera_name)\n fovy = sim.model.cam_fovy[cam_id]\n f = 0.5 * camera_height / np.tan(fovy * np.pi / 360)\n K = np.array([[f, 0, camera_width / 2], [0, f, camera_height / 2], [0, 0, 1]])\n return K\n\n\ndef get_camera_extrinsic_matrix(sim, camera_name):\n \"\"\"\n Returns a 4x4 homogenous matrix corresponding to the camera pose in the\n world frame. MuJoCo has a weird convention for how it sets up the\n camera body axis, so we also apply a correction so that the x and y\n axis are along the camera view and the z axis points along the\n viewpoint.\n Normal camera convention: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html\n\n Args:\n sim (MjSim): simulator instance\n camera_name (str): name of camera\n Return:\n R (np.array): 4x4 camera extrinsic matrix\n \"\"\"\n cam_id = sim.model.camera_name2id(camera_name)\n camera_pos = sim.data.cam_xpos[cam_id]\n camera_rot = sim.data.cam_xmat[cam_id].reshape(3, 3)\n R = T.make_pose(camera_pos, camera_rot)\n\n # IMPORTANT! 
This is a correction so that the camera axis is set up along the viewpoint correctly.\n camera_axis_correction = np.array(\n [[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n R = R @ camera_axis_correction\n return R\n\n\ndef get_camera_transform_matrix(sim, camera_name, camera_height, camera_width):\n \"\"\"\n Camera transform matrix to project from world coordinates to pixel coordinates.\n\n Args:\n sim (MjSim): simulator instance\n camera_name (str): name of camera\n camera_height (int): height of camera images in pixels\n camera_width (int): width of camera images in pixels\n Return:\n K (np.array): 4x4 camera matrix to project from world coordinates to pixel coordinates\n \"\"\"\n R = get_camera_extrinsic_matrix(sim=sim, camera_name=camera_name)\n K = get_camera_intrinsic_matrix(\n sim=sim, camera_name=camera_name, camera_height=camera_height, camera_width=camera_width\n )\n K_exp = np.eye(4)\n K_exp[:3, :3] = K\n\n # Takes a point in world, transforms to camera frame, and then projects onto image plane.\n return K_exp @ T.pose_inv(R)\n\n\ndef get_camera_segmentation(sim, camera_name, camera_height, camera_width):\n \"\"\"\n Obtains camera segmentation matrix.\n\n Args:\n sim (MjSim): simulator instance\n camera_name (str): name of camera\n camera_height (int): height of camera images in pixels\n camera_width (int): width of camera images in pixels\n Return:\n im (np.array): 2-channel segmented image where the first contains the\n geom types and the second contains the geom IDs\n \"\"\"\n return sim.render(camera_name=camera_name, height=camera_height, width=camera_width, segmentation=True)[::-1]\n\n\ndef get_real_depth_map(sim, depth_map):\n \"\"\"\n By default, MuJoCo will return a depth map that is normalized in [0, 1]. This\n helper function converts the map so that the entries correspond to actual distances.\n\n (see https://github.com/deepmind/dm_control/blob/master/dm_control/mujoco/engine.py#L742)\n\n Args:\n sim (MjSim): simulator instance\n depth_map (np.array): depth map with values normalized in [0, 1] (default depth map\n returned by MuJoCo)\n Return:\n depth_map (np.array): depth map that corresponds to actual distances\n \"\"\"\n # Make sure that depth values are normalized\n assert np.all(depth_map >= 0.0) and np.all(depth_map <= 1.0)\n extent = sim.model.stat.extent\n far = sim.model.vis.map.zfar * extent\n near = sim.model.vis.map.znear * extent\n return near / (1.0 - depth_map * (1.0 - near / far))\n\n\ndef project_points_from_world_to_camera(points, world_to_camera_transform, camera_height, camera_width):\n \"\"\"\n Helper function to project a batch of points in the world frame\n into camera pixels using the world to camera transformation.\n\n Args:\n points (np.array): 3D points in world frame to project onto camera pixel locations. 
Should\n be shape [..., 3].\n world_to_camera_transform (np.array): 4x4 Tensor to go from robot coordinates to pixel\n coordinates.\n camera_height (int): height of the camera image\n camera_width (int): width of the camera image\n\n Return:\n pixels (np.array): projected pixel indices of shape [..., 2]\n \"\"\"\n assert points.shape[-1] == 3 # last dimension must be 3D\n assert len(world_to_camera_transform.shape) == 2\n assert world_to_camera_transform.shape[0] == 4 and world_to_camera_transform.shape[1] == 4\n\n # convert points to homogenous coordinates -> (px, py, pz, 1)\n ones_pad = np.ones(points.shape[:-1] + (1,))\n points = np.concatenate((points, ones_pad), axis=-1) # shape [..., 4]\n\n # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do robot frame to pixels transform\n mat_reshape = [1] * len(points.shape[:-1]) + [4, 4]\n cam_trans = world_to_camera_transform.reshape(mat_reshape) # shape [..., 4, 4]\n pixels = np.matmul(cam_trans, points[..., None])[..., 0] # shape [..., 4]\n\n # re-scaling from homogenous coordinates to recover pixel values\n # (x, y, z) -> (x / z, y / z)\n pixels = pixels / pixels[..., 2:3]\n pixels = pixels[..., :2].round().astype(int) # shape [..., 2]\n\n # swap first and second coordinates to get pixel indices that correspond to (height, width)\n # and also clip pixels that are out of range of the camera image\n pixels = np.concatenate(\n (\n pixels[..., 1:2].clip(0, camera_height - 1),\n pixels[..., 0:1].clip(0, camera_width - 1),\n ),\n axis=-1,\n )\n\n return pixels\n\n\ndef transform_from_pixels_to_world(pixels, depth_map, camera_to_world_transform):\n \"\"\"\n Helper function to take a batch of pixel locations and the corresponding depth image\n and transform these points from the camera frame to the world frame.\n\n Args:\n pixels (np.array): pixel coordinates of shape [..., 2]\n depth_map (np.array): depth images of shape [..., H, W, 1]\n camera_to_world_transform (np.array): 4x4 Tensor to go from pixel coordinates to world\n coordinates.\n\n Return:\n points (np.array): 3D points in robot frame of shape [..., 3]\n \"\"\"\n\n # make sure leading dimensions are consistent\n pixels_leading_shape = pixels.shape[:-1]\n depth_map_leading_shape = depth_map.shape[:-3]\n assert depth_map_leading_shape == pixels_leading_shape\n\n # sample from the depth map using the pixel locations with bilinear sampling\n pixels = pixels.astype(float)\n im_h, im_w = depth_map.shape[-2:]\n depth_map_reshaped = depth_map.reshape(-1, im_h, im_w, 1)\n z = bilinear_interpolate(im=depth_map_reshaped, x=pixels[..., 1:2], y=pixels[..., 0:1])\n z = z.reshape(*depth_map_leading_shape, 1) # shape [..., 1]\n\n # form 4D homogenous camera vector to transform - [x * z, y * z, z, 1]\n # (note that we need to swap the first 2 dimensions of pixels to go from pixel indices\n # to camera coordinates)\n cam_pts = [pixels[..., 1:2] * z, pixels[..., 0:1] * z, z, np.ones_like(z)]\n cam_pts = np.concatenate(cam_pts, axis=-1) # shape [..., 4]\n\n # batch matrix multiplication of 4 x 4 matrix and 4 x 1 vectors to do camera to robot frame transform\n mat_reshape = [1] * len(cam_pts.shape[:-1]) + [4, 4]\n cam_trans = camera_to_world_transform.reshape(mat_reshape) # shape [..., 4, 4]\n points = np.matmul(cam_trans, cam_pts[..., None])[..., 0] # shape [..., 4]\n return points[..., :3]\n\n\ndef bilinear_interpolate(im, x, y):\n \"\"\"\n Bilinear sampling for pixel coordinates x and y from source image im.\n Taken from 
https://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n x0 = np.floor(x).astype(int)\n x1 = x0 + 1\n y0 = np.floor(y).astype(int)\n y1 = y0 + 1\n\n x0 = np.clip(x0, 0, im.shape[1] - 1)\n x1 = np.clip(x1, 0, im.shape[1] - 1)\n y0 = np.clip(y0, 0, im.shape[0] - 1)\n y1 = np.clip(y1, 0, im.shape[0] - 1)\n\n Ia = im[y0, x0]\n Ib = im[y1, x0]\n Ic = im[y0, x1]\n Id = im[y1, x1]\n\n wa = (x1 - x) * (y1 - y)\n wb = (x1 - x) * (y - y0)\n wc = (x - x0) * (y1 - y)\n wd = (x - x0) * (y - y0)\n\n return wa * Ia + wb * Ib + wc * Ic + wd * Id\n\n\nclass CameraMover:\n \"\"\"\n A class for manipulating a camera.\n\n WARNING: This class will initially RE-INITIALIZE the environment.\n\n Args:\n env (MujocoEnv): Mujoco environment to modify camera\n camera (str): Which camera to mobilize during playback, e.g.: frontview, agentview, etc.\n init_camera_pos (None or 3-array): If specified, should be the (x,y,z) global cartesian pos to\n initialize camera to\n init_camera_quat (None or 4-array): If specified, should be the (x,y,z,w) global quaternion orientation to\n initialize camera to\n \"\"\"\n\n def __init__(\n self,\n env,\n camera=\"frontview\",\n init_camera_pos=None,\n init_camera_quat=None,\n ):\n # Store relevant values and initialize other values\n self.env = env\n self.camera = camera\n self.mover_body_name = f\"{self.camera}_cameramover\"\n\n # Get state\n state = self.env.sim.get_state().flatten()\n\n # Grab environment xml\n xml = env.sim.model.get_xml()\n\n # Modify xml to add mocap to move camera around\n xml = self.modify_xml_for_camera_movement(xml=xml, camera_name=self.camera)\n\n # Reset the environment and restore the state\n self.env.reset_from_xml_string(xml)\n self.env.sim.reset()\n self.env.sim.set_state_from_flattened(state)\n self.env.sim.forward()\n\n # Set initial camera pose\n self.set_camera_pose(pos=init_camera_pos, quat=init_camera_quat)\n\n def set_camera_pose(self, pos=None, quat=None):\n \"\"\"\n Sets the camera pose, which optionally includes position and / or quaternion\n\n Args:\n pos (None or 3-array): If specified, should be the (x,y,z) global cartesian pos to set camera to\n quat (None or 4-array): If specified, should be the (x,y,z,w) global quaternion orientation to set camera to\n \"\"\"\n if pos is not None:\n self.env.sim.data.set_mocap_pos(self.mover_body_name, pos)\n if quat is not None:\n self.env.sim.data.set_mocap_quat(self.mover_body_name, T.convert_quat(quat, to=\"wxyz\"))\n\n # Make sure changes propagate in sim\n self.env.sim.forward()\n\n def get_camera_pose(self):\n \"\"\"\n Grab the current camera pose, which optionally includes position and / or quaternion\n\n Returns:\n 2-tuple:\n - 3-array: (x,y,z) camera global cartesian pos\n - 4-array: (x,y,z,w) camera global quaternion orientation\n \"\"\"\n # Grab values from sim\n pos = self.env.sim.data.get_mocap_pos(self.mover_body_name)\n quat = T.convert_quat(self.env.sim.data.get_mocap_quat(self.mover_body_name), to=\"xyzw\")\n\n return pos, quat\n\n def modify_xml_for_camera_movement(self, xml, camera_name):\n \"\"\"\n Cameras in mujoco are 'fixed', so they can't be moved by default.\n Although it's possible to hack position movement, rotation movement\n does not work. An alternative is to attach a camera to a mocap body,\n and move the mocap body.\n\n This function modifies the camera with name @camera_name in the xml\n by attaching it to a mocap body that can move around freely. 
In this\n way, we can move the camera by moving the mocap body.\n\n See http://www.mujoco.org/forum/index.php?threads/move-camera.2201/ for\n further details.\n\n Args:\n xml (str): Mujoco sim XML file as a string\n camera_name (str): Name of camera to tune\n \"\"\"\n tree = ET.fromstring(xml)\n wb = tree.find(\"worldbody\")\n\n # find the correct camera\n camera_elem = None\n cameras = wb.findall(\"camera\")\n for camera in cameras:\n if camera.get(\"name\") == camera_name:\n camera_elem = camera\n break\n assert camera_elem is not None\n\n # add mocap body\n mocap = ET.SubElement(wb, \"body\")\n mocap.set(\"name\", self.mover_body_name)\n mocap.set(\"mocap\", \"true\")\n mocap.set(\"pos\", camera.get(\"pos\"))\n mocap.set(\"quat\", camera.get(\"quat\"))\n new_camera = ET.SubElement(mocap, \"camera\")\n new_camera.set(\"mode\", \"fixed\")\n new_camera.set(\"name\", camera.get(\"name\"))\n new_camera.set(\"pos\", \"0 0 0\")\n\n # remove old camera element\n wb.remove(camera_elem)\n\n return ET.tostring(tree, encoding=\"utf8\").decode(\"utf8\")\n\n def rotate_camera(self, point, axis, angle):\n \"\"\"\n Rotate the camera view about a direction (in the camera frame).\n\n Args:\n point (None or 3-array): (x,y,z) cartesian coordinates about which to rotate camera in camera frame. If None,\n assumes the point is the current location of the camera\n axis (3-array): (ax,ay,az) axis about which to rotate camera in camera frame\n angle (float): how much to rotate about that direction\n\n Returns:\n 2-tuple:\n pos: (x,y,z) updated camera position\n quat: (x,y,z,w) updated camera quaternion orientation\n \"\"\"\n # current camera rotation + pos\n camera_pos = np.array(self.env.sim.data.get_mocap_pos(self.mover_body_name))\n camera_rot = T.quat2mat(T.convert_quat(self.env.sim.data.get_mocap_quat(self.mover_body_name), to=\"xyzw\"))\n\n # rotate by angle and direction to get new camera rotation\n rad = np.pi * angle / 180.0\n R = T.rotation_matrix(rad, axis, point=point)\n camera_pose = np.zeros((4, 4))\n camera_pose[:3, :3] = camera_rot\n camera_pose[:3, 3] = camera_pos\n camera_pose = camera_pose @ R\n\n # Update camera pose\n pos, quat = camera_pose[:3, 3], T.mat2quat(camera_pose[:3, :3])\n self.set_camera_pose(pos=pos, quat=quat)\n\n return pos, quat\n\n def move_camera(self, direction, scale):\n \"\"\"\n Move the camera view along a direction (in the camera frame).\n\n Args:\n direction (3-array): direction vector for where to move camera in camera frame\n scale (float): how much to move along that direction\n \"\"\"\n # current camera rotation + pos\n camera_pos = np.array(self.env.sim.data.get_mocap_pos(self.mover_body_name))\n camera_quat = self.env.sim.data.get_mocap_quat(self.mover_body_name)\n camera_rot = T.quat2mat(T.convert_quat(camera_quat, to=\"xyzw\"))\n\n # move along camera frame axis and set new position\n camera_pos += scale * camera_rot.dot(direction)\n self.set_camera_pose(pos=camera_pos)\n\n return camera_pos, camera_quat\n\n\nclass DemoPlaybackCameraMover(CameraMover):\n \"\"\"\n A class for playing back demonstrations and recording the resulting frames with the flexibility of a mobile camera\n that can be set manually or panned automatically frame-by-frame\n\n Note: domain randomization is also supported for playback!\n\n Args:\n demo (str): absolute fpath to .hdf5 demo\n env_config (None or dict): (optional) values to override inferred environment information from demonstration.\n (e.g.: camera h / w, depths, segmentations, etc...)\n Any value not specified will be inferred 
from the extracted demonstration metadata\n Note that there are some specific arguments that MUST be set a certain way, if any of these values\n are specified with @env_config, an error will be raised\n replay_from_actions (bool): If True, will replay demonstration's actions. Otherwise, replays will be hardcoded\n from the demonstration states\n visualize_sites (bool): If True, will visualize sites during playback. Note that this CANNOT be paired\n simultaneously with camera segmentations\n camera (str): Which camera to mobilize during playback, e.g.: frontview, agentview, etc.\n init_camera_pos (None or 3-array): If specified, should be the (x,y,z) global cartesian pos to\n initialize camera to\n init_camera_quat (None or 4-array): If specified, should be the (x,y,z,w) global quaternion orientation to\n initialize camera to\n use_dr (bool): If True, will use domain randomization during playback\n dr_args (None or dict): If specified, will set the domain randomization wrapper arguments if using dr\n \"\"\"\n\n def __init__(\n self,\n demo,\n env_config=None,\n replay_from_actions=False,\n visualize_sites=False,\n camera=\"frontview\",\n init_camera_pos=None,\n init_camera_quat=None,\n use_dr=False,\n dr_args=None,\n ):\n # Store relevant values and initialize other values\n self.camera_id = None\n self.replay_from_actions = replay_from_actions\n self.states = None\n self.actions = None\n self.step = None\n self.n_steps = None\n self.current_ep = None\n self.started = False\n\n # Load the demo\n self.f = h5py.File(demo, \"r\")\n\n # Extract relevant info\n env_info = json.loads(self.f[\"data\"].attrs[\"env_info\"])\n\n # Construct default env arguments\n default_args = {\n \"has_renderer\": False,\n \"has_offscreen_renderer\": True,\n \"ignore_done\": True,\n \"use_camera_obs\": True,\n \"reward_shaping\": True,\n \"hard_reset\": False,\n \"camera_names\": camera,\n }\n\n # If custom env_config is specified, make sure that there's no overlap with default args and merge with config\n if env_config is not None:\n for k in env_config.keys():\n assert k not in default_args, f\"Key {k} cannot be specified in env_config!\"\n env_info.update(env_config)\n\n # Merge in default args\n env_info.update(default_args)\n\n # Create env\n env = robosuite.make(**env_info)\n\n # Optionally wrap with visualization wrapper\n if visualize_sites:\n env = VisualizationWrapper(env=self.env)\n\n # Optionally use domain randomization if specified\n self.use_dr = use_dr\n if self.use_dr:\n default_dr_args = {\n \"seed\": 1,\n \"randomize_camera\": False,\n \"randomize_every_n_steps\": 10,\n }\n default_dr_args.update(dr_args)\n env = DomainRandomizationWrapper(\n env=self.env,\n **default_dr_args,\n )\n\n # list of all demonstrations episodes\n self.demos = list(self.f[\"data\"].keys())\n\n # Run super init\n super().__init__(\n env=env,\n camera=camera,\n init_camera_pos=init_camera_pos,\n init_camera_quat=init_camera_quat,\n )\n\n # Load episode 0 by default\n self.load_episode_xml(demo_num=0)\n\n def load_episode_xml(self, demo_num):\n \"\"\"\n Loads demo episode with specified @demo_num into the simulator.\n\n Args:\n demo_num (int): Demonstration number to load\n \"\"\"\n # Grab raw xml file\n ep = self.demos[demo_num]\n model_xml = self.f[f\"data/{ep}\"].attrs[\"model_file\"]\n\n # Reset environment\n self.env.reset()\n xml = postprocess_model_xml(model_xml)\n xml = self.modify_xml_for_camera_movement(xml, camera_name=self.camera)\n self.env.reset_from_xml_string(xml)\n self.env.sim.reset()\n\n # Update camera 
info\n self.camera_id = self.env.sim.model.camera_name2id(self.camera)\n\n # Load states and actions\n self.states = self.f[f\"data/{ep}/states\"].value\n self.actions = np.array(self.f[f\"data/{ep}/actions\"].value)\n\n # Set initial state\n self.env.sim.set_state_from_flattened(self.states[0])\n\n # Reset step count and set current episode number\n self.step = 0\n self.n_steps = len(self.actions)\n self.current_ep = demo_num\n\n # Notify user of loaded episode\n print(f\"Loaded episode {demo_num}.\")\n\n def grab_next_frame(self):\n \"\"\"\n Grabs the next frame in the demo sequence by stepping the simulation and returning the resulting value(s)\n\n Returns:\n dict: Keyword-mapped np.arrays from the demonstration sequence, corresponding to all image modalities used\n in the playback environment (e.g.: \"image\", \"depth\", \"segmentation_instance\")\n \"\"\"\n # Make sure the episode isn't completed yet, if so, we load the next episode\n if self.step == self.n_steps:\n self.load_episode_xml(demo_num=self.current_ep + 1)\n\n # Step the environment and grab obs\n if self.replay_from_actions:\n obs, _, _, _ = self.env.step(self.actions[self.step])\n else: # replay from states\n self.env.sim.set_state_from_flattened(self.states[self.step + 1])\n if self.use_dr:\n self.env.step_randomization()\n self.env.sim.forward()\n obs = self.env._get_observation()\n\n # Increment the step counter\n self.step += 1\n\n # Return all relevant frames\n return {k.split(f\"{self.camera}_\")[-1]: obs[k] for k in obs if self.camera in k}\n\n def grab_episode_frames(self, demo_num, pan_point=(0, 0, 0.8), pan_axis=(0, 0, 1), pan_rate=0.01):\n \"\"\"\n Playback entire episode @demo_num, while optionally rotating the camera about point @pan_point and\n axis @pan_axis if @pan_rate > 0\n\n Args:\n demo_num (int): Demonstration episode number to load for playback\n pan_point (3-array): (x,y,z) cartesian coordinates about which to rotate camera in camera frame\n pan_direction (3-array): (ax,ay,az) axis about which to rotate camera in camera frame\n pan_rate (float): how quickly to pan camera if not 0\n\n Returns:\n dict: Keyword-mapped stacked np.arrays from the demonstration sequence, corresponding to all image\n modalities used in the playback environment (e.g.: \"image\", \"depth\", \"segmentation_instance\")\n\n \"\"\"\n # First, load env\n self.load_episode_xml(demo_num=demo_num)\n\n # Initialize dict to return\n obs = self.env._get_observation()\n frames_dict = {k.split(f\"{self.camera}_\")[-1]: [] for k in obs if self.camera in k}\n\n # Continue to loop playback steps while there are still frames left in the episode\n while self.step < self.n_steps:\n # Take playback step and add frames\n for k, frame in self.grab_next_frame().items():\n frames_dict[k].append(frame)\n\n # Update camera pose\n self.rotate_camera(point=pan_point, axis=pan_axis, angle=pan_rate)\n\n # Stack all frames and return\n return {k: np.stack(frames) for k, frames in frames_dict.items()}\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.ones_like", "numpy.asarray", "numpy.matmul", "numpy.zeros", "numpy.ones", "numpy.tan", "numpy.eye", "numpy.stack", "numpy.clip", "numpy.all", "numpy.floor" ] ]
stevewongv/DSC-PyTorch
[ "bf2ae87d9cf763678de5d186c3f0184687516b8d" ]
[ "DSC.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\nfrom irnn import irnn\nfrom backbone.resnext.resnext101_regular import ResNeXt101\n\ndef conv1x1(in_channels, out_channels, stride = 1):\n return nn.Conv2d(in_channels,out_channels,kernel_size = 1,\n stride =stride, padding=0,bias=False)\n\ndef conv3x3(in_channels, out_channels, stride = 1):\n return nn.Conv2d(in_channels,out_channels,kernel_size = 3,\n stride =stride, padding=1,bias=False)\n\nclass Spacial_IRNN(nn.Module):\n def __init__(self,in_channels,alpha=1.0):\n super(Spacial_IRNN,self).__init__()\n self.left_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0)\n self.right_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0)\n self.up_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0)\n self.down_weight = nn.Conv2d(in_channels,in_channels,kernel_size=1,stride=1,groups=in_channels,padding=0)\n self.left_weight.weight = nn.Parameter(torch.tensor([[[[alpha]]]]*in_channels))\n self.right_weight.weight = nn.Parameter(torch.tensor([[[[alpha]]]]*in_channels))\n self.up_weight.weight = nn.Parameter(torch.tensor([[[[alpha]]]]*in_channels))\n self.down_weight.weight = nn.Parameter(torch.tensor([[[[alpha]]]]*in_channels))\n\n def forward(self,input):\n return irnn()(input,self.up_weight.weight,self.right_weight.weight,self.down_weight.weight,self.left_weight.weight, self.up_weight.bias,self.right_weight.bias,self.down_weight.bias,self.left_weight.bias)\n\nclass Attention(nn.Module):\n def __init__(self,in_channels):\n super(Attention,self).__init__()\n self.out_channels = int(in_channels/2)\n self.conv1 = nn.Conv2d(in_channels,self.out_channels,kernel_size=3,padding=1,stride=1)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(self.out_channels,self.out_channels,kernel_size=3,padding=1,stride=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(self.out_channels,4,kernel_size=1,padding=0,stride=1)\n self.sigmod = nn.Sigmoid()\n \n def forward(self,x):\n out = self.conv1(x)\n out = self.relu1(out)\n out = self.conv2(out)\n out = self.relu2(out)\n out = self.conv3(out)\n out = self.sigmod(out)\n return out\n\nclass DSC_Module(nn.Module):\n def __init__(self,in_channels,out_channels,attention=1,alpha=1.0):\n super(DSC_Module,self).__init__()\n self.out_channels = out_channels\n self.irnn1 = Spacial_IRNN(self.out_channels,alpha)\n self.irnn2 = Spacial_IRNN(self.out_channels,alpha)\n self.conv_in = conv1x1(in_channels,in_channels)\n self.conv2 = conv1x1(in_channels*4,in_channels)\n self.conv3 = conv1x1(in_channels*4,in_channels)\n self.relu2 = nn.ReLU(True)\n self.attention = attention\n if self.attention:\n self.attention_layer = Attention(in_channels)\n \n \n \n def forward(self,x):\n if self.attention:\n weight = self.attention_layer(x)\n out = self.conv_in(x)\n top_up,top_right,top_down,top_left = self.irnn1(out)\n \n # direction attention\n if self.attention:\n top_up.mul(weight[:,0:1,:,:])\n top_right.mul(weight[:,1:2,:,:])\n top_down.mul(weight[:,2:3,:,:])\n top_left.mul(weight[:,3:4,:,:])\n out = torch.cat([top_up,top_right,top_down,top_left],dim=1)\n out = self.conv2(out)\n top_up,top_right,top_down,top_left = self.irnn2(out)\n \n # direction attention\n if self.attention:\n top_up.mul(weight[:,0:1,:,:])\n top_right.mul(weight[:,1:2,:,:])\n top_down.mul(weight[:,2:3,:,:])\n top_left.mul(weight[:,3:4,:,:])\n \n out = 
torch.cat([top_up,top_right,top_down,top_left],dim=1)\n out = self.conv3(out)\n out = self.relu2(out)\n \n return out\n\nclass LayerConv(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride, padding, relu):\n super(LayerConv, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size,\n stride=stride, padding=padding)\n self.relu = nn.ReLU() if relu else None\n\n def forward(self, x):\n x = self.conv(x)\n if self.relu is not None:\n x = self.relu(x)\n\n return x\n\n\n\n\nclass Predict(nn.Module):\n def __init__(self, in_planes=32, out_planes=1, kernel_size=1):\n super(Predict, self).__init__()\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size)\n\n def forward(self, x):\n y = self.conv(x)\n\n return y\n\nclass DSC(nn.Module):\n def __init__(self):\n super(DSC,self).__init__()\n\n resnext = ResNeXt101()\n self.layer0 = resnext.layer0\n self.layer1 = resnext.layer1\n self.layer2 = resnext.layer2\n self.layer3 = resnext.layer3\n self.layer4 = resnext.layer4\n\n\n self.layer4_conv1 = LayerConv(2048, 512, 7, 1, 3, True)\n self.layer4_conv2 = LayerConv(512, 512, 7, 1, 3, True)\n self.layer4_dsc = DSC_Module(512, 512)\n self.layer4_conv3 = LayerConv(1024, 32, 1, 1, 0, False)\n\n self.layer3_conv1 = LayerConv(1024, 256, 5, 1, 2, True)\n self.layer3_conv2 = LayerConv(256, 256, 5, 1, 2, True)\n self.layer3_dsc = DSC_Module(256, 256)\n self.layer3_conv3 = LayerConv(512, 32, 1, 1, 0, False)\n\n self.layer2_conv1 = LayerConv(512, 128, 5, 1, 2, True)\n self.layer2_conv2 = LayerConv(128, 128, 5, 1, 2, True)\n self.layer2_dsc = DSC_Module(128, 128)\n self.layer2_conv3 = LayerConv(256, 32, 1, 1, 0, False)\n\n self.layer1_conv1 = LayerConv(256, 64, 3, 1, 1, True)\n self.layer1_conv2 = LayerConv(64, 64, 3, 1, 1, True)\n self.layer1_dsc = DSC_Module(64, 64,alpha=0.8)\n self.layer1_conv3 = LayerConv(128, 32, 1, 1, 0, False)\n\n self.layer0_conv1 = LayerConv(64, 64, 3, 1, 1, True)\n self.layer0_conv2 = LayerConv(64, 64, 3, 1, 1, True)\n self.layer0_dsc = DSC_Module(64, 64,alpha=0.8)\n self.layer0_conv3 = LayerConv(128, 32, 1, 1, 0, False)\n\n self.relu = nn.ReLU()\n\n self.global_conv = LayerConv(160, 32, 1, 1, 0, True)\n\n self.layer4_predict = Predict(32, 1, 1)\n self.layer3_predict_ori = Predict(32, 1, 1)\n self.layer3_predict = Predict(2, 1, 1)\n self.layer2_predict_ori = Predict(32, 1, 1)\n self.layer2_predict = Predict(3, 1, 1)\n self.layer1_predict_ori = Predict(32, 1, 1)\n self.layer1_predict = Predict(4, 1, 1)\n self.layer0_predict_ori = Predict(32, 1, 1)\n self.layer0_predict = Predict(5, 1, 1)\n self.global_predict = Predict(32, 1, 1)\n self.fusion_predict = Predict(6, 1, 1)\n\n\n def forward(self, x):\n layer0 = self.layer0(x)\n layer1 = self.layer1(layer0)\n layer2 = self.layer2(layer1)\n layer3 = self.layer3(layer2)\n layer4 = self.layer4(layer3)\n\n layer4_conv1 = self.layer4_conv1(layer4)\n layer4_conv2 = self.layer4_conv2(layer4_conv1)\n layer4_dsc = self.layer4_dsc(layer4_conv2)\n layer4_context = torch.cat((layer4_conv2, layer4_dsc), 1)\n layer4_conv3 = self.layer4_conv3(layer4_context)\n layer4_up = F.upsample(layer4_conv3, size=x.size()[2:], mode='bilinear', align_corners=True)\n layer4_up = self.relu(layer4_up)\n\n layer3_conv1 = self.layer3_conv1(layer3)\n layer3_conv2 = self.layer3_conv2(layer3_conv1)\n layer3_dsc = self.layer3_dsc(layer3_conv2)\n layer3_context = torch.cat((layer3_conv2, layer3_dsc), 1)\n layer3_conv3 = self.layer3_conv3(layer3_context)\n layer3_up = F.upsample(layer3_conv3, 
size=x.size()[2:], mode='bilinear', align_corners=True)\n layer3_up = self.relu(layer3_up)\n\n layer2_conv1 = self.layer2_conv1(layer2)\n layer2_conv2 = self.layer2_conv2(layer2_conv1)\n layer2_dsc = self.layer2_dsc(layer2_conv2)\n layer2_context = torch.cat((layer2_conv2, layer2_dsc), 1)\n layer2_conv3 = self.layer2_conv3(layer2_context)\n layer2_up = F.upsample(layer2_conv3, size=x.size()[2:], mode='bilinear', align_corners=True)\n layer2_up = self.relu(layer2_up)\n\n layer1_conv1 = self.layer1_conv1(layer1)\n layer1_conv2 = self.layer1_conv2(layer1_conv1)\n layer1_dsc = self.layer1_dsc(layer1_conv2)\n layer1_context = torch.cat((layer1_conv2, layer1_dsc), 1)\n layer1_conv3 = self.layer1_conv3(layer1_context)\n layer1_up = F.upsample(layer1_conv3, size=x.size()[2:], mode='bilinear', align_corners=True)\n layer1_up = self.relu(layer1_up)\n\n layer0_conv1 = self.layer0_conv1(layer0)\n layer0_conv2 = self.layer0_conv2(layer0_conv1)\n layer0_dsc = self.layer0_dsc(layer0_conv2)\n layer0_context = torch.cat((layer0_conv2, layer0_dsc), 1)\n layer0_conv3 = self.layer0_conv3(layer0_context)\n layer0_up = F.upsample(layer0_conv3, size=x.size()[2:], mode='bilinear', align_corners=True)\n layer0_up = self.relu(layer0_up)\n\n global_concat = torch.cat((layer0_up, layer1_up, layer2_up, layer3_up, layer4_up), 1)\n global_conv = self.global_conv(global_concat)\n\n layer4_predict = self.layer4_predict(layer4_up)\n\n layer3_predict_ori = self.layer3_predict_ori(layer3_up)\n layer3_concat = torch.cat((layer3_predict_ori, layer4_predict), 1)\n layer3_predict = self.layer3_predict(layer3_concat)\n\n layer2_predict_ori = self.layer2_predict_ori(layer2_up)\n layer2_concat = torch.cat((layer2_predict_ori, layer3_predict_ori, layer4_predict), 1)\n layer2_predict = self.layer2_predict(layer2_concat)\n\n layer1_predict_ori = self.layer1_predict_ori(layer1_up)\n layer1_concat = torch.cat((layer1_predict_ori, layer2_predict_ori, layer3_predict_ori, layer4_predict), 1)\n layer1_predict = self.layer1_predict(layer1_concat)\n\n layer0_predict_ori = self.layer0_predict_ori(layer0_up)\n layer0_concat = torch.cat((layer0_predict_ori, layer1_predict_ori, layer2_predict_ori,\n layer3_predict_ori, layer4_predict), 1)\n layer0_predict = self.layer0_predict(layer0_concat)\n\n global_predict = self.global_predict(global_conv)\n\n # fusion\n fusion_concat = torch.cat((layer0_predict, layer1_predict, layer2_predict, layer3_predict,\n layer4_predict, global_predict), 1)\n fusion_predict = self.fusion_predict(fusion_concat)\n\n\n return layer4_predict, layer3_predict, layer2_predict, layer1_predict, layer0_predict, \\\n global_predict, fusion_predict\n \n" ]
[ [ "torch.cat", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.tensor" ] ]
pk1994/mlflow
[ "495946edb5d3a2837304a2318de3a70c47555112" ]
[ "mlflow/tracking/fluent.py" ]
[ "\"\"\"\nInternal module implementing the fluent API, allowing management of an active\nMLflow run. This module is exposed to users at the top-level :py:mod:`mlflow` module.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\n\nimport atexit\nimport time\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom mlflow.entities import Run, RunStatus, Param, RunTag, Metric, ViewType\nfrom mlflow.entities.lifecycle_stage import LifecycleStage\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.tracking import artifact_utils\nfrom mlflow.tracking.context import registry as context_registry\nfrom mlflow.utils import env\nfrom mlflow.utils.databricks_utils import is_in_databricks_notebook, get_notebook_id\nfrom mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID, MLFLOW_RUN_NAME\nfrom mlflow.utils.validation import _validate_run_id\n\n_EXPERIMENT_ID_ENV_VAR = \"MLFLOW_EXPERIMENT_ID\"\n_EXPERIMENT_NAME_ENV_VAR = \"MLFLOW_EXPERIMENT_NAME\"\n_RUN_ID_ENV_VAR = \"MLFLOW_RUN_ID\"\n_active_run_stack = []\n_active_experiment_id = None\n\nSEARCH_MAX_RESULTS_PANDAS = 100000\nNUM_RUNS_PER_PAGE_PANDAS = 10000\n\n_logger = logging.getLogger(__name__)\n\n\ndef set_experiment(experiment_name):\n \"\"\"\n Set given experiment as active experiment. If experiment does not exist, create an experiment\n with provided name.\n\n :param experiment_name: Name of experiment to be activated.\n \"\"\"\n client = MlflowClient()\n experiment = client.get_experiment_by_name(experiment_name)\n exp_id = experiment.experiment_id if experiment else None\n if exp_id is None: # id can be 0\n print(\"INFO: '{}' does not exist. Creating a new experiment\".format(experiment_name))\n exp_id = client.create_experiment(experiment_name)\n elif experiment.lifecycle_stage == LifecycleStage.DELETED:\n raise MlflowException(\n \"Cannot set a deleted experiment '%s' as the active experiment.\"\n \" You can restore the experiment, or permanently delete the \"\n \" experiment to create a new one.\" % experiment.name)\n global _active_experiment_id\n _active_experiment_id = exp_id\n\n\nclass ActiveRun(Run): # pylint: disable=W0223\n \"\"\"Wrapper around :py:class:`mlflow.entities.Run` to enable using Python ``with`` syntax.\"\"\"\n\n def __init__(self, run):\n Run.__init__(self, run.info, run.data)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED\n end_run(RunStatus.to_string(status))\n return exc_type is None\n\n\ndef start_run(run_id=None, experiment_id=None, run_name=None, nested=False):\n \"\"\"\n Start a new MLflow run, setting it as the active run under which metrics and parameters\n will be logged. The return value can be used as a context manager within a ``with`` block;\n otherwise, you must call ``end_run()`` to terminate the current run.\n\n If you pass a ``run_id`` or the ``MLFLOW_RUN_ID`` environment variable is set,\n ``start_run`` attempts to resume a run with the specified run ID and\n other parameters are ignored. ``run_id`` takes precedence over ``MLFLOW_RUN_ID``.\n\n MLflow sets a variety of default tags on the run, as defined in\n :ref:`MLflow system tags <system_tags>`.\n\n :param run_id: If specified, get the run with the specified UUID and log parameters\n and metrics under that run. The run's end time is unset and its status\n is set to running, but the run's other attributes (``source_version``,\n ``source_type``, etc.) 
are not changed.\n :param experiment_id: ID of the experiment under which to create the current run (applicable\n only when ``run_id`` is not specified). If ``experiment_id`` argument\n is unspecified, will look for valid experiment in the following order:\n activated using ``set_experiment``, ``MLFLOW_EXPERIMENT_NAME``\n environment variable, ``MLFLOW_EXPERIMENT_ID`` environment variable,\n or the default experiment as defined by the tracking server.\n :param run_name: Name of new run (stored as a ``mlflow.runName`` tag).\n Used only when ``run_id`` is unspecified.\n :param nested: Controls whether run is nested in parent run. ``True`` creates a nest run.\n :return: :py:class:`mlflow.ActiveRun` object that acts as a context manager wrapping\n the run's state.\n \"\"\"\n global _active_run_stack\n # back compat for int experiment_id\n experiment_id = str(experiment_id) if isinstance(experiment_id, int) else experiment_id\n if len(_active_run_stack) > 0 and not nested:\n raise Exception((\"Run with UUID {} is already active. To start a nested \" +\n \"run, call start_run with nested=True\").format(\n _active_run_stack[0].info.run_id))\n if run_id:\n existing_run_id = run_id\n elif _RUN_ID_ENV_VAR in os.environ:\n existing_run_id = os.environ[_RUN_ID_ENV_VAR]\n del os.environ[_RUN_ID_ENV_VAR]\n else:\n existing_run_id = None\n if existing_run_id:\n _validate_run_id(existing_run_id)\n active_run_obj = MlflowClient().get_run(existing_run_id)\n # Check to see if experiment_id from environment matches experiment_id from set_experiment()\n if (_active_experiment_id is not None and\n _active_experiment_id != active_run_obj.info.experiment_id):\n raise MlflowException(\"Cannot start run with ID {} because active run ID \"\n \"does not match environment run ID. 
Make sure --experiment-name \"\n \"or --experiment-id matches experiment set with \"\n \"set_experiment(), or just use command-line \"\n \"arguments\".format(existing_run_id))\n # Check to see if current run isn't deleted\n if active_run_obj.info.lifecycle_stage == LifecycleStage.DELETED:\n raise MlflowException(\"Cannot start run with ID {} because it is in the \"\n \"deleted state.\".format(existing_run_id))\n else:\n if len(_active_run_stack) > 0:\n parent_run_id = _active_run_stack[-1].info.run_id\n else:\n parent_run_id = None\n\n exp_id_for_run = experiment_id if experiment_id is not None else _get_experiment_id()\n\n user_specified_tags = {}\n if parent_run_id is not None:\n user_specified_tags[MLFLOW_PARENT_RUN_ID] = parent_run_id\n if run_name is not None:\n user_specified_tags[MLFLOW_RUN_NAME] = run_name\n\n tags = context_registry.resolve_tags(user_specified_tags)\n\n active_run_obj = MlflowClient().create_run(\n experiment_id=exp_id_for_run,\n tags=tags\n )\n\n _active_run_stack.append(ActiveRun(active_run_obj))\n return _active_run_stack[-1]\n\n\ndef end_run(status=RunStatus.to_string(RunStatus.FINISHED)):\n \"\"\"End an active MLflow run (if there is one).\"\"\"\n global _active_run_stack\n if len(_active_run_stack) > 0:\n MlflowClient().set_terminated(_active_run_stack[-1].info.run_id, status)\n # Clear out the global existing run environment variable as well.\n env.unset_variable(_RUN_ID_ENV_VAR)\n _active_run_stack.pop()\n\n\natexit.register(end_run)\n\n\ndef active_run():\n \"\"\"Get the currently active ``Run``, or None if no such run exists.\"\"\"\n return _active_run_stack[-1] if len(_active_run_stack) > 0 else None\n\n\ndef log_param(key, value):\n \"\"\"\n Log a parameter under the current run, creating a run if necessary.\n\n :param key: Parameter name (string)\n :param value: Parameter value (string, but will be string-ified if not)\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().log_param(run_id, key, value)\n\n\ndef set_tag(key, value):\n \"\"\"\n Set a tag under the current run, creating a run if necessary.\n\n :param key: Tag name (string)\n :param value: Tag value (string, but will be string-ified if not)\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().set_tag(run_id, key, value)\n\n\ndef delete_tag(key):\n \"\"\"\n Delete a tag from a run. This is irreversible.\n\n :param key: Name of the tag\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().delete_tag(run_id, key)\n\n\ndef log_metric(key, value, step=None):\n \"\"\"\n Log a metric under the current run, creating a run if necessary.\n\n :param key: Metric name (string).\n :param value: Metric value (float). Note that some special values such as +/- Infinity may be\n replaced by other values depending on the store. For example, sFor example, the\n SQLAlchemy store replaces +/- Inf with max / min float values.\n :param step: Metric step (int). Defaults to zero if unspecified.\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().log_metric(run_id, key, value, int(time.time() * 1000), step or 0)\n\n\ndef log_metrics(metrics, step=None):\n \"\"\"\n Log multiple metrics for the current run, starting a run if no runs are active.\n\n :param metrics: Dictionary of metric_name: String -> value: Float. 
Note that some special values\n such as +/- Infinity may be replaced by other values depending on the store.\n For example, sql based store may replace +/- Inf with max / min float values.\n :param step: A single integer step at which to log the specified\n Metrics. If unspecified, each metric is logged at step zero.\n\n :returns: None\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n timestamp = int(time.time() * 1000)\n metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()]\n MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[])\n\n\ndef log_params(params):\n \"\"\"\n Log a batch of params for the current run, starting a run if no runs are active.\n\n :param params: Dictionary of param_name: String -> value: (String, but will be string-ified if\n not)\n :returns: None\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n params_arr = [Param(key, str(value)) for key, value in params.items()]\n MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[])\n\n\ndef set_tags(tags):\n \"\"\"\n Log a batch of tags for the current run, starting a run if no runs are active.\n\n :param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if\n not)\n :returns: None\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]\n MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)\n\n\ndef log_artifact(local_path, artifact_path=None):\n \"\"\"\n Log a local file or directory as an artifact of the currently active run.\n\n :param local_path: Path to the file to write.\n :param artifact_path: If provided, the directory in ``artifact_uri`` to write to.\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().log_artifact(run_id, local_path, artifact_path)\n\n\ndef log_artifacts(local_dir, artifact_path=None):\n \"\"\"\n Log all the contents of a local directory as artifacts of the run.\n\n :param local_dir: Path to the directory of files to write.\n :param artifact_path: If provided, the directory in ``artifact_uri`` to write to.\n \"\"\"\n run_id = _get_or_start_run().info.run_id\n MlflowClient().log_artifacts(run_id, local_dir, artifact_path)\n\n\ndef create_experiment(name, artifact_location=None):\n \"\"\"\n Create an experiment.\n\n :param name: The experiment name. 
Must be unique.\n :param artifact_location: The location to store run artifacts.\n If not provided, the server picks an appropriate default.\n :return: Integer ID of the created experiment.\n \"\"\"\n return MlflowClient().create_experiment(name, artifact_location)\n\n\ndef delete_experiment(experiment_id):\n \"\"\"\n Delete an experiment from the backend store.\n\n :param experiment_id: The experiment ID returned from ``create_experiment``.\n \"\"\"\n MlflowClient().delete_experiment(experiment_id)\n\n\ndef delete_run(run_id):\n \"\"\"\n Deletes a run with the given ID.\n\n :param run_id: Unique identifier for the run to delete.\n \"\"\"\n MlflowClient().delete_run(run_id)\n\n\ndef get_artifact_uri(artifact_path=None):\n \"\"\"\n Get the absolute URI of the specified artifact in the currently active run.\n If `path` is not specified, the artifact root URI of the currently active\n run will be returned; calls to ``log_artifact`` and ``log_artifacts`` write\n artifact(s) to subdirectories of the artifact root URI.\n\n :param artifact_path: The run-relative artifact path for which to obtain an absolute URI.\n For example, \"path/to/artifact\". If unspecified, the artifact root URI\n for the currently active run will be returned.\n :return: An *absolute* URI referring to the specified artifact or the currently adtive run's\n artifact root. For example, if an artifact path is provided and the currently active\n run uses an S3-backed store, this may be a uri of the form\n ``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path\n is not provided and the currently active run uses an S3-backed store, this may be a\n URI of the form ``s3://<bucket_name>/path/to/artifact/root``.\n \"\"\"\n return artifact_utils.get_artifact_uri(run_id=_get_or_start_run().info.run_id,\n artifact_path=artifact_path)\n\n\ndef search_runs(experiment_ids=None, filter_string=\"\", run_view_type=ViewType.ACTIVE_ONLY,\n max_results=SEARCH_MAX_RESULTS_PANDAS, order_by=None):\n \"\"\"\n Get a pandas DataFrame of runs that fit the search criteria.\n\n :param experiment_ids: List of experiment IDs. None will default to the active experiment.\n :param filter_string: Filter query string, defaults to searching all runs.\n :param run_view_type: one of enum values ``ACTIVE_ONLY``, ``DELETED_ONLY``, or ``ALL`` runs\n defined in :py:class:`mlflow.entities.ViewType`.\n :param max_results: The maximum number of runs to put in the dataframe. Default is 100,000\n to avoid causing out-of-memory issues on the user's machine.\n :param order_by: List of columns to order by (e.g., \"metrics.rmse\"). The ``order_by`` column\n can contain an optional ``DESC`` or ``ASC`` value. The default is ``ASC``.\n The default ordering is to sort by ``start_time DESC``, then ``run_id``.\n\n :return: A pandas.DataFrame of runs, where each metric, parameter, and tag\n are expanded into their own columns named metrics.*, params.*, and tags.*\n respectively. 
For runs that don't have a particular metric, parameter, or tag, their\n value will be (NumPy) Nan, None, or None respectively.\n \"\"\"\n if not experiment_ids:\n experiment_ids = _get_experiment_id()\n runs = _get_paginated_runs(experiment_ids, filter_string, run_view_type, max_results,\n order_by)\n info = {'run_id': [], 'experiment_id': [],\n 'status': [], 'artifact_uri': [],\n 'start_time': [], 'end_time': []}\n params, metrics, tags = ({}, {}, {})\n PARAM_NULL, METRIC_NULL, TAG_NULL = (None, np.nan, None)\n for i, run in enumerate(runs):\n info['run_id'].append(run.info.run_id)\n info['experiment_id'].append(run.info.experiment_id)\n info['status'].append(run.info.status)\n info['artifact_uri'].append(run.info.artifact_uri)\n info['start_time'].append(pd.to_datetime(run.info.start_time, unit=\"ms\", utc=True))\n info['end_time'].append(pd.to_datetime(run.info.end_time, unit=\"ms\", utc=True))\n\n # Params\n param_keys = set(params.keys())\n for key in param_keys:\n if key in run.data.params:\n params[key].append(run.data.params[key])\n else:\n params[key].append(PARAM_NULL)\n new_params = set(run.data.params.keys()) - param_keys\n for p in new_params:\n params[p] = [PARAM_NULL]*i # Fill in null values for all previous runs\n params[p].append(run.data.params[p])\n\n # Metrics\n metric_keys = set(metrics.keys())\n for key in metric_keys:\n if key in run.data.metrics:\n metrics[key].append(run.data.metrics[key])\n else:\n metrics[key].append(METRIC_NULL)\n new_metrics = set(run.data.metrics.keys()) - metric_keys\n for m in new_metrics:\n metrics[m] = [METRIC_NULL]*i\n metrics[m].append(run.data.metrics[m])\n\n # Tags\n tag_keys = set(tags.keys())\n for key in tag_keys:\n if key in run.data.tags:\n tags[key].append(run.data.tags[key])\n else:\n tags[key].append(TAG_NULL)\n new_tags = set(run.data.tags.keys()) - tag_keys\n for t in new_tags:\n tags[t] = [TAG_NULL]*i\n tags[t].append(run.data.tags[t])\n\n data = {}\n data.update(info)\n for key in metrics:\n data['metrics.' + key] = metrics[key]\n for key in params:\n data['params.' + key] = params[key]\n for key in tags:\n data['tags.' 
+ key] = tags[key]\n return pd.DataFrame(data)\n\n\ndef _get_paginated_runs(experiment_ids, filter_string, run_view_type, max_results,\n order_by):\n all_runs = []\n next_page_token = None\n while(len(all_runs) < max_results):\n runs_to_get = max_results-len(all_runs)\n if runs_to_get < NUM_RUNS_PER_PAGE_PANDAS:\n runs = MlflowClient().search_runs(experiment_ids, filter_string, run_view_type,\n runs_to_get, order_by, next_page_token)\n else:\n runs = MlflowClient().search_runs(experiment_ids, filter_string, run_view_type,\n NUM_RUNS_PER_PAGE_PANDAS, order_by, next_page_token)\n all_runs.extend(runs)\n if hasattr(runs, 'token') and runs.token != '' and runs.token is not None:\n next_page_token = runs.token\n else:\n break\n return all_runs\n\n\ndef _get_or_start_run():\n if len(_active_run_stack) > 0:\n return _active_run_stack[-1]\n return start_run()\n\n\ndef _get_experiment_id_from_env():\n experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)\n if experiment_name is not None:\n exp = MlflowClient().get_experiment_by_name(experiment_name)\n return exp.experiment_id if exp else None\n return env.get_env(_EXPERIMENT_ID_ENV_VAR)\n\n\ndef _get_experiment_id():\n # TODO: Replace with None for 1.0, leaving for 0.9.1 release backcompat with existing servers\n deprecated_default_exp_id = \"0\"\n\n return (_active_experiment_id or\n _get_experiment_id_from_env() or\n (is_in_databricks_notebook() and get_notebook_id())) or deprecated_default_exp_id\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
shubhamagarwal92/visdial_conv
[ "237c853ebc72f5d4c072d581e919e1c434a2bf2e" ]
[ "visdialch/vqa_models/mcan/mca.py" ]
[ "# --------------------------------------------------------\n# OpenVQA\n# Written by Yuhao Cui https://github.com/cuiyuhao1996\n# --------------------------------------------------------\n\nfrom visdialch.vqa_models.mcan.fc import FC, MLP\nfrom visdialch.vqa_models.mcan.layer_norm import LayerNorm\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch, math\n\n\n# ------------------------------\n# ---- Multi-Head Attention ----\n# ------------------------------\n\nclass MHAtt(nn.Module):\n def __init__(self, __C):\n super(MHAtt, self).__init__()\n self.__C = __C\n\n self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n\n self.dropout = nn.Dropout(__C.DROPOUT_R)\n\n def forward(self, v, k, q, mask):\n n_batches = q.size(0)\n\n # print(v.size()) # (bs*rounds, proposal/len, emb_size)\n v = self.linear_v(v).view(\n n_batches,\n -1,\n self.__C.MULTI_HEAD,\n int(self.__C.HIDDEN_SIZE/self.__C.MULTI_HEAD)\n ).transpose(1, 2)\n # print(\"Value size: \", v.size()) # (bs*rounds, heads, proposal/len, emb_size)\n\n k = self.linear_k(k).view(\n n_batches,\n -1,\n self.__C.MULTI_HEAD,\n int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)\n ).transpose(1, 2)\n\n q = self.linear_q(q).view(\n n_batches,\n -1,\n self.__C.MULTI_HEAD,\n int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)\n ).transpose(1, 2)\n\n atted = self.att(v, k, q, mask)\n\n # print(atted.size()) # torch.Size([40, 8, 20, 64])\n\n atted = atted.transpose(1, 2).contiguous().view(\n n_batches,\n -1,\n self.__C.HIDDEN_SIZE\n )\n\n atted = self.linear_merge(atted)\n\n return atted\n\n def att(self, value, key, query, mask):\n d_k = query.size(-1)\n\n scores = torch.matmul(\n query, key.transpose(-2, -1)\n ) / math.sqrt(d_k)\n\n # print(scores.size()) # (bs*rounds, heads, proposal/len, proposal/len)\n\n if mask is not None:\n scores = scores.masked_fill(mask, -1e9)\n\n att_map = F.softmax(scores, dim=-1)\n att_map = self.dropout(att_map)\n\n return torch.matmul(att_map, value)\n\n\n# ---------------------------\n# ---- Feed Forward Nets ----\n# ---------------------------\n\nclass FFN(nn.Module):\n def __init__(self, __C):\n super(FFN, self).__init__()\n\n self.mlp = MLP(\n in_size=__C.HIDDEN_SIZE,\n mid_size=__C.FF_SIZE,\n out_size=__C.HIDDEN_SIZE,\n dropout_r=__C.DROPOUT_R,\n use_relu=True\n )\n\n def forward(self, x):\n return self.mlp(x)\n\n\n# ------------------------\n# ---- Self Attention ----\n# ------------------------\n\nclass SA(nn.Module):\n def __init__(self, __C):\n super().__init__()\n\n self.mhatt = MHAtt(__C)\n self.ffn = FFN(__C)\n\n self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n\n self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n\n def forward(self, y, y_mask):\n y = self.norm1(y + self.dropout1(\n self.mhatt(y, y, y, y_mask)\n ))\n\n y = self.norm2(y + self.dropout2(\n self.ffn(y)\n ))\n\n return y\n\n\n# -------------------------------\n# ---- Self Guided Attention ----\n# -------------------------------\n\nclass SGA(nn.Module):\n def __init__(self, __C):\n super(SGA, self).__init__()\n\n self.mhatt1 = MHAtt(__C)\n self.mhatt2 = MHAtt(__C)\n self.ffn = FFN(__C)\n\n self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n\n self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n\n 
self.dropout3 = nn.Dropout(__C.DROPOUT_R)\n self.norm3 = LayerNorm(__C.HIDDEN_SIZE)\n\n def forward(self, x, y, x_mask, y_mask):\n x = self.norm1(x + self.dropout1(\n self.mhatt1(v=x, k=x, q=x, mask=x_mask)\n ))\n\n x = self.norm2(x + self.dropout2(\n self.mhatt2(v=y, k=y, q=x, mask=y_mask)\n ))\n\n x = self.norm3(x + self.dropout3(\n self.ffn(x)\n ))\n\n return x\n\n\n# ------------------------------------------------\n# ---- MAC Layers Cascaded by Encoder-Decoder ----\n# ------------------------------------------------\n\n\n# SA: This is the api open to net.py\nclass MCA_ED(nn.Module):\n def __init__(self, __C):\n super().__init__()\n\n self.enc_list = nn.ModuleList([SA(__C) for _ in range(__C.LAYER)])\n self.dec_list = nn.ModuleList([SGA(__C) for _ in range(__C.LAYER)])\n\n def forward(self, y, x, y_mask, x_mask):\n \"\"\"\n\n :param y: lang (bs*rounds, seq_len, emb_sie)\n :param x: img (bs*rounds, proposals, emb_sie)\n :param y_mask: (bs*rounds, seq_len)\n :param x_mask: (bs*rounds, proposals)\n :return:\n \"\"\"\n\n # print(\"Text size\", y.size())\n # Get encoder last hidden vector\n for enc in self.enc_list:\n y = enc(y, y_mask)\n\n # print(\"Text size after encoding\", y.size())\n # print(\"Img size\", x.size())\n # print(\"Type of y\", type(y))\n # print(y.size())\n # print(x.size())\n # print(x_mask.size())\n # print(y_mask.size())\n # Input encoder last hidden vector\n # And obtain decoder last hidden vectors\n for dec in self.dec_list:\n x = dec(x, y, x_mask, y_mask)\n\n return y, x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.matmul", "torch.nn.functional.softmax" ] ]
jpWang/LiLT
[ "c82d963c2c19df76f97b1ff94ae7da2f83c9dfe1" ]
[ "LiLTfinetune/trainers/xfun_trainer.py" ]
[ "import collections\nimport time\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom transformers.utils import logging\nfrom transformers.file_utils import is_sagemaker_mp_enabled\nfrom transformers.trainer_utils import EvalPrediction, PredictionOutput, speed_metrics, ShardedDDPOption\nfrom transformers.trainer_pt_utils import get_parameter_names\nfrom transformers.optimization import Adafactor, AdamW, get_scheduler\n\nfrom .funsd_trainer import FunsdTrainer\n\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nlogger = logging.get_logger(__name__)\n\n\nclass XfunSerTrainer(FunsdTrainer):\n pass\n\n\nclass XfunReTrainer(FunsdTrainer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.label_names.append(\"relations\")\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n inputs = self._prepare_inputs(inputs)\n\n with torch.no_grad():\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n labels = tuple(inputs.get(name) for name in self.label_names)\n return outputs, labels\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n if not isinstance(dataloader.dataset, collections.abc.Sized):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = (\n prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only\n )\n\n if self.args.deepspeed and not self.args.do_train:\n # no harm, but flagging to the user that deepspeed config is ignored for eval\n # flagging only for when --do_train wasn't passed as only then it's redundant\n logger.info(\"Detected the deepspeed argument but it will not be used for evaluation\")\n\n model = self._wrap_model(self.model, training=False)\n\n # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while\n # ``train`` is running, half it first and then put on device\n if not self.is_in_train and self.args.fp16_full_eval:\n model = model.half().to(self.args.device)\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", batch_size)\n\n model.eval()\n\n self.callback_handler.eval_dataloader = dataloader\n\n re_labels = None\n pred_relations = None\n entities = None\n for step, inputs in enumerate(dataloader):\n outputs, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n re_labels = labels[1] if re_labels is None else re_labels + labels[1]\n pred_relations = (\n outputs.pred_relations if pred_relations is None else pred_relations + outputs.pred_relations\n )\n entities = outputs.entities if entities is None else entities + outputs.entities\n\n self.control = 
self.callback_handler.on_prediction_step(self.args, self.state, self.control)\n\n gt_relations = []\n for b in range(len(re_labels)):\n rel_sent = []\n for head, tail in zip(re_labels[b][\"head\"], re_labels[b][\"tail\"]):\n rel = {}\n rel[\"head_id\"] = head\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = tail\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n\n rel[\"type\"] = 1\n\n rel_sent.append(rel)\n\n gt_relations.append(rel_sent)\n\n re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations))\n\n re_metrics = {\n \"precision\": re_metrics[\"ALL\"][\"p\"],\n \"recall\": re_metrics[\"ALL\"][\"r\"],\n \"f1\": re_metrics[\"ALL\"][\"f1\"],\n }\n re_metrics[f\"{metric_key_prefix}_loss\"] = outputs.loss.mean().item()\n\n metrics = {}\n\n # # Prefix all keys with metric_key_prefix + '_'\n for key in list(re_metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = re_metrics.pop(key)\n else:\n metrics[f\"{key}\"] = re_metrics.pop(key)\n\n return metrics\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n self.args.local_rank = -1\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n self.args.local_rank = torch.distributed.get_rank()\n\n start_time = time.time()\n\n metrics = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)\n metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))\n self.log(metrics)\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n\n return metrics\n\n def create_optimizer(self, speedup_r=4.):\n if self.optimizer is None:\n decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n speedup_parameters = [name for name in get_parameter_names(self.model, []) if 'extractor' in name and 'rel_classifier' not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if n in decay_parameters and n in speedup_parameters],\n \"weight_decay\": self.args.weight_decay,\n \"lr\": self.args.learning_rate *speedup_r,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if n not in decay_parameters and n in speedup_parameters],\n \"weight_decay\": 0.0,\n \"lr\": self.args.learning_rate *speedup_r,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if n in decay_parameters and n not in speedup_parameters],\n \"weight_decay\": self.args.weight_decay,\n \"lr\": self.args.learning_rate,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if n not in decay_parameters and n not in speedup_parameters],\n \"weight_decay\": 0.0,\n \"lr\": self.args.learning_rate,\n },\n ]\n optimizer_cls = Adafactor if self.args.adafactor else AdamW\n if self.args.adafactor:\n optimizer_cls = Adafactor\n optimizer_kwargs = {\"scale_parameter\": False, \"relative_step\": False}\n else:\n optimizer_cls = AdamW\n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n }\n\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n if is_sagemaker_mp_enabled():\n import smdistributed.modelparallel.torch as smp\n self.optimizer = smp.DistributedOptimizer(self.optimizer)\n" ]
[ [ "torch.distributed.get_rank", "torch.cuda.amp.autocast", "torch.no_grad" ] ]
180flyer/StanfordQuadruped
[ "1fd0f8a503aeb08c124813a3e698d5e4df7465b3" ]
[ "pupper/HardwareConfig.py" ]
[ "\"\"\"\nPer-robot configuration file that is particular to each individual robot, not just the type of robot.\n\"\"\"\nimport numpy as np\n\n\nMICROS_PER_RAD = 11.333 * 180.0 / np.pi # Must be calibrated\nNEUTRAL_ANGLE_DEGREES = np.array(\n [[-7, -2, 2, -1], [18, 55, 33, 50], [-46, -37, -40, -39]]\n)\n\nPS4_COLOR = {\"red\": 0, \"blue\": 0, \"green\": 255}\nPS4_DEACTIVATED_COLOR = {\"red\": 0, \"blue\": 0, \"green\": 50}" ]
[ [ "numpy.array" ] ]
twuebi/finalfusion-python
[ "1771c77e7e09822137b4889a6ce96d0623664e7a" ]
[ "tests/test_similarity.py" ]
[ "import pytest\nimport numpy\n\nSIMILARITY_ORDER_STUTTGART_10 = [\n \"Karlsruhe\",\n \"Mannheim\",\n \"München\",\n \"Darmstadt\",\n \"Heidelberg\",\n \"Wiesbaden\",\n \"Kassel\",\n \"Düsseldorf\",\n \"Leipzig\",\n \"Berlin\",\n]\n\n\nSIMILARITY_ORDER = [\n \"Potsdam\",\n \"Hamburg\",\n \"Leipzig\",\n \"Dresden\",\n \"München\",\n \"Düsseldorf\",\n \"Bonn\",\n \"Stuttgart\",\n \"Weimar\",\n \"Berlin-Charlottenburg\",\n \"Rostock\",\n \"Karlsruhe\",\n \"Chemnitz\",\n \"Breslau\",\n \"Wiesbaden\",\n \"Hannover\",\n \"Mannheim\",\n \"Kassel\",\n \"Köln\",\n \"Danzig\",\n \"Erfurt\",\n \"Dessau\",\n \"Bremen\",\n \"Charlottenburg\",\n \"Magdeburg\",\n \"Neuruppin\",\n \"Darmstadt\",\n \"Jena\",\n \"Wien\",\n \"Heidelberg\",\n \"Dortmund\",\n \"Stettin\",\n \"Schwerin\",\n \"Neubrandenburg\",\n \"Greifswald\",\n \"Göttingen\",\n \"Braunschweig\",\n \"Berliner\",\n \"Warschau\",\n \"Berlin-Spandau\",\n]\n\n\ndef test_similarity_berlin_40(similarity_fifu):\n for idx, sim in enumerate(similarity_fifu.word_similarity(\"Berlin\", 40)):\n assert SIMILARITY_ORDER[idx] == sim.word\n\n\ndef test_similarity_stuttgart_10(similarity_fifu):\n for idx, sim in enumerate(similarity_fifu.word_similarity(\"Stuttgart\", 10)):\n assert SIMILARITY_ORDER_STUTTGART_10[idx] == sim.word\n\n\ndef test_embedding_similarity_stuttgart_10(similarity_fifu):\n stuttgart = similarity_fifu.embedding(\"Stuttgart\")\n sims = similarity_fifu.embedding_similarity(stuttgart, limit=10)\n assert sims[0].word == \"Stuttgart\"\n\n for idx, sim in enumerate(sims[1:]):\n assert SIMILARITY_ORDER_STUTTGART_10[idx] == sim.word\n\n for idx, sim in enumerate(similarity_fifu.embedding_similarity(stuttgart, skip={\"Stuttgart\"}, limit=10)):\n assert SIMILARITY_ORDER_STUTTGART_10[idx] == sim.word\n\n\ndef test_embedding_similarity_incompatible_shapes(similarity_fifu):\n incompatible_embed = numpy.ones(1, dtype=numpy.float32)\n with pytest.raises(ValueError):\n similarity_fifu.embedding_similarity(incompatible_embed)\n" ]
[ [ "numpy.ones" ] ]
eyal-orbach/Details2Story-XLNetPlanCloze
[ "47fd98ca2d59f3d4113d2de510fe955fff49efb9" ]
[ "transformer_base.py" ]
[ "import logging\nimport os\nimport random\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\n\nfrom transformers import (\n AdamW,\n AutoConfig,\n AutoModel,\n AutoModelForQuestionAnswering,\n AutoModelForSequenceClassification,\n AutoModelWithLMHead,\n AutoTokenizer,\n)\nfrom transformers.modeling_auto import MODEL_MAPPING, ALL_PRETRAINED_MODEL_ARCHIVE_MAP, AutoModelForPreTraining, AutoModelForTokenClassification\nfrom transformers.optimization import get_linear_schedule_with_warmup\n\nlogger = logging.getLogger(__name__)\n\n\nALL_MODELS = tuple(ALL_PRETRAINED_MODEL_ARCHIVE_MAP)\nMODEL_CLASSES = tuple(m.model_type for m in MODEL_MAPPING)\n\nMODEL_MODES = {\n \"base\": AutoModel,\n \"sequence-classification\": AutoModelForSequenceClassification,\n \"question-answering\": AutoModelForQuestionAnswering,\n \"pretraining\": AutoModelForPreTraining,\n \"token-classification\": AutoModelForTokenClassification,\n \"language-modeling\": AutoModelWithLMHead,\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\nclass BaseTransformer(pl.LightningModule):\n def __init__(self, hparams, num_labels=None, mode=\"base\"):\n \"Initialize a model.\"\n\n super(BaseTransformer, self).__init__()\n self.hparams = hparams\n self.hparams.model_type = self.hparams.model_type.lower()\n config = AutoConfig.from_pretrained(\n self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,\n **({\"num_labels\": num_labels} if num_labels is not None else {}),\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,\n do_lower_case=self.hparams.do_lower_case,\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n config=config\n )\n model = MODEL_MODES[mode].from_pretrained(\n self.hparams.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.hparams.model_name_or_path),\n config=config,\n cache_dir=self.hparams.cache_dir if self.hparams.cache_dir else None,\n )\n self.config, self.tokenizer, self.model = config, tokenizer, model\n\n def is_logger(self):\n return self.trainer.proc_rank <= 0\n\n def configure_optimizers(self):\n \"Prepare optimizer and schedule (linear warmup and decay)\"\n\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n self.opt = optimizer\n return [optimizer]\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):\n if self.trainer.use_tpu:\n xm.optimizer_step(optimizer)\n else:\n optimizer.step()\n optimizer.zero_grad()\n self.lr_scheduler.step()\n\n def get_tqdm_dict(self):\n tqdm_dict = {\"loss\": \"{:.3f}\".format(self.trainer.avg_loss), \"lr\": self.lr_scheduler.get_last_lr()[-1]}\n\n return tqdm_dict\n\n def test_step(self, batch, batch_nb):\n return self.validation_step(batch, batch_nb)\n\n def test_end(self, outputs):\n return self.validation_end(outputs)\n\n def 
train_dataloader(self):\n train_batch_size = self.hparams.train_batch_size\n dataloader = self.load_dataset(\"train\", train_batch_size)\n\n t_total = (\n (len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.n_gpu)))\n // self.hparams.gradient_accumulation_steps\n * float(self.hparams.num_train_epochs)\n )\n scheduler = get_linear_schedule_with_warmup(\n self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total\n )\n self.lr_scheduler = scheduler\n return dataloader\n\n def val_dataloader(self):\n return self.load_dataset(\"dev\", self.hparams.eval_batch_size)\n\n def test_dataloader(self):\n return self.load_dataset(\"test\", self.hparams.eval_batch_size)\n\n def _feature_file(self, mode):\n return os.path.join(\n self.hparams.data_dir,\n \"cached_{}_{}_{}\".format(\n mode,\n list(filter(None, self.hparams.model_name_or_path.split(\"/\"))).pop(),\n str(self.hparams.max_seq_length),\n ),\n )\n\n @staticmethod\n def add_model_specific_args(parser, root_dir):\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3, type=int, help=\"Total number of training epochs to perform.\"\n )\n\n parser.add_argument(\"--train_batch_size\", default=32, type=int)\n parser.add_argument(\"--eval_batch_size\", default=32, type=int)\n\n def configure_ddp(self, model, device_ids):\n \"\"\"\n Configure to use a single GPU set on local rank.\n\n Must return model.\n :param model:\n :param device_ids:\n :return: DDP wrapped model\n \"\"\"\n device_id = f\"cuda:{os.environ['LOCAL_RANK']}\"\n\n model = LightningDistributedDataParallel(\n model,\n device_ids=[device_id],\n output_device=device_id,\n find_unused_parameters=True,\n )\n\n return model\n\n def init_ddp_connection(self, proc_rank, world_size):\n \"\"\"\n Connect all procs in the world using the env:// init\n Use the first node as the root address\n \"\"\"\n\n import torch.distributed as dist\n\n dist.init_process_group(\"nccl\", init_method=\"env://\")\n\n # Explicitly setting seed to make sure that models created in two processes\n # start from same random weights and biases.\n # TODO(jeffling): I'm pretty sure we need to set other seeds as well?\n 
print(f\"Setting torch manual seed to {FIXED_SEED} for DDP.\")\n torch.manual_seed(FIXED_SEED)\n\n\nclass LoggingCallback(pl.Callback):\n def on_validation_end(self, trainer, pl_module):\n logger.info(\"***** Validation results *****\")\n if pl_module.is_logger():\n metrics = trainer.callback_metrics\n # Log results\n for key in sorted(metrics):\n if key not in [\"log\", \"progress_bar\"]:\n logger.info(\"{} = {}\\n\".format(key, str(metrics[key])))\n\n def on_test_end(self, trainer, pl_module):\n logger.info(\"***** Test results *****\")\n\n if pl_module.is_logger():\n metrics = trainer.callback_metrics\n\n # Log and save results to file\n # output_test_results_file = os.path.join(pl_module.hparams.output_dir, \"test_results.txt\")\n output_test_results_file = os.path.join(\"bartresults\", \"test_results.txt\")\n with open(output_test_results_file, \"w\") as writer:\n for key in sorted(metrics):\n if key not in [\"log\", \"progress_bar\"]:\n logger.info(\"{} = {}\\n\".format(key, str(metrics[key])))\n writer.write(\"{} = {}\\n\".format(key, str(metrics[key])))\n\n\ndef add_generic_args(parser, root_dir):\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n\n parser.add_argument(\"--n_gpu\", type=int, default=1)\n parser.add_argument(\"--n_tpu_cores\", type=int, default=0)\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_predict\", action=\"store_true\", help=\"Whether to run predictions on the test set.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n\ndef generic_train(model, args, logger=None):\n # init model\n set_seed(args)\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n filepath=args.output_dir, prefix=\"checkpoint\", monitor=\"val_loss\", mode=\"min\", save_top_k=5\n )\n\n train_params = dict(\n accumulate_grad_batches=args.gradient_accumulation_steps,\n gpus=args.set_gpu,\n max_epochs=args.num_train_epochs,\n 
early_stop_callback=False,\n gradient_clip_val=args.max_grad_norm,\n checkpoint_callback=checkpoint_callback,\n callbacks=[LoggingCallback()],\n )\n\n if logger is not None:\n train_params['logger'] = logger\n\n if args.fp16:\n train_params[\"use_amp\"] = args.fp16\n train_params[\"amp_level\"] = args.fp16_opt_level\n\n if args.n_tpu_cores > 0:\n global xm\n import torch_xla.core.xla_model as xm\n\n train_params[\"num_tpu_cores\"] = args.n_tpu_cores\n train_params[\"gpus\"] = 0\n\n if args.n_gpu > 1:\n train_params[\"distributed_backend\"] = \"dp\"\n\n trainer = pl.Trainer(**train_params)\n\n if args.do_train:\n trainer.fit(model)\n\n return trainer\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.cuda.manual_seed_all", "torch.distributed.init_process_group" ] ]
DavidJanz/emukit
[ "7421cb7f4ed831b6581f3686806521ff7fb97e74" ]
[ "tests/emukit/test_acquisitions.py" ]
[ "from collections import namedtuple\n\nimport numpy as np\nimport pytest\nimport pytest_lazyfixture\nfrom scipy.optimize import check_grad\n\nfrom bayesian_optimization.test_entropy_search import entropy_search_acquisition\nfrom emukit.bayesian_optimization.acquisitions import ExpectedImprovement, NegativeLowerConfidenceBound, EntropySearch\nfrom emukit.core.acquisition import IntegratedHyperParameterAcquisition\nfrom emukit.bayesian_optimization.acquisitions.entropy_search import MultiInformationSourceEntropySearch\nfrom emukit.bayesian_optimization.acquisitions.log_acquisition import LogAcquisition\nfrom emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter\nfrom emukit.core.acquisition.acquisition_per_cost import CostAcquisition\n\nfrom emukit.bayesian_optimization.acquisitions import ProbabilityOfImprovement\nfrom emukit.experimental_design.model_based.acquisitions import ModelVariance, IntegratedVarianceReduction\nfrom emukit.model_wrappers.gpy_quadrature_wrappers import convert_gpy_model_to_emukit_model\nfrom emukit.quadrature.acquisitions import SquaredCorrelation\nfrom emukit.quadrature.methods import VanillaBayesianQuadrature\n\n\n# This is the step sized used by scipy.optimize.check_grad to calculate the numerical gradient\ngradient_check_step_size = 1e-8\ndefault_grad_tol = 1e-7\n# rmse_gradient_tolerance is the maximum allowed root mean squared error as calculated by scipy.optimize.check_grad\n# before the test will fail\nacquisition_test_tuple = namedtuple('AcquisitionTest', ['name', 'has_gradients', 'rmse_gradient_tolerance'])\nacquisition_tests = [acquisition_test_tuple('negative_lower_confidence_bound_acquisition', True, default_grad_tol),\n acquisition_test_tuple('expected_improvement_acquisition', True, default_grad_tol),\n acquisition_test_tuple('cost_acquisition', True, default_grad_tol),\n acquisition_test_tuple('log_acquisition', True, 1e-5),\n acquisition_test_tuple('probability_of_improvement_acquisition', True, default_grad_tol),\n acquisition_test_tuple('model_variance_acquisition', True, 1e-5),\n acquisition_test_tuple('squared_correlation_acquisition', True, 1e-3),\n acquisition_test_tuple('entropy_search_acquisition', False, np.nan),\n acquisition_test_tuple('multi_source_entropy_search_acquisition', False, np.nan),\n acquisition_test_tuple('integrated_variance_acquisition', False, np.nan),\n acquisition_test_tuple('integrated_expected_improvement_acquisition', True, default_grad_tol),\n acquisition_test_tuple('integrated_probability_of_improvement_acquisition', False, np.nan)]\n\n\n# Vanilla bq model for squared correlation test\[email protected]\ndef vanilla_bq_model(gpy_model, continuous_space, n_dims):\n integral_bounds = continuous_space.get_bounds()\n model = convert_gpy_model_to_emukit_model(gpy_model.model, integral_bounds)\n return VanillaBayesianQuadrature(model)\n\n\n# Acquisition function fixtures\[email protected]\ndef negative_lower_confidence_bound_acquisition(gpy_model):\n return NegativeLowerConfidenceBound(gpy_model)\n\n\[email protected]\ndef expected_improvement_acquisition(gpy_model):\n return ExpectedImprovement(gpy_model)\n\n\[email protected]\ndef integrated_expected_improvement_acquisition(gpy_model_mcmc):\n return IntegratedHyperParameterAcquisition(gpy_model_mcmc, ExpectedImprovement, 10)\n\n\[email protected]\ndef integrated_probability_of_improvement_acquisition(gpy_model_mcmc):\n return IntegratedHyperParameterAcquisition(gpy_model_mcmc, ProbabilityOfImprovement, 10)\n\n\[email protected]\ndef 
cost_acquisition(gpy_model):\n return CostAcquisition(gpy_model, 1e-6)\n\n\[email protected]\ndef log_acquisition(expected_improvement_acquisition):\n return LogAcquisition(expected_improvement_acquisition)\n\n\[email protected]\ndef probability_of_improvement_acquisition(gpy_model):\n return ProbabilityOfImprovement(gpy_model)\n\n\[email protected]\ndef model_variance_acquisition(gpy_model):\n return ModelVariance(gpy_model)\n\n\[email protected]\ndef integrated_variance_acquisition(gpy_model, continuous_space):\n return IntegratedVarianceReduction(gpy_model, continuous_space)\n\n\[email protected]\ndef squared_correlation_acquisition(vanilla_bq_model):\n return SquaredCorrelation(vanilla_bq_model)\n\n\[email protected]\[email protected]('n_dims', [2])\ndef multi_source_entropy_search_acquisition(gpy_model):\n space = ParameterSpace([ContinuousParameter('x1', 0, 1), InformationSourceParameter(2)])\n return MultiInformationSourceEntropySearch(gpy_model, space, num_representer_points=10)\n\n\n# Helpers for creating parameterized fixtures\ndef create_acquisition_fixture_parameters():\n return [pytest.param(pytest_lazyfixture.lazy_fixture(acq.name), id=acq.name) for acq in acquisition_tests]\n\n\ndef create_gradient_acquisition_fixtures():\n # Create list of tuples of parameters with (fixture, tolerance) for acquisitions that gave gradients only\n parameters = []\n for acquisition in acquisition_tests:\n if acquisition.has_gradients:\n acquisition_name = acquisition.name\n lazy_fixture = pytest_lazyfixture.lazy_fixture(acquisition.name)\n parameters.append(pytest.param(lazy_fixture, acquisition.rmse_gradient_tolerance, id=acquisition_name))\n return parameters\n\n\n# Tests\[email protected]('acquisition', create_acquisition_fixture_parameters())\ndef test_acquisition_evaluate_shape(acquisition, n_dims):\n x = np.random.rand(10, n_dims)\n acquisition_value = acquisition.evaluate(x)\n assert acquisition_value.shape == (10, 1)\n\n\[email protected](('acquisition', 'tol'), create_gradient_acquisition_fixtures())\ndef test_acquisition_gradient_computation(acquisition, n_dims, tol):\n rng = np.random.RandomState(43)\n x_test = rng.rand(10, n_dims)\n\n acq = lambda x: acquisition.evaluate(np.array([x]))[0][0]\n grad = lambda x: acquisition.evaluate_with_gradients(np.array([x]))[1][0]\n\n for xi in x_test:\n err = check_grad(acq, grad, xi, epsilon=gradient_check_step_size)\n assert err < tol\n\n\[email protected](('acquisition', 'tol'), create_gradient_acquisition_fixtures())\ndef test_acquisition_gradient_shapes(acquisition, n_dims, tol):\n rng = np.random.RandomState(43)\n x_test = rng.rand(10, n_dims)\n\n gradients = acquisition.evaluate_with_gradients(x_test)[1]\n assert gradients.shape == (10, n_dims)\n" ]
[ [ "numpy.array", "numpy.random.rand", "scipy.optimize.check_grad", "numpy.random.RandomState" ] ]
tallenglish/cayleydickenson
[ "6bf9e016968801183adf697e872c405a0073239a" ]
[ "group.py" ]
[ "from hypercomplex import Order, Names\n\nimport argparse as ap\nimport definitions as df\nimport graph_tool as gt\nimport graph_tool.draw as gtd\nimport itertools as it\nimport networkx as nx\nimport numpy as np\n\ndef group(**options):\n\n\tdef option(name, default, **options):\n\n\t\tif name in options:\n\n\t\t\treturn options[name]\n\n\t\treturn default\n\n\tdef identity():\n\n\t\trg = range(0, self.dimensions)\n\t\tid = [[+1 if i == j else 0 for j in rg] for i in rg]\n\t\tid += [[-1 if i == j else 0 for j in rg] for i in rg]\n\n\t\tfor i in range(0, self.dimensions * 2):\n\n\t\t\tid[i] = self.__class__(tuple(id[i]))\n\n\t\treturn id\n\n\tdef edges(index):\n\n\t\tfound = np.zeros(groups.shape, dtype=int)\n\n\t\tfor id in range(size):\n\n\t\t\tfound[id, groups[id, index]] = 1\n\n\t\treturn found\n\n\tdef indexer(input):\n\n\t\tif not input:\n\n\t\t\treturn 0\n\n\t\tcoefficients = input.coefficients()\n\t\tid, val = next(((id, val) for id, val in enumerate(coefficients) if val))\n\t\tid += input.dimensions if val < 0 else 0\n\n\t\treturn id\n\n\telement = option(\"element\", \"e\", **options)\n\tindices = option(\"indices\", \"1ijkLIJKmpqrMPQRnstuNSTUovwxOVWX\", **options)\n\tfontsize = option(\"fontsize\", 14, **options)\n\tfigsize = option(\"figsize\", 6.0, **options)\n\tfigdpi = option(\"figdpi\", 100.0, **options)\n\tfilename = option(\"filename\", \"G{order}.{filetype}\", **options)\n\tfiletype = option(\"filetype\", \"png\", **options)\n\tdirected = option(\"directed\", False, **options)\n\tshowneg = option(\"negatives\", False, **options)\n\tshowpos = option(\"positives\", False, **options)\n\tshowall = option(\"showall\", False, **options)\n\tlayers = option(\"layers\", False, **options)\n\torder = option(\"order\", None, **options)\n\tnamed = option(\"named\", None, **options)\n\tsave = option(\"save\", False, **options)\n\tshow = option(\"show\", False, **options)\n\n\tif named != None:\n\n\t\tself = Names.get(named, None)\n\n\telif order != None:\n\n\t\tself = Order.get(order, None)\n\n\telse:\n\n\t\tself = None\n\n\tif self == None or (hasattr(self, \"order\") and self.order > 5):\n\n\t\traise NotImplementedError\n\n\tsize = self.dimensions * 2\n\tgroups = np.zeros((size, size), dtype=int)\n\tindices = list(indices)\n\tconnections = []\n\tlayered = []\n\tindexes = []\n\n\tfor a, b in it.product(identity(), repeat=2):\n\n\t\tgroups[indexer(a), indexer(b)] = indexer(a * b)\n\n\tif layers:\n\n\t\tlayers = layers.split(\",\")\n\t\tlayered = [0] * len(layers)\n\t\tshowall = True\n\n\t\tfor index in range(0, len(layers)):\n\n\t\t\tlayer = layers[index]\n\t\t\tid = 0\n\n\t\t\tif layer[:1] == \"-\" or layer[:1] == \"+\": # first handle sign\n\n\t\t\t\tid += self.dimensions if layer[:1] == \"-\" else 0\n\t\t\t\tlayer = layer[1:]\n\n\t\t\tif element in layer: # handle e0,e12,e4, etc.\n\n\t\t\t\tx = layer.index(element)\n\t\t\t\tid += int(layer[x+1:])\n\n\t\t\telif layer.isdigit(): # handle numbers\n\n\t\t\t\tid += int(layer)\n\n\t\t\telif layer.isalpha() and layer in indices: # handle i,j,k, etc\n\n\t\t\t\tid += indices.index(layer)\n\n\t\t\tlayered[index] = id\n\n\telif showneg and not showpos and not showall:\n\n\t\tlayered = range(self.dimensions, size)\n\n\telif showpos and not showneg and not showall:\n\n\t\tlayered = range(0, self.dimensions)\n\n\telse:\n\n\t\tif showneg and showpos:\n\n\t\t\tshowall = True\n\n\t\tlayered = range(0, size)\n\n\tfor index in layered:\n\n\t\tif index == 0 or index == self.dimensions: # inore the +1, -1 
layers\n\n\t\t\tcontinue\n\n\t\tconnections.append(edges(index))\n\t\ttotal = nx.from_numpy_matrix(sum(connections))\n\t\tindexes.append(index)\n\n\t\tif nx.is_connected(total) and not (showall or showpos or showneg):\n\n\t\t\tbreak\n\n\tfirst = nx.from_numpy_matrix(connections[0])\n\tloops = nx.connected_components(first)\n\tloops = [np.roll(x, -k) for k, x in enumerate(loops)]\n\tgraph = gt.Graph(directed=directed)\n\ttext = graph.new_vertex_property(\"string\")\n\tpos = graph.new_vertex_property(\"vector<double>\")\n\tfill = graph.new_vertex_property(\"vector<double>\")\n\tcolor = graph.new_edge_property(\"vector<double>\")\n\n\tgraph.add_vertex(size)\n\n\t# Position Indices Consistantly\n\n\tfor id in range(size):\n\n\t\tvertex = graph.vertex(id)\n\n\t\ttext[vertex] = self.named(1, index=id, asstring=True, **options)\n\t\tfill[vertex] = df.color(self.order, id)\n\t\tpos[vertex] = df.location(self.order, id)\n\n\t# Add Rotations\n\n\tfor id, connection in enumerate(connections):\n\n\t\tfor e1, e2 in zip(*np.where(connection)):\n\n\t\t\tedge = graph.add_edge(e1, e2)\n\n\t\t\tcolor[edge] = df.color(self.order, indexes[id])\n\n\topts = {\n\t\t\"edge_color\": color,\n\t\t\"edge_pen_width\": 2,\n\t\t\"edge_marker_size\": 20,\n\t\t\"edge_start_marker\": \"none\",\n\t\t\t# “none”, “arrow”, “circle”, “square”, “diamond”, “bar”\n\t\t\"edge_end_marker\": \"arrow\",\n\t\t\t# “none”, “arrow”, “circle”, “square”, “diamond”, “bar”\n\t\t\"output_size\": (int(figsize * figdpi), int(figsize * figdpi)),\n\t\t\"vertex_font_size\": fontsize,\n\t\t\"vertex_fill_color\": fill,\n\t\t\"vertex_text\": text,\n\t\t\"vertex_shape\": \"circle\",\n\t\t\t# “circle”, “triangle”, “square”, “pentagon”, “hexagon”,\n\t\t\t# “heptagon”, “octagon” “double_circle”, “double_triangle”, “double_square”,\n\t\t\t# “double_pentagon”, “double_hexagon”, “double_heptagon”, “double_octagon”,\n\t\t\t# “pie”, “none”\n\t\t\"vertex_pen_width\": 1,\n\t\t\"vertex_size\": 30,\n\t\t\"pos\": pos,\n\t}\n\n\tif save:\n\n\t\toutput = ((filename).format(order=self.order, filetype=filetype))\n\n\t\tgtd.graph_draw(graph, output=output, fmt=filetype, **opts)\n\n\tif show:\n\n\t\tgtd.graph_draw(graph, **opts)\n\nif __name__ == \"__main__\":\n\n\tparser = ap.ArgumentParser()\n\n\tparser.add_argument(\"-o\", \"--order\", type=int, default=2)\n\tparser.add_argument(\"-e\", \"--element\", type=str, default=\"e\")\n\tparser.add_argument(\"-i\", \"--indices\", type=str, default=\"1ijkLIJKmpqrMPQRnstuNSTUovwxOVWX\")\n\tparser.add_argument(\"-f\", \"--filename\", type=str, default=\"G{order}.{filetype}\")\n\tparser.add_argument(\"-t\", \"--filetype\", type=str, default=\"png\")\n\tparser.add_argument(\"-s\", \"--figsize\", type=float, default=6.0)\n\tparser.add_argument(\"-r\", \"--figdpi\", type=float, default=100.0)\n\tparser.add_argument(\"-x\", \"--fontsize\", type=int, default=14)\n\tparser.add_argument(\"-l\", \"--layers\", type=str)\n\tparser.add_argument(\"-n\", \"--named\", type=str)\n\n\tparser.add_argument(\"--directed\", action=\"store_true\")\n\tparser.add_argument(\"--translate\", action=\"store_true\")\n\tparser.add_argument(\"--negatives\", action=\"store_true\")\n\tparser.add_argument(\"--positives\", action=\"store_true\")\n\tparser.add_argument(\"--showall\", action=\"store_true\")\n\tparser.add_argument(\"--save\", action=\"store_true\")\n\tparser.add_argument(\"--show\", action=\"store_true\")\n\n\targs, urgs = parser.parse_known_args()\n\n\tgroup(**vars(args))\n" ]
[ [ "numpy.where", "numpy.roll", "numpy.zeros" ] ]
sarvex/graphics
[ "aeeb6e4753b8561ecfd39234ceea1436cd65e89e", "aeeb6e4753b8561ecfd39234ceea1436cd65e89e" ]
[ "tensorflow_graphics/projects/gan/architectures_style_gan_test.py", "tensorflow_graphics/projects/gan/exponential_moving_average.py" ]
[ "# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for gan.architectures_style_gan.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_graphics.projects.gan import architectures_style_gan\n\n\nclass ArchitecturesStyleGanTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('batch_1', 1, False), ('batch_2', 2, False),\n ('normalize_latent_code', 1, True))\n def test_style_based_generator_output_size(self, batch_size,\n normalize_latent_code):\n input_data = np.ones(shape=(batch_size, 8), dtype=np.float32)\n generator, _, _ = architectures_style_gan.create_style_based_generator(\n latent_code_dimension=8,\n upsampling_blocks_num_channels=(8, 8),\n normalize_latent_code=normalize_latent_code)\n expected_size = 16\n\n output = generator(input_data)\n output_value = self.evaluate(output)\n\n with self.subTest(name='static_shape'):\n output.shape.assert_is_fully_defined()\n self.assertSequenceEqual(output.shape,\n (batch_size, expected_size, expected_size, 3))\n with self.subTest(name='dynamic_shape'):\n self.assertSequenceEqual(output_value.shape,\n (batch_size, expected_size, expected_size, 3))\n\n @parameterized.named_parameters(('batch_1', 1), ('batch_2', 2))\n def test_style_based_generator_intermediate_outputs_shape(self, batch_size):\n input_data = tf.ones(shape=(batch_size, 8))\n generator, _, _ = architectures_style_gan.create_style_based_generator(\n latent_code_dimension=8,\n upsampling_blocks_num_channels=(8, 8),\n generate_intermediate_outputs=True)\n\n outputs = generator(input_data)\n output_values = self.evaluate(outputs)\n\n self.assertLen(outputs, 3)\n for index, output_value in enumerate(output_values):\n self.assertSequenceEqual(output_value.shape,\n (batch_size, 2**(index + 2), 2**(index + 2), 3))\n\n def test_cloning_style_based_generator(self):\n generator, _, _ = architectures_style_gan.create_style_based_generator()\n\n with tf.keras.utils.custom_object_scope(\n architectures_style_gan.CUSTOM_LAYERS):\n generator_clone = tf.keras.models.clone_model(generator)\n\n self.assertIsInstance(generator_clone, tf.keras.Model)\n\n @parameterized.named_parameters(('batch_1', 1), ('batch_2', 2))\n def test_style_based_generator_mapping_outputs_shape(self, batch_size):\n input_data = tf.ones(shape=(batch_size, 512))\n output_dimension = 554\n mapping_network = architectures_style_gan.create_mapping_network(\n latent_code_dimension=512,\n output_dimension=output_dimension,\n normalize_latent_code=False,\n name='keypoint_mapping')\n\n outputs = mapping_network(input_data)\n\n self.assertEqual(outputs.shape[1], output_dimension)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implements an ExponentialMovingAverage class that is checkpointable.\"\"\"\n\nfrom typing import Sequence\n\nimport tensorflow as tf\n\n\nclass ExponentialMovingAverage(tf.Module):\n \"\"\"Exponential moving average.\n\n This class is a checkpointable implementation of a subset of the functionality\n provided by tf.train.ExponentialMovingAverage. The tf version is not\n checkpointable due to use of tf.Variable.ref() to associate tf.Variables\n objects to their corresponding averages\n (cf. https://github.com/tensorflow/tensorflow/issues/38452). This version uses\n the order of the tf.Variable objects in a sequence to associate the variables\n with their averages.\n\n Note: This class offers less functionality than the tensorflow version and it\n is only implemented for replica context.\n\n Attributes:\n averaged_variables: A sequence of tf.Variables that stores the averages for\n the variables. They are associated to the new values that are provided to\n ExponentialMovingAverage.apply() by the order in the sequence. If None a\n call to ExponentialMovingAverage.apply() initializes the variable before\n applying the update.\n \"\"\"\n\n def __init__(self, decay: float = 0.999):\n \"\"\"Initializes exponential moving average.\n\n Args:\n decay: The decay rate of the exponential moving average.\n \"\"\"\n self.averaged_variables: Sequence[tf.Variable] = None\n self._decay = decay\n\n def _ema_assign_fn(self, variable: tf.Variable, value: tf.Tensor):\n \"\"\"Updates the exponential moving average for a single variable.\"\"\"\n return variable.assign(self._decay * variable + (1.0 - self._decay) * value)\n\n def _apply_values(self, variables: Sequence[tf.Variable]):\n \"\"\"Applies the new values to the exponential moving averages.\"\"\"\n\n def merge_fn(strategy: tf.distribute.Strategy, variable: tf.Variable,\n value: tf.Tensor):\n value = strategy.extended.reduce_to(tf.distribute.ReduceOp.MEAN, value,\n variable)\n strategy.extended.update(variable, self._ema_assign_fn, args=(value,))\n\n replica_context = tf.distribute.get_replica_context()\n\n if replica_context:\n for variable_ema, variable in zip(self.averaged_variables, variables):\n replica_context.merge_call(merge_fn, args=(variable_ema, variable))\n else:\n raise NotImplementedError(\n 'Cross-replica context version not implemented.')\n\n def apply(self, variables: Sequence[tf.Variable]):\n \"\"\"Applies new values to the averages.\n\n This function is called to update the averages with new values. If the\n variables for the averages have not been created before this function\n creates new variables for the averages before the update.\n\n Args:\n variables: The variables storing the values to apply to the averages. The\n sequence is assumed to have the same order of the variables as the\n averages stored in self.averaged_variables. 
If self.averaged_variables\n is None it gets initialized with a new sequence of variables with the\n values of the provided variables as initial value.\n \"\"\"\n if self.averaged_variables is None:\n with tf.init_scope():\n strategy = tf.distribute.get_strategy()\n self.averaged_variables = []\n\n for variable in variables:\n with strategy.extended.colocate_vars_with(variable):\n self.averaged_variables.append(\n tf.Variable(initial_value=variable.read_value()))\n self._apply_values(variables)\n" ]
[ [ "tensorflow.ones", "numpy.ones", "tensorflow.keras.utils.custom_object_scope", "tensorflow.test.main", "tensorflow.keras.models.clone_model" ], [ "tensorflow.distribute.get_replica_context", "tensorflow.distribute.get_strategy", "tensorflow.init_scope" ] ]
Pressio/pressio-hyperreduction
[ "3beba2532ccac64262fca81fbc35a9f0ce223620" ]
[ "tests/distributed/test_lspg_weighting_n6.py" ]
[ "\nimport numpy as np\nimport sys\nimport pressiotools.linalg as ptla\nfrom pressiotools.romoperators.lspgWeighting import computeLspgWeighting\n\nnp.set_printoptions(linewidth=340, precision=14)\ntol = 1e-14\n\n#-----------------------------\ndef runDof1(comm):\n rank = comm.Get_rank()\n\n np.random.seed(3274618)\n psi0 = np.asfortranarray(np.random.rand(37, 7))\n if rank==0:\n print(psi0)\n\n if rank==0: locRows = np.arange(0,4).tolist()\n elif rank==1: locRows = []\n elif rank==2: locRows = np.arange(4, 15).tolist()\n elif rank==3: locRows = np.arange(15, 19).tolist()\n elif rank==4: locRows = np.arange(19, 28).tolist()\n elif rank==5: locRows = np.arange(28, 37).tolist()\n\n psi = ptla.MultiVector(np.asfortranarray(psi0[locRows, :]))\n\n # even if I pass a single list of glob indices,\n # the code will work beucase each rank will pick up\n # only the glob inds that pertain to it\n meshGlobIndices = [0, 8,9,10,11, 15,16,17, 32,34]\n wMat = computeLspgWeighting(residualBasis=psi,\n dofsPerMeshNode=1,\n sampleMeshIndices=meshGlobIndices,\n communicator=comm)\n print(rank, wMat)\n\n ## check correctness ##\n import scipy.linalg as scipyla\n Zpsi = psi0[meshGlobIndices, :]\n ZpsiPsInv = scipyla.pinv(Zpsi)\n A = np.matmul(psi0, ZpsiPsInv)\n gold = np.transpose(A).dot(A)\n if rank==0:\n print(rank, \"gold\", gold)\n\n\n if rank==0: myGold = gold[[0], :]\n elif rank==1: myGold = None\n elif rank==2: myGold = gold[np.arange(1,5), :]\n elif rank==3: myGold = gold[np.arange(5,8), :]\n elif rank==4: myGold = None\n elif rank==5: myGold = gold[np.arange(8,10), :]\n\n if rank not in [1,4]:\n assert( wMat.shape == myGold.shape )\n assert(np.allclose(wMat, myGold, atol=1e-12))\n else:\n assert(wMat == None)\n\n\n#-----------------------------\ndef runDof2(comm):\n rank = comm.Get_rank()\n\n np.random.seed(3274618)\n psi0 = np.asfortranarray(np.random.rand(38, 7))\n if rank==0: print(psi0)\n\n # here we use dof/cell = 2 so when we divide,\n # make sure we have an integer num of mesh cells for each rank\n if rank==0: locRows = np.arange(0,4).tolist()\n elif rank==1: locRows = []\n elif rank==2: locRows = np.arange(4, 16).tolist()\n elif rank==3: locRows = np.arange(16, 20).tolist()\n elif rank==4: locRows = np.arange(20, 28).tolist()\n elif rank==5: locRows = np.arange(28, 38).tolist()\n\n psi = ptla.MultiVector(np.asfortranarray(psi0[locRows, :]))\n\n # even if I pass a single list of glob indices,\n # the code will work beucase each rank will pick up\n # only the glob inds that pertain to it\n meshGlobIndices = [0,1,8,9,10,13,15]\n wMat = computeLspgWeighting(residualBasis=psi,\n dofsPerMeshNode=2,\n sampleMeshIndices=meshGlobIndices,\n communicator=comm)\n print(rank, wMat)\n\n ## check correctness ##\n import scipy.linalg as scipyla\n # note that we have dofs/cell = 2, so here\n # we need to list the DOFs GIDs whcih are not just the mesh GIDs\n Zpsi = psi0[[0,1,2,3,16,17,18,19,20,21,26,27,30,31], :]\n ZpsiPsInv = scipyla.pinv(Zpsi)\n A = np.matmul(psi0, ZpsiPsInv)\n gold = np.transpose(A).dot(A)\n if rank==0: print(rank, \"gold\", gold)\n\n # note that we have dofs/cell = 2, so here\n # we need to list the DOFs GIDs whcih are not just the mesh GIDs\n if rank==0: myGold = gold[[0,1,2,3], :]\n elif rank==1: myGold = None\n elif rank==2: myGold = None\n elif rank==3: myGold = gold[[4,5,6,7], :]\n elif rank==4: myGold = gold[[8,9,10,11], :]\n elif rank==5: myGold = gold[[12,13], :]\n\n if rank not in [1,2]:\n assert( wMat.shape == myGold.shape )\n assert(np.allclose(wMat, myGold, atol=1e-12))\n 
else:\n assert(wMat == None)\n\n\n############\n### MAIN ###\n############\nif __name__ == '__main__':\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n assert(comm.Get_size() == 6)\n\n runDof1(comm)\n runDof2(comm)\n" ]
[ [ "numpy.random.rand", "numpy.matmul", "numpy.random.seed", "numpy.set_printoptions", "numpy.asfortranarray", "scipy.linalg.pinv", "numpy.allclose", "numpy.transpose", "numpy.arange" ] ]
flokno/phonopy
[ "02e31d5998de0a9b664b67968bb511e21c400574" ]
[ "phonopy/harmonic/forces.py" ]
[ "# Copyright (C) 2011 Atsushi Togo\n# All rights reserved.\n#\n# This file is part of phonopy.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of the phonopy project nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\n\n\nclass Forces(object):\n \"\"\"\n forces: Forces on atoms in a supercell with a displacement in Cartesian\n coordinate\n [ [ F_1x, F_1y, F_1z ],\n [ F_2x, F_2y, F_2z ],\n ... ]\n displacement: An atomic displacement in Cartesian coordiante\n [ d_x, d_y, d_z ]\n \"\"\"\n\n def __init__(self, atom_number, displacement, forces,\n is_translational_invariance=False):\n self.atom_number = atom_number\n self.displacement = displacement\n self.forces = np.array(forces)\n if is_translational_invariance:\n self.set_translational_invariance()\n\n def get_atom_number(self):\n return self.atom_number\n\n def get_displacement(self):\n return self.displacement\n\n def get_forces(self):\n return self.forces\n\n def set_translational_invariance(self):\n self.forces = (self.forces -\n np.sum(self.forces, axis=0) / self.forces.shape[0])\n" ]
[ [ "numpy.sum", "numpy.array" ] ]
yandex-research/ddpm-segmentation
[ "8a46740fc9536f68e0901fed720437eb45c57747" ]
[ "train_interpreter.py" ]
[ "import torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport json\nimport os\nimport gc\n\nfrom torch.utils.data import DataLoader\n\nimport argparse\nfrom src.utils import setup_seed, multi_acc\nfrom src.pixel_classifier import load_ensemble, compute_iou, predict_labels, save_predictions, save_predictions, pixel_classifier\nfrom src.datasets import ImageLabelDataset, FeatureDataset, make_transform\nfrom src.feature_extractors import create_feature_extractor, collect_features\n\nfrom guided_diffusion.guided_diffusion.script_util import model_and_diffusion_defaults, add_dict_to_argparser\nfrom guided_diffusion.guided_diffusion.dist_util import dev\n\n\ndef prepare_data(args):\n feature_extractor = create_feature_extractor(**args)\n \n print(f\"Preparing the train set for {args['category']}...\")\n dataset = ImageLabelDataset(\n data_dir=args['training_path'],\n resolution=args['image_size'],\n num_images=args['training_number'],\n transform=make_transform(\n args['model_type'],\n args['image_size']\n )\n )\n X = torch.zeros((len(dataset), *args['dim'][::-1]), dtype=torch.float)\n y = torch.zeros((len(dataset), *args['dim'][:-1]), dtype=torch.uint8)\n\n if 'share_noise' in args and args['share_noise']:\n rnd_gen = torch.Generator(device=dev()).manual_seed(args['seed'])\n noise = torch.randn(1, 3, args['image_size'], args['image_size'], \n generator=rnd_gen, device=dev())\n else:\n noise = None \n\n for row, (img, label) in enumerate(tqdm(dataset)):\n img = img[None].to(dev())\n features = feature_extractor(img, noise=noise)\n X[row] = collect_features(args, features).cpu()\n \n for target in range(args['number_class']):\n if target == args['ignore_label']: continue\n if 0 < (label == target).sum() < 20:\n print(f'Delete small annotation from image {dataset.image_paths[row]} | label {target}')\n label[label == target] = args['ignore_label']\n y[row] = label\n \n d = X.shape[1]\n print(f'Total dimension {d}')\n X = X.permute(1,0,2,3).reshape(d, -1).permute(1, 0)\n y = y.flatten()\n return X[y != args['ignore_label']], y[y != args['ignore_label']]\n\n\ndef evaluation(args, models):\n feature_extractor = create_feature_extractor(**args)\n dataset = ImageLabelDataset(\n data_dir=args['testing_path'],\n resolution=args['image_size'],\n num_images=args['testing_number'],\n transform=make_transform(\n args['model_type'],\n args['image_size']\n )\n )\n\n if 'share_noise' in args and args['share_noise']:\n rnd_gen = torch.Generator(device=dev()).manual_seed(args['seed'])\n noise = torch.randn(1, 3, args['image_size'], args['image_size'], \n generator=rnd_gen, device=dev())\n else:\n noise = None \n\n preds, gts, uncertainty_scores = [], [], []\n for img, label in tqdm(dataset): \n img = img[None].to(dev())\n features = feature_extractor(img, noise=noise)\n features = collect_features(args, features)\n\n x = features.view(args['dim'][-1], -1).permute(1, 0)\n pred, uncertainty_score = predict_labels(\n models, x, size=args['dim'][:-1]\n )\n gts.append(label.numpy())\n preds.append(pred.numpy())\n uncertainty_scores.append(uncertainty_score.item())\n \n save_predictions(args, dataset.image_paths, preds)\n miou = compute_iou(args, preds, gts)\n print(f'Overall mIoU: ', miou)\n print(f'Mean uncertainty: {sum(uncertainty_scores) / len(uncertainty_scores)}')\n\n\n# Adopted from https://github.com/nv-tlabs/datasetGAN_release/blob/d9564d4d2f338eaad78132192b865b6cc1e26cac/datasetGAN/train_interpreter.py#L434\ndef train(args):\n features, labels = prepare_data(args)\n train_data = 
FeatureDataset(features, labels)\n\n print(f\" ********* max_label {args['number_class']} *** ignore_label {args['ignore_label']} ***********\")\n print(f\" *********************** Current number data {len(features)} ***********************\")\n\n train_loader = DataLoader(dataset=train_data, batch_size=args['batch_size'], shuffle=True, drop_last=True)\n\n print(\" *********************** Current dataloader length \" + str(len(train_loader)) + \" ***********************\")\n for MODEL_NUMBER in range(args['start_model_num'], args['model_num'], 1):\n\n gc.collect()\n classifier = pixel_classifier(numpy_class=(args['number_class']), dim=args['dim'][-1])\n classifier.init_weights()\n\n classifier = nn.DataParallel(classifier).cuda()\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)\n classifier.train()\n\n iteration = 0\n break_count = 0\n best_loss = 10000000\n stop_sign = 0\n for epoch in range(100):\n for X_batch, y_batch in train_loader:\n X_batch, y_batch = X_batch.to(dev()), y_batch.to(dev())\n y_batch = y_batch.type(torch.long)\n\n optimizer.zero_grad()\n y_pred = classifier(X_batch)\n loss = criterion(y_pred, y_batch)\n acc = multi_acc(y_pred, y_batch)\n\n loss.backward()\n optimizer.step()\n\n iteration += 1\n if iteration % 1000 == 0:\n print('Epoch : ', str(epoch), 'iteration', iteration, 'loss', loss.item(), 'acc', acc)\n \n if epoch > 3:\n if loss.item() < best_loss:\n best_loss = loss.item()\n break_count = 0\n else:\n break_count += 1\n\n if break_count > 50:\n stop_sign = 1\n print(\"*************** Break, Total iters,\", iteration, \", at epoch\", str(epoch), \"***************\")\n break\n\n if stop_sign == 1:\n break\n\n model_path = os.path.join(args['exp_dir'], \n 'model_' + str(MODEL_NUMBER) + '.pth')\n MODEL_NUMBER += 1\n print('save to:',model_path)\n torch.save({'model_state_dict': classifier.state_dict()},\n model_path)\n \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n add_dict_to_argparser(parser, model_and_diffusion_defaults())\n\n parser.add_argument('--exp', type=str)\n parser.add_argument('--seed', type=int, default=0)\n\n args = parser.parse_args()\n setup_seed(args.seed)\n\n # Load the experiment config\n opts = json.load(open(args.exp, 'r'))\n opts.update(vars(args))\n opts['image_size'] = opts['dim'][0]\n\n # Prepare the experiment folder \n if len(opts['steps']) > 0:\n suffix = '_'.join([str(step) for step in opts['steps']])\n suffix += '_' + '_'.join([str(step) for step in opts['blocks']])\n opts['exp_dir'] = os.path.join(opts['exp_dir'], suffix)\n\n path = opts['exp_dir']\n os.makedirs(path, exist_ok=True)\n print('Experiment folder: %s' % (path))\n os.system('cp %s %s' % (args.exp, opts['exp_dir']))\n\n # Check whether all models in ensemble are trained \n pretrained = [os.path.exists(os.path.join(opts['exp_dir'], f'model_{i}.pth')) \n for i in range(opts['model_num'])]\n \n if not all(pretrained):\n # train all remaining models\n opts['start_model_num'] = sum(pretrained)\n train(opts)\n \n print('Loading pretrained models...')\n models = load_ensemble(opts, device='cuda')\n evaluation(opts, models)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.utils.data.DataLoader", "torch.nn.DataParallel" ] ]
junkoda/junkoda_cellularlib
[ "bc97d6ab419d8e9e1c295a7662d94cfd1f5b3501" ]
[ "py/junkoda_cellularlib/graph.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\nclass Graph:\n \"\"\"\n Graph struture in a 2D image\n\n Methods:\n vetices\n edges\n plot_vertices()\n plot_edges()\n \"\"\"\n def __init__(self, ei, ev, nx, ny):\n \"\"\"\n ei: edge indices (index = ix*ny + iy)\n ev: edge values\n\n nx, ny (int) : image size nx, ny\n \"\"\"\n\n # Convert indices to corrdinate (x, y)\n\n # Vertices\n v = np.array(list(set(ei.flatten())))\n v.sort()\n\n n_vertices = len(v)\n v_coords = np.empty((n_vertices, 2), dtype=float)\n v_coords[:, 0] = v // ny # x\n v_coords[:, 1] = v % ny # y\n\n # Edges\n n_edges = len(ei)\n edge_coords = np.empty((n_edges, 5), dtype=float)\n edge_coords[:, 0] = ei[:, 0] // ny # x1\n edge_coords[:, 1] = ei[:, 0] % ny # y1\n edge_coords[:, 2] = ei[:, 1] // ny # x2\n edge_coords[:, 3] = ei[:, 1] % ny # x2\n edge_coords[:, 4] = ev\n\n self.v = v_coords\n self.e = edge_coords\n self.nx = nx\n self.ny = ny\n\n def __repr__(self):\n return 'Graph (%d vertices, %d edges)' % (len(self.v), len(self.e))\n\n def plot_vertices(self, marker='o', **kwargs):\n \"\"\"\n Args:\n marker (str): 3rd argmument in plt.plot\n **kwargs: any keyword argments are passed to plt.plot\n \"\"\"\n import matplotlib.pyplot as plt\n plt.xlim(0, self.nx)\n plt.ylim(0, self.ny)\n plt.gca().invert_yaxis()\n\n plt.plot(self.v[:, 0], self.v[:, 1], marker, **kwargs)\n\n def plot_edges(self, idx=None, *,\n color='black',\n cmap=None, vmin=None, vmax=None,\n **kwargs):\n \"\"\"\n Args:\n idx: edge indices, e.g., plot_edges(range(100))\n color (str): color, e.g., str 'black', rgba (0, 0, 0, 0)\n cmap : matplotlib map name (str) or matplotlib.colors.Colormap\n vmin: minimum value for colour map\n vmax: maximum value for colour map\n **kwargs: any keyword argmuments are padded to plt.plot\n\n Note:\n x is ploted on vertical axis and y on horizontal axis to match\n plt.imshow\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib\n from matplotlib import collections as mc\n\n ax = plt.gca()\n\n # Change xlim, ylim if they not set yet\n if ax.get_xlim() == (0, 1):\n plt.xlim(0, self.nx)\n if ax.get_ylim() == (0, 1):\n plt.ylim(0, self.ny)\n\n # Change xlabel, ylabel if they are not set yet\n if ax.xaxis.get_label().get_text() == '':\n plt.xlabel('$x$')\n if ax.yaxis.get_label().get_text() == '':\n plt.ylabel('$y$')\n\n lines = []\n\n # norm convers scaler value to colour\n if vmin is None:\n vmin = np.min(self.e[:, 4])\n if vmax is None:\n vmin = np.max(self.e[:, 4])\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)\n\n if idx is None:\n edges = self.e\n else:\n edges = self.e[idx, ]\n\n for e in edges:\n lines.append([(e[0], e[1]), (e[2], e[3])])\n\n if 'linewidths' not in kwargs:\n kwargs['linewidths'] = 2\n\n if cmap is not None:\n # use cmap\n if isinstance(cmap, str):\n # color name\n cmap = matplotlib.cm.get_cmap(cmap)\n\n colours = []\n for e in edges:\n colours.append(cmap(norm(e[4])))\n\n lc = mc.LineCollection(lines, colors=colours,\n **kwargs)\n else:\n # use colour\n lc = mc.LineCollection(lines, colors=color,\n **kwargs)\n plt.gca().add_collection(lc)\n\n @property\n def edges(self):\n \"\"\"\n Returns: edges (pd.DataFrame)\n x1, y1, x2, y2, value\n \"\"\"\n ec = self.edge_coords\n return pd.DataFrame({'x1': ec[:, 0],\n 'y1': ec[:, 1],\n 'x2': ec[:, 2],\n 'y2': ec[:, 3],\n 'value': ec[:, 4]})\n\n @property\n def vetices(self):\n \"\"\"\n Returns: vetices (pd.DataFrame)\n x, y\n \"\"\"\n return pd.DataFrame({'x': self.v[:, 0],\n 'y': self.v[:, 1]})\n" ]
[ [ "numpy.max", "numpy.empty", "matplotlib.pyplot.xlim", "matplotlib.cm.get_cmap", "pandas.DataFrame", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.min", "matplotlib.collections.LineCollection", "matplotlib.colors.Normalize", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gca" ] ]
mclaughlin6464/halotools_old
[ "96fbdf5fc156160f19ccd4ae3ee964f831d26fa6" ]
[ "halotools/mock_observables/pair_counters/pairs.py" ]
[ "r\"\"\"\nsimple python brute force pair counting functions. The primary purpose of these functions\nis as a sanity check on more complex pair counting techniques. These functions should not\nbe used on large data sets, as memory usage is very large, and runtimes can be very slow.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\n\n__all__ = ['npairs', 'wnpairs', 'xy_z_npairs', 'xy_z_wnpairs', 's_mu_npairs']\n__author__ = ['Duncan Campbell']\n\n\ndef npairs(sample1, sample2, rbins, period=None):\n r\"\"\"\n Calculate the number of pairs with separations less than or equal to rbins[i].\n\n Parameters\n ----------\n sample1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n sample2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n rbins : array_like\n numpy array of boundaries defining the bins in which pairs are counted.\n len(rbins) = Nrbins + 1.\n\n period : array_like, optional\n length k array defining periodic boundary conditions. If only\n one number, Lbox, is specified, period is assumed to be np.array([Lbox]*k).\n If none, PBCs are set to infinity.\n\n Returns\n -------\n N_pairs : array of length len(rbins)\n number counts of pairs\n \"\"\"\n\n sample1 = np.atleast_2d(sample1)\n sample2 = np.atleast_2d(sample2)\n rbins = np.atleast_1d(rbins)\n\n # Check to make sure both data sets have the same dimension. Otherwise, throw an error\n if np.shape(sample1)[-1] != np.shape(sample2)[-1]:\n raise ValueError(\"sample1 and sample2 inputs do not have the same dimension.\")\n return None\n\n # Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(sample1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(sample1)[-1])\n elif np.shape(period)[0] != np.shape(sample1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n\n N1 = len(sample1)\n N2 = len(sample2)\n dd = np.zeros((N1*N2,)) # store radial pair separations\n for i in range(0, N1): # calculate distance between every point and every other point\n x1 = sample1[i, :]\n x2 = sample2\n dd[i*N2:i*N2+N2] = distance(x1, x2, period)\n\n # sort results\n dd.sort()\n\n # count number less than r\n n = np.zeros((rbins.size,), dtype=np.int)\n for i in range(rbins.size):\n if rbins[i] > np.min(period)/2.0:\n print(\"r=\", rbins[i], \" min(period)/2=\", np.min(period)/2.0)\n n[i] = len(np.where(dd <= rbins[i])[0])\n\n return n\n\n\ndef xy_z_npairs(sample1, sample2, rp_bins, pi_bins, period=None):\n r\"\"\"\n Calculate the number of pairs with parellal separations less than or equal to\n pi_bins[i], and perpendicular separations less than or equal to rp_bins[i].\n\n Assumes the first N-1 dimensions are perpendicular to the line-of-sight (LOS), and\n the final dimension is parallel to the LOS.\n\n Parameters\n ----------\n sample1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n sample2 : array_like\n N by k numpy array of k-dimensional positions. 
Should be between zero and\n period\n\n rp_bins : array_like\n numpy array of boundaries defining the perpendicular bins in which pairs are\n counted.\n\n pi_bins : array_like\n numpy array of boundaries defining the parallel bins in which pairs are counted.\n\n period : array_like, optional\n length k array defining periodic boundary conditions. If only\n one number, Lbox, is specified, period is assumed to be np.array([Lbox]*k).\n If none, PBCs are set to infinity.\n\n Returns\n -------\n N_pairs : ndarray of shape (len(rp_bins),len(pi_bins))\n number counts of pairs\n \"\"\"\n\n sample1 = np.atleast_2d(sample1)\n sample2 = np.atleast_2d(sample2)\n rp_bins = np.atleast_1d(rp_bins)\n pi_bins = np.atleast_1d(pi_bins)\n\n # Check to make sure both data sets have the same dimension. Otherwise, throw an error!\n if np.shape(sample1)[-1] != np.shape(sample2)[-1]:\n raise ValueError(\"sample1 and sample2 inputs do not have the same dimension.\")\n return None\n\n # Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(sample1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(sample1)[-1])\n elif np.shape(period)[0] != np.shape(sample1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n\n N1 = len(sample1)\n N2 = len(sample2)\n dd = np.zeros((N1*N2, 2)) # store pair separations\n for i in range(0, N1): # calculate distance between every point and every other point\n x1 = sample1[i, :]\n x2 = sample2\n dd[i*N2:i*N2+N2, 1] = parallel_distance(x1, x2, period)\n dd[i*N2:i*N2+N2, 0] = perpendicular_distance(x1, x2, period)\n\n # count number less than r\n n = np.zeros((rp_bins.size, pi_bins.size), dtype=np.int)\n for i in range(rp_bins.size):\n for j in range(pi_bins.size):\n n[i, j] = np.sum((dd[:, 0] <= rp_bins[i]) & (dd[:, 1] <= pi_bins[j]))\n\n return n\n\n\ndef wnpairs(sample1, sample2, r, period=None, weights1=None, weights2=None):\n r\"\"\"\n Calculate the weighted number of pairs with separations less than or equal to rbins[i].\n\n Parameters\n ----------\n sample1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n sample2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n rbins : array_like\n numpy array of boundaries defining the bins in which pairs are counted.\n len(rbins) = Nrbins + 1.\n\n period : array_like, optional\n length k array defining periodic boundary conditions. If only\n one number, Lbox, is specified, period is assumed to be np.array([Lbox]*k).\n If none, PBCs are set to infinity.\n\n weights1 : array_like, optional\n length N1 array containing weights used for weighted pair counts, w1*w2.\n\n weights2 : array_like, optional\n length N2 array containing weights used for weighted pair counts, w1*w2.\n\n Returns\n -------\n wN_pairs : array of length len(rbins)\n weighted number counts of pairs\n \"\"\"\n\n sample1 = np.atleast_2d(sample1)\n sample2 = np.atleast_2d(sample2)\n r = np.atleast_1d(r)\n\n # Check to make sure both data sets have the same dimension. 
Otherwise, throw an error!\n if np.shape(sample1)[-1] != np.shape(sample2)[-1]:\n raise ValueError(\"sample1 and sample2 inputs do not have the same dimension.\")\n return None\n\n # Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(sample1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(sample1)[-1])\n if np.shape(period)[0] != np.shape(sample1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n\n # Process weights1 entry and check for consistency.\n if weights1 is None:\n weights1 = np.array([1.0]*np.shape(sample1)[0], dtype=np.float64)\n else:\n weights1 = np.asarray(weights1).astype(\"float64\")\n if np.shape(weights1)[0] != np.shape(sample1)[0]:\n raise ValueError(\"weights1 should have same len as sample1\")\n return None\n # Process weights2 entry and check for consistency.\n if weights2 is None:\n weights2 = np.array([1.0]*np.shape(sample2)[0], dtype=np.float64)\n else:\n weights2 = np.asarray(weights2).astype(\"float64\")\n if np.shape(weights2)[0] != np.shape(sample2)[0]:\n raise ValueError(\"weights2 should have same len as sample2\")\n return None\n\n N1 = len(sample1)\n N2 = len(sample2)\n dd = np.zeros((N1, N2), dtype=np.float64) # store radial pair separations\n for i in range(0, N1): # calculate distance between every point and every other point\n x1 = sample1[i, :]\n x2 = sample2\n dd[i, :] = distance(x1, x2, period)\n\n # count number less than r\n n = np.zeros((r.size,), dtype=np.float64)\n for i in range(r.size):\n if r[i] > np.min(period)/2:\n print(\"r=\", r[i], \" min(period)/2=\", np.min(period)/2)\n for j in range(N1):\n n[i] += np.sum(np.extract(dd[j, :] <= r[i], weights2))*weights1[j]\n\n return n\n\n\ndef xy_z_wnpairs(sample1, sample2, rp_bins, pi_bins, period=None, weights1=None, weights2=None):\n r\"\"\"\n Calculate the number of weighted pairs with parellal separations less than or equal to\n pi_bins[i], and perpendicular separations less than or equal to rp_bins[i].\n\n Assumes the first N-1 dimensions are perpendicular to the line-of-sight (LOS), and\n the final dimension is parallel to the LOS.\n\n Parameters\n ----------\n sample1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n sample2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n rp_bins : array_like\n numpy array of boundaries defining the perpendicular bins in which pairs are\n counted.\n\n pi_bins : array_like\n numpy array of boundaries defining the parallel bins in which pairs are counted.\n\n period : array_like, optional\n length k array defining periodic boundary conditions. If only\n one number, Lbox, is specified, period is assumed to be np.array([Lbox]*k).\n If none, PBCs are set to infinity.\n\n weights1 : array_like, optional\n length N1 array containing weights used for weighted pair counts, w1*w2.\n\n weights2 : array_like, optional\n length N2 array containing weights used for weighted pair counts, w1*w2.\n\n\n Returns\n -------\n wN_pairs : ndarray of shape (len(rp_bins),len(pi_bins))\n weighted number counts of pairs\n \"\"\"\n\n sample1 = np.atleast_2d(sample1)\n sample2 = np.atleast_2d(sample2)\n rp_bins = np.atleast_1d(rp_bins)\n pi_bins = np.atleast_1d(pi_bins)\n\n # Check to make sure both data sets have the same dimension. 
Otherwise, throw an error!\n if np.shape(sample1)[-1] != np.shape(sample2)[-1]:\n raise ValueError(\"sample1 and sample2 inputs do not have the same dimension.\")\n return None\n\n # Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(sample1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(sample1)[-1])\n elif np.shape(period)[0] != np.shape(sample1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n\n # Process weights1 entry and check for consistency.\n if weights1 is None:\n weights1 = np.array([1.0]*np.shape(sample1)[0], dtype=np.float64)\n else:\n weights1 = np.asarray(weights1).astype(\"float64\")\n if np.shape(weights1)[0] != np.shape(sample1)[0]:\n raise ValueError(\"weights1 should have same len as sample1\")\n return None\n # Process weights2 entry and check for consistency.\n if weights2 is None:\n weights2 = np.array([1.0]*np.shape(sample2)[0], dtype=np.float64)\n else:\n weights2 = np.asarray(weights2).astype(\"float64\")\n if np.shape(weights2)[0] != np.shape(sample2)[0]:\n raise ValueError(\"weights2 should have same len as sample2\")\n return None\n\n N1 = len(sample1)\n N2 = len(sample2)\n dd = np.zeros((N1*N2, 2)) # store pair separations\n ww = np.zeros((N1*N2, 1)) # store pair separations\n for i in range(0, N1): # calculate distance between every point and every other point\n x1 = sample1[i, :]\n x2 = sample2\n dd[i*N2:i*N2+N2, 1] = parallel_distance(x1, x2, period)\n dd[i*N2:i*N2+N2, 0] = perpendicular_distance(x1, x2, period)\n ww[i*N2:i*N2+N2] = weights1[i]*weights2\n\n # count number less than r\n n = np.zeros((rp_bins.size, pi_bins.size), dtype=np.float64)\n for i in range(rp_bins.size):\n for j in range(pi_bins.size):\n n[i, j] += np.sum(np.extract((dd[:, 0] <= rp_bins[i]) & (dd[:, 1] <= pi_bins[j]), ww))\n\n return n\n\n\ndef s_mu_npairs(sample1, sample2, s_bins, mu_bins, period=None):\n r\"\"\"\n Calculate the number of pairs with 3D radial separations less than or equal to\n :math:`s`, and angular separations along the LOS, :math:`\\mu=\\cos(\\theta_{\\rm LOS})`.\n\n Assumes the first N-1 dimensions are perpendicular to the line-of-sight (LOS), and\n the final dimension is parallel to the LOS.\n\n Parameters\n ----------\n sample1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n sample2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and\n period\n\n s_bins : array_like\n numpy array of shape (num_s_bin_edges, ) storing the :math:`s`\n boundaries defining the bins in which pairs are counted.\n\n mu_bins : array_like\n numpy array of shape (num_mu_bin_edges, ) storing the\n :math:`\\cos(\\theta_{\\rm LOS})` boundaries defining the bins in\n which pairs are counted. All values must be between [0,1].\n\n period : array_like, optional\n length k array defining periodic boundary conditions. If only\n one number, Lbox, is specified, period is assumed to be np.array([Lbox]*k).\n If none, PBCs are set to infinity.\n\n Returns\n -------\n N_pairs : ndarray of shape (num_s_bin_edges, num_mu_bin_edges) storing the \n number counts of pairs with separations less than ``s_bins`` and ``mu_bins``\n \n Notes\n -----\n Along the first dimension of ``N_pairs``, :math:`s` (the radial separation) increases.\n Along the second dimension, :math:`\\mu` (the cosine of :math:`\\theta_{\\rm LOS}`) \n decreases, i.e. 
:math:`\\theta_{\\rm LOS}` increases.\n \"\"\"\n\n sample1 = np.atleast_2d(sample1)\n sample2 = np.atleast_2d(sample2)\n s_bins = np.atleast_1d(s_bins)\n mu_bins = np.atleast_1d(mu_bins)\n\n # Check to make sure both data sets have the same dimension. Otherwise, throw an error!\n if np.shape(sample1)[-1] != np.shape(sample2)[-1]:\n raise ValueError(\"sample1 and sample2 inputs do not have the same dimension.\")\n return None\n\n # Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(sample1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(sample1)[-1])\n elif np.shape(period)[0] != np.shape(sample1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n \n # create N1 x N2 x 2 array to store **all** pair separation distances\n # note that this array can be very large for large N1 and N2\n N1 = len(sample1)\n N2 = len(sample2)\n dd = np.zeros((N1*N2, 2))\n \n # calculate distance between every point and every other point\n for i in range(0, N1):\n x1 = sample1[i, :]\n x2 = sample2\n dd[i*N2:i*N2+N2, 0] = distance(x1, x2, period)\n dd[i*N2:i*N2+N2, 1] = np.cos(theta_LOS(x1, x2, period))\n \n # put mu bins in increasing theta_LOS order\n mu_bins = np.sort(mu_bins)[::-1]\n \n # bin distances in s and mu bins\n n = np.zeros((s_bins.size, mu_bins.size), dtype=np.int)\n for i in range(s_bins.size):\n for j in range(mu_bins.size):\n n[i, j] = np.sum((dd[:, 0] <= s_bins[i]) & (dd[:, 1] >= mu_bins[j]))\n \n return n\n\n\ndef distance(x1, x2, period=None):\n r\"\"\"\n Find the Euclidean distance between x1 & x2, accounting for box periodicity.\n\n Parameters\n ----------\n x1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and period\n\n x2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and period.\n\n period : array_like\n Size of the simulation box along each dimension. Defines periodic boundary\n conditioning. Must be axis aligned.\n\n Returns\n -------\n distance : array\n \"\"\"\n\n x1 = np.atleast_2d(x1)\n x2 = np.atleast_2d(x2)\n if period is None:\n period = np.array([np.inf]*np.shape(x1)[-1])\n\n # check for consistency\n if np.shape(x1)[-1] != np.shape(x2)[-1]:\n raise ValueError(\"x1 and x2 list of points must have same dimension k.\")\n else:\n k = np.shape(x1)[-1]\n if np.shape(period)[0] != np.shape(x1)[-1]:\n raise ValueError(\"period must have length equal to the dimension of x1 and x2.\")\n\n m = np.minimum(np.fabs(x1 - x2), period - np.fabs(x1 - x2))\n distance = np.sqrt(np.sum(m*m, axis=len(np.shape(m))-1))\n\n return distance\n\n\ndef parallel_distance(x1, x2, period=None):\n r\"\"\"\n Find the parallel distance between x1 & x2, accounting for box periodicity.\n\n Assumes the last dimension is the line-of-sight.\n\n Parameters\n ----------\n x1 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and period\n\n x2 : array_like\n N by k numpy array of k-dimensional positions. Should be between zero and period.\n\n period : array_like\n Size of the simulation box along each dimension. Defines periodic boundary\n conditioning. 
Must be axis aligned.\n\n    Returns\n    -------\n    distance : array\n    \"\"\"\n\n    x1 = np.atleast_2d(x1)\n    x2 = np.atleast_2d(x2)\n    if period is None:\n        period = np.array([np.inf]*np.shape(x1)[-1])\n\n    # check for consistency\n    if np.shape(x1)[-1] != np.shape(x2)[-1]:\n        raise ValueError(\"x1 and x2 list of points must have same dimension k.\")\n    else:\n        k = np.shape(x1)[-1]\n    if np.shape(period)[0] != np.shape(x1)[-1]:\n        raise ValueError(\"period must have length equal to the dimension of x1 and x2.\")\n\n    m = np.minimum(np.fabs(x1[:, -1] - x2[:, -1]), period[-1] - np.fabs(x1[:, -1] - x2[:, -1]))\n    distance = np.sqrt(m*m)\n\n    return distance\n\n\ndef perpendicular_distance(x1, x2, period=None):\n    r\"\"\"\n    Find the perpendicular distance between x1 & x2, accounting for box periodicity.\n\n    Assumes the first N-1 dimensions are perpendicular to the line-of-sight.\n\n    Parameters\n    ----------\n    x1 : array_like\n        N by k numpy array of k-dimensional positions. Should be between zero and period\n\n    x2 : array_like\n        N by k numpy array of k-dimensional positions. Should be between zero and period.\n\n    period : array_like\n        Size of the simulation box along each dimension. Defines periodic boundary\n        conditions. Must be axis aligned.\n\n    Returns\n    -------\n    distance : array\n    \"\"\"\n\n    x1 = np.atleast_2d(x1)\n    x2 = np.atleast_2d(x2)\n    if period is None:\n        period = np.array([np.inf]*np.shape(x1)[-1])\n\n    # check for consistency\n    if np.shape(x1)[-1] != np.shape(x2)[-1]:\n        raise ValueError(\"x1 and x2 list of points must have same dimension k.\")\n    else:\n        k = np.shape(x1)[-1]\n    if np.shape(period)[0] != np.shape(x1)[-1]:\n        raise ValueError(\"period must have length equal to the dimension of x1 and x2.\")\n\n    m = np.minimum(np.fabs(x1[:, :-1] - x2[:, :-1]), period[:-1] - np.fabs(x1[:, :-1] - x2[:, :-1]))\n    distance = np.sqrt(np.sum(m*m, axis=len(np.shape(m))-1))\n\n    return distance\n\n\ndef theta_LOS(x1, x2, period=None):\n    r\"\"\"\n    Find the separation angle from the LOS between x1 & x2, accounting for box periodicity.\n\n    Assumes the first N-1 dimensions are perpendicular to the line-of-sight (LOS).\n\n    Parameters\n    ----------\n    x1 : array_like\n        N by k numpy array of k-dimensional positions. Should be between zero and period\n\n    x2 : array_like\n        N by k numpy array of k-dimensional positions. Should be between zero and period.\n\n    period : array_like\n        Size of the simulation box along each dimension. Defines periodic boundary\n        conditions. Must be axis aligned.\n\n    Returns\n    -------\n    theta_LOS : array\n        angle from LOS in radians\n\n    Notes\n    -----\n    theta_LOS is set to 0.0 if the distance between points is 0.0\n    \"\"\"\n\n    x1 = np.atleast_2d(x1)\n    x2 = np.atleast_2d(x2)\n    if period is None:\n        period = np.array([np.inf]*np.shape(x1)[-1])\n\n    # check for consistency\n    if np.shape(x1)[-1] != np.shape(x2)[-1]:\n        raise ValueError(\"x1 and x2 list of points must have same dimension k.\")\n    else:\n        k = np.shape(x1)[-1]\n    if np.shape(period)[0] != np.shape(x1)[-1]:\n        raise ValueError(\"period must have length equal to the dimension of x1 and x2.\")\n\n    r_perp = perpendicular_distance(x1, x2, period=period)\n    r_parallel = parallel_distance(x1, x2, period=period)\n\n    # deal with zero separation\n    r = np.sqrt(r_perp**2 + r_parallel**2)\n    mask = (r>0.0)\n\n    theta = np.zeros(len(r))  # set to zero if r==0\n    theta[mask] = np.pi/2.0 - np.arctan2(r_parallel[mask], r_perp[mask])\n\n    return theta\n" ]
[ [ "numpy.extract", "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.min", "numpy.shape", "numpy.where", "numpy.fabs", "numpy.atleast_1d", "numpy.sort", "numpy.sqrt", "numpy.arctan2", "numpy.atleast_2d" ] ]
mrazvan22/disProgModSummerSchool
[ "d3b8848792bd9d574259ee700a689736502502ea" ]
[ "notebooks/updateLeaderboard.py" ]
[ "import sys\nimport dropbox\nfrom dropbox.files import WriteMode\nfrom dropbox.exceptions import ApiError, AuthError\nimport argparse\nimport os\nimport evalOneSubmission\nimport numpy as np\nimport pandas as pd\nimport csv\nimport string\nimport time\nimport datetime\nimport pickle\nfrom scipy.stats import rankdata\n\nparser = argparse.ArgumentParser(usage='python3 updateLeaderboard.py', description=r'''\n Script uploads the SummerSchool2018 leaderboard table to dropbox\n\n Author: Razvan V. Marinescu, [email protected]\n\n''')\n\nparser.add_argument('--runPart', dest='runPart', default='RR',\n help='which part of the script to run. Usually either LR or RR, where '\n 'LR means \"load first part, run second part\" while RR means run both parts')\n\nparser.add_argument('--fast', dest='fast', type=int, default=1,\n help='whether to run a fast version of the leaderboard.')\n\nargs = parser.parse_args()\n\nTOKEN = open(os.path.expanduser('~/.dropboxTadpoleToken'), 'r').read()[:-1]\n\ntag = 'SummerSchool2018'\n\nclass DropboxObj:\n\n def __init__(self):\n self.dbx = self.createDropboxInstance()\n\n def createDropboxInstance(self):\n # Check for an access token\n\n if (len(TOKEN) == 0):\n sys.exit(\"ERROR: Looks like you didn't add your access token. \"\n \"Open up backupuploadDropboxAPIv2.py in a text editor and \"\n \"paste in your token in line 14.\")\n\n # Create an instance of a Dropbox class, which can make requests to the API.\n print(\"Creating a Dropbox object...\")\n print('TOKEN', '%s' % TOKEN[:-1], type(TOKEN))\n\n dbx = dropbox.Dropbox(TOKEN)\n # Check that the access token is valid\n try:\n dbx.users_get_current_account()\n except AuthError as err:\n sys.exit(\"ERROR: Invalid access token; try re-generating an \"\n \"access token from the app console on the web.\")\n\n return dbx\n\n # Uploads contents of LOCALFILE to Dropbox\n def upload(self, fullPathLocal, fullPathRemote):\n print('fullPathRemote', fullPathRemote)\n with open(fullPathLocal, 'rb') as f:\n # We use WriteMode=overwrite to make sure that the settings in the file\n # are changed on upload\n print(\"Uploading \" + fullPathLocal + \" to Dropbox as \" + fullPathRemote + \"...\")\n try:\n self.dbx.files_upload(f.read(), fullPathRemote, mode=WriteMode('overwrite'))\n except ApiError as err:\n # This checks for the specific error where a user doesn't have\n # enough Dropbox space quota to upload this file\n if (err.error.is_path() and\n err.error.get_path().error.is_insufficient_space()):\n sys.exit(\"ERROR: Cannot back up; insufficient space.\")\n elif err.user_message_text:\n print(err.user_message_text)\n sys.exit()\n else:\n print(err)\n sys.exit()\n\n # Download contents of LOCALFILE to Dropbox\n def download(self, localPath, remotePath):\n\n print(\"Downloading \" + remotePath + \" from Dropbox to \" + localPath + \" ...\")\n try:\n self.dbx.files_download_to_file(localPath, remotePath)\n except ApiError as err:\n if err.user_message_text:\n print(err.user_message_text)\n sys.exit()\n else:\n print(err)\n sys.exit()\n\n def list_folder(self, folder, subfolder):\n \"\"\"List a folder.\n Return a dict mapping unicode filenames to\n FileMetadata|FolderMetadata entries.\n \"\"\"\n path = '/%s/%s' % (folder, subfolder.replace(os.path.sep, '/'))\n while '//' in path:\n path = path.replace('//', '/')\n path = path.rstrip('/')\n try:\n res = self.dbx.files_list_folder(path)\n except dropbox.exceptions.ApiError as err:\n print('Folder listing failed for', path, '-- assumed empty:', err)\n return {}\n else:\n rv = {}\n for 
entry in res.entries:\n        rv[entry.name] = entry\n      return rv\n\n\ndef writeHTMLtable(evalResults, htmlFile, forecastFiles):\n  text = 'Table last updated on %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M (UTC+0)'))\n  text += '<table class=\"sortable smallfont\" style=\"width: 780px; table-layout: fixed;\" >\\n'\n  text += r'''\n  <col width=\"30\">\n  <col width=\"40\">\n  <col width=\"35\">\n  <col width=\"30\">\n  <col width=\"40\">\n  <col width=\"40\">\n  <col width=\"40\">\n  <col width=\"40\">\n  <col width=\"35\">\n  <col width=\"35\">\n  <col width=\"60\">'''\n\n  trStartHead = r'''<thead>\n\t<tr class=\"d1\"><td>'''\n  trEndHead = r'''</td></tr>\n</thead>\n'''\n  text += trStartHead\n  text += '</td><td>'.join(['RANK', 'TEAM NAME', 'MAUC', 'BCA', 'ADAS MAE', 'VENTS MAE',\n    'ADAS WES', 'VENTS WES', 'ADAS CPA', 'VENTS CPA', 'DATE'])\n  text += trEndHead + '<tbody>'\n  nrFiles = len(forecastFiles)\n  # print(evalResults.shape)\n  # print(evalResults['MAUC'])\n  formatStrsMeasures = ['%.3f','%.3f','%.3f','%.5f','%.3f','%.5f','%.3f','%.3f']\n  for f in range(evalResults['MAUC'].shape[0]):\n    if not np.isnan(evalResults['MAUC'].iloc[f]):\n      text += '\\n  <tr class=\"d%d\">' % (f % 2)\n      teamName = forecastFiles[f].split('.')[0][len('TADPOLE_Submission_%s_' % tag):]\n      # print(f, type(evalResults['TEAMNAME'].iloc[f]))\n      # print(f, type('%f' % evalResults['RANK'].iloc[f]))\n      # print(f, [type(n) for n in evalResults.loc[f,'MAUC':'ventsCP']])\n\n      text += '<td>%.1f</td>' % evalResults['RANK'].iloc[f]\n      text += '<td style=\"word-wrap: break-word\">%s</td><td>' % evalResults['TEAMNAME'].iloc[f]\n      text += '</td><td>'.join(\n        [ strFmt % n for strFmt, n in zip(formatStrsMeasures, evalResults.loc[f,'MAUC':'ventsCPA'])] +\n        [evalResults.loc[f, 'Date'].strftime('%Y-%m-%d %H:%M (UTC+0)')])\n      text += '</td></tr>\\n'\n\n  text += '</tbody>\\n</table>'\n\n  with open(htmlFile, \"w\") as f:\n    f.write(text)\n\ndef downloadLeaderboardSubmissions():\n  htmlFile = '%sTable.html' % tag\n  dropboxRemoteFolder = '/ProAD/public_html'\n  uploadsFldRemote = '/ProAD/uploads'\n  ldbSubmissionsFld = 'leaderboardSubmissions'\n\n\n  ldbDropbox = DropboxObj()\n\n  fileListAll = ldbDropbox.list_folder(uploadsFldRemote, '/')\n  fileListLdb = [x for x in fileListAll.keys() if x.startswith('TADPOLE_Submission_%s' % tag)]\n  fileListLdb.sort()\n  print('fileListLdb ', fileListLdb)\n  os.system('mkdir -p %s' % ldbSubmissionsFld)\n  nrEntries = len(fileListLdb)\n\n  teamNames = [f.split('.')[0][len('TADPOLE_Submission_%s_' % tag):] for f in fileListLdb]\n\n  evalResFile = '%s/evalResAll.npz' % ldbSubmissionsFld\n\n  # entriesList = [0,1,2]\n  tableColumns = ('TEAMNAME', 'RANK' , 'MAUC', 'BCA',\n      'adasMAE', 'ventsMAE', 'adasWES', 'ventsWES', 'adasCPA', 'ventsCPA', 'Date')\n\n  if args.runPart[0] == 'R':\n    if args.fast:\n      # load submissions already evaluated and only evaluate the new ones\n      dataStruct = pickle.load(open(evalResFile, 'rb'))\n      evalResults = dataStruct['evalResults']\n      fileDatesRemote = dataStruct['fileDatesRemote']\n      entriesList = [e for e,f in enumerate(teamNames) if (evalResults['TEAMNAME'].str.contains(f).sum() == 0)]\n      nanSeries = pd.DataFrame(np.nan, index=range(len(entriesList)), columns=tableColumns)\n      nrEntriesSoFar = evalResults.shape[0]\n      evalResults = pd.concat([evalResults, nanSeries], ignore_index=True)\n      print('teamNames', teamNames)\n      print('entriesList', entriesList)\n      print('evalResults', evalResults)\n      # print(adsa)\n    else:\n      evalResults = pd.DataFrame(np.nan, index=range(nrEntries), columns=tableColumns)\n      
fileDatesRemote = []\n      entriesList = range(nrEntries)\n      nrEntriesSoFar = 0\n\n    lb4Df = pd.read_csv('../data/TADPOLE_LB4.csv')\n    lb4Df = lb4Df[lb4Df['LB4'] == 1] # only keep the LB4 entries\n    lb4Df.reset_index(drop=True, inplace=True)\n    indexInTable = 0\n    entryToAddIndex = nrEntriesSoFar\n    for f in entriesList:\n      fileName = fileListLdb[f]\n      teamName = teamNames[f]\n      # print('teamname ', teamName)\n      remotePath = '%s/%s' % (uploadsFldRemote, fileName)\n      localPath = '%s/%s' % (ldbSubmissionsFld, fileName)\n      ldbDropbox.download(localPath, remotePath)\n\n      metadataFileRemote = ldbDropbox.dbx.files_get_metadata(remotePath)\n      print(metadataFileRemote)\n      #if 'LR_Eman' in fileName:\n      #  asd\n\n      print('Evaluating %s' % fileName)\n      forecastDf = pd.read_csv(localPath)\n      try:\n        evalResults.loc[entryToAddIndex, ['MAUC', 'BCA',\n        'adasMAE', 'ventsMAE', 'adasWES', 'ventsWES', 'adasCPA', 'ventsCPA']] = \\\n          evalOneSubmission.evalOneSub(lb4Df, forecastDf)\n        evalResults.loc[entryToAddIndex, 'TEAMNAME'] = teamName\n        evalResults.loc[entryToAddIndex, 'Date'] = metadataFileRemote.server_modified\n\n      except Exception:\n        print('Error while processing submission %s' % fileName)\n\n\n      # if not np.isnan(evalResults['MAUC'].iloc[f]):\n\n      entryToAddIndex += 1\n\n\n\n    nanMask = np.isnan(evalResults['MAUC'])\n    evalResults = evalResults[np.logical_not(nanMask)]\n    evalResults.reset_index(drop = True, inplace = True)\n\n    # # compute the ranks using MAUC\n    # rankOrder = np.argsort(evalResults.as_matrix(columns = ['MAUC']).reshape(-1))[::-1] # sort them by MAUC\n    # rankOrder += 1 # make them start from 1\n    # print('ranks', evalResults['MAUC'], rankOrder, evalResults.as_matrix(columns = ['MAUC']).reshape(-1))\n    # for f in range(evalResults.shape[0]):\n    #   evalResults.loc[f, 'RANK'] = rankOrder[f]\n\n    dataStruct = dict(evalResults=evalResults, fileDatesRemote=fileDatesRemote)\n    pickle.dump(dataStruct, open(evalResFile, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n  else:\n    dataStruct = pickle.load(open(evalResFile, 'rb'))\n    fileDatesRemote = dataStruct['fileDatesRemote']\n    evalResults = dataStruct['evalResults']\n\n  rankMAUC = rankdata(rankdata(-evalResults['MAUC'].to_numpy(), method='average'), method='average')\n  rankADAS = rankdata(rankdata(evalResults['adasMAE'].to_numpy(), method='average'), method='average')\n  rankVENTS = rankdata(rankdata(evalResults['ventsMAE'].to_numpy(), method='average'), method='average')\n\n\n  print('rankMAUC', rankMAUC)\n  print('rankADAS', rankADAS)\n  print('rankVENTS', rankVENTS)\n\n  rankSum = rankMAUC + rankADAS + rankVENTS\n\n  rankOrder = rankdata(rankSum, method='average') # average the summed ranks to get the final rank\n  for f in range(evalResults.shape[0]):\n    evalResults.loc[f, 'RANK'] = rankOrder[f]\n\n  # print('evalResults before\\n', evalResults)\n\n  evalResults = evalResults.sort_values(by=['MAUC', 'BCA'],ascending=False)\n  evalResults = evalResults.reset_index(drop=True)\n\n  print('evalResults after\\n', evalResults)\n\n  htmlFileFullPathRemote = '%s/%s' % (dropboxRemoteFolder, htmlFile)\n  htmlFileFullPathLocal = '%s/%s' % (ldbSubmissionsFld, htmlFile)\n  writeHTMLtable(evalResults, htmlFileFullPathLocal, fileListLdb)\n  ldbDropbox.upload(htmlFileFullPathLocal, htmlFileFullPathRemote)\n\nif __name__ == '__main__':\n  downloadLeaderboardSubmissions()\n" ]
[ [ "numpy.logical_not", "scipy.stats.rankdata", "numpy.isnan", "pandas.read_csv" ] ]
le3t/ko-repo
[ "50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5" ]
[ "data-science/scikit-learn/playML/SimpleLinearRegression.py" ]
[ "import numpy as np\n# from .metrics import r2_score\n\n\nclass SimpleLinearRegression1:\n\n def __init__(self):\n \"\"\"初始化Simple Linear Regression 模型\"\"\"\n self.a_ = None\n self.b_ = None\n\n def fit(self, x_train, y_train):\n \"\"\"根据训练数据集x_train, y_train训练Simple Linear Regression模型\"\"\"\n assert x_train.ndim == 1, \\\n \"Simple Linear Regression can only solve single feature training data.\"\n assert len(x_train) == len(y_train), \\\n \"the size of x_train must be equal to the size of y_train\"\n\n x_mean = np.mean(x_train)\n y_mean = np.mean(y_train)\n\n num = 0.0\n d = 0.0\n for x, y in zip(x_train, y_train):\n num += (x - x_mean) * (y - y_mean)\n d += (x - x_mean) ** 2\n\n self.a_ = num / d\n self.b_ = y_mean - self.a_ * x_mean\n return self\n\n def predict(self, x_predict):\n \"\"\"给定待预测数据集x_predict,返回表示x_predictr的结果向量\"\"\"\n assert x_predict.ndim == 1, \\\n \"Simple Linear Regression can only solve single feature training data.\"\n assert self.a_ is not None and self.b_ is not None, \\\n \"must fit before predict!\"\n return np.array([self._predict(x) for x in x_predict])\n\n def _predict(self, x_single):\n \"\"\"给定单个待预测数据x_single, 返回x_single的预测结果值\"\"\"\n return self.a_ * x_single + self.b_\n\n def __repr__(self):\n return \"SimpleLinearRegression1()\"\n\n\nclass SimpleLinearRegression2:\n\n def __init__(self):\n \"\"\"初始化Simple Linear Regression 模型\"\"\"\n self.a_ = None\n self.b_ = None\n\n def fit(self, x_train, y_train):\n \"\"\"根据训练数据集x_train, y_train训练Simple Linear Regression模型\"\"\"\n assert x_train.ndim == 1, \\\n \"Simple Linear Regression can only solve single feature training data.\"\n assert len(x_train) == len(y_train), \\\n \"the size of x_train must be equal to the size of y_train\"\n\n x_mean = np.mean(x_train)\n y_mean = np.mean(y_train)\n\n num = (x_train - x_mean).dot(y_train - y_mean)\n d = (x_train - x_mean).dot(x_train - x_mean)\n\n self.a_ = num / d\n self.b_ = y_mean - self.a_ * x_mean\n return self\n\n def predict(self, x_predict):\n \"\"\"给定待预测数据集x_predict,返回表示x_predictr的结果向量\"\"\"\n assert x_predict.ndim == 1, \\\n \"Simple Linear Regression can only solve single feature training data.\"\n assert self.a_ is not None and self.b_ is not None, \\\n \"must fit before predict!\"\n return np.array([self._predict(x) for x in x_predict])\n\n def _predict(self, x_single):\n \"\"\"给定单个待预测数据x_single, 返回x_single的预测结果值\"\"\"\n return self.a_ * x_single + self.b_\n\n # def score(self, x_test, y_test):\n # \"\"\"根据测试数据集 x_test 和 y_test 确定当前模型的准确度\"\"\"\n #\n # y_predict = self.predict(x_test)\n # return r2_score(y_test, y_predict)\n\n def __repr__(self):\n return \"SimpleLinearRegression2()\"\n" ]
[ [ "numpy.mean" ] ]
megvii-research/GyroFlow
[ "776ad1e30253d77281b46a8bc7b95527b254d05f" ]
[ "dataset/data_loader.py" ]
[ "# -*- coding: utf-8 -*-\n# This repo is licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport logging\nimport os\nimport cv2\nimport glob\nimport bisect\n\nimport numpy as np\nimport megengine.functional as F\n\nfrom megengine.data import DataLoader\nfrom megengine.data.dataset import Dataset\nfrom megengine.data.sampler import RandomSampler, SequentialSampler\n\nfrom dataset.transformations import homo_to_flow, fetch_spatial_transform, fetch_input_transform\n\n_logger = logging.getLogger(__name__)\n\n\nclass ConcatDataset(Dataset):\n def __init__(self, datasets) -> None:\n self.datasets = list(datasets)\n\n def __getitem__(self, index):\n cumsum = np.cumsum([len(d) for d in self.datasets])\n idx_dataset = bisect.bisect_right(cumsum, index)\n offset = cumsum[idx_dataset - 1] if idx_dataset > 0 else 0\n return self.datasets[idx_dataset][index - offset]\n\n def __len__(self):\n return sum(len(d) for d in self.datasets)\n\n\nclass BaseDataset(Dataset):\n def __init__(self, input_transform, spatial_transform):\n self.input_transform = input_transform\n self.spatial_transform = spatial_transform\n\n self.samples = self.collect_samples()\n\n def collect_samples(self):\n files = glob.glob(\"dataset/GOF_Train/sample*\")\n return files\n\n def resize_flow(self, inputs, target_as, isRate=False):\n h, w, _ = target_as.shape\n h_, w_, _ = inputs.shape\n res = cv2.resize(inputs, (w, h), interpolation=cv2.INTER_LINEAR)\n if isRate:\n u_scale = (w / w_)\n v_scale = (h / h_)\n res[:, :, 0] *= u_scale\n res[:, :, 1] *= v_scale\n return res\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n file = self.samples[idx]\n frame_path_1 = os.path.join(file, \"img1.png\")\n frame_path_2 = os.path.join(file, \"img2.png\")\n\n gyro_homo_path = os.path.join(file, \"gyro_homo.npy\")\n gyro_homo = np.load(gyro_homo_path)\n\n try:\n imgs = [cv2.imread(i).astype(np.float32) for i in [frame_path_1, frame_path_2]]\n except Exception as e:\n print(frame_path_1 + \" \" + frame_path_2)\n raise e\n\n # gyro_homo is the homography from img1 to img2\n gyro_filed = homo_to_flow(np.expand_dims(gyro_homo, 0), H=600, W=800).squeeze()\n\n if self.spatial_transform is not None:\n imgs.append(gyro_filed)\n data = self.spatial_transform(imgs)\n imgs, gyro_filed = data[:2], data[-1]\n gyro_filed = gyro_filed.transpose(2, 0, 1)\n else:\n dummy_data = np.zeros([512, 640, 2])\n imgs = [cv2.resize(i, (640, 512)) for i in imgs]\n gyro_filed = self.resize_flow(gyro_filed, dummy_data, True).transpose(2, 0, 1)\n\n if self.input_transform:\n imgs_it = [self.input_transform.apply(i) for i in imgs]\n\n ret = {\"img{}\".format(i + 1): v for i, v in enumerate(imgs_it)}\n ret[\"gyro_field\"] = gyro_filed\n return ret\n\n\nclass TestDataset(Dataset):\n def __init__(self, benchmark_path, input_transform):\n self.input_transform = input_transform\n\n self.samples = np.load(benchmark_path, allow_pickle=True)\n\n def __len__(self):\n return len(self.samples)\n\n def resize_flow(self, inputs, target_as, isRate=False):\n h, w, _ = target_as.shape\n h_, w_, _ = inputs.shape\n res = cv2.resize(inputs, (w, h), interpolation=cv2.INTER_LINEAR)\n if isRate:\n u_scale = (w / w_)\n v_scale = (h / h_)\n res[:, :, 0] *= u_scale\n res[:, 
:, 1] *= v_scale\n        return res\n\n    def __getitem__(self, idx):\n        dummy_data = np.zeros([512, 640, 2])\n\n        imgs = [self.samples[idx][\"img1\"], self.samples[idx][\"img2\"]]\n\n        gyro_homo = self.samples[idx][\"homo\"]\n\n        gt_flow = self.samples[idx][\"gt_flow\"]\n\n        split = self.samples[idx][\"split\"]\n\n        gyro_field = homo_to_flow(np.expand_dims(gyro_homo, 0), H=600, W=800).squeeze()\n\n        imgs = [cv2.resize(i, (640, 512)) for i in imgs]\n\n        gt_flow = self.resize_flow(gt_flow, dummy_data, True).transpose(2, 0, 1)\n        gyro_field = self.resize_flow(gyro_field, dummy_data, True).transpose(2, 0, 1)\n\n        if self.input_transform:\n            imgs_it = [F.transpose(i, (2, 0, 1)) for i in imgs]\n\n        ret = {\"img{}\".format(i + 1): v for i, v in enumerate(imgs_it)}\n\n        ret[\"gyro_field\"] = gyro_field\n        ret[\"gt_flow\"] = gt_flow\n        ret[\"label\"] = split\n        ret[\"rain_label\"] = split\n        return ret\n\n\ndef fetch_dataloader(params):\n    input_transform = fetch_input_transform()\n    spatial_transform = fetch_spatial_transform(params)\n\n    benchmark_path_gof_clean = \"dataset/GOF_Clean.npy\"\n    benchmark_path_gof_final = \"dataset/GOF_Final.npy\"\n\n    if params.dataset_type == \"GOF\":\n        train_ds = BaseDataset(input_transform, spatial_transform)\n        val_ds = TestDataset(benchmark_path_gof_clean, input_transform)\n        test_ds = ConcatDataset(\n            [TestDataset(benchmark_path_gof_clean, input_transform),\n             TestDataset(benchmark_path_gof_final, input_transform)])\n\n    dataloaders = {}\n    # add default train data loader\n    train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)\n    train_dl = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)\n    dataloaders[\"train\"] = train_dl\n\n    # choose the val or test data loader for evaluation\n    for split in [\"val\", \"test\"]:\n        if split in params.eval_type:\n            if split == \"val\":\n                val_sampler = SequentialSampler(val_ds, batch_size=params.eval_batch_size)\n                dl = DataLoader(val_ds, val_sampler, num_workers=params.num_workers)\n            elif split == \"test\":\n                test_sampler = SequentialSampler(test_ds, batch_size=params.eval_batch_size)\n                dl = DataLoader(test_ds, test_sampler, num_workers=params.num_workers)\n            else:\n                raise ValueError(\"Unknown eval_type in params, should be in [val, test]\")\n            dataloaders[split] = dl\n        else:\n            dataloaders[split] = None\n\n    return dataloaders\n" ]
[ [ "numpy.expand_dims", "numpy.load", "numpy.zeros" ] ]
lucky-luk3/msticpy
[ "623f726f15fa35dafae7e1e65df91b7601456002" ]
[ "msticpy/vis/entity_graph_tools.py" ]
[ "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"Creates an entity graph for an Azure Sentinel Incident.\"\"\"\nfrom datetime import datetime\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\nfrom bokeh.io import output_notebook, show\nfrom bokeh.layouts import column\nfrom bokeh.models import LayoutDOM\nfrom bokeh.plotting import figure, from_networkx\nfrom bokeh.models import Circle, HoverTool, Label\n\nfrom .._version import VERSION\nfrom ..common.exceptions import MsticpyUserError\nfrom ..datamodel.entities import Entity\nfrom ..datamodel.entities.alert import Alert\nfrom ..datamodel.soc.incident import Incident\nfrom ..nbtools.security_alert import SecurityAlert\nfrom ..nbtools.timeline import display_timeline\nfrom ..nbtools.timeline_duration import display_timeline_duration\n\n__version__ = VERSION\n__author__ = \"Pete Bryan\"\n\nreq_alert_cols = [\"DisplayName\", \"Severity\", \"AlertType\"]\nreq_inc_cols = [\"id\", \"name\", \"properties.severity\"]\n\n\nclass EntityGraph:\n \"\"\"Create a graph for visualizing and tracking links between entities.\"\"\"\n\n def __init__(\n self,\n entity: Union[Incident, Alert, pd.DataFrame, pd.Series, Entity, SecurityAlert],\n ):\n \"\"\"\n Create a new instance of the entity graph.\n\n Parameters\n ----------\n entity : Union[Incident, Alert, pd.DataFrame, pd.Series, Entity, SecurityAlert]\n The initial item to add to the graph.\n Can be an Incident, Alert, SecurityAlert or other Entity\n\n \"\"\"\n output_notebook()\n self.alertentity_graph = nx.Graph(id=\"IncidentGraph\")\n if isinstance(entity, (Incident, Alert)):\n self._add_incident_or_alert_node(entity)\n elif isinstance(entity, pd.DataFrame):\n self.add_incident(entity)\n elif isinstance(entity, pd.Series):\n self.add_incident(entity.to_frame().T)\n elif isinstance(entity, Entity):\n self._add_entity_node(entity)\n elif isinstance(entity, SecurityAlert):\n entity = Alert(entity) # type: ignore\n self._add_incident_or_alert_node(entity)\n\n def plot(self, hide: bool = False, timeline: bool = False, **kwargs) -> LayoutDOM:\n \"\"\"\n Plot a graph of entities.\n\n Parameters\n ----------\n hide : bool, optional\n Set true to not display the graphic, by default False\n timeline : bool, optional\n Set to True to display a timeline, by default False\n node_size : int, optional\n Size of the nodes in pixels, by default 25\n font_size : int, optional\n Font size for node labels, by default 10\n Can be an integer (point size) or a string (e.g. 
\"10pt\")\n width : int, optional\n Width in pixels, by default 800\n height : int, optional\n Image height (the default is 800)\n scale : int, optional\n Position scale (the default is 2)\n\n Returns\n -------\n LayoutDOM\n A Bokeh figure object\n\n \"\"\"\n if timeline:\n return self._plot_with_timeline(hide=hide, **kwargs)\n return self._plot_no_timeline(hide=hide, **kwargs)\n\n def _plot_no_timeline(self, hide: bool = False, **kwargs) -> LayoutDOM:\n \"\"\"\n Plot a graph of entities.\n\n Parameters\n ----------\n hide : bool, optional\n Set true to not display the graphic, by default False\n\n Returns\n -------\n LayoutDOM\n A Bokeh figure object\n\n \"\"\"\n return plot_entitygraph(self.alertentity_graph, hide=hide, **kwargs)\n\n def _plot_with_timeline(self, hide: bool = False, **kwargs) -> LayoutDOM:\n \"\"\"\n Plot the entity graph with a timeline.\n\n Parameters\n ----------\n hide : bool, optional\n Set true to not display the graphic, by default False\n\n Returns\n -------\n LayoutDOM\n A Bokeh figure object\n\n \"\"\"\n timeline = None\n tl_df = self.to_df()\n tl_type = \"duration\"\n if len(tl_df[\"EndTime\"].unique()) == 1 and not tl_df[\"EndTime\"].unique()[0]:\n tl_type = \"discreet\"\n if (\n len(tl_df[\"TimeGenerated\"].unique()) == 1\n and not tl_df[\"TimeGenerated\"].unique()[0]\n ):\n print(\"No timestamps available to create timeline\")\n return self._plot_no_timeline(timeline=False, hide=hide, **kwargs)\n tl_df[\"TimeGenerated\"] = pd.to_datetime(tl_df[\"TimeGenerated\"], utc=True)\n tl_df[\"StartTime\"] = pd.to_datetime(tl_df[\"StartTime\"], utc=True)\n tl_df[\"EndTime\"] = pd.to_datetime(tl_df[\"EndTime\"], utc=True)\n graph = self._plot_no_timeline(hide=True, **kwargs)\n if tl_type == \"duration\":\n timeline = display_timeline_duration(\n tl_df.dropna(subset=[\"TimeGenerated\"]),\n group_by=\"Name\",\n title=\"Entity Timeline\",\n time_column=\"StartTime\",\n end_time_column=\"EndTime\",\n source_columns=[\"Name\", \"Description\", \"Type\", \"TimeGenerated\"],\n hide=True,\n width=800,\n )\n elif tl_type == \"discreet\":\n timeline = display_timeline(\n tl_df.dropna(subset=[\"TimeGenerated\"]),\n group_by=\"Type\",\n title=\"Entity Timeline\",\n time_column=\"TimeGenerated\",\n source_columns=[\"Name\", \"Description\", \"Type\", \"TimeGenerated\"],\n hide=True,\n width=800,\n )\n plot_layout = column(graph, timeline) if timeline else graph\n if not hide:\n show(plot_layout)\n return plot_layout\n\n def add_entity(self, ent: Entity, attached_to: str = None):\n \"\"\"\n Add an entity to the graph.\n\n Parameters\n ----------\n ent : Entity\n The entity object to add the graph\n attached_to : str, optional\n The name of the node to attach the entity to, by default None\n\n \"\"\"\n self._add_entity_node(ent, attached_to)\n\n def add_incident(self, incident: Union[Incident, Alert, pd.DataFrame]):\n \"\"\"\n Add another incident or set of incidents to the graph.\n\n Parameters\n ----------\n incident : Union[Incident, Alert, pd.DataFrame]\n This can be an alert, and incident or a DataFrame of alerts or incidents\n\n \"\"\"\n inc = None\n if isinstance(incident, pd.DataFrame):\n for row in incident.iterrows():\n if \"name\" in row[1]:\n inc = Incident(src_event=row[1])\n elif \"AlertName\" in row[1]:\n inc = Alert(src_event=row[1]) # type: ignore\n self._add_incident_or_alert_node(inc)\n else:\n self._add_incident_or_alert_node(incident)\n\n def add_note(\n self,\n name: str,\n description: Optional[str] = None,\n attached_to: Union[str, List] = None,\n ):\n 
\"\"\"\n Add a node to the graph representing a note or comment.\n\n Parameters\n ----------\n name : str\n The name of the node to add\n description : Optional[str], optional\n A description of the note, by default None\n attached_to : Union[str, List], optional\n What existing nodes on the graph to attach it the note to, by default None\n user: str, optional\n What user to associate the note with\n\n \"\"\"\n self.alertentity_graph.add_node(\n name,\n Name=name,\n Description=description,\n Type=\"analystnote\",\n TimeGenerated=datetime.now(),\n )\n if attached_to:\n if isinstance(attached_to, str):\n attached_to = [attached_to]\n for link in attached_to:\n self.add_link(name, link)\n\n def add_link(self, source: str, target: str):\n \"\"\"\n Add a link between 2 nodes on the graph.\n\n Parameters\n ----------\n source : str\n Name of node to link from\n target : str\n Name of node to link to\n\n Raises\n ------\n MsticpyUserError\n If nodes aren't present in the graph\n\n\n \"\"\"\n # Check names are present\n if (\n source in self.alertentity_graph.nodes()\n and target in self.alertentity_graph.nodes()\n ):\n self.alertentity_graph.add_edge(source, target)\n else:\n missing = [\n name\n for name in [source, target]\n if name not in self.alertentity_graph.nodes()\n ]\n raise MsticpyUserError(title=f\"Node(s) {missing} not found in graph\")\n\n def remove_link(self, source: str, target: str):\n \"\"\"\n Remove a link between 2 nodes on the graph.\n\n Parameters\n ----------\n source : str\n Name of node to remove link from\n target : str\n name of node to remove link to\n\n Raises\n ------\n MsticpyUserError\n If edge isn't present in the graph\n\n \"\"\"\n if (\n source in self.alertentity_graph.nodes()\n and target in self.alertentity_graph.nodes()\n and self.alertentity_graph.has_edge(source, target)\n ):\n self.alertentity_graph.remove_edge(source, target)\n else:\n raise MsticpyUserError(\n title=f\"No edge exists between {source} and {target}\"\n )\n\n def remove_node(self, name: str):\n \"\"\"\n Remove a node from the graph.\n\n Parameters\n ----------\n name : str\n The name of the node to remove.\n\n \"\"\"\n # Check node is present\n if name in self.alertentity_graph.nodes():\n self.alertentity_graph.remove_node(name)\n else:\n raise MsticpyUserError(f\"Node named {name} not found\")\n\n def to_df(self) -> pd.DataFrame:\n \"\"\"Generate a dataframe of nodes in the graph.\"\"\"\n names = [node[1][\"Name\"] for node in self.alertentity_graph.nodes.items()]\n descs = [\n node[1][\"Description\"] for node in self.alertentity_graph.nodes.items()\n ]\n types = [node[1][\"Type\"] for node in self.alertentity_graph.nodes.items()]\n times = [\n node[1][\"TimeGenerated\"] if \"TimeGenerated\" in node[1] else None\n for node in self.alertentity_graph.nodes.items()\n ]\n starttimes = [\n node[1][\"StartTime\"] if \"StartTime\" in node[1] else node[1][\"TimeGenerated\"]\n for node in self.alertentity_graph.nodes.items()\n ]\n endtimes = [\n node[1][\"EndTime\"] if \"EndTime\" in node[1] else None\n for node in self.alertentity_graph.nodes.items()\n ]\n tl_df = pd.DataFrame(\n {\n \"Name\": names,\n \"Description\": descs,\n \"Type\": types,\n \"TimeGenerated\": times,\n \"EndTime\": endtimes,\n \"StartTime\": starttimes,\n }\n )\n tl_df.replace(\"None\", np.NaN, inplace=True)\n return tl_df\n\n def _add_incident_or_alert_node(self, incident: Union[Incident, Alert, None]):\n \"\"\"Check what type of entity is passed in and creates relevent graph.\"\"\"\n if isinstance(incident, Incident):\n 
self._add_incident_node(incident)\n        elif isinstance(incident, Alert):\n            self._add_alert_node(incident)\n\n    def _add_entity_node(self, ent, attached_to=None):\n        \"\"\"Add an Entity to the graph.\"\"\"\n        self.alertentity_graph = nx.compose(self.alertentity_graph, ent.to_networkx())\n        if attached_to:\n            self.add_link(attached_to, ent.name_str)\n\n    def _add_alert_node(self, alert, incident_name=None):\n        \"\"\"Add an alert entity to the graph.\"\"\"\n        self.alertentity_graph = nx.compose(self.alertentity_graph, alert.to_networkx())\n        if alert[\"Entities\"]:\n            for ent in alert[\"Entities\"]:\n                self._add_entity_node(ent, alert.name_str)\n        if incident_name:\n            self.add_link(incident_name, alert.name_str)\n\n    def _add_incident_node(self, incident):\n        \"\"\"Add an incident entity to the graph.\"\"\"\n        self.alertentity_graph = nx.compose(\n            self.alertentity_graph, incident.to_networkx()\n        )\n        if incident.Alerts:\n            for alert in incident.Alerts:\n                self._add_alert_node(alert, incident.name_str)\n        if incident.Entities:\n            entities = _dedupe_entities(incident.Alerts, incident.Entities)\n            for ent in entities:\n                self._add_entity_node(ent, incident.name_str)\n\n    def _add_entity_edges(self, edges: set, attached_to: str):\n        \"\"\"Check entity edges and add them.\"\"\"\n        for edge in edges:\n            if isinstance(edge.target, Entity):\n                if not self.alertentity_graph.has_node(edge.target.name_str):\n                    self._add_entity_node(edge.target)\n                try:\n                    self.add_link(attached_to, edge.target.name_str)\n                except MsticpyUserError:\n                    pass\n\n    @property\n    def graph(self) -> nx.Graph:\n        \"\"\"Return the raw NetworkX graph.\"\"\"\n        return self.alertentity_graph\n\n\ndef _dedupe_entities(alerts, ents) -> list:\n    \"\"\"Deduplicate incident and alert entities.\"\"\"\n    alrt_ents = []\n    for alrt in alerts:\n        if alrt[\"Entities\"]:\n            alrt_ents += [ent.__hash__() for ent in alrt[\"Entities\"]]\n    # iterate over a copy: removing items from a list while iterating it skips elements\n    for ent in list(ents):\n        if ent.__hash__() in alrt_ents:\n            ents.remove(ent)\n    return ents\n\n\ndef plot_entitygraph(  # pylint: disable=too-many-locals\n    entity_graph: nx.Graph,\n    node_size: int = 25,\n    font_size: Union[int, str] = 10,\n    height: int = 800,\n    width: int = 800,\n    scale: int = 2,\n    hide: bool = False,\n) -> figure:\n    \"\"\"\n    Plot entity graph with Bokeh.\n\n    Parameters\n    ----------\n    entity_graph : nx.Graph\n        The entity graph as a networkX graph\n    node_size : int, optional\n        Size of the nodes in pixels, by default 25\n    font_size : int, optional\n        Font size for node labels, by default 10\n        Can be an integer (point size) or a string (e.g. \"10pt\")\n    width : int, optional\n        Width in pixels, by default 800\n    height : int, optional\n        Image height (the default is 800)\n    scale : int, optional\n        Position scale (the default is 2)\n    hide : bool, optional\n        Don't show the plot, by default False. 
If True, just\n return the figure.\n\n Returns\n -------\n bokeh.plotting.figure\n The network plot.\n\n \"\"\"\n color_map = {\n \"incident\": \"red\",\n \"alert\": \"orange\",\n \"alerts\": \"orange\",\n \"securityalert\": \"orange\",\n \"analystnote\": \"blue\",\n }\n output_notebook()\n font_pnt = f\"{font_size}pt\" if isinstance(font_size, int) else font_size\n node_attrs = {}\n for node, attrs in entity_graph.nodes(data=True):\n try:\n color = color_map.get(attrs[\"Type\"].lower(), \"green\")\n except KeyError:\n color = \"green\"\n node_attrs.update({node: color})\n\n nx.set_node_attributes(entity_graph, node_attrs, \"node_color\")\n\n plot = figure(\n title=\"Alert Entity graph\",\n x_range=(-3, 3),\n y_range=(-3, 3),\n width=width,\n height=height,\n )\n\n plot.add_tools(\n HoverTool(\n tooltips=[\n (\"Name\", \"@Name\"),\n (\"Description\", \"@Description\"),\n (\"Type\", \"@Type\"),\n ]\n )\n )\n\n graph_renderer = from_networkx(\n entity_graph, nx.spring_layout, scale=scale, center=(0, 0)\n )\n\n graph_renderer.node_renderer.glyph = Circle(\n size=node_size, fill_color=\"node_color\", fill_alpha=0.5\n )\n # pylint: disable=no-member\n plot.renderers.append(graph_renderer)\n\n # Create labels\n for name, pos in graph_renderer.layout_provider.graph_layout.items():\n label = Label(\n x=pos[0],\n y=pos[1],\n x_offset=5,\n y_offset=5,\n text=name,\n text_font_size=font_pnt,\n )\n plot.add_layout(label)\n # pylint: enable=no-member\n if not hide:\n show(plot)\n return plot\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
Prakash19921206/cv-tricks.com
[ "555ef027fed42129e8f57caf2de195b1034d987c" ]
[ "Tensorflow-tutorials/Keras-Tensorflow-tutorial/2_run_vgg.py" ]
[ "import numpy as np\nfrom keras import applications\nfrom keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom keras.preprocessing import image\n#import matplotlib.pyplot as plt\n#import matplotlib.image as mpimg\n\n# build the VGG16 network\nmodel = applications.VGG16(weights='imagenet')\nimg = image.load_img('pexels-photo-280207.jpeg', target_size=(224, 224))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\npreds = model.predict(x)\nall_results = decode_predictions(preds)\nfor results in all_results: \n for result in results:\n print('Probability %0.2f%% => [%s]' % (100*result[2], result[1]))\n #result_text= 'Probability %0.2f%% => [%s]' % (100*result[2], result[1])\n #break\n#plt.figure(num=1,figsize=(8, 6), dpi=80)\n#plt.imshow(img)\n#plt.text(120,100,result_text,horizontalalignment='center', verticalalignment='center',fontsize=16,color='black')\n#plt.axis('off')\n#plt.show()\n" ]
[ [ "numpy.expand_dims" ] ]
Ysp9714/SlowFastNet-keras
[ "75dc4e75158e35d46b0ae1e9130fd18c1295eec6" ]
[ "slow_fast_net.py" ]
[ "import os\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import Conv3D\nimport numpy as np\n\n\ndef swish(x, beta=1):\n return x * tf.nn.sigmoid(beta * x)\n\n\ndef mish(x):\n return x * tf.nn.tanh(tf.nn.softplus(x))\n\n\ntf.keras.utils.get_custom_objects().update({\"swish\": swish})\ntf.keras.utils.get_custom_objects().update({\"mish\": mish})\n\n\ndef auto_pad(inputs, kernel_size, data_format):\n\n \"\"\"\n This function replaces the padding implementation in original tensorflow.\n It also avoids negative dimension by automatically padding given the input kernel size (for each dimension).\n \"\"\"\n\n islist = isinstance(kernel_size, tuple)\n\n kernel_size = np.array(kernel_size)\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if islist:\n paddings = np.concatenate(\n [pad_beg[:, np.newaxis], pad_end[:, np.newaxis]], axis=1\n )\n paddings = [list(p) for p in paddings]\n else:\n paddings = [[pad_beg, pad_end]] * 3\n\n if data_format == \"channels_first\":\n padded_inputs = tf.pad(tensor=inputs, paddings=[[0, 0], [0, 0]] + paddings)\n else:\n padded_inputs = tf.pad(tensor=inputs, paddings=[[0, 0]] + paddings + [[0, 0]])\n return padded_inputs\n\n\nclass ConvXD(tf.keras.layers.Layer):\n def __init__(\n self,\n filters,\n kernel_size,\n strides,\n padding=\"valid\",\n use_bias=False,\n name=\"conv_3d\",\n data_format=\"channels_last\",\n **kwargs\n ):\n super(ConvXD, self).__init__(name=name, **kwargs)\n # self.name = name\n self.pad = False\n if isinstance(strides, list) or isinstance(strides, tuple):\n self.pad = any(np.array(strides) > 1)\n else:\n self.pad = strides > 1\n\n if self.pad:\n padding = \"valid\"\n else:\n padding = \"same\"\n\n self.conv = Conv3D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n use_bias=use_bias,\n )\n\n self.data_format = data_format\n self.kernel_size = kernel_size\n\n @tf.function\n def call(self, inputs, training=None):\n if self.pad:\n inputs = auto_pad(\n inputs=inputs,\n kernel_size=self.kernel_size,\n data_format=self.data_format,\n )\n\n outputs = self.conv(inputs)\n return outputs\n\n\nclass PreactBlock(tf.keras.layers.Layer):\n def __init__(self, activation=\"swish\", **kwargs):\n super(PreactBlock, self).__init__()\n self.batch_norm = layers.BatchNormalization(epsilon=1e-5)\n self.act = layers.Activation(activation)\n\n def get_config(self):\n return super().get_config()\n\n @tf.function\n def call(self, inputs, training=False):\n x = self.batch_norm(inputs, training)\n x = self.act(x)\n return x\n\n\nclass DataLayer(layers.Layer):\n def __init__(self, stride):\n super(DataLayer, self).__init__()\n self.stride = stride\n\n @tf.function\n def call(self, inputs):\n x = inputs[:, :: self.stride, :, :, :]\n return x\n\n def get_config(self):\n config = super(DataLayer, self).get_config()\n config.update({\"stride\": self.stride})\n return config\n\n\nclass InitBlock(tf.keras.layers.Layer):\n def __init__(self, filters, kernel_size, use_bias=False):\n super(InitBlock, self).__init__()\n self.conv3d = ConvXD(\n filters, kernel_size, strides=(1, 2, 2), padding=\"same\", use_bias=use_bias\n )\n self.maxpl3d = layers.MaxPool3D(\n pool_size=(1, 3, 3), strides=(1, 2, 2), padding=\"same\"\n )\n\n def get_config(self):\n return super().get_config()\n\n @tf.function\n def call(self, inputs, training=False):\n x = self.conv3d(inputs)\n x = self.maxpl3d(x)\n return x\n\n\nclass ResBlock(tf.keras.layers.Layer):\n def __init__(\n 
self,\n        filters,\n        time_kernel_size=1,\n        stride=1,\n        shortcut=None,\n        shortcut_stride=1,\n        use_bias=False,\n        **kwargs\n    ):\n        super(ResBlock, self).__init__()\n        self.conv3d1 = ConvXD(\n            filters, (time_kernel_size, 1, 1), 1, padding=\"same\", use_bias=use_bias\n        )\n        self.preact1 = PreactBlock()\n\n        self.conv3d2 = ConvXD(\n            filters, (1, 3, 3), (1, stride, stride), padding=\"same\", use_bias=use_bias\n        )\n        self.preact2 = PreactBlock()\n\n        self.conv3d3 = ConvXD(\n            filters * 4, 1, strides=1, padding=\"same\", use_bias=use_bias\n        )\n        self.conv3d3_batch = layers.BatchNormalization(epsilon=1e-5)\n\n        if shortcut:\n            self.shortcut = ConvXD(\n                filters * 4,\n                1,\n                (1, shortcut_stride, shortcut_stride),\n                padding=\"same\",\n                use_bias=use_bias,\n            )\n        else:\n            self.shortcut = None\n\n    @tf.function\n    def call(self, inputs, training=False):\n        x = self.conv3d1(inputs)\n        x = self.preact1(x, training)\n\n        x = self.conv3d2(x)\n        x = self.preact2(x, training)\n\n        x = self.conv3d3(x)\n        x = self.conv3d3_batch(x, training)\n\n        if self.shortcut:\n            x = layers.Add()([x, self.shortcut(inputs)])\n        else:\n            x = layers.Add()([x, inputs])\n\n        x = layers.Activation(activation=\"swish\")(x)\n\n        return x\n\n\nclass SlowBody(tf.keras.layers.Layer):\n    def __init__(self, stages, filters=64):\n        super(SlowBody, self).__init__()\n        self.stages = stages\n        self.res_blocks = []\n        self.concat = layers.Concatenate()\n\n        self.init_block = InitBlock(filters, (1, 7, 7))\n        for stage_num, conv_num in enumerate(stages):\n            for conv in range(conv_num):\n                time_kernel_size = 1 if stage_num < 1 else 3\n                shortcut = True if conv == 0 else None\n                if conv == 0:\n                    if stage_num == 0:\n                        self.res_blocks.append(\n                            ResBlock(\n                                filters,\n                                time_kernel_size=time_kernel_size,\n                                shortcut=shortcut,\n                                stride=1,\n                                shortcut_stride=1,\n                            )\n                        )\n                    else:\n                        self.res_blocks.append(\n                            ResBlock(\n                                filters,\n                                time_kernel_size=time_kernel_size,\n                                shortcut=shortcut,\n                                stride=2,\n                                shortcut_stride=2,\n                            )\n                        )\n                else:\n                    self.res_blocks.append(ResBlock(filters, shortcut=shortcut))\n            filters = filters * 2\n        self.global_avgpool3d = layers.GlobalAveragePooling3D()\n\n    @tf.function\n    def call(self, x, laterals, training=False):\n        num_res_block = 0\n\n        x = self.init_block(x)\n        for conv_num, lateral in zip(self.stages, laterals):\n            x = self.concat([x, lateral])\n            for _ in range(conv_num):\n                x = self.res_blocks[num_res_block](x, training)\n                num_res_block += 1\n        x = self.global_avgpool3d(x)\n\n        return x\n\n\nclass FastBody(tf.keras.layers.Layer):\n    def __init__(self, stages, filters: int = 8):\n        super(FastBody, self).__init__()\n        self.stages = stages\n        self.filters = filters\n\n        self.main_res_blocks = []\n        self.last_res_blocks = []\n        self.conv3s = []\n        self.first_lateral = ConvXD(\n            filters * 2,\n            kernel_size=(5, 1, 1),\n            strides=(8, 1, 1),\n            padding=\"same\",\n            use_bias=False,\n        )\n        self.init_block = InitBlock(filters, (5, 7, 7))\n        for stage_num, conv_num in enumerate(stages):\n            for conv in range(conv_num):\n                shortcut = True if conv == 0 else None\n                if conv == 0:\n                    if stage_num == 0:\n                        filters = filters * 1\n                        self.main_res_blocks.append(\n                            ResBlock(\n                                filters,\n                                time_kernel_size=3,\n                                shortcut=shortcut,\n                                shortcut_stride=1,\n                            )\n                        )\n                    else:\n                        filters = filters * 2\n                        self.main_res_blocks.append(\n                            ResBlock(\n                                filters,\n                                time_kernel_size=3,\n                                shortcut=shortcut,\n                                stride=2,\n                                shortcut_stride=2,\n                            )\n                        )\n                else:\n                    if stage_num == 0:\n                        self.main_res_blocks.append(\n                            ResBlock(filters, time_kernel_size=3, shortcut=None)\n                        )\n                    else:\n                        self.main_res_blocks.append(\n                            ResBlock(\n                                filters,\n                                
time_kernel_size=3,\n                                shortcut=None,\n                                shortcut_stride=2,\n                            )\n                        )\n\n            self.last_res_blocks.append(\n                ResBlock(filters, time_kernel_size=3, shortcut=None)\n            )\n            self.conv3s.append(\n                ConvXD(\n                    filters * 8,\n                    kernel_size=(5, 1, 1),\n                    strides=(8, 1, 1),\n                    padding=\"same\",\n                    use_bias=False,\n                )\n            )\n\n        self.global_avg_pool3d = layers.GlobalAveragePooling3D()\n\n    @tf.function\n    def call(self, x, training=False):\n        with tf.init_scope():\n            laterals = []\n\n        cnt_main_res_block = 0\n        cnt_last_res_block = 0\n        cnt_conv3s = 0\n        x = self.init_block(x)\n\n        first_lateral = self.first_lateral(x)\n        laterals.append(first_lateral)\n\n        for conv_num in self.stages:\n            for _ in range(conv_num):\n                x = self.main_res_blocks[cnt_main_res_block](x, training)\n                cnt_main_res_block += 1\n\n            x = self.last_res_blocks[cnt_last_res_block](x, training)\n            cnt_last_res_block += 1\n\n            lateral = self.conv3s[cnt_conv3s](x)\n            cnt_conv3s += 1\n\n            laterals.append(lateral)\n\n        x = self.global_avg_pool3d(x)\n\n        return x, laterals\n\n\nclass SlowFastNet(tf.keras.Model):\n    resnet = {\n        50: [3, 4, 6, 3],\n        101: [3, 4, 23, 3],\n        152: [3, 8, 36, 3],\n        200: [3, 24, 36, 3],\n    }\n\n    def __init__(self, input_shapes, resnet_size=50, outputs=3):\n        super(SlowFastNet, self).__init__()\n        self.input_shapes = input_shapes\n        self.fast_data_layer = DataLayer(2)\n        self.slow_data_layer = DataLayer(16)\n\n        self.fast_body = FastBody(\n            self.resnet[resnet_size], filters=input_shapes[0] // 8\n        )\n        self.slow_body = SlowBody(self.resnet[resnet_size], filters=input_shapes[0])\n\n        self.concat = layers.Concatenate()\n        self.dropout = layers.Dropout(0.6)\n        self.dense = layers.Dense(outputs, activation=\"softmax\")\n\n    @tf.function\n    def call(self, x, training=False, mask=None):\n        fast_inputs = self.fast_data_layer(x)\n        slow_inputs = self.slow_data_layer(x)\n        fast_x, laterals = self.fast_body(fast_inputs, training)\n        slow_x = self.slow_body(slow_inputs, laterals, training)\n\n        x = self.concat([slow_x, fast_x])\n        x = self.dropout(x)\n        x = self.dense(x)\n\n        return x\n\n    def build_graph(self):\n        # use the input_shapes tuple stored in __init__ (self.input_shape is\n        # undefined on a subclassed model before it is built)\n        x = layers.Input(self.input_shapes)\n        return tf.keras.Model(inputs=[x], outputs=self.call(x))\n\n" ]
[ [ "numpy.concatenate", "tensorflow.init_scope", "numpy.array", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.GlobalAveragePooling3D", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.utils.get_custom_objects", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Dense", "tensorflow.nn.softplus", "tensorflow.keras.layers.MaxPool3D", "tensorflow.pad", "tensorflow.nn.sigmoid", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Concatenate" ] ]
Y1fanHE/kdps
[ "c09810afb35d93018b9a7d7edb182e2f8f8a6049" ]
[ "pgsyn/gp/population.py" ]
[ "'''\nAuthor: He,Yifan\nDate: 2022-02-18 16:06:00\nLastEditors: He,Yifan\nLastEditTime: 2022-02-18 16:27:32\n'''\n\n\nfrom collections.abc import Sequence\nfrom bisect import insort_left\nimport numpy as np\nimport pickle\nfrom multiprocessing import Pool\nfrom functools import partial\n\nfrom pgsyn.gp.individual import Individual\nfrom pgsyn.gp.evaluation import Evaluator\nfrom pgsyn.tap import tap\n\n\ndef _eval_indiv(indiv: Individual, evalr: Evaluator, ):\n indiv.error_vector = evalr.evaluate(indiv.program)\n return indiv\n\n\nclass Population(Sequence):\n \"\"\"A sequence of Individuals kept in sorted order, with respect to their total errors.\"\"\"\n\n __slots__ = [\"unevaluated\", \"evaluated\"]\n\n def __init__(self, individuals: list = None):\n self.unevaluated = []\n self.evaluated = []\n\n if individuals is not None:\n for el in individuals:\n self.add(el)\n\n def __len__(self):\n return len(self.evaluated) + len(self.unevaluated)\n\n def __getitem__(self, key: int) -> Individual:\n if key < len(self.evaluated):\n return self.evaluated[key]\n return self.unevaluated[key - len(self.evaluated)]\n\n def add(self, individual: Individual):\n \"\"\"Add an Individual to the population.\"\"\"\n if individual.total_error is None:\n self.unevaluated.append(individual)\n else:\n insort_left(self.evaluated, individual)\n return self\n\n def best(self):\n \"\"\"Return the best n individual in the population.\"\"\"\n return self.evaluated[0]\n\n def best_n(self, n: int):\n \"\"\"Return the best n individuals in the population.\"\"\"\n return self.evaluated[:n]\n\n @tap\n def p_evaluate(self, evaluator_proxy, pool: Pool):\n \"\"\"Evaluate all unevaluated individuals in the population in parallel.\"\"\"\n func = partial(_eval_indiv, evalr=evaluator_proxy)\n for individual in pool.imap_unordered(func, self.unevaluated):\n insort_left(self.evaluated, individual)\n self.unevaluated = []\n\n @tap\n def evaluate(self, evaluator: Evaluator):\n \"\"\"Evaluate all unevaluated individuals in the population.\"\"\"\n for individual in self.unevaluated:\n individual = _eval_indiv(individual, evaluator)\n insort_left(self.evaluated, individual)\n self.unevaluated = []\n\n def all_error_vectors(self):\n \"\"\"2D array containing all Individuals' error vectors.\"\"\"\n return np.array([i.error_vector for i in self.evaluated])\n\n def all_total_errors(self):\n \"\"\"1D array containing all Individuals' total errors.\"\"\"\n return np.array([i.total_error for i in self.evaluated])\n\n def median_error(self):\n \"\"\"Median total error in the population.\"\"\"\n return np.median(self.all_total_errors())\n\n def error_diversity(self):\n \"\"\"Proportion of unique error vectors.\"\"\"\n return len(np.unique(self.all_error_vectors(), axis=0)) / float(len(self))\n\n def genome_diversity(self):\n \"\"\"Proportion of unique genomes.\"\"\"\n unq = set([pickle.dumps(i.genome) for i in self])\n return len(unq) / float(len(self))\n\n def program_diversity(self):\n \"\"\"Proportion of unique programs.\"\"\"\n unq = set([pickle.dumps(i.program.code) for i in self])\n return len(unq) / float(len(self))\n\n def mean_genome_length(self):\n \"\"\"Average genome length across all individuals.\"\"\"\n tot_gn_len = sum([len(i.genome) for i in self])\n return tot_gn_len / len(self)\n" ]
[ [ "numpy.array" ] ]
melodrivemusic/keras-to-tensorflow
[ "23029767de3da085e6ac5cc98c6018b9e4abbab2" ]
[ "k2tf/convert.py" ]
[ "\"\"\"\nThis script converts a .h5 Keras model into a Tensorflow .pb file.\n\nAttribution: This script was adapted from https://github.com/amir-abdi/keras_to_tensorflow\n\nMIT License\n\nCopyright (c) 2017 bitbionic\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nimport os.path as osp\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util, graph_io\n\n\ndef convertGraph(modelPath, outputPath, prefix, name):\n \"\"\"\n Converts an HD5F file to a .pb file for use with Tensorflow.\n\n Args:\n modelPath (str): path to the .h5 file\n outdir (str): path to the output directory\n prefix (str): the prefix of the output aliasing\n name (str):\n Returns:\n None\n \"\"\"\n\n keras = tf.keras\n load_model = keras.models.load_model\n K = keras.backend\n\n os.makedirs(outputPath, exist_ok=True)\n\n K.set_learning_phase(0)\n\n net_model = load_model(modelPath)\n net_model.summary()\n\n numOutputs = net_model.output.shape[1]\n\n # Alias the outputs in the model - this sometimes makes them easier to access in TF\n pred = [None] * numOutputs\n pred_node_names = [None] * numOutputs\n for i in range(numOutputs):\n pred_node_names[i] = prefix+\"_\"+str(i)\n pred[i] = tf.identity(net_model.output[i], name=pred_node_names[i])\n print(\"Output nodes names are: \", pred_node_names)\n\n sess = K.get_session()\n \n # Write the graph in human readable\n f = name + \".ascii\"\n tf.train.write_graph(sess.graph.as_graph_def(), outputPath, f, as_text=True)\n print(\"Saved the graph definition in ascii format at: \", osp.join(outputPath, f))\n\n # Write the graph in binary .pb file\n constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)\n graph_io.write_graph(constant_graph, outputPath, name, as_text=False)\n print(\"Saved the constant graph (ready for inference) at: \", osp.join(outputPath, name))\n\n" ]
[ [ "tensorflow.python.framework.graph_io.write_graph", "tensorflow.identity" ] ]
ShuangXieIrene/ssds.pytorch
[ "b5ec682a42c923afe964205b21448e9f141d55bc" ]
[ "ssds/core/config.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom ast import literal_eval\nimport os\nimport os.path as osp\nimport numpy as np\n\n\"\"\"config system.\nThis file specifies default config options. You should not\nchange values in this file. Instead, you should write a config file (in yaml)\nand use merge_cfg_from_file(yaml_file) to load it and override the default\noptions.\n\"\"\"\n\n\nclass AttrDict(dict):\n def __getattr__(self, name):\n if name in self.__dict__:\n return self.__dict__[name]\n elif name in self:\n return self[name]\n else:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n if name in self.__dict__:\n self.__dict__[name] = value\n else:\n self[name] = value\n\n\n__C = AttrDict()\n\ncfg = __C\n\n# ---------------------------------------------------------------------------- #\n# Model options\n# ---------------------------------------------------------------------------- #\n__C.MODEL = AttrDict()\n\n# Name of the base net used to extract the features\n__C.MODEL.NETS = \"vgg16\"\n\n# Name of the model used to detect boundingbox\n__C.MODEL.SSDS = \"ssd\"\n\n# Whether use half precision for the model. currently only inference support.\n__C.MODEL.HALF_PRECISION = True\n\n# image size for ssd\n__C.MODEL.IMAGE_SIZE = [300, 300]\n\n# number of the input images for the model\n__C.MODEL.NUM_IMAGES = 1\n\n# number of the class for the model\n__C.MODEL.NUM_CLASSES = 21\n\n# FEATURE_LAYER to extract the proposed bounding box,\n# the first dimension is the feature layer/type,\n# while the second dimension is feature map channel.\n__C.MODEL.FEATURE_LAYER = [[22, 34, \"S\", \"S\", \"\", \"\"], [512, 1024, 512, 256, 256, 256]]\n\n# STEPS for the proposed bounding box, if empty the STEPS = image_size / feature_map_size\n__C.MODEL.STEPS = []\n\n# STEPS for the proposed bounding box, a list from min value to max value\n__C.MODEL.SIZES = [0.2, 0.95]\n\n# ASPECT_RATIOS for the proposed bounding box, 1 is default contains\n__C.MODEL.ASPECT_RATIOS = [[2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]\n\n#\n__C.MODEL.CLIP = True\n\n# FSSD setting, NUM_FUSED for fssd\n__C.MODEL.NUM_FUSED = 3\n\n\n# ---------------------------------------------------------------------------- #\n# Train options\n# ---------------------------------------------------------------------------- #\n__C.TRAIN = AttrDict()\n# The number of checkpoints kept, older ones are deleted to save space\n__C.TRAIN.CHECKPOINTS_KEPT = 10\n__C.TRAIN.CHECKPOINTS_EPOCHS = 5\n# The number of max iters\n__C.TRAIN.MAX_EPOCHS = 300\n# Minibatch size\n__C.TRAIN.BATCH_SIZE = 128\n# trainable scope and resuming scope\n__C.TRAIN.TRAINABLE_SCOPE = \"base,extras,norm,loc,conf\"\n__C.TRAIN.RESUME_SCOPE = \"\"\n__C.TRAIN.CRITERION = \"\"\n\n# ---------------------------------------------------------------------------- #\n# optimizer options\n# ---------------------------------------------------------------------------- #\n__C.TRAIN.OPTIMIZER = AttrDict()\n# type of the optimizer\n__C.TRAIN.OPTIMIZER.OPTIMIZER = \"sgd\"\n# Initial learning rate\n__C.TRAIN.OPTIMIZER.LEARNING_RATE = 0.001\n# Initial differential learning rate for different layers\n__C.TRAIN.OPTIMIZER.DIFFERENTIAL_LEARNING_RATE = []\n# Momentum\n__C.TRAIN.OPTIMIZER.MOMENTUM = 0.9\n# Momentum_2\n__C.TRAIN.OPTIMIZER.MOMENTUM_2 = 0.99\n# epsilon\n__C.TRAIN.OPTIMIZER.EPS = 1e-8\n# Weight decay, for regularization\n__C.TRAIN.OPTIMIZER.WEIGHT_DECAY = 0.0001\n\n# 
---------------------------------------------------------------------------- #\n# lr_scheduler options\n# ---------------------------------------------------------------------------- #\n__C.TRAIN.LR_SCHEDULER = AttrDict()\n# type of the LR_SCHEDULER\n__C.TRAIN.LR_SCHEDULER.SCHEDULER = \"step\"\n# Step size for reducing the learning rate\n__C.TRAIN.LR_SCHEDULER.STEPS = [1]\n# Factor for reducing the learning rate\n__C.TRAIN.LR_SCHEDULER.GAMMA = 0.98\n# min learning rate\n__C.TRAIN.LR_SCHEDULER.LR_MIN = 0.0\n# warm_up epochs\n__C.TRAIN.LR_SCHEDULER.WARM_UP_EPOCHS = 0\n# The number of max iters\n__C.TRAIN.LR_SCHEDULER.MAX_EPOCHS = (\n __C.TRAIN.MAX_EPOCHS - __C.TRAIN.LR_SCHEDULER.WARM_UP_EPOCHS\n)\n\n# ---------------------------------------------------------------------------- #\n# Test options\n# ---------------------------------------------------------------------------- #\n__C.TEST = AttrDict()\n__C.TEST.BATCH_SIZE = __C.TRAIN.BATCH_SIZE\n__C.TEST.TEST_SCOPE = [0, 300]\n\n\n# ---------------------------------------------------------------------------- #\n# Matcher options\n# ---------------------------------------------------------------------------- #\n# matcher\n__C.MATCHER = AttrDict()\n__C.MATCHER.NUM_CLASSES = __C.MODEL.NUM_CLASSES\n__C.MATCHER.CLASSIFY_LOSS = \"FocalLoss\"\n__C.MATCHER.LOCATE_LOSS = \"SmoothL1Loss\"\n__C.MATCHER.BACKGROUND_LABEL = 0\n__C.MATCHER.MATCH_THRESHOLD = [0.5, 0.4]\n__C.MATCHER.CENTER_SAMPLING_RADIUS = 0.0\n__C.MATCHER.FOCAL_ALPHA = 0.25\n__C.MATCHER.FOCAL_GAMMA = 2\n__C.MATCHER.NEGPOS_RATIO = 3\n__C.MATCHER.VARIANCE = [0.1, 0.2]\n\n\n# ---------------------------------------------------------------------------- #\n# Post process options\n# ---------------------------------------------------------------------------- #\n# post process\n__C.POST_PROCESS = AttrDict()\n__C.POST_PROCESS.NUM_CLASSES = __C.MODEL.NUM_CLASSES\n__C.POST_PROCESS.BACKGROUND_LABEL = __C.MATCHER.BACKGROUND_LABEL\n__C.POST_PROCESS.SCORE_THRESHOLD = 0.01\n__C.POST_PROCESS.IOU_THRESHOLD = 0.6\n__C.POST_PROCESS.MAX_DETECTIONS = 100\n__C.POST_PROCESS.MAX_DETECTIONS_PER_LEVEL = 300\n__C.POST_PROCESS.USE_DIOU = True\n__C.POST_PROCESS.RESCORE_CENTER = True\n__C.POST_PROCESS.VARIANCE = __C.MATCHER.VARIANCE\n\n\n# ---------------------------------------------------------------------------- #\n# Dataset options\n# ---------------------------------------------------------------------------- #\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), \"..\", \"..\"))\n\n__C.DATASET = AttrDict()\n# name of the dataset\n__C.DATASET.DATASET = \"\"\n# path of the dataset\n__C.DATASET.DATASET_DIR = \"\"\n# train set scope\n__C.DATASET.TRAIN_SETS = []\n# test set scope\n__C.DATASET.TEST_SETS = []\n# image expand probability during train\n__C.DATASET.PICKLE = False\n# image size\n__C.DATASET.IMAGE_SIZE = __C.MODEL.IMAGE_SIZE\n# train batch size\n__C.DATASET.TRAIN_BATCH_SIZE = __C.TRAIN.BATCH_SIZE\n# test batch size\n__C.DATASET.TEST_BATCH_SIZE = __C.TEST.BATCH_SIZE\n# number of workers to extract datas\n__C.DATASET.NUM_WORKERS = 8\n__C.DATASET.DEVICE_ID = []\n# image preprocessing\n__C.DATASET.PREPROC = AttrDict()\n__C.DATASET.PREPROC.MEAN = 0\n__C.DATASET.PREPROC.STD = 255\n__C.DATASET.PREPROC.CROP_SCALE = [0.3, 1.0]\n__C.DATASET.PREPROC.CROP_ASPECT_RATIO = [0.5, 2.0]\n__C.DATASET.PREPROC.CROP_ATTEMPTS = 50\n__C.DATASET.PREPROC.HUE_DELTA = 9\n__C.DATASET.PREPROC.BRI_DELTA = 16\n__C.DATASET.PREPROC.CONTRAST_RANGE = [0.75, 1.25]\n__C.DATASET.PREPROC.SATURATION_RANGE = [0.75, 
1.25]\n__C.DATASET.PREPROC.MAX_EXPAND_RATIO = 2.0\n# multiscale training\n__C.DATASET.MULTISCALE = []\n\n# ---------------------------------------------------------------------------- #\n# Export options\n# ---------------------------------------------------------------------------- #\n# Place outputs model under an experiments directory\n__C.EXP_DIR = osp.abspath(osp.join(__C.ROOT_DIR, \"experiments/models/\"))\n__C.LOG_DIR = __C.EXP_DIR\n__C.RESUME_CHECKPOINT = \"\"\n__C.CHECKPOINTS_PREFIX = \"{}_{}_{}\".format(\n __C.MODEL.SSDS, __C.MODEL.NETS, __C.DATASET.DATASET\n)\n__C.PHASE = [\"train\", \"eval\", \"test\"]\n__C.DEVICE_ID = []\n\n\ndef _merge_a_into_b(a, b, stack=None):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n assert isinstance(a, AttrDict), \"Argument `a` must be an AttrDict\"\n assert isinstance(b, AttrDict), \"Argument `b` must be an AttrDict\"\n\n for k, v_ in a.items():\n full_key = \".\".join(stack) + \".\" + k if stack is not None else k\n # a must specify keys that are in b\n if k not in b:\n raise KeyError(\"Non-existent config key: {}\".format(full_key))\n\n v = _decode_cfg_value(v_)\n v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)\n\n # Recursively merge dicts\n if isinstance(v, AttrDict):\n try:\n stack_push = [k] if stack is None else stack + [k]\n _merge_a_into_b(v, b[k], stack=stack_push)\n except BaseException:\n raise\n else:\n b[k] = v\n\n\ndef update_cfg():\n __C.TRAIN.LR_SCHEDULER.MAX_EPOCHS = (\n __C.TRAIN.MAX_EPOCHS - __C.TRAIN.LR_SCHEDULER.WARM_UP_EPOCHS\n )\n __C.DATASET.IMAGE_SIZE = __C.MODEL.IMAGE_SIZE\n __C.DATASET.TRAIN_BATCH_SIZE = __C.TRAIN.BATCH_SIZE\n __C.DATASET.TEST_BATCH_SIZE = __C.TEST.BATCH_SIZE\n __C.MATCHER.NUM_CLASSES = __C.MODEL.NUM_CLASSES\n __C.POST_PROCESS.NUM_CLASSES = __C.MODEL.NUM_CLASSES\n __C.POST_PROCESS.BACKGROUND_LABEL = __C.MATCHER.BACKGROUND_LABEL\n __C.POST_PROCESS.VARIANCE = __C.MATCHER.VARIANCE\n __C.CHECKPOINTS_PREFIX = \"{}_{}_{}\".format(\n __C.MODEL.SSDS, __C.MODEL.NETS, __C.DATASET.DATASET\n )\n\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n\n with open(filename, \"r\") as f:\n yaml_cfg = AttrDict(yaml.safe_load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n update_cfg()\n return cfg\n\n\ndef _decode_cfg_value(v):\n \"\"\"Decodes a raw config value (e.g., from a yaml config files or command\n line argument) into a Python object.\n \"\"\"\n # Configs parsed from raw yaml will contain dictionary keys that need to be\n # converted to AttrDict objects\n if isinstance(v, dict):\n return AttrDict(v)\n # All remaining processing is only applied to strings\n if not isinstance(v, str):\n return v\n # Try to interpret `v` as a:\n # string, number, tuple, list, dict, boolean, or None\n try:\n v = literal_eval(v)\n # The following two excepts allow v to pass through when it represents a\n # string.\n #\n # Longer explanation:\n # The type of v is always a string (before calling literal_eval), but\n # sometimes it *represents* a string and other times a data structure, like\n # a list. In the case that v represents a string, what we got back from the\n # yaml parser is 'foo' *without quotes* (so, not '\"foo\"'). literal_eval is\n # ok with '\"foo\"', but will raise a ValueError if given 'foo'. 
In other\n # cases, like paths (v = 'foo/bar' and not v = '\"foo/bar\"'), literal_eval\n # will raise a SyntaxError.\n except ValueError:\n pass\n except SyntaxError:\n pass\n return v\n\n\ndef _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):\n \"\"\"Checks that `value_a`, which is intended to replace `value_b` is of the\n right type. The type is correct if it matches exactly or is one of a few\n cases in which the type can be easily coerced.\n \"\"\"\n # The types must match (with some exceptions)\n type_b = type(value_b)\n type_a = type(value_a)\n if type_a is type_b:\n return value_a\n\n # Exceptions: numpy arrays, strings, tuple<->list\n if isinstance(value_b, np.ndarray):\n value_a = np.array(value_a, dtype=value_b.dtype)\n elif isinstance(value_b, str):\n value_a = str(value_a)\n elif isinstance(value_a, tuple) and isinstance(value_b, list):\n value_a = list(value_a)\n elif isinstance(value_a, list) and isinstance(value_b, tuple):\n value_a = tuple(value_a)\n else:\n raise ValueError(\n \"Type mismatch ({} vs. {}) with values ({} vs. {}) for config \"\n \"key: {}\".format(type_b, type_a, value_b, value_a, full_key)\n )\n return value_a\n" ]
[ [ "numpy.array" ] ]
Halfknow/akshare
[ "754d86be72b5d6ed2f288e6d16d712da68a734a4" ]
[ "akshare/index/index_baidu.py" ]
[ "# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nAuthor: Albert King\ndate: 2019/12/2 23:53\ncontact: [email protected]\ndesc: 百度指数\n感谢 https://cloudcrawler.club/categories/2019%E5%B9%B4%E6%9C%AB%E9%80%86%E5%90%91%E5%A4%8D%E4%B9%A0/\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport requests\n\nplt.rcParams[\"font.sans-serif\"] = [\"SimHei\"] # 显示中文标签\n\n\ndef decrypt(t: str, e: str) -> str:\n \"\"\"\n 解密函数\n :param t:\n :type t:\n :param e:\n :type e:\n :return:\n :rtype:\n \"\"\"\n n, i, a, result = list(t), list(e), {}, []\n ln = int(len(n) / 2)\n start, end = n[ln:], n[:ln]\n a = dict(zip(end, start))\n return \"\".join([a[j] for j in e])\n\n\ndef get_ptbk(uniqid: str, cookie: str) -> str:\n headers = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"Cookie\": cookie,\n \"DNT\": \"1\",\n \"Host\": \"zhishu.baidu.com\",\n \"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"zhishu.baidu.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n session = requests.Session()\n session.headers.update(headers)\n with session.get(\n url=f\"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}\"\n ) as response:\n ptbk = response.json()[\"data\"]\n return ptbk\n\n\ndef baidu_search_index(word: str, start_date: str, end_date: str, cookie: str) -> str:\n headers = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"Cookie\": cookie,\n \"DNT\": \"1\",\n \"Host\": \"zhishu.baidu.com\",\n \"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"zhishu.baidu.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n session = requests.Session()\n session.headers.update(headers)\n with session.get(\n url=f\"http://index.baidu.com/api/SearchApi/index?word={word}&area=0&startDate={start_date}&endDate={end_date}\"\n ) as response:\n data = response.json()[\"data\"]\n all_data = data[\"userIndexes\"][0][\"all\"][\"data\"]\n uniqid = data[\"uniqid\"]\n ptbk = get_ptbk(uniqid, cookie)\n result = decrypt(ptbk, all_data).split(\",\")\n result = [int(item) if item != \"\" else 0 for item in result]\n if len(result) == len(pd.date_range(start=start_date, end=end_date, freq=\"7D\")):\n temp_df_7 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"7D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_7.index = pd.to_datetime(temp_df_7[\"date\"])\n del temp_df_7[\"date\"]\n return temp_df_7\n else:\n temp_df_1 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"1D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_1.index = pd.to_datetime(temp_df_1[\"date\"])\n del temp_df_1[\"date\"]\n return temp_df_1\n\n\ndef baidu_info_index(word: str, start_date: str, end_date: str, cookie: str) -> str:\n headers = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"Cookie\": cookie,\n \"DNT\": \"1\",\n \"Host\": \"zhishu.baidu.com\",\n 
\"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"zhishu.baidu.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n session = requests.Session()\n session.headers.update(headers)\n with session.get(\n url=f\"http://index.baidu.com/api/FeedSearchApi/getFeedIndex?word={word}&area=0&startDate={start_date}&endDate={end_date}\"\n ) as response:\n data = response.json()[\"data\"]\n all_data = data[\"index\"][0][\"data\"]\n uniqid = data[\"uniqid\"]\n ptbk = get_ptbk(uniqid, cookie)\n result = decrypt(ptbk, all_data).split(\",\")\n result = [int(item) if item != \"\" else 0 for item in result]\n if len(result) == len(pd.date_range(start=start_date, end=end_date, freq=\"7D\")):\n temp_df_7 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"7D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_7.index = pd.to_datetime(temp_df_7[\"date\"])\n del temp_df_7[\"date\"]\n return temp_df_7\n else:\n temp_df_1 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"1D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_1.index = pd.to_datetime(temp_df_1[\"date\"])\n del temp_df_1[\"date\"]\n return temp_df_1\n\n\ndef baidu_media_index(word: str, start_date: str, end_date: str, cookie: str,) -> str:\n headers = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Cache-Control\": \"no-cache\",\n \"Cookie\": cookie,\n \"DNT\": \"1\",\n \"Host\": \"zhishu.baidu.com\",\n \"Pragma\": \"no-cache\",\n \"Proxy-Connection\": \"keep-alive\",\n \"Referer\": \"zhishu.baidu.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n }\n session = requests.Session()\n session.headers.update(headers)\n with session.get(\n url=f\"http://index.baidu.com/api/NewsApi/getNewsIndex?word={word}&area=0&startDate={start_date}&endDate={end_date}\"\n ) as response:\n data = response.json()[\"data\"]\n all_data = data[\"index\"][0][\"data\"]\n uniqid = data[\"uniqid\"]\n ptbk = get_ptbk(uniqid, cookie)\n result = decrypt(ptbk, all_data).split(\",\")\n result = [\"0\" if item == \"\" else item for item in result]\n result = [int(item) for item in result]\n if len(result) == len(pd.date_range(start=start_date, end=end_date, freq=\"7D\")):\n temp_df_7 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"7D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_7.index = pd.to_datetime(temp_df_7[\"date\"])\n del temp_df_7[\"date\"]\n return temp_df_7\n else:\n temp_df_1 = pd.DataFrame(\n [pd.date_range(start=start_date, end=end_date, freq=\"1D\"), result],\n index=[\"date\", word],\n ).T\n temp_df_1.index = pd.to_datetime(temp_df_1[\"date\"])\n del temp_df_1[\"date\"]\n return temp_df_1\n\n\nif __name__ == \"__main__\":\n cookie = \"\"\n data = baidu_search_index(\n word=\"中国原油期货\", start_date=\"2020-01-01\", end_date=\"2020-02-14\", cookie=cookie\n )\n print(data)\n data.dropna(inplace=True)\n data.plot()\n plt.show()\n data = baidu_info_index(\n word=\"中国原油期货\", start_date=\"2018-07-03\", end_date=\"2020-01-21\", cookie=cookie\n )\n print(data)\n data.dropna(inplace=True)\n data.plot()\n plt.show()\n data = baidu_media_index(\n word=\"中国原油期货\", start_date=\"2018-10-27\", 
end_date=\"2020-01-21\", cookie=cookie\n )\n print(data)\n data.dropna(inplace=True)\n data.plot()\n plt.show()\n" ]
[ [ "pandas.to_datetime", "matplotlib.pyplot.show", "pandas.date_range" ] ]
yxliang/pva-faster-rcnn
[ "b0dc7d255da61188dda01e8760078b6991d6f7e2" ]
[ "lib/utils/blob.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Blob helper functions.\"\"\"\n\nimport numpy as np\nimport cv2\n\ndef im_list_to_blob(ims):\n \"\"\"Convert a list of images into a network input.\n\n Assumes images are already prepared (means subtracted, BGR order, ...).\n \"\"\"\n max_shape = np.array([im.shape for im in ims]).max(axis=0)\n num_images = len(ims)\n blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in xrange(num_images):\n im = ims[i]\n blob[i, 0:im.shape[0], 0:im.shape[1], :] = im\n # Move channels (axis 3) to axis 1\n # Axis order will become: (batch elem, channel, height, width)\n channel_swap = (0, 3, 1, 2)\n blob = blob.transpose(channel_swap)\n return blob\n\ndef prep_im_for_blob(im, pixel_means, target_size, max_size, multiple):\n \"\"\"Mean subtract and scale an image for use in a blob.\"\"\"\n im = im.astype(np.float32, copy=False)\n im -= pixel_means\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n im_scale_x = np.floor(im.shape[1] * im_scale / multiple) * multiple / im.shape[1]\n im_scale_y = np.floor(im.shape[0] * im_scale / multiple) * multiple / im.shape[0]\n im = cv2.resize(im, None, None, fx=im_scale_x, fy=im_scale_y,\n interpolation=cv2.INTER_LINEAR)\n\n return im, np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y])\n" ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.round", "numpy.min", "numpy.floor" ] ]
TommyLin/focus-stacking
[ "ac474ea4cf34fc61a197aefad97a02786eae388c" ]
[ "src/focus_stacking.py" ]
[ "#!/usr/bin/env python3\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom skimage import filters\n\nfiles = [\"../images/fly1.jpg\", \"../images/fly2.jpg\"]\nsave_image = False\n\n\ndef gaussian_blur(fin=\"../images/fly1.jpg\", fout=\"fly1_blur.jpg\"):\n if fin == \"\" or fout == \"\":\n raise SyntaxError(\"No parameter\")\n\n image = mpimg.imread(fin)\n blured = filters.gaussian(image, sigma=1, multichannel=True)\n mpimg.imsave(fout, blured)\n\n\ndef get_gray(rgb):\n r, g, b = rgb[0], rgb[1], rgb[2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n\n\ndef rgb2gray(image):\n gray = [[0 for i in range(image.shape[0])] for j in range(image.shape[1])]\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n gray[i][j] = get_gray(image[i][j])\n return gray\n\n\nimage = []\n\nfor filename in files:\n print(\"[\", files.index(filename), \"] \", filename)\n fig = plt.figure()\n image = mpimg.imread(filename)\n ax = fig.add_subplot(2, 2, 1)\n ax.set_title(\"Original\")\n plt.axis(\"off\")\n plt.imshow(image)\n\n edges = filters.sobel(image)\n ax = fig.add_subplot(2, 2, 2)\n ax.set_title(\"Edge(Orig)\")\n plt.axis(\"off\")\n plt.imshow(rgb2gray(edges))\n\n blured = filters.gaussian(image, sigma=1, multichannel=True)\n if save_image:\n mpimg.imsave(\"result-01.jpg\", blured)\n ax = fig.add_subplot(2, 2, 3)\n ax.set_title(\"Blur(Orig)\")\n plt.imshow(blured)\n\n edges = filters.sobel(blured)\n if save_image:\n mpimg.imsave(\"result-02.jpg\", edges)\n ax = fig.add_subplot(2, 2, 4)\n ax.set_title(\"Edge(Blured)\")\n plt.axis(\"off\")\n plt.imshow(rgb2gray(edges))\n\n plt.show(block=False)\n plt.pause(3)\n fig.savefig(\"result-01.png\")\n\n# debug part\nheight = image.shape[0]\nwidth = image.shape[1]\nprint(\"Image size\", image.size, \"= (\", height, \"x\", width, \") x 3\")\n" ]
[ [ "matplotlib.image.imread", "matplotlib.image.imsave", "matplotlib.pyplot.figure", "matplotlib.pyplot.pause", "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
MaximilianJanetschek/Urban_Intermodal_Transportation
[ "632caf668636448dc9290d54cf1c7b527c68a957" ]
[ "StatisticalAnalysis.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.stats import norm\nfrom scipy.spatial import distance\n\n\n\n\n'''\nREADME: \nThis is a standalone file, run to execute\nRequirement: Please make sure in results/distances folder:\n1. results0 includes the BASELINE beta set 0 (100 requests)\n2. results1 includes the BIASED beta set 1 (100 requests)\nPLEASE UNCOMMENT THE GRAPH DRAWING PART IF YOU WANT TO SEE THE GRAPH\n\n'''\n\ndef inputcsv(filenumber):\n \"\"\"Import the result request run for each beta csv file and include the column\n of beta set to classify which beta set the data belong to.\n Output: Data Frame\"\"\"\n # Import request run result for a beta set\n with open(r'results/distances/StatisticalAnalysis/results' + str(filenumber) + '.CSV', newline='') as csvfile:\n df_reader = pd.read_csv(csvfile)\n # Add new information columns to the existing data frame\n df_reader['betaSet'] = filenumber\n df_reader['ED.x'] = df_reader['cost']\n df_reader['ED.y'] = df_reader['ptTime'] + df_reader['taxiTime'] + df_reader['bikeTime'] + \\\n df_reader['walkingTime'] + df_reader['waitingTime']\n df_reader['totalTime'] = df_reader['ptTime'] + df_reader['taxiTime'] + df_reader['bikeTime'] + \\\n df_reader['walkingTime'] + df_reader['waitingTime']\n df_reader['outVehicleTime'] = df_reader['walkingTime'] + df_reader['waitingTime']\n df_reader['PhysicalTime'] = df_reader['walkingTime'] + df_reader['bikeTime']\n #turn cost into categorical column\n df_reader['CostType'] = 'WalkOnly'\n df_reader.loc[(df_reader['cost'] ==1.5),'CostType'] = 'Bike'\n df_reader.loc[(df_reader['cost'] == 2.9), 'CostType'] = 'pt'\n df_reader.loc[(df_reader['cost'] == 4.4), 'CostType'] = 'Bike+pt'\n df_reader.loc[(df_reader['cost'] > 4.4), 'CostType'] = 'Taxi'\n\n\n\n return df_reader\n\ndef mergeinput_preprocess_duo(df1, df2):\n \"\"\" Merge 2 data frames into one for full analysis.\n Output will be two form: long form and wide form\n long form: simply add 2 df on top of each other\n wide form: each row represents a request coordinate with result from both run. 
-> calculate ED\n relevant cols for wide form: maxChanges, origin, destination, ED.x1,ED.y1,ED.x2,ED.y2,ED.x1.normalized,ED.x2.normalized\n ED\"\"\"\n ## long form\n df_long = pd.concat([df1,df2],ignore_index = True)\n\n #normalize the x and y coordinates\n df_long['ED.x.normalized'] = (df_long['ED.x'] - df_long['ED.x'].min()) / (df_long['ED.x'].max() - df_long['ED.x'].min())\n df_long['ED.y.normalized'] = (df_long['ED.y'] - df_long['ED.y'].min()) / (\n df_long['ED.y'].max() - df_long['ED.y'].min())\n ## wide form\n df_temp = df_long.drop(\n columns=['beta_1', 'beta_2', 'beta_3', 'beta_4', 'beta_5', 'Disutility', 'inVehicleTime', 'ptTime',\n 'taxiTime', 'bikeTime', 'walkingTime', 'waitingTime', 'cost',\n 'numberOfChanges', 'preprocessingTime', 'optimizationTime', 'Gap'])\n betaSetx = df_temp['betaSet'][0]\n betaSety = df_temp['betaSet'][199]\n df_temp0 = df_temp[df_temp['betaSet'] == betaSetx]\n df_temp0 =df_temp0.drop(columns='betaSet')\n df_temp1 = df_temp[df_temp['betaSet'] == betaSety]\n df_temp1 = df_temp1.drop(columns='betaSet')\n df_wide = pd.merge(df_temp0, df_temp1, on=('origin','destination','maxChanges'), suffixes=('_base','_mod'))\n df_wide['ED'] = np.sqrt(np.square(df_wide['ED.x.normalized_base']-df_wide['ED.x.normalized_mod'])\n + np.square(df_wide['ED.y.normalized_base']-df_wide['ED.y.normalized_mod']))\n df_wide['request_no'] = range(1, len(df_wide) + 1)\n return df_long, df_wide\n\ndef mergeinput(df1, df2):\n ## long form\n df_long = pd.concat([df1,df2],ignore_index = True)\n return df_long\n\ndef normalize(df_long):\n '''Normalize the values and output df_long and df_wide'''\n #normalize the x and y coordinates\n df_long['ED.x.normalized'] = (df_long['ED.x'] - df_long['ED.x'].min()) / (df_long['ED.x'].max() - df_long['ED.x'].min())\n df_long['ED.y.normalized'] = (df_long['ED.y'] - df_long['ED.y'].min()) / (\n df_long['ED.y'].max() - df_long['ED.y'].min())\n return df_long\n\ndef splitdf(df_long,i):\n ## wide form\n df_temp = df_long.drop(\n columns=['beta_1', 'beta_2', 'beta_3', 'beta_4', 'beta_5', 'Disutility', 'inVehicleTime', 'ptTime',\n 'taxiTime', 'bikeTime', 'walkingTime', 'waitingTime', 'cost',\n 'numberOfChanges', 'preprocessingTime', 'optimizationTime', 'Gap'])\n betaSetx = df_temp['betaSet'][0]\n betaSety = df_temp['betaSet'][i*100]\n df_temp0 = df_temp[df_temp['betaSet'] == betaSetx]\n df_temp0 =df_temp0.drop(columns='betaSet')\n df_temp1 = df_temp[df_temp['betaSet'] == betaSety]\n df_temp1 = df_temp1.drop(columns='betaSet')\n df_wide = pd.merge(df_temp0, df_temp1, on=('origin','destination','maxChanges'), suffixes=('_0','_1'))\n df_wide['ED'] = np.sqrt(np.square(df_wide['ED.x.normalized_0']-df_wide['ED.x.normalized_1'])\n + np.square(df_wide['ED.y.normalized_0']-df_wide['ED.y.normalized_1']))\n df_wide['request_no'] = range(1, len(df_wide) + 1)\n return df_wide\n\n\n\ndef ecdf(data):\n \"\"\"Compute ECDF for a one-dimensional array of measurements.\n Input: DF[column] for the desired column \"\"\"\n # Number of data points: n\n n = len(data)\n # x-data for the ECDF: x\n x = np.sort(data)\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n return x, y\n\ndef inputrequest(jsonFileNumber):\n \"\"\" Input the request coordinates to the dataframe. 
THE ORIGINAL ORDER MUST BE INTACT\"\"\"\n df_reader = pd.read_json(r'data/requests/' + str(jsonFileNumber) + 'requests.json')\n df_reader['request_no'] = range(1, len(df_reader) + 1)\n df_reader['ori-destED'] = [distance.euclidean([df_reader['fromLat'][i],df_reader['fromLon'][i]],\n [df_reader['toLat'][i],df_reader['toLon'][i]])\n for i in range(0,len(df_reader['request_no']))]\n return df_reader\n\ndef addEDInfo(df_wide,df_request):\n \"\"\"Add coordination information of requests into result data frame\"\"\"\n df = pd.merge(df_wide,df_request, on='request_no')\n return df\n\ndef CItest(df,alpha):\n \"\"\" test whether the lower range of CI is larger than 0 or not\"\"\"\n ci = norm(*norm.fit(df['ED'])).interval(alpha)\n print(ci)\n if ci[0] < 0:\n print(str(alpha*100)+\"%\",\"Confidence: ED not significantly different from 0\")\n else:\n print(str(alpha*100)+\"%\",\"Confidence: ED significantly different from 0\")\n return\n\n# Function to map the colors as a list from the input list of x variables\ndef pltcolor(lst):\n cols=[]\n for l in lst:\n if l=='WalkOnly':\n cols.append('tab:green')\n # cols.append('red')\n elif l == 'Bike':\n cols.append('tab:blue')\n # cols.append('orange')\n elif l == 'pt':\n cols.append('tab:purple')\n # cols.append('purple')\n elif l == 'Bike+pt':\n cols.append('tab:orange')\n # cols.append('orange')\n else:\n # cols.append('tab:red')\n cols.append('red')\n\n colslabel = {'tab:green':'Walk Only',\n 'tab:blue':'Bike',\n 'tab:purple':'pt',\n 'tab:orange':'Bike+pt',\n 'tab:red':'Taxi'}\n return colslabel,cols\n\ndef compare(df_base,df1):\n \"\"\" Compare the base beta set with the current beta set and only keep the distinct route\"\"\"\n df_base_temp = df_base.drop(\n columns=['ED.x','ED.y','preprocessingTime', 'optimizationTime', 'Gap'])\n df1_temp = df1.drop(\n columns=['ED.x','ED.y','Disutility',\n 'preprocessingTime', 'optimizationTime', 'Gap'])\n df_wide = pd.merge(df_base_temp, df1_temp, on=('origin', 'destination', 'maxChanges'), suffixes=('_base', '_set{}'.format(str(df1['betaSet'][1]))))\n # Compare the corresponding route from 2 beta sets\n df_wide['DistinctRoute?'] = [False if (abs(df_wide['inVehicleTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['inVehicleTime_base'][i]) < 0.0001\n and\n abs(df_wide['ptTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['ptTime_base'][i]) < 0.0001\n and\n abs(df_wide['outVehicleTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['outVehicleTime_base'][i]) < 0.0001\n and\n abs(df_wide['bikeTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['bikeTime_base'][i]) < 0.0001\n and\n abs(df_wide['walkingTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['walkingTime_base'][i]) < 0.0001\n and\n abs(df_wide['waitingTime_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['waitingTime_base'][i]) < 0.0001\n and\n abs(df_wide['cost_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['cost_base'][i]) < 0.0001\n # and\n # abs(df_wide['numberOfChanges_set{}'.format(str(df1['betaSet'][1]))][i] - df_wide['numberOfChanges_base'][i]) < 0.0001\n\n )else True for i in range(0,len(df_wide))]\n\n return df_wide\n\ndef extractdistinct(df_wide):\n \"\"\"Extract and reformat to standard form for disinct point set\"\"\"\n df1_distincttemp = df_wide[df_wide['DistinctRoute?'] == True]\n # df1_distinct = df1_distinct.drop([x for x in df1_distinct if x.endswith('_base')], 1)\n\n df1_distinct = df1_distincttemp.reset_index(drop=True)\n return df1_distinct, df1_distincttemp\n\ndef 
drawline(betaset,df1_distinct,j,x_axis,y_axis,linecolor):\n \"\"\"Draw a line between points from different beta sets that share the same ori-dest coordination\"\"\"\n plt.plot([df1_distinct['{}_base'.format(x_axis)][j],df1_distinct['{}_set{}'.format(x_axis,betaset)][j]],\n [df1_distinct['{}_base'.format(y_axis)][j],df1_distinct['{}_set{}'.format(y_axis,betaset)][j]]\n ,color=linecolor,linestyle='--',linewidth=0.4,marker='.',markersize=0.000001)\n\n return\n\ndef calculateED(betaset,df1_distinct_oldpos,x_axis,y_axis):\n df1_distinct_oldpos['ED'] = np.sqrt(np.square(df1_distinct_oldpos['{}_base'.format(x_axis)]-df1_distinct_oldpos['{}_set{}'.format(x_axis,betaset)])\n + np.square(df1_distinct_oldpos['{}_base'.format(y_axis)]-df1_distinct_oldpos['{}_set{}'.format(y_axis,betaset)]))\n return df1_distinct_oldpos\n\n######################### Execution #######################################\n\"\"\"\nREADME: Adjust the end_range depend on the number of beta sets run\n\"\"\"\n\n\ndf_base = inputcsv(0)\ndf0 = inputcsv(0)\nprint('size of df',df_base.info())\n# input fields\nstart_range = 1 #inclusive\nend_range = 2 #exclusive\nTestset = 'PS'\nPlottype = 'Manually Chosen 20'\nIncreasesetPS = ['200%','550%']\nIncreasesetTS = ['75%','8.33%']\nIncreasesetCS = ['167%', '450%']\npalette = sns.color_palette(\"husl\", end_range)\n##### Draw 2D scatter plot with only distinct points #####\n#Gather Distinct Data\nfor i in range(start_range,end_range):\n df1 = inputcsv(i)\n df_wide = compare(df_base,df1)\n df1_distinct,df1_distinct_oldpostemp = extractdistinct(df_wide)\n print('distinct route set{}'.format(i),len(df1_distinct))\n\n\"\"\"\nREADME: Uncomment part 1 for running PS set, part 2 for TS set, part 3 for CS\n\"\"\"\n# Part 1\n#\n# x_axis = 'PhysicalTime'\n# y_axis = 'totalTime'\n# sns.scatterplot(x='{}'.format(x_axis),y='{}'.format(y_axis),data=df_base,marker='o',color='black',label=\"Set {} (standard)\".format(str(df_base['betaSet'][1])))\n#\n#\n# for i in range(start_range,end_range):\n# df1 = inputcsv(i)\n# df_wide = compare(df_base,df1)\n# df1_distinct,df1_distinct_oldpostemp = extractdistinct(df_wide)\n# sns.scatterplot(x='{}_set{}'.format(x_axis,str(df1['betaSet'][1])), y='{}_set{}'.format(y_axis,str(df1['betaSet'][1])),marker='o', data=df1_distinct, color=palette[i],label=\"Set {} ({} of standard walking time, bike betas)\".format(str(df1['betaSet'][1]),IncreasesetPS[i-1]))\n# df1_distinct_oldpos = calculateED(i,df1_distinct_oldpostemp,x_axis,y_axis)\n# for j in range(0,len(df1_distinct)):\n# drawline(i,df1_distinct,j,x_axis,y_axis,palette[i])\n#\n# plt.xlabel('{}'.format(x_axis))\n# plt.ylabel('{}'.format(y_axis))\n#\n# plt.legend(bbox_to_anchor=(0.8, -0.14))\n# plt.tight_layout()\n# plt.title('Beta Sets Comparison: {} - {}'.format(x_axis,y_axis))\n# path = 'results/plots/[{0}][{1}]{2}-{3}Beta{4}-Beta{5}.png'.format(Testset,Plottype,x_axis,y_axis,str(df0['betaSet'][1]),str(end_range-1))\n# plt.savefig(path, dpi=1000)\n# plt.show()\n\n# Part 2\n#\n# x_axis = 'cost'\n# y_axis = 'totalTime'\n# sns.scatterplot(x='{}'.format(x_axis),y='{}'.format(y_axis),data=df_base,marker='o',color='black',label=\"Set {} (standard)\".format(str(df_base['betaSet'][1])))\n#\n#\n# for i in range(start_range,end_range):\n# df1 = inputcsv(i)\n# df_wide = compare(df_base,df1)\n# df1_distinct,df1_distinct_oldpostemp = extractdistinct(df_wide)\n# sns.scatterplot(x='{}_set{}'.format(x_axis,str(df1['betaSet'][1])), y='{}_set{}'.format(y_axis,str(df1['betaSet'][1])),marker='o', data=df1_distinct, 
color=palette[i],label=\"Set {} ({} of standard cost beta)\".format(str(df1['betaSet'][1]),IncreasesetTS[i-1]))\n# df1_distinct_oldpos = calculateED(i,df1_distinct_oldpostemp,x_axis,y_axis)\n# for j in range(0,len(df1_distinct)):\n# drawline(i,df1_distinct,j,x_axis,y_axis,palette[i])\n#\n# plt.xlabel('{}'.format(x_axis))\n# plt.ylabel('{}'.format(y_axis))\n# plt.legend(bbox_to_anchor=(0.8, -0.14))\n# plt.tight_layout()\n#\n# plt.title('Beta Sets Comparison: {} - {}'.format(x_axis,y_axis))\n# path = 'results/plots/[{0}][{1}]{2}-{3}Beta{4}-Beta{5}.png'.format(Testset,Plottype,x_axis,y_axis,str(df0['betaSet'][1]),str(end_range-1))\n# plt.savefig(path, dpi=1000)\n# plt.show()\n\n# Part 3\n#\n# x_axis = 'cost'\n# y_axis = 'totalTime'\n# sns.scatterplot(x='{}'.format(x_axis),y='{}'.format(y_axis),data=df_base,marker='o',color='black',label=\"Set {} (standard)\".format(str(df_base['betaSet'][1])))\n#\n#\n# for i in range(start_range,end_range):\n# df1 = inputcsv(i)\n# df_wide = compare(df_base,df1)\n# df1_distinct,df1_distinct_oldpostemp = extractdistinct(df_wide)\n# sns.scatterplot(x='{}_set{}'.format(x_axis,str(df1['betaSet'][1])), y='{}_set{}'.format(y_axis,str(df1['betaSet'][1])),marker='o', data=df1_distinct, color=palette[i],label=\"Set {} ({} of standard cost beta)\".format(str(df1['betaSet'][1]),IncreasesetCS[i-1]))\n# df1_distinct_oldpos = calculateED(i,df1_distinct_oldpostemp,x_axis,y_axis)\n# for j in range(0,len(df1_distinct)):\n# drawline(i,df1_distinct,j,x_axis,y_axis,palette[i])\n#\n# plt.xlabel('{}'.format(x_axis))\n# plt.ylabel('{}'.format(y_axis))\n# plt.legend(bbox_to_anchor=(0.8, -0.14))\n# plt.tight_layout()\n#\n# plt.title('Beta Sets Comparison: {} - {}'.format(x_axis,y_axis))\n# path = 'results/plots/[{0}][{1}]{2}-{3}Beta{4}-Beta{5}.png'.format(Testset,Plottype,x_axis,y_axis,str(df0['betaSet'][1]),str(end_range-1))\n# plt.savefig(path, dpi=1000)\n# plt.show()\n\n\n\n\n" ]
[ [ "numpy.square", "scipy.spatial.distance.euclidean", "pandas.merge", "scipy.stats.norm.fit", "numpy.arange", "numpy.sort", "pandas.concat", "pandas.read_csv" ] ]
usnistgov/dioptra
[ "08a08e96c27787915bafc75a483431333e2c70ca" ]
[ "examples/patch-defended-pixel-threshold/src/attacks.py" ]
[ "#!/usr/bin/env python\n# This Software (Dioptra) is being made available as a public service by the\n# National Institute of Standards and Technology (NIST), an Agency of the United\n# States Department of Commerce. This software was developed in part by employees of\n# NIST and in part by NIST contractors. Copyright in portions of this software that\n# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant\n# to Title 17 United States Code Section 105, works of NIST employees are not\n# subject to copyright protection in the United States. However, NIST may hold\n# international copyright in software created by its employees and domestic\n# copyright (or licensing rights) in portions of software that were assigned or\n# licensed to NIST. To the extent that NIST holds copyright in this software, it is\n# being made available under the Creative Commons Attribution 4.0 International\n# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts\n# of the software developed or licensed by NIST.\n#\n# ACCESS THE FULL CC BY 4.0 LICENSE HERE:\n# https://creativecommons.org/licenses/by/4.0/legalcode\n\nimport warnings\nfrom typing import Tuple\nfrom pathlib import Path\nimport mlflow\nimport scipy.stats\n\nwarnings.filterwarnings(\"ignore\")\n\nimport tensorflow as tf\n\ntf.compat.v1.disable_eager_execution()\n\nimport numpy as np\nimport structlog\nfrom art.attacks.evasion import PixelAttack, ThresholdAttack\nfrom art.estimators.classification import KerasClassifier\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.preprocessing.image import save_img\n\nfrom models import load_model_in_registry\nfrom metrics import (\n l_inf_norm,\n paired_cosine_similarities,\n paired_euclidean_distances,\n paired_manhattan_distances,\n paired_wasserstein_distances,\n)\n\nLOGGER = structlog.get_logger()\nDISTANCE_METRICS = [\n (\"l_infinity_norm\", l_inf_norm),\n (\"cosine_similarity\", paired_cosine_similarities),\n (\"euclidean_distance\", paired_euclidean_distances),\n (\"manhattan_distance\", paired_manhattan_distances),\n (\"wasserstein_distance\", paired_wasserstein_distances),\n]\n\n\ndef wrap_keras_classifier(model):\n keras_model = load_model_in_registry(model=model)\n return KerasClassifier(model=keras_model)\n\n\ndef init_pt(model, batch_size, **kwargs):\n classifier = wrap_keras_classifier(model)\n # th=4, es=1, targeted=True, verbose=True\n attack = PixelAttack(classifier, **kwargs)\n return classifier, attack\n\n\ndef save_adv_batch(class_names_list, adv_batch, batch_size, batch_num, adv_data_dir, y):\n\n for batch_image_num, adv_image in enumerate(adv_batch):\n outfile = class_names_list[y[batch_image_num]]\n adv_image_path = (\n adv_data_dir\n / f\"{outfile}\"\n / f\"adv{str(batch_size * batch_num + batch_image_num).zfill(5)}.png\"\n )\n\n if not adv_image_path.parent.exists():\n adv_image_path.parent.mkdir(parents=True)\n\n save_img(path=str(adv_image_path), x=adv_image)\n\n\ndef np_norm(im, im2, order):\n im_diff = im - im2\n batch_size = im_diff.shape[0]\n flatten_size = np.prod(im_diff.shape[1:])\n im_diff_norm = np.linalg.norm(\n im_diff.reshape((batch_size, flatten_size)), axis=1, ord=order\n )\n return im_diff_norm.tolist()\n\n\ndef evaluate_distance_metrics(\n clean_filenames, distance_metrics_, clean_batch, adv_batch\n):\n LOGGER.debug(\"evaluate image perturbations using distance metrics\")\n distance_metrics_[\"image\"].extend([x.name for x in clean_filenames])\n 
distance_metrics_[\"label\"].extend([x.parent for x in clean_filenames])\n for metric_name, metric in DISTANCE_METRICS:\n distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch))\n\n\ndef log_distance_metrics(distance_metrics_):\n distance_metrics_ = distance_metrics_.copy()\n del distance_metrics_[\"image\"]\n del distance_metrics_[\"label\"]\n for metric_name, metric_values_list in distance_metrics_.items():\n metric_values = np.array(metric_values_list)\n mlflow.log_metric(key=f\"{metric_name}_mean\", value=metric_values.mean())\n mlflow.log_metric(key=f\"{metric_name}_median\", value=np.median(metric_values))\n mlflow.log_metric(key=f\"{metric_name}_stdev\", value=metric_values.std())\n mlflow.log_metric(\n key=f\"{metric_name}_iqr\", value=scipy.stats.iqr(metric_values)\n )\n mlflow.log_metric(key=f\"{metric_name}_min\", value=metric_values.min())\n mlflow.log_metric(key=f\"{metric_name}_max\", value=metric_values.max())\n LOGGER.info(\"logged distance-based metric\", metric_name=metric_name)\n\n\ndef create_adversarial_pt_dataset(\n data_dir: str,\n model: str,\n adv_data_dir: Path = None,\n rescale: float = 1.0,\n batch_size: int = 32,\n label_mode: str = \"categorical\",\n color_mode: str = \"rgb\",\n image_size: Tuple[int, int] = (224, 224),\n **kwargs,\n):\n classifier, attack = init_pt(model=model, batch_size=batch_size, **kwargs)\n\n data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale)\n\n data_flow = data_generator.flow_from_directory(\n directory=data_dir,\n target_size=image_size,\n color_mode=color_mode,\n class_mode=label_mode,\n batch_size=batch_size,\n shuffle=False,\n )\n class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get)\n num_images = data_flow.n\n img_filenames = [Path(x) for x in data_flow.filenames]\n\n distance_metrics_ = {\"image\": [], \"label\": []}\n for metric_name, _ in DISTANCE_METRICS:\n distance_metrics_[metric_name] = []\n\n for batch_num, (x, y) in enumerate(data_flow):\n if batch_num >= num_images // batch_size:\n break\n\n clean_filenames = img_filenames[\n batch_num * batch_size : (batch_num + 1) * batch_size\n ]\n\n LOGGER.info(\"Attacking data batch\", batch_num=batch_num)\n\n y_int = np.argmax(y, axis=1)\n adv_batch = attack.generate(x=x)\n save_adv_batch(\n class_names_list, adv_batch, batch_size, batch_num, adv_data_dir, y_int\n )\n evaluate_distance_metrics(\n clean_filenames=clean_filenames,\n distance_metrics_=distance_metrics_,\n clean_batch=x,\n adv_batch=adv_batch,\n )\n LOGGER.info(\"Adversarial image generation complete\", attack=\"fgm\")\n log_distance_metrics(distance_metrics_)\n\n return classifier\n" ]
[ [ "numpy.array", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.median", "numpy.prod", "numpy.argmax", "tensorflow.compat.v1.disable_eager_execution" ] ]
penguinmenac3/deeptech
[ "0c7fb170d62f193dbbb2018f7b8d42f713178bb8" ]
[ "deeptech/model/layers/selection.py" ]
[ "\"\"\"doc\n# deeptech.model.layers.selection\n\n> These layers select parts of a tensor.\n\"\"\"\nimport torch\nfrom torch.nn import Module\nfrom deeptech.model.module_registry import add_module\n\n\n@add_module()\nclass Gather(Module):\n def __init__(self, axis):\n \"\"\"\n Gather tensors from one tensor by providing an index tensor.\n\n ```\n assert src.shape = [B, X, Y, Z]\n assert idx.shape = [B, K]\n assert 0 <= idx.min() and idx.max() < src.shape[axis]\n # -->\n assert Gather(1)(src, idx).shape == [B, K, Y, Z]\n assert Gather(2)(src, idx).shape == [B, X, K, Z]\n assert Gather(3)(src, idx).shape == [B, X, Y, K]\n #assert Gather(0) -> Exception\n ```\n \n Created object is callable with the following parameters:\n * **input_tensor**: (Tensor[B, ..., L, ...]) The tensor from which to gather values at the given indices.\n * **indices**: (Tensor[B, K]) The indices at which to return the values of the input tensor.\n * **returns**: (Tensor[B, ..., K, ...]) The tensor containing the values at the indices given.\n\n Arguments:\n :param axis: The axis along which to select.\n \"\"\"\n super().__init__()\n assert axis != 0, \"You cannot gather over the batch dimension.\"\n self.axis = axis\n\n def forward(self, input_tensor, indices):\n import torch\n assert input_tensor.shape[0] == indices.shape[0]\n assert len(indices.shape) == 2, \"Indices must be of shape (B, K). Found shape: {}\".format(indices.shape)\n assert 0 <= indices.min(), \"Indices contain values out of bounds. Min idx: {}\".format(indices.min())\n assert indices.max() < input_tensor.shape[self.axis], \"Indices contain values out of bounds. Max idx: {}, Shape: {}, Axis: {}\".format(\n indices.max(), input_tensor.shape, self.axis)\n\n # Then gather the indices along the batches.\n batchless_axis = self.axis - 1 if self.axis > 0 else self.axis\n return torch.stack([torch.index_select(input_tensor[i], batchless_axis, indices[i]) for i in range(indices.shape[0])])\n\n\n@add_module()\nclass TopKIndices(Module):\n def __init__(self, k):\n \"\"\"\n Returns the top k tensor indices (separate per batch).\n \n Created object is callable with the following parameters:\n * **input_tensor**: (Tensor[N, L]) The tensor in which to search the top k indices.\n * **returns**: (Tensor[N, K]) The tensor containing the indices of the top k values.\n \n Parameters for the constructor:\n :param k: The number of indices to return per batch.\n \"\"\"\n super().__init__()\n self.k = k\n\n def forward(self, input_tensor):\n return torch.topk(input_tensor, self.k).indices\n\n\n@add_module()\nclass GatherTopKIndices(Module):\n def __init__(self, k: int, background_class_idx: int = 0):\n \"\"\"\n Returns the top k tensor indices (separate per batch).\n \n For shapes: B=#Batches, X=Arbitrary, C=#Classes, N=#Samples.\n\n Created object is callable with the following parameters:\n * **input_tensor**: (Tensor[B, X, N]) The tensor from which to gather the values.\n * **scores**: (Tensor[B, C, N]) The tensor in which to search the top k indices.\n * **returns**: (Tensor[B, X, k]) The tensor containing the values at the top k indices.\n \n Parameters for the constructor:\n :param k: The number of indices to return per batch.\n :param background_class_idx: (int) The index at which the background class is. 
(Default: 0)\n \"\"\"\n super().__init__()\n self.gather = Gather(axis=-1)\n self.topk = TopKIndices(k)\n self.background_class_idx = background_class_idx\n\n def forward(self, input_tensor, scores):\n foreground_scores = 1 - scores[:, self.background_class_idx]\n indices = self.topk(foreground_scores)\n return self.gather(input_tensor, indices)\n" ]
[ [ "torch.index_select", "torch.topk" ] ]
joey12300/PaddleDetection
[ "b457850659c43fbd4a26c4fc4b70a3709b9952d4" ]
[ "tools/infer.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nif parent_path not in sys.path:\n sys.path.append(parent_path)\n\nimport glob\nimport numpy as np\nimport six\nfrom PIL import Image\n\nfrom paddle import fluid\n\nfrom ppdet.core.workspace import load_config, merge_config, create\n\nfrom ppdet.utils.eval_utils import parse_fetches\nfrom ppdet.utils.cli import ArgsParser\nfrom ppdet.utils.check import check_gpu, check_version, check_config\nfrom ppdet.utils.visualizer import visualize_results\nimport ppdet.utils.checkpoint as checkpoint\n\nfrom ppdet.data.reader import create_reader\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef get_save_image_name(output_dir, image_path):\n \"\"\"\n Get save image name from source image path.\n \"\"\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n image_name = os.path.split(image_path)[-1]\n name, ext = os.path.splitext(image_name)\n return os.path.join(output_dir, \"{}\".format(name)) + ext\n\n\ndef get_test_images(infer_dir, infer_img):\n \"\"\"\n Get image path list in TEST mode\n \"\"\"\n assert infer_img is not None or infer_dir is not None, \\\n \"--infer_img or --infer_dir should be set\"\n assert infer_img is None or os.path.isfile(infer_img), \\\n \"{} is not a file\".format(infer_img)\n assert infer_dir is None or os.path.isdir(infer_dir), \\\n \"{} is not a directory\".format(infer_dir)\n\n # infer_img has a higher priority\n if infer_img and os.path.isfile(infer_img):\n return [infer_img]\n\n images = set()\n infer_dir = os.path.abspath(infer_dir)\n assert os.path.isdir(infer_dir), \\\n \"infer_dir {} is not a directory\".format(infer_dir)\n exts = ['jpg', 'jpeg', 'png', 'bmp']\n exts += [ext.upper() for ext in exts]\n for ext in exts:\n images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))\n images = list(images)\n\n assert len(images) > 0, \"no image found in {}\".format(infer_dir)\n logger.info(\"Found {} inference images in total.\".format(len(images)))\n\n return images\n\n\ndef main():\n cfg = load_config(FLAGS.config)\n\n merge_config(FLAGS.opt)\n check_config(cfg)\n # check if set use_gpu=True in paddlepaddle cpu version\n check_gpu(cfg.use_gpu)\n # check if paddlepaddle version is satisfied\n check_version()\n\n main_arch = cfg.architecture\n\n dataset = cfg.TestReader['dataset']\n\n test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)\n dataset.set_images(test_images)\n\n place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model = create(main_arch)\n\n startup_prog = fluid.Program()\n infer_prog = fluid.Program()\n with 
fluid.program_guard(infer_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['TestReader']['inputs_def']\n inputs_def['iterable'] = True\n feed_vars, loader = model.build_inputs(**inputs_def)\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n\n reader = create_reader(cfg.TestReader, devices_num=1)\n loader.set_sample_list_generator(reader, place)\n\n exe.run(startup_prog)\n if cfg.weights:\n checkpoint.load_params(exe, infer_prog, cfg.weights)\n\n # parse infer fetches\n assert cfg.metric in ['COCO', 'VOC', 'OID', 'WIDERFACE'], \\\n \"unknown metric type {}\".format(cfg.metric)\n extra_keys = []\n if cfg['metric'] in ['COCO', 'OID']:\n extra_keys = ['im_info', 'im_id', 'im_shape']\n if cfg['metric'] == 'VOC' or cfg['metric'] == 'WIDERFACE':\n extra_keys = ['im_id', 'im_shape']\n keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)\n\n # parse dataset category\n if cfg.metric == 'COCO':\n from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info\n if cfg.metric == 'OID':\n from ppdet.utils.oid_eval import bbox2out, get_category_info\n if cfg.metric == \"VOC\":\n from ppdet.utils.voc_eval import bbox2out, get_category_info\n if cfg.metric == \"WIDERFACE\":\n from ppdet.utils.widerface_eval_utils import bbox2out, lmk2out, get_category_info\n\n anno_file = dataset.get_anno()\n with_background = dataset.with_background\n use_default_label = dataset.use_default_label\n\n clsid2catid, catid2name = get_category_info(anno_file, with_background,\n use_default_label)\n\n # whether output bbox is normalized in model output layer\n is_bbox_normalized = False\n if hasattr(model, 'is_bbox_normalized') and \\\n callable(model.is_bbox_normalized):\n is_bbox_normalized = model.is_bbox_normalized()\n\n # use VisualDL to log image\n if FLAGS.use_vdl:\n assert six.PY3, \"VisualDL requires Python >= 3.5\"\n from visualdl import LogWriter\n vdl_writer = LogWriter(FLAGS.vdl_log_dir)\n vdl_image_step = 0\n vdl_image_frame = 0 # each frame can display ten pictures at most.\n\n imid2path = dataset.get_imid2path()\n for iter_id, data in enumerate(loader()):\n outs = exe.run(infer_prog,\n feed=data,\n fetch_list=values,\n return_numpy=False)\n res = {\n k: (np.array(v), v.recursive_sequence_lengths())\n for k, v in zip(keys, outs)\n }\n logger.info('Infer iter {}'.format(iter_id))\n if 'TTFNet' in cfg.architecture:\n res['bbox'][1].append([len(res['bbox'][0])])\n\n bbox_results = None\n mask_results = None\n lmk_results = None\n if 'bbox' in res:\n bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)\n if 'mask' in res:\n mask_results = mask2out([res], clsid2catid,\n model.mask_head.resolution)\n if 'landmark' in res:\n lmk_results = lmk2out([res], is_bbox_normalized)\n\n # visualize result\n im_ids = res['im_id'][0]\n for im_id in im_ids:\n image_path = imid2path[int(im_id)]\n image = Image.open(image_path).convert('RGB')\n\n # use VisualDL to log original image\n if FLAGS.use_vdl:\n original_image_np = np.array(image)\n vdl_writer.add_image(\n \"original/frame_{}\".format(vdl_image_frame),\n original_image_np, vdl_image_step)\n\n image = visualize_results(image,\n int(im_id), catid2name,\n FLAGS.draw_threshold, bbox_results,\n mask_results, lmk_results)\n\n # use VisualDL to log image with bbox\n if FLAGS.use_vdl:\n infer_image_np = np.array(image)\n vdl_writer.add_image(\"bbox/frame_{}\".format(vdl_image_frame),\n infer_image_np, vdl_image_step)\n vdl_image_step += 1\n if vdl_image_step % 10 == 0:\n vdl_image_step = 0\n 
vdl_image_frame += 1\n\n save_name = get_save_image_name(FLAGS.output_dir, image_path)\n logger.info(\"Detection bbox results save in {}\".format(save_name))\n image.save(save_name, quality=95)\n\n\nif __name__ == '__main__':\n parser = ArgsParser()\n parser.add_argument(\n \"--infer_dir\",\n type=str,\n default=None,\n help=\"Directory for images to perform inference on.\")\n parser.add_argument(\n \"--infer_img\",\n type=str,\n default=None,\n help=\"Image path, has higher priority over --infer_dir\")\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory for storing the output visualization files.\")\n parser.add_argument(\n \"--draw_threshold\",\n type=float,\n default=0.5,\n help=\"Threshold to reserve the result for visualization.\")\n parser.add_argument(\n \"--use_vdl\",\n type=bool,\n default=False,\n help=\"whether to record the data to VisualDL.\")\n parser.add_argument(\n '--vdl_log_dir',\n type=str,\n default=\"vdl_log_dir/image\",\n help='VisualDL logging directory for image.')\n FLAGS = parser.parse_args()\n main()\n" ]
[ [ "numpy.array" ] ]
alexandrovteam/sm-networks
[ "d549ea73a388a342f95c68e99f16eae7ea62c43f" ]
[ "table_server.py" ]
[ "import pyarrow.parquet\nimport pandas as pd\nimport numpy as np\nimport scipy.spatial.distance as ssd\nimport bottle\n\nfrom tempfile import mkdtemp\nimport os.path\nfrom glob import glob\nfrom zipfile import ZipFile\nimport json\n\nclass NetworkGenerator:\n def __init__(self, config):\n self.config = config\n\n def _load_data(self):\n df = pyarrow.parquet.read_table(self.config['annotations']).to_pandas()\n \n # some datasets are present with multiple ds_ids; we take only the largest here\n ds_ids = df[['ds_id', 'ds_name']].groupby('ds_name')\\\n .apply(lambda df: max(df['ds_id']))\n self.annot = df[df['ds_id'].isin(ds_ids.values)].copy()\n self.name_to_id = {x[0]: x[1] for x in ds_ids.iteritems()}\n \n self.annot['msm'] = self.annot['msm'].astype(np.float32)\n self.annot['fdr'] = self.annot['fdr'].astype(np.float32)\n self.datasets = self.annot['ds_name'].unique()\n self.metadata = pd.read_csv(self.config['datasets'])\n self.metadata.rename(columns={'id': 'ds_id', 'name': 'ID'}, inplace=True)\n self.metadata = self.metadata.set_index('ds_id')\n\n def annotations(self, datasets):\n return self.annot[self.annot['ds_name'].isin(datasets)]\n\n def pass_fdr_table(self, annot, max_fdr):\n passes = annot[annot['fdr'] <= max_fdr].copy()\n passes['pass_fdr'] = True\n pass_table = passes.pivot_table('pass_fdr', aggfunc='max', index=['sf'],\n columns=['ds_name'], fill_value=False)\n return pass_table\n\n def _block(self, pass_fdr_table, j, block_size):\n df_block = pass_fdr_table[j:j + block_size]\n array_block = df_block.as_matrix().astype(np.float)\n dataset_counts = array_block.sum(axis=1)\n return array_block, df_block.index, dataset_counts\n\n def _loopify(self, edges, id1_name, id2_name):\n loops = edges[edges[id1_name] == edges[id2_name]]\n edges = edges[edges[id1_name] < edges[id2_name]]\n loops = loops[~loops[id1_name].isin(edges[id1_name])]\n loops = loops[~loops[id2_name].isin(edges[id2_name])]\n return edges.append(loops)\n\n def annotation_network(self, datasets, max_fdr, cutoff):\n self._load_data()\n annotations = self.annotations(datasets)\n full_pass_table = self.pass_fdr_table(annotations, max_fdr)\n pass_fdr_table = full_pass_table[full_pass_table.sum(axis=1) >= 2]\n edges = []\n\n distance_name = 'relative co-occurence'\n n_sf = pass_fdr_table.shape[0]\n block_size = 1000\n blocks = range(0, n_sf, block_size)\n for i, j in enumerate(blocks):\n mj, idx_j, nj = self._block(pass_fdr_table, j, block_size)\n for k in blocks[i:]:\n mk, idx_k, nk = self._block(pass_fdr_table, k, block_size)\n intersection = mj.dot(mk.T)\n union = np.add.outer(nj, nk) - intersection\n ratio = intersection / union\n ratio = pd.DataFrame(ratio, index=idx_j, columns=idx_k)\n ratio.index.rename('sf1', inplace=True)\n d = pd.melt(ratio.reset_index(), id_vars=['sf1'],\n var_name='sf2', value_name=distance_name)\n d = d[d[distance_name] >= cutoff]\n edges.append(d[d['sf1'] <= d['sf2']])\n\n edges = pd.concat(edges)\n edges = self._loopify(edges, 'sf1', 'sf2').sort_values(by=['sf1', 'sf2'])\n edges['comments'] = ''\n\n nodes = full_pass_table.copy().astype(int)\n nodes['# of datasets'] = nodes.sum(axis=1)\n nodes.sort_values(by='# of datasets', ascending=False, inplace=True)\n compound_info = annotations[['sf', 'comp_names', 'comp_ids']]\n nodes = nodes.join(compound_info.drop_duplicates().set_index('sf'))\n nodes = nodes.reset_index()\n nodes['comments'] = ''\n return {'nodes': nodes, 'edges': edges}\n\n def _cosine_similarities(self, msm_table):\n dist = ssd.squareform(ssd.pdist(msm_table.T.as_matrix(), 
'cosine'))\n return pd.DataFrame(data=1.0 - dist, \n index=msm_table.columns, columns=msm_table.columns)\n\n def _pairwise_df(self, msm_table):\n cosine_sim = self._cosine_similarities(msm_table)\n df = pd.melt(cosine_sim.reset_index(), id_vars=['ds_id'], var_name='ID2', \n value_name='cosine_similarity')\\\n .rename(columns={'ds_id': 'ID1'})\n return df\n\n def dataset_network(self, datasets, threshold1, threshold2):\n self._load_data()\n annot = self.annotations(datasets)\n annotated_sf = annot[annot['fdr'] <= 0.1]['sf'].unique()\n annot = annot[annot['sf'].isin(annotated_sf)]\n\n fdr_table = (annot.pivot_table('fdr', index=['sf', 'adduct'],\n columns=['ds_id'], fill_value=1.0) * 100)\\\n .astype(np.uint8)\n msm_table = annot.pivot_table('msm', index=['sf', 'adduct'],\n columns=['ds_id'], fill_value=0.0)\n avg_msm = msm_table.sum(axis=1) / len(msm_table.columns)\n sorted_avg_msm = avg_msm.sort_values(ascending=False)\n cutoff = -1\n n_top = 1000\n if len(sorted_avg_msm) > n_top:\n cutoff = sorted_avg_msm[n_top]\n cosine_sim_full = self._pairwise_df(msm_table[avg_msm > cutoff])\n msm_table[fdr_table > 20] = 0.0\n cosine_sim_02 = self._pairwise_df(msm_table[avg_msm > cutoff])\n msm_table[fdr_table > 10] = 0.0\n cosine_sim_01 = self._pairwise_df(msm_table[avg_msm > cutoff])\n\n edges = cosine_sim_full.copy()\n edges['cosine_similarity_fdr0.1'] = cosine_sim_01['cosine_similarity']\n edges['cosine_similarity_fdr0.2'] = cosine_sim_02['cosine_similarity']\n edges = edges[(edges['cosine_similarity_fdr0.1'] >= threshold1) &\n (edges['cosine_similarity_fdr0.2'] >= threshold2)]\n edges['ID1'] = pd.merge(edges[['ID1']], self.metadata[['ID']], \n left_on='ID1', right_index=True)['ID']\n edges['ID2'] = pd.merge(edges[['ID2']], self.metadata[['ID']], \n left_on='ID2', right_index=True)['ID']\n\n edges = edges.fillna(0)\n edges = self._loopify(edges, 'ID1', 'ID2')\n\n nodes = fdr_table.groupby(level='sf').agg('min').T\\\n .reindex(self.metadata.index) / 100.0\n nodes.index.rename('ds_id', inplace=True)\n nodes['# of annotations @ FDR = 0.1'] = (nodes <= 0.1).sum(axis=1)\n nodes['# of annotations @ FDR = 0.2'] = (nodes <= 0.2).sum(axis=1)\n ds_ids = [self.name_to_id[name] for name in datasets]\n nodes = self.metadata[self.metadata.index.isin(ds_ids)].join(nodes)\n nodes = nodes.reset_index()\n del nodes['ds_id']\n\n return {'nodes': nodes, 'edges': edges}\n\n def generate_networks(self, query):\n tmpdir = mkdtemp()\n def F(fn):\n return os.path.join(tmpdir, fn)\n\n datasets = self.dataset_network(query['datasets'], \n query['thresholdD01'], query['thresholdD02'])\n datasets['nodes'].to_csv(F('Dnodes.csv'), index=False)\n datasets['edges'].sort_values(by=['ID1', 'ID2'])\\\n .to_csv(F('Dedges.csv'), index=False, float_format='%.4f')\n\n annot_01 = self.annotation_network(query['datasets'], 0.1, query['thresholdA'])\n annot_01['nodes'].to_csv(F(\"Anodes01.csv\"), index=False)\n annot_01['edges'].to_csv(F(\"Aedges01.csv\"), index=False)\n\n annot_02 = self.annotation_network(query['datasets'], 0.2, query['thresholdA'])\n annot_02['nodes'].to_csv(F(\"Anodes02.csv\"), index=False)\n annot_02['edges'].to_csv(F(\"Aedges02.csv\"), index=False)\n\n with open(F(\"settings.json\"), \"w+\") as j:\n json.dump(query, j, indent=4, sort_keys=True)\n\n with ZipFile(F('networks.zip'), 'w') as z:\n for fn in list(glob(tmpdir + \"/*.csv\")) + [F('settings.json')]:\n z.write(fn, os.path.basename(fn))\n os.unlink(fn)\n return tmpdir, 'networks.zip'\n\n# EDIT this to point to the correct files!\nconfig = {\n 'annotations': 
'/home/ec2-user/Dropbox/networks/annotations.parquet',\n 'datasets': '/home/ec2-user/Dropbox/networks/datasets.csv',\n}\n\ngen = NetworkGenerator(config)\n\[email protected](\"/\")\ndef index():\n return bottle.static_file(\"index.html\", \"templates\")\n\[email protected](\"/datasets\")\ndef datasets():\n gen._load_data()\n return bottle.template(\"templates/datasets.html\", \n names=sorted(gen.name_to_id.keys()))\n\[email protected](\"/network\", method='POST') # form fields are read from the POST body below\ndef network():\n print(list(bottle.request.forms))\n query = {\n 'thresholdD01': float(bottle.request.forms.get('thresholdD01')),\n 'thresholdD02': float(bottle.request.forms.get('thresholdD02')),\n 'thresholdA': float(bottle.request.forms.get('thresholdA')),\n 'datasets': [s.strip() for s in bottle.request.forms.get('datasets').split(\"\\n\")]\n }\n print(query)\n tmpdir, fn = gen.generate_networks(query)\n return bottle.static_file(fn, root=tmpdir)\n\nif __name__ == \"__main__\":\n bottle.run(host='0.0.0.0', port=5000, debug=True)\n\n" ]
[ [ "pandas.merge", "pandas.DataFrame", "pandas.concat", "pandas.read_csv", "numpy.add.outer" ] ]
nemanja-m/key-value-memory-network
[ "03b42fd9e7f8d90e2fbd3ea0f8be91f43ecd7e4a" ]
[ "lucy/train.py" ]
[ "import argparse\nimport os\nimport time\nfrom collections import namedtuple\n\nimport numpy as np\nimport torch\nfrom torch import optim\nfrom torch.nn import CosineEmbeddingLoss, CosineSimilarity\nfrom tqdm import tqdm\nfrom visdom import Visdom\n\nfrom .constants import MODELS_DIR\nfrom .dataset import Dataset\nfrom .kvmemnn import KeyValueMemoryNet\nfrom .memory import KeyValueMemory\nfrom .verbosity import verbose, set_verbosity\n\n\nEPOCHS = 10\nBATCH_SIZE = 64\nEMBEDDING_DIM = 128\nLEARNING_RATE = 2.5e-3\n\nHistory = namedtuple('History', ['losses', 'hits'])\n\n\nclass Trainer:\n \"\"\"Utility class for PyTorch model training and visualization.\n\n Manages training process, evaluation metrics and handles visualization with\n visdom.\n\n Attributes:\n device (torch.device): Device for model training.\n batch_size (int): Size of batch.\n learning_rate (float): Learning rate for optimizer.\n embedding_dim (int): Dimension of embedding layer.\n model (KeyValueMemoryNet): PyTorch key-value memory network model instance.\n \"\"\"\n\n def __init__(self, device, batch_size, learning_rate, embedding_dim):\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.embedding_dim = embedding_dim\n\n self._device = device\n self._data = Dataset(batch_size=BATCH_SIZE)\n self._memory = KeyValueMemory(self._data)\n\n self.model = KeyValueMemoryNet(embedding_dim=embedding_dim,\n vocab_size=len(self._data.vocab)).to(device=device)\n\n self._loss_criterion = CosineEmbeddingLoss(margin=0.1,\n size_average=False).to(device=device)\n\n self._cosine_similarity = CosineSimilarity(dim=2)\n self._optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)\n self._hits = (1, 5, 10)\n self._init_visdom()\n\n @verbose\n def train(self, epochs):\n \"\"\"Starts model training and visualization.\n\n Args:\n epochs (int): Number of epochs for training.\n \"\"\"\n print('Starting training')\n print(' - Epochs {}'.format(epochs))\n print(' - Batches: {}'.format(len(self._data.train_iter)))\n print(' - Batch size: {}'.format(self.batch_size))\n print(' - Learning rate: {}'.format(self.learning_rate))\n print(' - Embedding dim: {}\\n'.format(self.embedding_dim))\n\n self._init_history(epochs)\n\n for epoch in range(epochs):\n self.model.train()\n\n with tqdm(self._data.iterator,\n unit=' batches',\n desc='Epoch {:3}/{}'.format(epoch + 1, epochs)) as pb:\n\n for batch in pb:\n self._optimizer.zero_grad()\n\n x, y = self._forward(query_batch=batch.query,\n response_batch=batch.response)\n\n targets = self._make_targets(shape=x.shape[:2])\n loss = self._compute_loss(x, y, targets)\n loss.backward()\n\n self._optimizer.step()\n self.history.losses[epoch].append(loss.item())\n\n mean_loss = np.mean(self.history.losses[epoch])\n pb.set_postfix_str('Loss: {:.3f}'.format(mean_loss))\n\n self._validate(epoch)\n self._update_visdom(epoch)\n\n def _validate(self, epoch):\n self.model.eval()\n\n with torch.no_grad():\n hits = []\n for batch in self._data.validation_iter:\n x, y = self._forward(query_batch=batch.query,\n response_batch=batch.response)\n\n predictions = self._cosine_similarity(x, y)\n _, indices = predictions.sort(descending=True)\n hits.append([self._hits_at_n(indices, n) for n in self._hits])\n\n mean_hits = np.array(hits).mean(axis=0)\n self.history.hits[epoch] = mean_hits\n\n def _hits_at_n(self, response_indices, n):\n return response_indices[:, :n].eq(0).sum().item() / len(response_indices)\n\n def _forward(self, query_batch, response_batch, train=True):\n keys, values, candidates = 
self._memory.batch_address(query_batch, train=train)\n\n return self.model(query_batch.to(device=self._device),\n response_batch.to(device=self._device),\n keys.to(device=self._device),\n values.to(device=self._device),\n candidates.to(device=self._device))\n\n def _make_targets(self, shape):\n targets = -torch.ones(shape, device=self._device)\n targets[:, 0] = 1 # First candidate response is correct one\n return targets\n\n def _compute_loss(self, x, y, targets):\n # CosineEmbeddingLoss doesn't support 3-d tensors so we must create\n # custom loss where we add individual losses across batch dimension\n cosine_embedding_losses = torch.stack([\n self._loss_criterion(x[i, :, :], y[i, :, :], targets[i, :])\n for i in range(len(x))\n ])\n return torch.sum(cosine_embedding_losses) / len(x)\n\n def save_model(self, path):\n print(\"\\nSaving model to '{}'\\n\".format(path))\n torch.save(self.model.state_dict(), path)\n\n def _init_history(self, epochs):\n # Save loss history for each epoch; one independent list per epoch\n # ([[]] * epochs would alias a single shared list)\n self.history = History(losses=[[] for _ in range(epochs)],\n hits=[None] * epochs)\n\n @verbose\n def _init_visdom(self):\n self.visdom = Visdom()\n\n # Wait for connection\n startup_time = 1\n step = 0.1\n while not self.visdom.check_connection() and startup_time > 0:\n time.sleep(step)\n startup_time -= step\n\n if not self.visdom.check_connection():\n self._visdom_detected = False\n\n print(\"Can't connect to visdom server.\")\n print(\"Start it with 'python -m visdom.server'\\n\")\n return\n\n self._visdom_detected = True\n\n def plot_options(title, ylabel, **kwargs):\n meta = 'lr: {} batch: {} emb: {}'.format(LEARNING_RATE,\n BATCH_SIZE,\n EMBEDDING_DIM)\n\n return dict(kwargs,\n width=380,\n height=360,\n title='{}\\t{}'.format(title, meta),\n xlabel='Iteration',\n ylabel=ylabel)\n\n loss_options = plot_options(title='Train Loss', ylabel='Loss')\n self.loss_window = self.visdom.line(Y=np.array([1]),\n X=np.array([0]),\n opts=loss_options)\n\n hits_legend = ['hits@' + str(hit) for hit in self._hits]\n hits_options = plot_options(title='hits@n',\n ylabel='%',\n showlegend=True,\n legend=hits_legend)\n\n self.hits_window = self.visdom.line(Y=np.zeros((1, 3)),\n X=np.zeros((1, 3)),\n opts=hits_options)\n\n def _update_visdom(self, epoch):\n if not self._visdom_detected:\n return\n\n mean_loss = np.mean(self.history.losses[epoch])\n self.visdom.line(Y=np.array([mean_loss]),\n X=np.array([epoch]),\n win=self.loss_window,\n update='append')\n\n hits = self.history.hits[epoch]\n self.visdom.line(Y=np.array([hits]),\n X=np.ones((1, len(hits))) * epoch,\n win=self.hits_window,\n update='append')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Train Key-Value Memory Network on query-response dataset')\n parser.add_argument('--cpu',\n action='store_true',\n help='Disable CUDA training and train on CPU')\n\n parser.add_argument('-b', '--batch',\n type=int,\n default=BATCH_SIZE,\n help='Training batch size')\n\n parser.add_argument('-d', '--embedding_dim',\n type=int,\n default=EMBEDDING_DIM,\n help='Embedding dimension')\n\n parser.add_argument('-e', '--epochs',\n type=int,\n default=EPOCHS,\n help='Number of training epochs')\n\n parser.add_argument('-lr', '--learning_rate',\n type=float,\n default=LEARNING_RATE,\n help='Learning rate')\n\n parser.add_argument('-w', '--weights_path',\n type=str,\n default=os.path.join(MODELS_DIR, 'lucy'),\n help='Path where trained model will be saved')\n\n parser.add_argument('-s', '--silent',\n action='store_true',\n help='Turn off verbose output')\n\n return 
parser.parse_args()\n\n\n@verbose\ndef get_device(use_cpu):\n if torch.cuda.is_available() and not use_cpu:\n device = torch.device('cuda')\n print('\\nUsing CUDA for training')\n print(\"Pass '--cpu' argument to disable CUDA and train on CPU\")\n else:\n device = torch.device('cpu')\n print('\\nUsing CPU for training')\n return device\n\n\ndef main():\n args = parse_args()\n set_verbosity(verbose=not args.silent)\n\n device = get_device(use_cpu=args.cpu)\n\n trainer = Trainer(device,\n batch_size=args.batch,\n learning_rate=args.learning_rate,\n embedding_dim=args.embedding_dim)\n\n trainer.train(epochs=args.epochs)\n trainer.save_model(path=args.weights_path)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "numpy.array", "numpy.zeros", "torch.no_grad", "numpy.mean", "torch.ones", "torch.cuda.is_available", "torch.nn.CosineSimilarity", "torch.nn.CosineEmbeddingLoss", "torch.sum" ] ]
erwanp/exojax
[ "79a148a1f30fda357a2586255569243815df05e7" ]
[ "examples/LUH16A/FidEMbu_modit/fit.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tqdm\n\nimport jax.numpy as jnp\nfrom jax import random\nfrom jax import vmap, jit\n\nfrom exojax.spec import rtransfer as rt\nfrom exojax.spec import planck, moldb, contdb, response, molinfo\nfrom exojax.spec.lpf import xsmatrix\nfrom exojax.spec.exomol import gamma_exomol\nfrom exojax.spec.hitran import SijT, doppler_sigma, gamma_natural, gamma_hitran\nfrom exojax.spec.hitrancia import read_cia, logacia \nfrom exojax.spec.rtransfer import rtrun, dtauM, dtauCIA, nugrid\nfrom exojax.plot.atmplot import plottau, plotcf, plot_maxpoint\nfrom exojax.utils.afunc import getjov_logg\nfrom exojax.utils.constants import RJ, pc, Rs, c\nfrom exojax.spec.evalline import mask_weakline\n\nfrom exojax.spec import dit, modit\n\n#reference pressure for a T-P model\nPref=1.0 #bar\n\n#FLUX reference\nFabs_REF2=2.7e-12 #absolute flux (i.e. flux@10pc) erg/s/cm2/um Burgasser+ 1303.7283 @2.29um\nfac0=RJ**2/((10.0*pc)**2) #nomralize by RJ\nFref=(2.29**2)*Fabs_REF2/fac0/1.e4 #erg/cm2/s/cm-1 @ 2.3um\n\n#loading spectrum\ndat=pd.read_csv(\"../data/luhman16a_spectra_detector1.csv\",delimiter=\",\")\nwavd=(dat[\"wavelength_micron\"].values)*1.e4 #AA\nnusd=1.e8/wavd[::-1]\nfobs=(dat[\"normalized_flux\"].values)[::-1]\nerr=(dat[\"err_normalized_flux\"].values)[::-1]\n\n#ATMOSPHERE\nNP=100\nParr, dParr, k=rt.pressure_layer(NP=NP)\nmmw=2.33 #mean molecular weight\nR=100000.\nbeta=c/(2.0*np.sqrt(2.0*np.log(2.0))*R) #IP sigma need check\nONEARR=np.ones_like(Parr) #ones_array for MMR\nmolmassCO=molinfo.molmass(\"CO\") #molecular mass (CO)\nmolmassH2O=molinfo.molmass(\"H2O\") #molecular mass (H2O)\n\n#LOADING CIA\nmmrH2=0.74\nmmrHe=0.25\nmolmassH2=molinfo.molmass(\"H2\")\nmolmassHe=molinfo.molmass(\"He\")\nvmrH2=(mmrH2*mmw/molmassH2)\nvmrHe=(mmrHe*mmw/molmassHe)\n\n#LINES\ng=10**(5.0)\nT0c=1700.0\nTarr = T0c*np.ones_like(Parr) \nmaxMMR_CO=0.01\nmaxMMR_H2O=0.005\n\n\n###########################################################\n#Loading Molecular datanase and Reducing Molecular Lines\n###########################################################\nNx=9000\nws=22876.0\nwe=23010.0\nmask=(ws<wavd[::-1])*(wavd[::-1]<we)\n#additional mask to remove a strong telluric\nmask=mask*((22898.5>wavd[::-1])+(wavd[::-1]>22899.5)) \nfobsx=fobs[mask]\nnusdx=nusd[mask]\nwavdx=1.e8/nusdx[::-1]\nerrx=err[mask]\n\nprint(\"data masked\",len(nusd),\"->\",len(nusdx))\n\nnus,wav,res=nugrid(ws-5.0,we+5.0,Nx,unit=\"AA\",xsmode=\"modit\")\n#loading molecular database \nmdbCO=moldb.MdbExomol('.database/CO/12C-16O/Li2015',nus) \nmdbH2O=moldb.MdbExomol('.database/H2O/1H2-16O/POKAZATEL',nus,crit=1.e-46) \n#LOADING CIA\ncdbH2H2=contdb.CdbCIA('.database/H2-H2_2011.cia',nus)\ncdbH2He=contdb.CdbCIA('.database/H2-He_2011.cia',nus)\n\n### MODIT settings\nfrom exojax.spec import initspec\nfrom exojax.spec.modit import minmax_dgmatrix\n\ncnu_CO, indexnu_CO, R_CO, pmarray_CO=initspec.init_modit(mdbCO.nu_lines,nus)\ncnu_H2O, indexnu_H2O, R_H2O, pmarray_H2O=initspec.init_modit(mdbH2O.nu_lines,nus)\n\n# Precomputing gdm_ngammaL \nfrom exojax.spec.modit import setdgm_exomol\nfrom jax import jit, vmap\n\nfT = lambda T0,alpha: 
T0[:,None]*(Parr[None,:]/Pref)**alpha[:,None]\nT0_test=np.array([1000.0,1700.0,1000.0,1700.0])\nalpha_test=np.array([0.15,0.15,0.05,0.05])\nres=0.2\ndgm_ngammaL_CO=setdgm_exomol(mdbCO,fT,Parr,R_CO,molmassCO,res,T0_test,alpha_test)\ndgm_ngammaL_H2O=setdgm_exomol(mdbH2O,fT,Parr,R_H2O,molmassH2O,res,T0_test,alpha_test)\n\n#######################################################\n#HMC-NUTS FITTING PART\n#######################################################\nimport numpyro.distributions as dist\nimport numpyro\nfrom numpyro.infer import MCMC, NUTS\nfrom numpyro.infer import Predictive\nfrom numpyro.diagnostics import hpdi\nfrom exojax.spec.modit import exomol,xsmatrix\n\nbaseline=1.07 #(baseline for a CIA photosphere in the observed (normaized) spectrum)\n# Model\ndef model_c(nu1,y1,e1):\n Rp = numpyro.sample('Rp', dist.Uniform(0.5,1.5))\n Mp = numpyro.sample('Mp', dist.Normal(33.5,0.3))\n sigma = numpyro.sample('sigma', dist.Exponential(10.0))\n RV = numpyro.sample('RV', dist.Uniform(26.0,30.0))\n MMR_CO = numpyro.sample('MMR_CO', dist.Uniform(0.0,maxMMR_CO))\n MMR_H2O = numpyro.sample('MMR_H2O', dist.Uniform(0.0,maxMMR_H2O))\n T0 = numpyro.sample('T0', dist.Uniform(1000.0,1700.0))\n alpha = numpyro.sample('alpha', dist.Uniform(0.05,0.15))\n vsini = numpyro.sample('vsini', dist.Uniform(10.0,20.0))\n\n #Kipping Limb Darkening Prior arxiv:1308.0009 \n q1 = numpyro.sample('q1', dist.Uniform(0.0,1.0))\n q2 = numpyro.sample('q2', dist.Uniform(0.0,1.0))\n sqrtq1=jnp.sqrt(q1)\n u1=2.0*sqrtq1*q2\n u2=sqrtq1*(1.0-2.0*q2)\n\n g=2478.57730044555*Mp/Rp**2 #gravity\n \n #T-P model//\n Tarr = T0*(Parr/Pref)**alpha \n \n #line computation CO\n qt_CO=vmap(mdbCO.qr_interp)(Tarr)\n qt_H2O=vmap(mdbH2O.qr_interp)(Tarr)\n \n def obyo(y,tag,nusdx,nus,mdbCO,mdbH2O,cdbH2H2,cdbH2He):\n #CO\n SijM_CO,ngammaLM_CO,nsigmaDl_CO=exomol(mdbCO,Tarr,Parr,R_CO,molmassCO)\n xsm_CO=xsmatrix(cnu_CO,indexnu_CO,R_CO,pmarray_CO,nsigmaDl_CO,ngammaLM_CO,SijM_CO,nus,dgm_ngammaL_CO)\n dtaumCO=dtauM(dParr,jnp.abs(xsm_CO),MMR_CO*ONEARR,molmassCO,g)\n \n #H2O\n SijM_H2O,ngammaLM_H2O,nsigmaDl_H2O=exomol(mdbH2O,Tarr,Parr,R_H2O,molmassH2O)\n xsm_H2O=xsmatrix(cnu_H2O,indexnu_H2O,R_H2O,pmarray_H2O,nsigmaDl_H2O,ngammaLM_H2O,SijM_H2O,nus,dgm_ngammaL_H2O)\n dtaumH2O=dtauM(dParr,jnp.abs(xsm_H2O),MMR_H2O*ONEARR,molmassH2O,g)\n\n #CIA\n dtaucH2H2=dtauCIA(nus,Tarr,Parr,dParr,vmrH2,vmrH2,\\\n mmw,g,cdbH2H2.nucia,cdbH2H2.tcia,cdbH2H2.logac)\n dtaucH2He=dtauCIA(nus,Tarr,Parr,dParr,vmrH2,vmrHe,\\\n mmw,g,cdbH2He.nucia,cdbH2He.tcia,cdbH2He.logac)\n \n dtau=dtaumCO+dtaumH2O+dtaucH2H2+dtaucH2He \n sourcef = planck.piBarr(Tarr,nus)\n\n Ftoa=Fref/Rp**2\n F0=rtrun(dtau,sourcef)/baseline/Ftoa\n \n Frot=response.rigidrot(nus,F0,vsini,u1,u2)\n mu=response.ipgauss_sampling(nusdx,nus,Frot,beta,RV)\n \n errall=jnp.sqrt(e1**2+sigma**2)\n numpyro.sample(tag, dist.Normal(mu, errall), obs=y)\n\n obyo(y1,\"y1\",nusdx,nus,mdbCO,mdbH2O,cdbH2H2,cdbH2He)\n\n\n \n#Running a HMC-NUTS\nrng_key = random.PRNGKey(0)\nrng_key, rng_key_ = random.split(rng_key)\nnum_warmup, num_samples = 500, 1000\nkernel = NUTS(model_c,forward_mode_differentiation=True)\nmcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)\nmcmc.run(rng_key_, nu1=nusdx, y1=fobsx, e1=errx)\nprint(\"end HMC\")\n\n#Post-processing\nposterior_sample = mcmc.get_samples()\nnp.savez(\"npz/savepos.npz\",[posterior_sample])\n\npred = Predictive(model_c,posterior_sample,return_sites=[\"y1\"])\nnu = nus\npredictions = pred(rng_key_,nu1=nu,y1=None,e1=errx)\nmedian_mu = 
jnp.median(predictions[\"y1\"],axis=0)\nhpdi_mu = hpdi(predictions[\"y1\"], 0.9)\nnp.savez(\"npz/saveplotpred.npz\",[wavdx,fobsx,errx,median_mu,hpdi_mu])\n\nred=(1.0+28.07/300000.0) #for annotation\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20,6.0))\nax.plot(wavdx[::-1],median_mu,color=\"C0\")\nax.plot(wavdx[::-1],fobsx,\"+\",color=\"C1\",label=\"data\")\n\n#annotation for some lines\nax.plot([22913.3*red,22913.3*red],[0.6,0.75],color=\"C0\",lw=1)\nax.plot([22918.07*red,22918.07*red],[0.6,0.77],color=\"C1\",lw=1)\nax.plot([22955.67*red,22955.67*red],[0.6,0.68],color=\"C2\",lw=1)\nplt.text(22913.3*red,0.55,\"A\",color=\"C0\",fontsize=12,horizontalalignment=\"center\")\nplt.text(22918.07*red,0.55,\"B\",color=\"C1\",fontsize=12,horizontalalignment=\"center\")\nplt.text(22955.67*red,0.55,\"C\",color=\"C2\",fontsize=12,horizontalalignment=\"center\")\n#\n\nax.fill_between(wavdx[::-1], hpdi_mu[0], hpdi_mu[1], alpha=0.3, interpolate=True,color=\"C0\",\n label=\"90% area\")\nplt.xlabel(\"wavelength ($\\AA$)\",fontsize=16)\nplt.legend(fontsize=16)\nplt.tick_params(labelsize=16)\n\nplt.savefig(\"npz/results.pdf\", bbox_inches=\"tight\", pad_inches=0.0)\nplt.savefig(\"npz/results.png\", bbox_inches=\"tight\", pad_inches=0.0)\n\n#ARVIZ part\nimport arviz\nrc = {\n \"plot.max_subplots\": 250,\n}\n\n\narviz.rcParams.update(rc)\npararr=[\"Mp\",\"Rp\",\"T0\",\"alpha\",\"MMR_CO\",\"MMR_H2O\",\"vsini\",\"RV\",\"sigma\",\"q1\",\"q2\"]\narviz.plot_trace(mcmc, var_names=pararr)\nplt.savefig(\"npz/trace.png\")\npararr=[\"Mp\",\"Rp\",\"T0\",\"alpha\",\"MMR_CO\",\"MMR_H2O\",\"vsini\",\"RV\",\"sigma\",\"q1\",\"q2\"]\narviz.plot_pair(arviz.from_numpyro(mcmc),kind='kde',divergences=False,marginals=True)\nplt.savefig(\"npz/cornerall.png\")\n" ]
[ [ "numpy.array", "numpy.ones_like", "matplotlib.pyplot.text", "numpy.log", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.pyplot.tick_params", "numpy.savez", "pandas.read_csv" ] ]
Vignesh-95/seg-net-satellite-imagery
[ "5165b4d7d50e0646b710a9c8d700a4e815042ed8" ]
[ "seg-net-code-256-resolution-images/model.py" ]
[ "import tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import dtypes\n\nimport os, sys\nimport numpy as np\nimport math\nfrom datetime import datetime\nimport time\nfrom PIL import Image\nfrom math import ceil\nfrom tensorflow.python.ops import gen_nn_ops\n# modules\nfrom Utils import _variable_with_weight_decay, _variable_on_cpu, _add_loss_summaries, _activation_summary, print_hist_summery, get_hist, per_class_acc, writeImage\nfrom Inputs import *\n\n# TODO: CHANGE THESE\n# Constants describing the training process.\nMOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.\nNUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.\nLEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.\n\nINITIAL_LEARNING_RATE = 0.001 # Initial learning rate.\nEVAL_BATCH_SIZE = 8\nBATCH_SIZE = 8\n# for CamVid\nIMAGE_HEIGHT = 256\nIMAGE_WIDTH = 256\nIMAGE_DEPTH = 3\n\nNUM_CLASSES = 6\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 24\nNUM_EXAMPLES_PER_EPOCH_FOR_TEST = 24\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 1\nTEST_ITER = NUM_EXAMPLES_PER_EPOCH_FOR_TEST / BATCH_SIZE\n\ndef msra_initializer(kl, dl):\n \"\"\"\n kl for kernel size, dl for filter number\n \"\"\"\n stddev = math.sqrt(2. / (kl**2 * dl))\n return tf.truncated_normal_initializer(stddev=stddev)\n\ndef orthogonal_initializer(scale = 1.1):\n ''' From Lasagne and Keras. Reference: Saxe et al., http://arxiv.org/abs/1312.6120\n '''\n def _initializer(shape, dtype=tf.float32, partition_info=None):\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n # pick the one with the correct shape\n q = u if u.shape == flat_shape else v\n q = q.reshape(shape) #this needs to be corrected to float32\n return tf.constant(scale * q[:shape[0], :shape[1]], dtype=tf.float32)\n return _initializer\n\ndef loss(logits, labels):\n \"\"\"\n loss func without re-weighting\n \"\"\"\n # Calculate the average cross entropy loss across the batch.\n logits = tf.reshape(logits, (-1,NUM_CLASSES))\n labels = tf.reshape(labels, [-1])\n\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\ndef weighted_loss(logits, labels, num_classes, head=None):\n \"\"\" median-frequency re-weighting \"\"\"\n with tf.name_scope('loss'):\n\n logits = tf.reshape(logits, (-1, num_classes))\n\n epsilon = tf.constant(value=1e-10)\n\n logits = logits + epsilon\n\n # consturct one-hot label array\n label_flat = tf.reshape(labels, (-1, 1))\n\n # should be [batch ,num_classes]\n labels = tf.reshape(tf.one_hot(label_flat, depth=num_classes), (-1, num_classes))\n\n softmax = tf.nn.softmax(logits)\n\n cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax + epsilon), head), axis=[1])\n\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n\n tf.add_to_collection('losses', cross_entropy_mean)\n\n loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n return loss\n\ndef cal_loss(logits, labels):\n loss_weight = np.array([\n 0.2595,\n 0.1826,\n 4.5640,\n 0.1417,\n 0.9051,\n 0.3826]) # class 0~11\n\n labels = tf.cast(labels, tf.int32)\n # return loss(logits, labels)\n return weighted_loss(logits, labels, num_classes=NUM_CLASSES, 
head=loss_weight)\n\ndef conv_layer_with_bn(inputT, shape, train_phase, activation=True, name=None):\n in_channel = shape[2]\n out_channel = shape[3]\n k_size = shape[0]\n with tf.variable_scope(name) as scope:\n kernel = _variable_with_weight_decay('ort_weights', shape=shape, initializer=orthogonal_initializer(), wd=None)\n conv = tf.nn.conv2d(inputT, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [out_channel], tf.constant_initializer(0.0))\n bias = tf.nn.bias_add(conv, biases)\n if activation is True:\n conv_out = tf.nn.relu(batch_norm_layer(bias, train_phase, scope.name))\n else:\n conv_out = batch_norm_layer(bias, train_phase, scope.name)\n return conv_out\n\ndef get_deconv_filter(f_shape):\n \"\"\"\n reference: https://github.com/MarvinTeichmann/tensorflow-fcn\n \"\"\"\n width = f_shape[0]\n heigh = f_shape[0]\n f = ceil(width/2.0)\n c = (2 * f - 1 - f % 2) / (2.0 * f)\n bilinear = np.zeros([f_shape[0], f_shape[1]])\n for x in range(width):\n for y in range(heigh):\n value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n bilinear[x, y] = value\n weights = np.zeros(f_shape)\n for i in range(f_shape[2]):\n weights[:, :, i, i] = bilinear\n\n init = tf.constant_initializer(value=weights,\n dtype=tf.float32)\n return tf.get_variable(name=\"up_filter\", initializer=init,\n shape=weights.shape)\n\ndef deconv_layer(inputT, f_shape, output_shape, stride=2, name=None):\n # output_shape = [b, w, h, c]\n # sess_temp = tf.InteractiveSession()\n sess_temp = tf.global_variables_initializer()\n strides = [1, stride, stride, 1]\n with tf.variable_scope(name):\n weights = get_deconv_filter(f_shape)\n deconv = tf.nn.conv2d_transpose(inputT, weights, output_shape,\n strides=strides, padding='SAME')\n return deconv\n\ndef batch_norm_layer(inputT, is_training, scope):\n return tf.cond(is_training,\n lambda: tf.contrib.layers.batch_norm(inputT, is_training=True,\n center=False, updates_collections=None, scope=scope+\"_bn\"),\n lambda: tf.contrib.layers.batch_norm(inputT, is_training=False,\n updates_collections=None, center=False, scope=scope+\"_bn\", reuse = True))\n\n\ndef inference(images, labels, batch_size, phase_train):\n # norm1\n norm1 = tf.nn.lrn(images, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75,\n name='norm1')\n # conv1\n conv1 = conv_layer_with_bn(norm1, [7, 7, images.get_shape().as_list()[3], 64], phase_train, name=\"conv1\")\n # pool1\n pool1, pool1_indices = tf.nn.max_pool_with_argmax(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # conv2\n conv2 = conv_layer_with_bn(pool1, [7, 7, 64, 64], phase_train, name=\"conv2\")\n\n # pool2\n pool2, pool2_indices = tf.nn.max_pool_with_argmax(conv2, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n # conv3\n conv3 = conv_layer_with_bn(pool2, [7, 7, 64, 64], phase_train, name=\"conv3\")\n\n # pool3\n pool3, pool3_indices = tf.nn.max_pool_with_argmax(conv3, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool3')\n # conv4\n conv4 = conv_layer_with_bn(pool3, [7, 7, 64, 64], phase_train, name=\"conv4\")\n\n # pool4\n pool4, pool4_indices = tf.nn.max_pool_with_argmax(conv4, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool4')\n\n \"\"\" End of encoder \"\"\"\n \"\"\" start upsample \"\"\"\n # upsample4\n # Need to change when using different dataset out_w, out_h\n # upsample4 = upsample_with_pool_indices(pool4, pool4_indices, pool4.get_shape(), out_w=45, out_h=60, scale=2, name='upsample4')\n upsample4 = deconv_layer(pool4, 
[2, 2, 64, 64], [batch_size, int(IMAGE_WIDTH/2/2/2), int(IMAGE_HEIGHT/2/2/2), 64], 2, \"up4\")\n # decode 4\n conv_decode4 = conv_layer_with_bn(upsample4, [7, 7, 64, 64], phase_train, False, name=\"conv_decode4\")\n\n # upsample 3\n # upsample3 = upsample_with_pool_indices(conv_decode4, pool3_indices, conv_decode4.get_shape(), scale=2, name='upsample3')\n upsample3= deconv_layer(conv_decode4, [2, 2, 64, 64], [batch_size, int(IMAGE_WIDTH/2/2), int(IMAGE_HEIGHT/2/2), 64], 2, \"up3\")\n # decode 3\n conv_decode3 = conv_layer_with_bn(upsample3, [7, 7, 64, 64], phase_train, False, name=\"conv_decode3\")\n\n # upsample2\n # upsample2 = upsample_with_pool_indices(conv_decode3, pool2_indices, conv_decode3.get_shape(), scale=2, name='upsample2')\n upsample2= deconv_layer(conv_decode3, [2, 2, 64, 64], [batch_size, int(IMAGE_WIDTH/2), int(IMAGE_HEIGHT/2), 64], 2, \"up2\")\n # decode 2\n conv_decode2 = conv_layer_with_bn(upsample2, [7, 7, 64, 64], phase_train, False, name=\"conv_decode2\")\n\n # upsample1\n # upsample1 = upsample_with_pool_indices(conv_decode2, pool1_indices, conv_decode2.get_shape(), scale=2, name='upsample1')\n upsample1= deconv_layer(conv_decode2, [2, 2, 64, 64], [batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, 64], 2, \"up1\")\n # decode4\n conv_decode1 = conv_layer_with_bn(upsample1, [7, 7, 64, 64], phase_train, False, name=\"conv_decode1\")\n \"\"\" end of Decode \"\"\"\n \"\"\" Start Classify \"\"\"\n # output predicted class number (6)\n with tf.variable_scope('conv_classifier') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[1, 1, 64, NUM_CLASSES],\n initializer=msra_initializer(1, 64),\n wd=0.0005)\n conv = tf.nn.conv2d(conv_decode1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))\n conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)\n\n logit = conv_classifier\n loss = cal_loss(conv_classifier, labels)\n\n return loss, logit\n\ndef train(total_loss, global_step):\n # total_sample = 274\n # num_batches_per_epoch = 274/1\n \"\"\" fix lr \"\"\"\n lr = INITIAL_LEARNING_RATE\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n opt = tf.train.AdamOptimizer(lr)\n grads = opt.compute_gradients(total_loss)\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op\n\ndef test(FLAGS):\n max_steps = FLAGS.max_steps\n batch_size = FLAGS.batch_size\n train_dir = FLAGS.log_dir # /tmp3/first350/TensorFlow/Logs\n test_dir = FLAGS.test_dir # /tmp3/first350/SegNet-Tutorial/CamVid/train.txt\n test_ckpt = FLAGS.testing\n image_w = FLAGS.image_w\n image_h = FLAGS.image_h\n image_c = FLAGS.image_c\n # testing should set BATCH_SIZE = 1\n batch_size = 1\n\n image_filenames, label_filenames = get_filename_list(test_dir)\n\n test_data_node = tf.placeholder(\n tf.float32,\n shape=[batch_size, 
image_h, image_w, image_c])\n\n test_labels_node = tf.placeholder(tf.int64, shape=[batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, 1])\n\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n\n loss, logits = inference(test_data_node, test_labels_node, batch_size, phase_train)\n\n pred = tf.argmax(logits, axis=3)\n # get moving avg\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n\n saver = tf.train.Saver(variables_to_restore)\n\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.0001)\n\n with tf.Session() as sess:\n # Load checkpoint\n saver.restore(sess, test_ckpt )\n\n images, labels = get_all_test_data(image_filenames, label_filenames)\n\n threads = tf.train.start_queue_runners(sess=sess)\n hist = np.zeros((NUM_CLASSES, NUM_CLASSES))\n for image_batch, label_batch in zip(images, labels):\n feed_dict = {\n test_data_node: image_batch,\n test_labels_node: label_batch,\n phase_train: False\n }\n\n dense_prediction, im = sess.run([logits, pred], feed_dict=feed_dict)\n # output_image to verify\n if (FLAGS.save_image):\n writeImage(im[0], 'testing_image.png')\n # writeImage(im[0], 'out_image/'+str(image_filenames[count]).split('/')[-1])\n\n hist += get_hist(dense_prediction, label_batch)\n # count+=1\n acc_total = np.diag(hist).sum() / hist.sum()\n iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n print(\"acc: \", acc_total)\n print(\"mean IU: \", np.nanmean(iu))\n\ndef training(FLAGS, is_finetune=False):\n max_steps = FLAGS.max_steps\n batch_size = FLAGS.batch_size\n train_dir = FLAGS.log_dir # /tmp3/first350/TensorFlow/Logs\n image_dir = FLAGS.image_dir # /tmp3/first350/SegNet-Tutorial/CamVid/train.txt\n val_dir = FLAGS.val_dir # /tmp3/first350/SegNet-Tutorial/CamVid/val.txt\n finetune_ckpt = FLAGS.finetune\n image_w = FLAGS.image_w\n image_h = FLAGS.image_h\n image_c = FLAGS.image_c\n # should be changed if your model stored by different convention\n startstep = 0 if not is_finetune else int(FLAGS.finetune.split('-')[-1])\n\n image_filenames, label_filenames = get_filename_list(image_dir)\n val_image_filenames, val_label_filenames = get_filename_list(val_dir)\n\n with tf.Graph().as_default():\n\n train_data_node = tf.placeholder( tf.float32, shape=[batch_size, image_h, image_w, image_c])\n\n train_labels_node = tf.placeholder(tf.int64, shape=[batch_size, image_h, image_w, 1])\n\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n\n global_step = tf.Variable(0, trainable=False)\n\n # For CamVid\n images, labels = CamVidInputs(image_filenames, label_filenames, batch_size)\n\n val_images, val_labels = CamVidInputs(val_image_filenames, val_label_filenames, batch_size)\n\n # Build a Graph that computes the logits predictions from the inference model.\n loss, eval_prediction = inference(train_data_node, train_labels_node, batch_size, phase_train)\n\n # Build a Graph that trains the model with one batch of examples and updates the model parameters.\n train_op = train(loss, global_step)\n\n saver = tf.train.Saver(tf.global_variables())\n\n summary_op = tf.summary.merge_all()\n\n # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.0001)\n\n with tf.Session() as sess:\n # Build an initialization operation to run below.\n if (is_finetune == True):\n saver.restore(sess, finetune_ckpt )\n else:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n threads = 
tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Summery placeholders\n summary_writer = tf.summary.FileWriter(train_dir, sess.graph)\n average_pl = tf.placeholder(tf.float32)\n acc_pl = tf.placeholder(tf.float32)\n iu_pl = tf.placeholder(tf.float32)\n average_summary = tf.summary.scalar(\"test_average_loss\", average_pl)\n acc_summary = tf.summary.scalar(\"test_accuracy\", acc_pl)\n iu_summary = tf.summary.scalar(\"Mean_IU\", iu_pl)\n\n for step in range(startstep, startstep + max_steps):\n image_batch ,label_batch = sess.run([images, labels])\n # since we still use mini-batches in validation, still set bn-layer phase_train = True\n feed_dict = {\n train_data_node: image_batch,\n train_labels_node: label_batch,\n phase_train: True\n }\n start_time = time.time()\n\n _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 10 == 0:\n num_examples_per_step = batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), step, loss_value,\n examples_per_sec, sec_per_batch))\n\n # eval current training batch pre-class accuracy\n pred = sess.run(eval_prediction, feed_dict=feed_dict)\n per_class_acc(pred, label_batch)\n\n if step % 100 == 0:\n print(\"start validating.....\")\n total_val_loss = 0.0\n hist = np.zeros((NUM_CLASSES, NUM_CLASSES))\n for test_step in range(int(TEST_ITER)):\n val_images_batch, val_labels_batch = sess.run([val_images, val_labels])\n\n _val_loss, _val_pred = sess.run([loss, eval_prediction], feed_dict={\n train_data_node: val_images_batch,\n train_labels_node: val_labels_batch,\n phase_train: True\n })\n total_val_loss += _val_loss\n hist += get_hist(_val_pred, val_labels_batch)\n print(\"val loss: \", total_val_loss / TEST_ITER)\n acc_total = np.diag(hist).sum() / hist.sum()\n iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n test_summary_str = sess.run(average_summary, feed_dict={average_pl: total_val_loss / TEST_ITER})\n acc_summary_str = sess.run(acc_summary, feed_dict={acc_pl: acc_total})\n iu_summary_str = sess.run(iu_summary, feed_dict={iu_pl: np.nanmean(iu)})\n print_hist_summery(hist)\n print(\" end validating.... \")\n\n summary_str = sess.run(summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n summary_writer.add_summary(test_summary_str, step)\n summary_writer.add_summary(acc_summary_str, step)\n summary_writer.add_summary(iu_summary_str, step)\n # Save the model checkpoint periodically.\n if step % 1000 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n coord.request_stop()\n coord.join(threads)\n" ]
[ [ "tensorflow.contrib.layers.batch_norm", "tensorflow.train.start_queue_runners", "tensorflow.constant_initializer", "tensorflow.nn.conv2d", "tensorflow.nn.lrn", "tensorflow.reshape", "numpy.nanmean", "tensorflow.control_dependencies", "tensorflow.nn.softmax", "tensorflow.one_hot", "tensorflow.global_variables_initializer", "tensorflow.add_to_collection", "tensorflow.cast", "tensorflow.no_op", "tensorflow.trainable_variables", "numpy.random.normal", "tensorflow.argmax", "tensorflow.summary.histogram", "tensorflow.train.Saver", "tensorflow.Variable", "tensorflow.global_variables", "tensorflow.constant", "tensorflow.variable_scope", "numpy.prod", "tensorflow.nn.conv2d_transpose", "tensorflow.nn.bias_add", "tensorflow.get_collection", "numpy.array", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.train.Coordinator", "tensorflow.summary.scalar", "tensorflow.Session", "tensorflow.log", "tensorflow.placeholder", "tensorflow.get_variable", "numpy.linalg.svd", "tensorflow.name_scope", "tensorflow.summary.merge_all", "tensorflow.nn.max_pool_with_argmax", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.isnan", "tensorflow.Graph", "tensorflow.train.ExponentialMovingAverage", "tensorflow.truncated_normal_initializer", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "numpy.diag" ] ]
zdassen/ml-tools
[ "a5ee4e06c6c808a3f78890dfae868c79fd8299fa" ]
[ "lib/data_loaders.py" ]
[ "# -*- coding: utf-8 -*-\r\n#\r\n# ml-tools/lib/data_loaders.py\r\n#\r\nimport numpy as np\r\n\r\n# TensorFlow & keras\r\nimport os\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\r\nimport tensorflow as tf\r\nfrom tensorflow.python.keras.datasets import mnist\r\n\r\n\r\ndef get_rinds_label(y, label, n):\r\n \"\"\"Randomly pick n indices belonging to the specified digit\"\"\"\r\n is_label = np.where(y == label)[0]\r\n ri = np.random.choice(is_label, n, replace=False)\r\n return ri\r\n\r\n\r\ndef get_petit_MNIST(X, y, n_images_per_label):\r\n \"\"\"\r\n Load a small-scale MNIST dataset\r\n\r\n Extracts n_images_per_label samples for each digit\r\n \"\"\"\r\n\r\n n_labels = 10\r\n for label in range(n_labels):\r\n ri = get_rinds_label(y, label, n_images_per_label)\r\n\r\n # First iteration\r\n if label == 0:\r\n X_petit = X[ri, :]\r\n y_petit = y[ri]\r\n\r\n # Subsequent iterations\r\n else:\r\n X_petit = np.vstack((X_petit, X[ri, :]))\r\n y_petit = np.hstack((y_petit, y[ri]))\r\n\r\n # end of for label in range(n_labels) ...\r\n\r\n return X_petit, y_petit\r\n\r\n\r\ndef get_MNIST_train_test(n_images_per_label=None,\r\n test_size=0.2, standardize=True):\r\n \"\"\"Load the MNIST dataset\"\"\"\r\n\r\n # Load the training and test data (using keras)\r\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\r\n\r\n # When using a small-scale dataset\r\n if n_images_per_label:\r\n assert isinstance(n_images_per_label, int)\r\n\r\n # Number of test samples = training size x test_size (rounded down)\r\n tst_size = int(n_images_per_label * test_size)\r\n\r\n # Randomly extract the specified number of samples and labels\r\n (X_train, y_train), (X_test, y_test) = [\r\n get_petit_MNIST(X, y, size)\r\n for X, y, size in (\r\n (X_train, y_train, n_images_per_label),\r\n (X_test, y_test, tst_size),\r\n )\r\n ]\r\n\r\n # Apply standardization\r\n if standardize:\r\n X_train, X_test = [X / 255. for X in (X_train, X_test)]\r\n\r\n return X_train, y_train, X_test, y_test" ]
[ [ "numpy.random.choice", "numpy.where", "tensorflow.python.keras.datasets.mnist.load_data", "numpy.hstack", "numpy.vstack" ] ]
cthoyt/embeddingdb
[ "e6c67e92e540c4315045a0b4de5b31490331c177" ]
[ "src/embeddingdb/sql/analysis.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Compute cross-correlations in embedding collections.\"\"\"\n\nfrom typing import BinaryIO, Mapping, Optional, Type, Union\n\nimport click\nimport joblib\nfrom sklearn.base import RegressorMixin\nfrom sklearn.cross_decomposition import CCA, PLSRegression\nfrom sklearn.linear_model import (\n LinearRegression, MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso,\n MultiTaskLassoCV,\n)\nfrom sklearn.metrics import r2_score\nfrom sqlalchemy.orm import Session\n\nfrom embeddingdb.constants import config\nfrom embeddingdb.sql.models import Collection, get_session\n\n__all__ = [\n 'perform_regression',\n 'main',\n]\n\n_REGRESSIONS = {\n 'linear': LinearRegression,\n 'pls': PLSRegression,\n 'cca': CCA,\n 'elastic': MultiTaskElasticNet,\n 'elastic-cv': MultiTaskElasticNetCV,\n 'lasso': MultiTaskLasso,\n 'lasso-cv': MultiTaskLassoCV,\n # 'svr': sklearn.svm.SVR,\n}\n\n\ndef calculate_overlap():\n \"\"\"Calculate the pairwise overlap between all collections.\"\"\"\n raise NotImplementedError\n\n\ndef perform_regression(\n collection_1: Collection,\n collection_2: Collection,\n regression_cls: Union[None, str, Type[RegressorMixin]] = None,\n regression_kwargs: Optional[Mapping] = None,\n output: Union[None, str, BinaryIO] = None,\n):\n \"\"\"Perform a regression between two collections of embeddings and evaluate the results.\n\n :param collection_1: The first collection\n :param collection_2: The second collection\n :param regression_cls: Class or shortcut name to class that is a ``RegressorMixin``. Valid shortcuts are 'linear',\n 'pls', 'cca', 'elastic', 'elastic-cv', 'lasso', and 'lasso-cv'.\n :param regression_kwargs: Keyword arguments to pass to the regressor class on instantiation\n :param output: Optional path to output the regressor model using ``joblib``\n :return: A 4-tuple of the fitted regressor, its R^2 score, the size of the index intersection, and the\n intersection as a fraction of the smaller collection\n \"\"\"\n if regression_cls is None:\n regression_cls = LinearRegression\n elif isinstance(regression_cls, str):\n regression_cls = _REGRESSIONS[regression_cls]\n elif not issubclass(regression_cls, RegressorMixin):\n raise TypeError(f'regression_cls had invalid type: {regression_cls}')\n\n clf = regression_cls(**(regression_kwargs or {}))\n x = collection_1.as_dataframe()\n y = collection_2.as_dataframe()\n\n index = x.index & y.index\n x = x.loc[index]\n y = y.loc[index]\n\n clf.fit(x, y)\n\n if output is not None:\n joblib.dump(clf, output)\n\n y_pred = clf.predict(x)\n r2 = r2_score(y, y_pred)\n\n return clf, r2, len(index), len(index) / min(len(x.index), len(y.index))\n\n\ndef _get_collection(session: Session, collection_id: int) -> Collection:\n \"\"\"Get a collection by its identifier.\"\"\"\n return session.query(Collection).get(collection_id)\n\n\[email protected]()\[email protected]('id_1', type=int)\[email protected]('id_2', type=int)\[email protected]('-m', '--model', type=click.Choice(list(_REGRESSIONS)), default='linear')\[email protected]('-o', '--output', type=click.File('wb'))\[email protected]_connection_option()\ndef main(id_1: int, id_2: int, model: Optional[str], output: Optional[BinaryIO], connection: str):\n \"\"\"Perform a regression between two collections.\"\"\"\n session = get_session(connection=connection)\n collection_1 = _get_collection(session, id_1)\n collection_2 = _get_collection(session, id_2)\n\n clf, r2, intersect, intersect_percent = perform_regression(\n collection_1,\n collection_2,\n regression_cls=model,\n output=output,\n )\n click.echo(f'Model: {clf}')\n click.echo(f'Dimensions: {clf.coef_.shape}')\n click.echo(f'R^2: {r2:.3f}')\n click.echo(f'Intersection: {intersect} 
({intersect_percent:.1%})')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.metrics.r2_score" ] ]
mila-iqia/COVI-ML
[ "5c6e7441d2ddfe2d11d2e76a884daeb9a114048d" ]
[ "ctt/models/modules.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom ctt.utils import thermometer_encoding, compute_moments\n\n\nclass HealthHistoryEmbedding(nn.Sequential):\n def __init__(self, in_features, embedding_size, capacity=128, dropout=0.1):\n super(HealthHistoryEmbedding, self).__init__(\n nn.Linear(in_features, capacity),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(capacity, embedding_size),\n )\n\n def forward(self, input, mask=None):\n output = super(HealthHistoryEmbedding, self).forward(input)\n if mask is not None:\n output = output * mask[:, :, None]\n return output\n\n\nclass HealthProfileEmbedding(HealthHistoryEmbedding):\n pass\n\n\nclass MessageEmbedding(nn.Sequential):\n def __init__(self, message_dim, embedding_size, capacity=128, dropout=0.1):\n super(MessageEmbedding, self).__init__(\n nn.Linear(message_dim, capacity),\n nn.Dropout(dropout),\n nn.ReLU(),\n nn.Linear(capacity, embedding_size),\n )\n\n def forward(self, input, mask=None):\n output = super(MessageEmbedding, self).forward(input)\n if mask is not None:\n output = output * mask[:, :, None]\n return output\n\n\nclass PartnerIdEmbedding(nn.Linear):\n def __init__(self, num_id_bits, embedding_size):\n super(PartnerIdEmbedding, self).__init__(num_id_bits, embedding_size)\n\n def forward(self, input, mask=None):\n output = super(PartnerIdEmbedding, self).forward(input)\n if mask is not None:\n output = output * mask[:, :, None]\n return output\n\n\nclass DurationEmbedding(HealthHistoryEmbedding):\n EPS = 0.0001\n\n def __init__(\n self,\n embedding_size,\n num_thermo_bins=32,\n capacity=128,\n dropout=0.1,\n thermo_range=(0.0, 6.0),\n ):\n super(DurationEmbedding, self).__init__(\n in_features=num_thermo_bins,\n embedding_size=embedding_size,\n capacity=capacity,\n dropout=dropout,\n )\n self.num_thermo_bins = num_thermo_bins\n self.thermo_range = thermo_range\n\n def forward(self, input, mask=None):\n assert input.shape[-1] == 1\n encoded_input = thermometer_encoding(\n torch.log(input + self.EPS),\n value_range=self.thermo_range,\n size=self.num_thermo_bins,\n )\n return super(DurationEmbedding, self).forward(encoded_input, mask)\n\n\nclass EntityMasker(nn.Module):\n EPS = 1e-7\n\n def __init__(self, mode=\"multiplicative\"):\n super(EntityMasker, self).__init__()\n assert mode in [\"multiplicative\", \"logsum\"]\n self.mode = mode\n\n def forward(self, entities, mask):\n assert mask.shape[0:2] == entities.shape[0:2]\n if self.mode == \"multiplicative\":\n return entities * mask[:, :, None]\n elif self.mode == \"logsum\":\n with torch.no_grad():\n log_mask = torch.log(mask.clamp_min(0.0) + self.EPS)\n return entities + log_mask[:, :, None]\n else:\n raise NotImplementedError\n\n\nclass TimeEmbedding(nn.Embedding):\n def __init__(self, embedding_size, num_timestamps=14):\n super(TimeEmbedding, self).__init__(\n num_embeddings=num_timestamps, embedding_dim=embedding_size\n )\n\n def forward(self, timestamps, mask=None):\n timestamps = timestamps.abs().long()\n if timestamps.dim() == 3:\n timestamps = timestamps[..., 0]\n assert timestamps.dim() == 2\n output = super(TimeEmbedding, self).forward(timestamps)\n if mask is not None:\n output = output * mask[:, :, None]\n return output\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(\n self, encoding_dim=16, position_dim=1, max_frequency=10000, normalize=True,\n ):\n super(PositionalEncoding, self).__init__()\n assert (\n encoding_dim % position_dim\n ) == 0, \"Encoding dim must be divisible by the position dim.\"\n assert (\n (encoding_dim // position_dim) % 2\n ) == 0, 
\"Encoding dim / position dim must be even.\"\n self.encoding_dim = encoding_dim\n self.position_dim = position_dim\n self.max_frequency = max_frequency\n self.normalize = normalize\n\n def get_exponents(self, device=None):\n return torch.arange(\n 0,\n self.encoding_dim // self.position_dim,\n 2,\n dtype=torch.float,\n device=device,\n )\n\n def forward(self, positions, mask=None):\n assert positions.ndim == 3\n # positions.shape = NTD, where D = self.position_dim\n N, T, D = positions.shape\n assert D == self.position_dim\n # The final encoding.shape = NTC, where C = self.encoding_dim,\n # but per input dimension, we get C // D encoding dimensions. Let C' = C // D.\n encoding_dim_per_dim = self.encoding_dim // D\n # exps is like `i` in Attention is All You Need.\n exps = self.get_exponents(device=positions.device)\n # Divisor is 10000^(i/encoding_dim), but reshaped for proper broadcasting\n divisors = torch.pow(self.max_frequency, (exps / encoding_dim_per_dim))[\n None, None, None, :\n ]\n # pre_sinusoids is a NTD(C'/2) tensor.\n pre_sinusoids = positions[:, :, :, None] / divisors\n # Apply sinusoids to obtain a NTDC' tensor.\n post_sinusoids = torch.cat(\n [torch.sin(pre_sinusoids), torch.cos(pre_sinusoids)], dim=-1\n )\n # Now flatten the last two dimensions to obtain a NTC tensor (remember C = D * C')\n encodings = post_sinusoids.reshape(N, T, self.encoding_dim)\n # Normalize if required\n if self.normalize:\n encodings = encodings / torch.norm(encodings, dim=-1, keepdim=True)\n if mask is not None:\n encodings = encodings * (mask[:, :, None])\n return encodings\n\n\nclass Moments(nn.Module):\n def __init__(self, num_moments=2, dim=None):\n super(Moments, self).__init__()\n self.num_moments = num_moments\n self.dim = dim\n\n def forward(self, x, mask=None, dim=None):\n dim = dim or self.dim\n assert dim is not None\n return compute_moments(x, mask=mask, dim=dim, num_moments=self.num_moments)\n" ]
[ [ "torch.nn.Linear", "torch.cos", "torch.nn.Dropout", "torch.arange", "torch.sin", "torch.norm", "torch.no_grad", "torch.nn.ReLU", "torch.log", "torch.pow" ] ]
fxcqz/Primes
[ "66d2e7e426c61c2eab74fc0d6c502f25be781ee4" ]
[ "primes/tests/utils/test_customcomplex.py" ]
[ "from nose.tools import *\nfrom primes.utils.custom_complex import CustomComplex\nimport numpy\n\n\ndef test_complex_init():\n # 2 ways of instantiating custom complex\n a = CustomComplex(2, 3)\n assert_equals(numpy.real(a), 2)\n assert_equals(numpy.imag(a), 3)\n b = CustomComplex(complex(1, 2))\n assert_equals(numpy.real(b), 1)\n assert_equals(numpy.imag(b), 2)\n\ndef test_cc_eq():\n assert_true(CustomComplex(0, 0) == CustomComplex(0, 0))\n assert_false(CustomComplex(1, 0) == CustomComplex(0, 1))\n assert_false(CustomComplex(0, 0) == CustomComplex(1, 1))\n\ndef test_cc_lt():\n assert_true(CustomComplex(0, 0) < CustomComplex(1, 1))\n assert_false(CustomComplex(1, 1) < CustomComplex(0, 0))\n\ndef test_cc_le():\n assert_true(CustomComplex(0, 0) <= CustomComplex(1, 1))\n assert_true(CustomComplex(1, 1) <= CustomComplex(1, 1))\n assert_false(CustomComplex(1, 1) <= CustomComplex(0, 0))\n\ndef test_cc_gt():\n assert_true(CustomComplex(1, 1) > CustomComplex(0, 0))\n assert_false(CustomComplex(0, 0) > CustomComplex(1, 1))\n" ]
[ [ "numpy.imag", "numpy.real" ] ]
Ahrvo-Trading-Systems/tcapy
[ "df8439aa5c754fc9a7fde463c44c489b27112f76", "df8439aa5c754fc9a7fde463c44c489b27112f76" ]
[ "tcapy_scripts/gen/copy_parquet_to_arrow.py", "test/test_tcapy/test_data_vendor_feed.py" ]
[ "\"\"\"Copies a folder of parquet files into another into arrow folder for use with vaex. Note you need to install vaex\nlibrary in addition to use this.\n\"\"\"\n\nfrom __future__ import print_function, division\n\n__author__ = 'saeedamen' # Saeed Amen / [email protected]\n\n#\n# Copyright 2021 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro\n#\n# See the License for the specific language governing permissions and limitations under the License.\n#\n\nif __name__ == '__main__':\n import time\n import vaex\n import pandas as pd\n\n import glob\n import os\n\n from findatapy.util.loggermanager import LoggerManager\n\n start = time.time()\n\n data_vendor = 'dukascopy' # 'ncfx' or 'dukascopy'\n\n source_folder = '/data/csv_dump/' + data_vendor + '/'\n destination_folder = '/data/csv_dump/' + data_vendor + '_arrow/'\n\n logger = LoggerManager().getLogger(__name__)\n\n parquet_list = glob.glob(source_folder + '/*.parquet')\n\n for p in parquet_list:\n df = pd.read_parquet(p)\n\n df = vaex.from_pandas(df, name='pandas', copy_index=True, index_name='Date')\n\n logger.info(\"Converting \" + p + \"...\")\n filename = os.path.basename(p)\n\n df.export(destination_folder + \"/\" + filename.replace('parquet', 'arrow'))\n\n finish = time.time()\n print('Status: calculated ' + str(round(finish - start, 3)) + \"s\")\n", "\"\"\"Tests functions for downloading data from external sources including Dukascopy and New Change FX (NCFX) - which\nare external data source and also dumping to disk\n\"\"\"\n\n__author__ = 'saeedamen' # Saeed Amen / [email protected]\n\n#\n# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro\n#\n# See the License for the specific language governing permissions and limitations under the License.\n#\n\nimport pytz\n\nfrom pandas.testing import assert_frame_equal\n\nimport glob\n\nfrom tcapy.conf.constants import Constants\nfrom tcapy.util.loggermanager import LoggerManager\nfrom tcapy.util.mediator import Mediator\nfrom tcapy.util.utilfunc import UtilFunc\n\nfrom tcapy.data.databasepopulator import DatabasePopulatorNCFX, DatabasePopulatorDukascopy\nfrom tcapy.data.databasesource import DatabaseSourceNCFX, DatabaseSourceDukascopy\n\nfrom test.config import *\n\nlogger = LoggerManager().getLogger(__name__)\n\nconstants = Constants()\nutil_func = UtilFunc()\n\n# check that your database has market and trade data for these before running the test\n# check that you have created appropriate folders for storing data\n\nlogger.info('Make sure you have created folder ' + constants.csv_folder + ' & ' + constants.temp_data_folder +\n ' otherwise tests will fail')\n\nMediator.get_volatile_cache().clear_cache()\n\n########################################################################################################################\n# YOU MAY NEED TO CHANGE THESE\n\nstart_date = '26 Apr 2017'\nfinish_date = '05 Jun 2017'\nticker = 'EURUSD'\n\nread_cached_from_disk = False # Generally want to download from the data vendor to test\n\n# Can test web proxies (can run a pure Python proxy server https://pypi.org/project/pproxy/)\n# or alternatively use web proxy provided by your internal IT team (more realistic environment, also to help test\n# firewall issues)\nweb_proxies = {'https' : None}\n\n# web_proxies = {\n# 'http' : \"http://127.0.0.1:8080\",\n# 'https' : \"https://127.0.0.1:7000\",\n# }\n\nchunk_int_min_dict = {'dukascopy' : None, 'ncfx' : 60} # number of minutes to download from data vendor (eg. 
5 minutes)\n\n########################################################################################################################\nfolder = Constants().test_data_harness_folder\n\n#### Change for your data vendor\ndata_vendor_name_list = ['dukascopy'] # ['ncfx', 'dukascopy']\n\ndatabase_populator_dict = {'dukascopy' : DatabasePopulatorDukascopy(), 'ncfx' : DatabasePopulatorNCFX()}\ndatabase_source_dict = {'dukascopy' : DatabaseSourceDukascopy(), 'ncfx' : DatabaseSourceNCFX()}\n\nif constants.ncfx_url is None and 'ncfx' in data_vendor_name_list:\n data_vendor_name_list.remove('ncfx')\n\nif constants.ncfx_url is not None and 'ncfx' in data_vendor_name_list:\n if len(constants.ncfx_url) < 10:\n data_vendor_name_list.remove('ncfx')\n\ninvalid_start_date = '01 Jan 1999'\ninvalid_finish_date = '01 Feb 1999'\n\nuse_multithreading = False\n\ndef test_fetch_market_data_from_data_vendor():\n \"\"\"Test the various downloaders (low level one, high level one - also with invalid dates, to test error messages)\n \"\"\"\n\n # test for every data vendor\n for data_vendor_name in data_vendor_name_list:\n\n database_source = database_source_dict[data_vendor_name]\n database_populator = database_populator_dict[data_vendor_name]\n chunk_int_min = chunk_int_min_dict[data_vendor_name]\n\n # Test the low level downloader (to download in one chunk) - DatabaseSource\n start_date = '04 Dec 2017 10:00'; finish_date = '04 Dec 2017 10:05'\n\n df_low_level = database_source.fetch_market_data(start_date, finish_date, 'EURUSD', web_proxies=web_proxies)\n\n start_date = pd.Timestamp(start_date).tz_localize('utc')\n finish_date = pd.Timestamp(finish_date).tz_localize('utc')\n\n assert not(df_low_level.empty) and df_low_level.index[0] >= start_date \\\n and df_low_level.index[-1] <= finish_date\n\n # Test the high level downloader, which can download multiple chunks (don't write anything to disk) - DatabasePopulator\n start_date = '04 Dec 2017 10:00'; finish_date = '04 Dec 2017 10:20'\n\n msg, df_high_level_dict = database_populator.download_from_external_source(\n remove_duplicates=True,\n number_of_days=30 * 7, chunk_int_min=chunk_int_min,\n start_date=start_date, finish_date=finish_date, delete_cached_files=False, tickers='EURUSD',\n write_to_disk_db=False, read_cached_from_disk=read_cached_from_disk, return_df=True, web_proxies=web_proxies)\n\n start_date = pd.Timestamp(start_date).tz_localize('utc')\n finish_date = pd.Timestamp(finish_date).tz_localize('utc')\n\n df_high_level = df_high_level_dict[ticker]\n\n # Check to make sure the start/finish dates are within bounds and also no error messages returned\n assert not(df_high_level.empty) and df_high_level.index[0] >= start_date \\\n and df_high_level.index[-1] <= finish_date and msg == []\n\n # Now try dates with no data in the weekend, which should return back a None and also an error message, which\n # can be collected and returned to the user\n start_date = '03 Dec 2017 10:00';\n finish_date = '03 Dec 2017 10:20'\n\n msg, df_invalid = database_populator.download_from_external_source(remove_duplicates=True,\n number_of_days=30 * 7,\n chunk_int_min=chunk_int_min,\n start_date=start_date,\n finish_date=finish_date,\n delete_cached_files=False, tickers='EURUSD',\n write_to_disk_db=False,\n read_cached_from_disk=read_cached_from_disk,\n web_proxies=web_proxies)\n\n # check to make sure the start/finish dates are within bounds and also no error messages returned\n assert df_invalid == {} and \"No downloaded data\" in msg[0]\n\ndef 
test_write_csv_from_data_vendor():\n \"\"\"Tests downloading market data from the data vendor and dumping to CSV. Checks written CSV against what is loaded\n in memory. Also checks data is available in each 'usual' market hour.\n\n Note, that we use cached data from disk, as we want to download relatively large sections of data, and doing\n this externally can cause the test to run very slowly.\n \"\"\"\n\n for data_vendor_name in data_vendor_name_list:\n\n # database_source = database_source_dict[data_vendor_name]\n database_populator = database_populator_dict[data_vendor_name]\n chunk_int_min = chunk_int_min_dict[data_vendor_name]\n\n # Specifically choose dates which straddle the weekend boundary\n # 1) during British Summer Time in London\n # 2) during GMT time in London\n start_date = '27 Apr 2018'; finish_date = '03 May 2018'; expected_csv_files = 5\n # start_date = '02 Feb 2018'; finish_date = '07 Feb 2018'; expected_csv_files = 4\n split_size = 'daily'\n write_csv = False\n\n # Prepare the CSV folder first\n csv_folder = resource('csv_' + data_vendor_name + '_dump')\n\n # Empty the CSV test harness folder\n UtilFunc().forcibly_create_empty_folder(csv_folder)\n\n msg, df_dict = database_populator.download_to_csv(\n start_date, finish_date, ['EURUSD'], chunk_int_min=chunk_int_min, split_size=split_size, csv_folder=csv_folder,\n return_df=True, write_large_csv=write_csv, remove_duplicates=False, web_proxies=web_proxies)\n\n df_read_direct_from_data_vendor = df_dict['EURUSD']\n\n # Check it has data for every market hour (eg. ignoring Saturdays)\n assert util_func.check_data_frame_points_in_every_hour(df_read_direct_from_data_vendor, start_date, finish_date)\n\n if write_csv:\n # read back the CSVs dumped on disk in the test harness CSV folder\n csv_file_list = glob.glob(csv_folder + '/EURUSD*.csv')\n\n assert len(csv_file_list) == expected_csv_files\n\n df_list = []\n\n for c in csv_file_list:\n df = pd.read_csv(c, index_col=0)\n df.index = pd.to_datetime(df.index)\n df_list.append(df)\n\n # now compare the CSVs on disk versus those read directly\n df_read_from_csv = pd.concat(df_list).tz_localize(pytz.utc)\n\n assert_frame_equal(df_read_from_csv, df_read_direct_from_data_vendor)\n\ndef test_daily_download_boundary_from_data_vendor():\n \"\"\"Tests that data over a daily boundary still downloads correctly from the data vendor\n \"\"\"\n\n for data_vendor_name in data_vendor_name_list:\n\n database_populator = database_populator_dict[data_vendor_name]\n chunk_int_min = chunk_int_min_dict[data_vendor_name]\n\n start_date = '29 Apr 2018 21:00'; # Saturday\n finish_date = '30 Apr 2018 01:00'; # Monday\n\n msg, df = database_populator.download_from_external_source(remove_duplicates=False,\n chunk_int_min=chunk_int_min,\n start_date=start_date,\n finish_date=finish_date,\n delete_cached_files=False, tickers='EURUSD',\n write_to_disk_db=False,\n read_cached_from_disk=read_cached_from_disk,\n return_df=True, web_proxies=web_proxies)\n\n assert util_func.check_data_frame_points_in_every_hour(df['EURUSD'], start_date, finish_date)\n\ndef test_weekend_download_boundary_from_data_vendor():\n \"\"\"Tests that data over a weekend boundary still works from the data vendor (note: shouldn't have any data on Saturday)\n \"\"\"\n\n for data_vendor_name in data_vendor_name_list:\n\n database_populator = database_populator_dict[data_vendor_name]\n chunk_int_min = chunk_int_min_dict[data_vendor_name]\n\n # start_date = '12 Jan 2018 19:00';\n # finish_date = '15 Jan 2018 05:00';\n\n # Fri 12 - Mon 15 Jan 
2018\n start_date = '12 Jan 2018 21:00';\n finish_date = '15 Jan 2018 01:00';\n\n msg, df = database_populator.download_from_external_source(remove_duplicates=False,\n chunk_int_min=chunk_int_min,\n start_date=start_date,\n finish_date=finish_date,\n delete_cached_files=False, tickers='EURUSD',\n write_to_disk_db=False,\n read_cached_from_disk=read_cached_from_disk,\n return_df=True, web_proxies=web_proxies)\n\n # Note: this will exclude data when FX market is not trading, eg. Saturday\n assert util_func.check_data_frame_points_in_every_hour(df['EURUSD'], start_date, finish_date)\n" ]
[ [ "pandas.read_parquet" ], [ "pandas.testing.assert_frame_equal" ] ]
Arya-A-Nair/learning-data-science
[ "0ecd48f7379a202b59dc93f59b9447f86512bd86" ]
[ "module_2.3_Binning.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndf=pd.read_csv(\"corrected_data.csv\")\n\n#converting dtype=object to dtype=float64\ndf['price'] = pd.to_numeric(df['price'], errors='coerce')\n\n#to get evenly spaced integers over a range of numbers\nbins=np.linspace(min(df[\"price\"]), max(df[\"price\"]),4)\n\ngroup_names=[\"Low\",\"Medium\",\"High\"]\n\n#to bin the data into 4 segments\ndf[\"price-binned\"]=pd.cut(df[\"price\"],bins,labels=group_names, include_lowest=True)\n\n\n#to save the binned data to csv format\ndf[\"price-binned\"].to_csv(\"price_binned.csv\")\n" ]
[ [ "pandas.read_csv", "pandas.to_numeric", "pandas.cut" ] ]
sem-onyalo/mlm-4-dcgan-celeba
[ "3fdfbfe88de3877a4273cdfe84e52978dcc27bf2" ]
[ "discriminator.py" ]
[ "from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, LeakyReLU\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\n\ndef createDiscriminator(n_inputs=(80,80,3)):\n model = Sequential()\n model.add(Conv2D(128, (5,5), padding='same', input_shape=n_inputs))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.4))\n model.add(Conv2D(128, (5,5), (2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.4))\n model.add(Conv2D(128, (5,5), (2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.4))\n model.add(Conv2D(128, (5,5), (2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.4))\n model.add(Conv2D(128, (5,5), (2,2), padding='same'))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation='sigmoid'))\n opt = Adam(learning_rate=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n return model\n\nif __name__ == '__main__':\n discriminator = createDiscriminator()\n discriminator.summary()" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.Sequential", "tensorflow.keras.optimizers.Adam" ] ]
FanKuan44/NASLib
[ "a8226e76e3dfe289f0c412bf48d197db17a03b9c" ]
[ "naslib/search_spaces/nasbench101/conversions.py" ]
[ "import numpy as np\n\n\"\"\"\n'naslib': the NASBench101SearchSpace object\n'spec': adjacency matrix + op list\n\"\"\"\n\n\ndef convert_naslib_to_spec(naslib_object):\n\n matrix = np.triu(np.ones((7, 7)), 1)\n\n ops_to_nb101 = {\n \"MaxPool1x1\": \"maxpool3x3\",\n \"ReLUConvBN1x1\": \"conv1x1-bn-relu\",\n \"ReLUConvBN3x3\": \"conv3x3-bn-relu\",\n }\n\n ops_to_nb101_edges = {\n \"Identity\": 1,\n \"Zero\": 0,\n }\n\n num_vertices = 7\n ops = [\"input\"] * num_vertices\n ops[-1] = \"output\"\n\n cell = naslib_object.edges[2, 3].op\n print(\"cell\", cell)\n\n for i in range(1, 6):\n ops[i] = ops_to_nb101[\n cell.nodes[i + 1][\"subgraph\"].edges[1, 2][\"op\"].get_op_name\n ]\n\n for i, j in cell.edges:\n matrix[i - 1][j - 1] = ops_to_nb101_edges[cell.edges[i, j][\"op\"].get_op_name]\n\n return [matrix, ops]\n\n\ndef convert_spec_to_naslib(spec, naslib_object):\n # TODO: write this method similar to how it was written for nasbench201 and darts\n raise NotImplementedError(\"Cannot yet convert a spec to naslib object\")\n\n\ndef convert_spec_to_tuple(spec):\n # convert the spec to a hashable type\n op_dict = [\"input\", \"output\", \"maxpool3x3\", \"conv1x1-bn-relu\", \"conv3x3-bn-relu\"]\n\n matrix = spec[\"matrix\"].flatten()\n ops = [op_dict.index(s) for s in spec[\"ops\"]]\n tup = tuple([*matrix, *ops])\n return tup\n" ]
[ [ "numpy.ones" ] ]
mapillary/metropolis_sdk
[ "4d25e0687d192e1f64707a9474a4b0896e430c01" ]
[ "metropolis/utils/data_classes.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# Original copyright notice:\n# nuScenes dev-kit.\n# Code written by Oscar Beijbom, 2018.\n\nimport copy\nimport os.path as osp\nfrom abc import ABC, abstractmethod\nfrom functools import reduce\nfrom typing import Tuple, List, Dict, TYPE_CHECKING, Any, Optional\n\nimport numpy as np\nfrom matplotlib.axes import ( # @manual=fbsource//third-party/pypi/matplotlib:matplotlib\n Axes,\n)\nfrom pyquaternion import Quaternion\n\nfrom . import pathmgr\nfrom .geometry_utils import view_points, view_points_eq, transform_matrix, split_poly_eq\n\nif TYPE_CHECKING:\n from ..metropolis import Metropolis\n\nEYE3 = np.eye(3)\nEYE4 = np.eye(4)\n\n\nclass PointCloud(ABC):\n \"\"\"Abstract class for manipulating and viewing point clouds.\n\n Every point cloud (lidar and radar) consists of points where:\n - Dimensions 0, 1, 2 represent x, y, z coordinates.\n These are modified when the point cloud is rotated or translated.\n - All other dimensions are optional. Hence these have to be manually modified if\n the reference frame changes.\n\n Args:\n points: d-dimensional input point cloud matrix.\n \"\"\"\n\n def __init__(self, points: np.ndarray):\n assert (\n points.shape[0] == self.nbr_dims()\n ), f\"Error: Pointcloud points must have format: {self.nbr_dims()} x n\"\n self.points = points\n\n @staticmethod\n @abstractmethod\n def nbr_dims() -> int:\n \"\"\"Returns the number of dimensions.\n\n Returns: Number of dimensions.\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def from_file(cls, file_name: str) -> \"PointCloud\":\n \"\"\"Loads point cloud from disk.\n\n Args:\n file_name: Path of the pointcloud file on disk.\n\n Returns:\n PointCloud instance.\n \"\"\"\n pass\n\n @classmethod\n def from_file_multisweep(\n cls,\n metr: \"Metropolis\",\n sample_rec: Dict[str, Any],\n chan: str,\n ref_chan: str,\n nsweeps: int = 5,\n min_distance: float = 1.0,\n ) -> Tuple[\"PointCloud\", np.ndarray]:\n \"\"\"Return a point cloud that aggregates multiple sweeps.\n\n As every sweep is in a different coordinate frame, we need to map the\n coordinates to a single reference frame. 
As every sweep has a different\n timestamp, we need to account for that in the transformations and timestamps.\n\n Args:\n metr: A Metropolis instance.\n sample_rec: The current sample.\n chan: The lidar/radar channel from which we track back n sweeps to\n aggregate the point cloud.\n ref_chan: The reference channel of the current sample_rec that the point\n clouds are mapped to.\n nsweeps: Number of sweeps to aggregated.\n min_distance: Distance below which points are discarded.\n\n Returns:\n all_pc: The aggregated point clouds.\n all_times: The aggregated timestamps.\n \"\"\"\n # Init.\n points = np.zeros(\n (cls.nbr_dims(), 0),\n dtype=np.float32 if cls == LidarPointCloud else np.float64,\n )\n all_pc = cls(points)\n all_times = np.zeros((1, 0))\n\n # Get reference pose and timestamp.\n ref_sd_token = sample_rec[\"data\"][ref_chan]\n ref_sd_rec = metr.get(\"sample_data\", ref_sd_token)\n ref_pose_rec = metr.get(\"ego_pose\", ref_sd_rec[\"ego_pose_token\"])\n ref_cs_rec = metr.get(\n \"calibrated_sensor\", ref_sd_rec[\"calibrated_sensor_token\"]\n )\n ref_time = 1e-6 * ref_sd_rec[\"timestamp\"]\n\n # Homogeneous transform from ego car frame to reference frame.\n ref_from_car = transform_matrix(\n ref_cs_rec[\"translation\"], Quaternion(ref_cs_rec[\"rotation\"]), inverse=True\n )\n\n # Homogeneous transformation matrix from global to _current_ ego car frame.\n car_from_global = transform_matrix(\n ref_pose_rec[\"translation\"],\n Quaternion(ref_pose_rec[\"rotation\"]),\n inverse=True,\n )\n\n # Aggregate current and previous sweeps.\n sample_data_token = sample_rec[\"data\"][chan]\n current_sd_rec = metr.get(\"sample_data\", sample_data_token)\n for _ in range(nsweeps):\n # Load up the pointcloud and remove points close to the sensor.\n current_pc = cls.from_file(\n osp.join(metr.dataroot, current_sd_rec[\"filename\"])\n )\n current_pc.remove_close(min_distance)\n\n # Get past pose.\n current_pose_rec = metr.get(\"ego_pose\", current_sd_rec[\"ego_pose_token\"])\n global_from_car = transform_matrix(\n current_pose_rec[\"translation\"],\n Quaternion(current_pose_rec[\"rotation\"]),\n inverse=False,\n )\n\n # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.\n current_cs_rec = metr.get(\n \"calibrated_sensor\", current_sd_rec[\"calibrated_sensor_token\"]\n )\n car_from_current = transform_matrix(\n current_cs_rec[\"translation\"],\n Quaternion(current_cs_rec[\"rotation\"]),\n inverse=False,\n )\n\n # Fuse four transformation matrices into one and perform transform.\n trans_matrix = reduce(\n np.dot,\n [ref_from_car, car_from_global, global_from_car, car_from_current],\n )\n current_pc.transform(trans_matrix)\n\n # Add time vector which can be used as a temporal feature.\n time_lag = (\n ref_time - 1e-6 * current_sd_rec[\"timestamp\"]\n ) # Positive difference.\n times = time_lag * np.ones((1, current_pc.nbr_points()))\n all_times = np.hstack((all_times, times))\n\n # Merge with key pc.\n all_pc.points = np.hstack((all_pc.points, current_pc.points))\n\n # Abort if there are no previous sweeps.\n if current_sd_rec[\"previous_sample_data\"] == \"\":\n break\n else:\n current_sd_rec = metr.get(\n \"sample_data\", current_sd_rec[\"previous_sample_data\"]\n )\n\n return all_pc, all_times\n\n def nbr_points(self) -> int:\n \"\"\"Returns the number of points.\n\n Returns:\n Number of points.\n \"\"\"\n return self.points.shape[1]\n\n def subsample(self, ratio: float) -> None:\n \"\"\"Sub-samples the pointcloud.\n\n Args:\n ratio: Fraction to keep.\n \"\"\"\n 
selected_ind = np.random.choice(\n np.arange(0, self.nbr_points()), size=int(self.nbr_points() * ratio)\n )\n self.points = self.points[:, selected_ind]\n\n def remove_close(self, radius: float) -> None:\n \"\"\"Removes point too close within a certain radius from origin.\n\n Args:\n radius: Radius below which points are removed.\n \"\"\"\n\n x_filt = np.abs(self.points[0, :]) < radius\n y_filt = np.abs(self.points[1, :]) < radius\n not_close = np.logical_not(np.logical_and(x_filt, y_filt))\n self.points = self.points[:, not_close]\n\n def translate(self, x: np.ndarray) -> None:\n \"\"\"Applies a translation to the point cloud.\n\n Args:\n x: Translation in x, y, z.\n \"\"\"\n for i in range(3):\n self.points[i, :] = self.points[i, :] + x[i]\n\n def rotate(self, rot_matrix: np.ndarray) -> None:\n \"\"\"Applies a rotation.\n\n Args:\n rot_matrix: Rotation matrix.\n \"\"\"\n self.points[:3, :] = np.dot(rot_matrix, self.points[:3, :])\n\n def transform(self, transf_matrix: np.ndarray) -> None:\n \"\"\"Applies a homogeneous transform.\n\n Args:\n transf_matrix: Homogenous transformation matrix.\n \"\"\"\n self.points[:3, :] = transf_matrix.dot(\n np.vstack((self.points[:3, :], np.ones(self.nbr_points())))\n )[:3, :]\n\n def render_height(\n self,\n ax: Axes,\n view: np.ndarray = EYE4,\n x_lim: Tuple[float, float] = (-20, 20),\n y_lim: Tuple[float, float] = (-20, 20),\n marker_size: float = 1,\n ) -> None:\n \"\"\"Very simple method that applies a transformation and then scatter plots\n the points colored by height (z-value).\n\n Args:\n ax: Axes on which to render the points.\n view: Defines an arbitrary projection (n <= 4).\n x_lim: (min, max). x range for plotting.\n y_lim: (min, max). y range for plotting.\n marker_size: Marker size.\n \"\"\"\n self._render_helper(2, ax, view, x_lim, y_lim, marker_size)\n\n def render_intensity(\n self,\n ax: Axes,\n view: np.ndarray = EYE4,\n x_lim: Tuple[float, float] = (-20, 20),\n y_lim: Tuple[float, float] = (-20, 20),\n marker_size: float = 1,\n ) -> None:\n \"\"\"Very simple method that applies a transformation and then scatter plots\n the points colored by intensity.\n\n Args:\n ax: Axes on which to render the points.\n view: Defines an arbitrary projection (n <= 4).\n x_lim: (min, max).\n y_lim: (min, max).\n marker_size: Marker size.\n \"\"\"\n self._render_helper(3, ax, view, x_lim, y_lim, marker_size)\n\n def _render_helper(\n self,\n color_channel: int,\n ax: Axes,\n view: np.ndarray,\n x_lim: Tuple[float, float],\n y_lim: Tuple[float, float],\n marker_size: float,\n ) -> None:\n \"\"\"Helper function for rendering.\n\n Args:\n color_channel: Point channel to use as color.\n ax: Axes on which to render the points.\n view: Defines an arbitrary projection (n <= 4).\n x_lim: (min, max).\n y_lim: (min, max).\n marker_size: Marker size.\n \"\"\"\n points = view_points(self.points[:3, :], view, normalize=False)\n ax.scatter(\n points[0, :], points[1, :], c=self.points[color_channel, :], s=marker_size\n )\n ax.set_xlim(x_lim[0], x_lim[1])\n ax.set_ylim(y_lim[0], y_lim[1])\n\n\nclass LidarPointCloud(PointCloud):\n @staticmethod\n def nbr_dims() -> int:\n \"\"\"Returns the number of dimensions.\n\n Returns:\n Number of dimensions.\n \"\"\"\n return 4\n\n @classmethod\n def from_file(cls, file_name: str) -> \"LidarPointCloud\":\n \"\"\"Loads LIDAR data from binary numpy format. 
Data is stored as (x, y, z,\n intensity, ring index).\n\n Args:\n file_name: Path of the pointcloud file on disk.\n\n Returns:\n LidarPointCloud instance (x, y, z, intensity).\n \"\"\"\n\n assert file_name.endswith(\".npz\"), f\"Unsupported filetype {file_name}\"\n\n with pathmgr.open(file_name, \"rb\") as fid:\n data = np.load(fid)\n points = data[\"points\"]\n\n points4 = np.ones((points.shape[0], 4))\n points4[:, :3] = points\n\n return cls(points4.T)\n\n\nclass Box:\n \"\"\"Simple data class representing a 3d box\n\n Args:\n center: Center of box given as x, y, z.\n size: Size of box in width, length, height.\n orientation: Box orientation.\n name: Box name, optional. Can be used e.g. for denote category name.\n token: Unique string identifier from DB.\n \"\"\"\n\n def __init__(\n self,\n center: List[float],\n size: List[float],\n orientation: Quaternion,\n name: str = None,\n token: str = None,\n ):\n assert not np.any(np.isnan(center))\n assert not np.any(np.isnan(size))\n assert len(center) == 3\n assert len(size) == 3\n assert type(orientation) == Quaternion\n\n self.center = np.array(center)\n self.lwh = np.array(size)\n self.orientation = orientation\n self.name = name\n self.token = token\n\n def __eq__(self, other):\n center = np.allclose(self.center, other.center)\n lwh = np.allclose(self.lwh, other.lwh)\n orientation = np.allclose(self.orientation.elements, other.orientation.elements)\n\n return center and lwh and orientation\n\n def __repr__(self):\n repr_str = (\n \"xyz: [{:.2f}, {:.2f}, {:.2f}], lwh: [{:.2f}, {:.2f}, {:.2f}], \"\n \"rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, \"\n \"name: {}, token: {}\"\n )\n\n return repr_str.format(\n self.center[0],\n self.center[1],\n self.center[2],\n self.lwh[0],\n self.lwh[1],\n self.lwh[2],\n self.orientation.axis[0],\n self.orientation.axis[1],\n self.orientation.axis[2],\n self.orientation.degrees,\n self.orientation.radians,\n self.name,\n self.token,\n )\n\n @property\n def rotation_matrix(self) -> np.ndarray:\n \"\"\"Return a rotation matrix.\n\n Returns:\n The box's rotation matrix.\n \"\"\"\n return self.orientation.rotation_matrix\n\n def translate(self, x: np.ndarray) -> None:\n \"\"\"Applies a translation.\n\n Args:\n x: Translation in x, y, z direction.\n \"\"\"\n self.center += x\n\n def rotate(self, quaternion: Quaternion) -> None:\n \"\"\"Rotates box.\n\n Args:\n quaternion: Rotation to apply.\n \"\"\"\n self.center = np.dot(quaternion.rotation_matrix, self.center)\n self.orientation = quaternion * self.orientation\n\n def corners(self, lwh_factor: float = 1.0) -> np.ndarray:\n \"\"\"Returns the bounding box corners.\n\n Args:\n lwh_factor: Multiply l, w, h by a factor to scale the box.\n\n Returns:\n First four corners are the ones facing forward. The last four are the\n ones facing backwards.\n \"\"\"\n l, w, h = self.lwh * lwh_factor\n\n # 3D bounding box corners. 
(Convention: x points right, y to the front, z up.)\n y_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])\n x_corners = w / 2 * np.array([-1, 1, 1, -1, -1, 1, 1, -1])\n z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])\n corners = np.vstack((x_corners, y_corners, z_corners))\n\n # Rotate\n corners = np.dot(self.orientation.rotation_matrix, corners)\n\n # Translate\n x, y, z = self.center\n corners[0, :] = corners[0, :] + x\n corners[1, :] = corners[1, :] + y\n corners[2, :] = corners[2, :] + z\n\n return corners\n\n def bottom_corners(self) -> np.ndarray:\n \"\"\"Returns the four bottom corners.\n\n Returns:\n Bottom corners. First two face forward, last two face backwards.\n \"\"\"\n return self.corners()[:, [2, 3, 7, 6]]\n\n def render(\n self,\n axis: Axes,\n view: np.ndarray = EYE3,\n normalize: bool = False,\n colors: Tuple[Any, Any, Any] = (\"b\", \"r\", \"k\"),\n linewidth: float = 2,\n ) -> None:\n \"\"\"Renders the box in the provided Matplotlib axis.\n\n Args:\n axis: Axis onto which the box should be drawn.\n view: Define a projection in needed (e.g. for drawing projection in an image).\n normalize: Whether to normalize the remaining coordinate.\n colors: Valid Matplotlib colors (<str> or normalized RGB tuple) for front,\n back and sides.\n linewidth: Width in pixel of the box sides.\n \"\"\"\n corners = view_points(self.corners(), view, normalize=normalize)[:2, :]\n\n def draw_rect(selected_corners, color):\n prev = selected_corners[-1]\n for corner in selected_corners:\n axis.plot(\n [prev[0], corner[0]],\n [prev[1], corner[1]],\n color=color,\n linewidth=linewidth,\n )\n prev = corner\n\n # Draw the sides\n for i in range(4):\n axis.plot(\n [corners.T[i][0], corners.T[i + 4][0]],\n [corners.T[i][1], corners.T[i + 4][1]],\n color=colors[2],\n linewidth=linewidth,\n )\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n draw_rect(corners.T[:4], colors[0])\n draw_rect(corners.T[4:], colors[1])\n\n # Draw line indicating the front\n center_bottom_forward = np.mean(corners.T[2:4], axis=0)\n center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)\n axis.plot(\n [center_bottom[0], center_bottom_forward[0]],\n [center_bottom[1], center_bottom_forward[1]],\n color=colors[0],\n linewidth=linewidth,\n )\n\n def render_eq(\n self,\n axis: Axes,\n img_size: Tuple[int, int],\n colors: Tuple[Any, Any, Any] = (\"b\", \"r\", \"k\"),\n linewidth: float = 2,\n num_samples: int = 20,\n ) -> None:\n \"\"\"Renders the box in the provided Matplotlib axis, using an equirectangular\n projection.\n\n Args:\n axis: Axis onto which the box should be drawn.\n img_size: Image size as (width, height).\n colors: Valid Matplotlib colors (<str> or normalized RGB tuple) for front,\n back and sides.\n linewidth: Width in pixel of the box sides.\n num_samples: Number of points to sample on each edge of the bounding box\n \"\"\"\n t = np.linspace(0, 1, num_samples).reshape(1, -1)\n corners = self.corners()\n\n def draw_line(p1, p2, color):\n line = p1.reshape(3, -1) + t * (p2 - p1).reshape(3, -1)\n line = view_points_eq(line, img_size[0], img_size[1])\n\n for line in split_poly_eq(line, img_size[0]):\n axis.plot(line[0, :], line[1, :], color=color, linewidth=linewidth)\n\n # Draw the sides\n for i in range(4):\n draw_line(corners[:, i], corners[:, i + 4], colors[2])\n\n # Draw front and back\n for i in range(4):\n draw_line(corners[:, i % 4], corners[:, (i + 1) % 4], colors[0])\n for i in range(4):\n draw_line(corners[:, i % 4 + 4], corners[:, (i + 1) % 4 + 4], 
colors[1])\n\n def copy(self) -> \"Box\":\n \"\"\"Create a copy of self.\n\n Returns:\n A copy.\n \"\"\"\n return copy.deepcopy(self)\n\n\nclass Box2d:\n \"\"\"Simple data class representing a 2d box\n\n This representa an axis-aligned box on an equirectangular image, meaning that\n the same region on a side-view perspective image will be a deformed rectangle.\n\n Args:\n coords: Bounding box coordinates [left, top, right, bottom]. Note that for\n boxes that \"wrap around\" the equirectangular image, left > right, while\n for all others left < right.\n name: Box name, optional. Can be used e.g. for denote category name.\n token: Unique string identifier from DB.\n \"\"\"\n\n def __init__(\n self,\n coords: List[float],\n name: str = None,\n token: str = None,\n ):\n assert not np.any(np.isnan(coords))\n assert len(coords) == 4\n\n self.coords = coords\n self.name = name\n self.token = token\n\n def __eq__(self, other) -> bool:\n return np.allclose(self.coords, other.coords)\n\n def __repr__(self) -> str:\n return \"[{:.2f}, {:.2f}, {:.2f}, {:.2f}], name={}, token={}\".format(\n self.coords[0],\n self.coords[1],\n self.coords[2],\n self.coords[3],\n self.name,\n self.token,\n )\n\n def render(\n self, axis: Axes, width: int, color: Any = \"r\", linewidth: int = 2\n ) -> None:\n \"\"\"Renders the box in the provided Matplotlib axis.\n\n Args:\n axis: Axis onto which the box should be drawn.\n width: Width of the equirectangular image.\n colors: Valid Matplotlib color.\n linewidth: Width in pixel of the box sides.\n \"\"\"\n if self.coords[0] < self.coords[2]:\n segments = [\n ([self.coords[0], self.coords[2]], [self.coords[1], self.coords[1]]),\n ([self.coords[2], self.coords[2]], [self.coords[1], self.coords[3]]),\n ([self.coords[2], self.coords[0]], [self.coords[3], self.coords[3]]),\n ([self.coords[0], self.coords[0]], [self.coords[3], self.coords[1]]),\n ]\n else:\n segments = [\n ([self.coords[0], width], [self.coords[1], self.coords[1]]),\n ([0, self.coords[2]], [self.coords[1], self.coords[1]]),\n ([self.coords[2], self.coords[2]], [self.coords[1], self.coords[3]]),\n ([self.coords[2], 0], [self.coords[3], self.coords[3]]),\n ([width, self.coords[0]], [self.coords[3], self.coords[3]]),\n ([self.coords[0], self.coords[0]], [self.coords[3], self.coords[1]]),\n ]\n\n for x, y in segments:\n axis.plot(x, y, color=color, linewidth=linewidth)\n\n\nclass EquiBox2d:\n \"\"\"2D bounding box on an equirectangular image reprojected to a perspective image\n\n Args:\n points: 2D points defining the box contour.\n name: Box name, optional. Can be used e.g. 
for denote category name.\n token: Unique string identifier from DB.\n \"\"\"\n\n def __init__(\n self,\n points: np.ndarray,\n name: Optional[str] = None,\n token: Optional[str] = None,\n ):\n assert points.shape[0] == 2\n\n self.points = points\n self.name = name\n self.token = token\n\n def render(self, axis: Axes, color: Any = \"r\", linewidth: int = 2) -> None:\n \"\"\"Renders the box in the provided Matplotlib axis.\n\n Args:\n axis: Axis onto which the box should be drawn.\n width: Width of the equirectangular image.\n colors: Valid Matplotlib color.\n linewidth: Width in pixel of the box sides.\n \"\"\"\n axis.plot(self.points[0], self.points[1], color=color, linewidth=linewidth)\n\n @classmethod\n def from_box_2d(\n cls,\n box: Box2d,\n q_eq: Quaternion,\n q_pr: Quaternion,\n intrinsic: np.ndarray,\n size_eq: Tuple[int, int],\n size_pr: Tuple[int, int],\n num_samples: int = 20,\n ) -> Optional[\"EquiBox2d\"]:\n \"\"\"Project an equirectangular bounding box to a projective image\n\n This assumes that both cameras have the same center, being related by a\n simple rotation transformation. The output box is constructed by projecting\n a fixed number of points on each edge of the input box from the\n equirectangular to the perspective image, forming a discrete approximation\n of its curved shape.\n\n Args:\n box: A bounding box defined in the equirectangular image.\n q_eq: The rotation of the equirectangular image with respect to an\n external frame of reference (e.g the ego vehicle).\n q_pr: The rotation of the projective image with respect to the same\n external frame of referene.\n intrinsic: Intrinsic matrix of the projective image.\n size_eq: The size of the equirectangular image, as width, height\n size_pr: The size of the projective image, as width, height\n num_samples: Number of points to sample on each edge of the bounding box\n\n Returns:\n The projected box, or None if the given box falls completely outside\n of the projective image.\n \"\"\"\n\n def get_points(x0, y0, x1, y1):\n side1 = np.stack(\n [\n np.linspace(x0, x1, num_samples, dtype=np.float32),\n np.full((num_samples,), y0, dtype=np.float32),\n ],\n axis=0,\n )\n side2 = np.stack(\n [\n np.full((num_samples,), x1, dtype=np.float32),\n np.linspace(y0, y1, num_samples, dtype=np.float32),\n ],\n axis=0,\n )\n side3 = np.stack(\n [\n np.linspace(x1, x0, num_samples, dtype=np.float32),\n np.full((num_samples,), y1, dtype=np.float32),\n ],\n axis=0,\n )\n side4 = np.stack(\n [\n np.full((num_samples,), x0, dtype=np.float32),\n np.linspace(y1, y0, num_samples, dtype=np.float32),\n ],\n axis=0,\n )\n return np.concatenate([side1, side2, side3, side4], axis=1)\n\n def project(points):\n # From pixel coordinates to angles\n u = (points[0] / size_eq[0] - 0.5) * 2 * np.pi\n v = (points[1] / size_eq[1] - 0.5) * np.pi\n\n # From angles to 3D coordinates on the sphere\n p_eq = np.stack(\n [np.cos(v) * np.sin(u), np.sin(v), np.cos(v) * np.cos(u)], axis=0\n )\n\n # Rotate to target camera\n p_pr = np.dot(q_pr.rotation_matrix.T, np.dot(q_eq.rotation_matrix, p_eq))\n\n # Filter out points that end up behind the camera\n p_pr = p_pr[:, p_pr[2] > 0]\n\n # Apply camera intrinsics and project\n p_pr = np.dot(intrinsic, p_pr)\n p_pr = np.stack([p_pr[0] / p_pr[2], p_pr[1] / p_pr[2]], axis=0)\n\n return p_pr\n\n # Extract and normalize the input box coordinates\n x0, y0, x1, y1 = box.coords\n if x0 > x1:\n x1 += size_eq[0]\n\n # \"draw\" the box with num_samples on each side\n points = get_points(x0, y0, x1, y1)\n\n # Project to the 
perspective image\n points = project(points)\n\n # Filter out points that end up outside of the image\n valid = points[0] >= 0\n valid = np.logical_and(valid, points[1] >= 0)\n valid = np.logical_and(valid, points[0] < size_pr[0])\n valid = np.logical_and(valid, points[1] < size_pr[1])\n\n if not valid.any():\n return None\n\n # points = points[:, valid]\n\n return cls(points, name=box.name, token=box.token)\n" ]
[ [ "numpy.dot", "numpy.load", "numpy.mean", "numpy.cos", "numpy.concatenate", "numpy.full", "numpy.sin", "numpy.logical_and", "numpy.eye", "numpy.vstack", "numpy.array", "numpy.zeros", "numpy.allclose", "numpy.stack", "numpy.hstack", "numpy.isnan", "numpy.ones", "numpy.abs", "numpy.linspace" ] ]
yangyangkiki/pytorch-lightning-bolts
[ "01f1a936815262ec810551c56f5ac87198be7c3f" ]
[ "pl_bolts/models/rl/noisy_dqn_model.py" ]
[ "\"\"\"\nNoisy DQN\n\"\"\"\nimport argparse\nfrom typing import Tuple\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\n\nfrom pl_bolts.datamodules.experience_source import Experience\nfrom pl_bolts.models.rl.common.networks import NoisyCNN\nfrom pl_bolts.models.rl.dqn_model import DQN\n\n\nclass NoisyDQN(DQN):\n \"\"\"\n PyTorch Lightning implementation of `Noisy DQN <https://arxiv.org/abs/1706.10295>`_\n\n Paper authors: Meire Fortunato, Mohammad Gheshlaghi Azar, Bilal Piot, Jacob Menick, Ian Osband, Alex Graves,\n Vlad Mnih, Remi Munos, Demis Hassabis, Olivier Pietquin, Charles Blundell, Shane Legg\n\n Model implemented by:\n\n - `Donal Byrne <https://github.com/djbyrne>`\n\n Example:\n >>> from pl_bolts.models.rl.noisy_dqn_model import NoisyDQN\n ...\n >>> model = NoisyDQN(\"PongNoFrameskip-v4\")\n\n Train::\n\n trainer = Trainer()\n trainer.fit(model)\n\n .. note:: Currently only supports CPU and single GPU training with `distributed_backend=dp`\n\n \"\"\"\n\n def build_networks(self) -> None:\n \"\"\"Initializes the Noisy DQN train and target networks\"\"\"\n self.net = NoisyCNN(self.obs_shape, self.n_actions)\n self.target_net = NoisyCNN(self.obs_shape, self.n_actions)\n\n def on_train_start(self) -> None:\n \"\"\"Set the agents epsilon to 0 as the exploration comes from the network\"\"\"\n self.agent.epsilon = 0.0\n\n def train_batch(self, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Contains the logic for generating a new batch of data to be passed to the DataLoader.\n This is the same function as the standard DQN except that we dont update epsilon as it is always 0. The\n exploration comes from the noisy network.\n\n Returns:\n yields a Experience tuple containing the state, action, reward, done and next_state.\n \"\"\"\n episode_reward = 0\n episode_steps = 0\n\n while True:\n self.total_steps += 1\n action = self.agent(self.state, self.device)\n\n next_state, r, is_done, _ = self.env.step(action[0])\n\n episode_reward += r\n episode_steps += 1\n\n exp = Experience(state=self.state, action=action[0], reward=r, done=is_done, new_state=next_state)\n\n self.buffer.append(exp)\n self.state = next_state\n\n if is_done:\n self.done_episodes += 1\n self.total_rewards.append(episode_reward)\n self.total_episode_steps.append(episode_steps)\n self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len:]))\n self.state = self.env.reset()\n episode_steps = 0\n episode_reward = 0\n\n states, actions, rewards, dones, new_states = self.buffer.sample(self.batch_size)\n\n for idx, _ in enumerate(dones):\n yield states[idx], actions[idx], rewards[idx], dones[idx], new_states[idx]\n\n # Simulates epochs\n if self.total_steps % self.batches_per_epoch == 0:\n break\n\n\ndef cli_main():\n parser = argparse.ArgumentParser(add_help=False)\n\n # trainer args\n parser = pl.Trainer.add_argparse_args(parser)\n\n # model args\n parser = NoisyDQN.add_model_specific_args(parser)\n args = parser.parse_args()\n\n model = NoisyDQN(**args.__dict__)\n\n trainer = pl.Trainer.from_argparse_args(args)\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n cli_main()\n" ]
[ [ "numpy.mean" ] ]
serycjon/synth-rot
[ "1da61126de171c53d833b55ddbb70a2d7692e88f" ]
[ "synth_rot/generator.py" ]
[ "#!/usr/bin/python\n''' based on tfrecords-guide at http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/ '''\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport random\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nfrom . import rotator\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef no_alpha(img):\n return img[:, :, :3]\n\ndef bgr2rgb(img):\n return img[..., [2, 1, 0]]\n\ndef to_rgb(bgra, default=0):\n rgb = bgra[:, :, [2, 1, 0]]\n rgb[bgra[:, :, 3] < 127] = default\n return rgb\n\ndef get_valid_images(path):\n ''' get all png images with transparency from path '''\n print('Loading images from {}'.format(path))\n images = []\n files = next(os.walk(path))[2]\n for file in files:\n if os.path.splitext(file)[1] == '.png':\n img = cv2.imread(os.path.join(path, file), cv2.IMREAD_UNCHANGED) # read RGBA\n if img.shape[2] == 4:\n images.append(img)\n else:\n print('Not using image without alpha: {}'.format(file))\n return images\n\ndef dropout(img):\n ''' insert random circular hole '''\n h, w = img.shape[:2]\n center_x = int(np.random.uniform(0, w))\n center_y = int(np.random.uniform(0, h))\n radius = int(np.random.uniform(h/10, h/3))\n\n alpha = img[..., 3].copy()\n cv2.circle(alpha, (center_x, center_y), radius, color=0, thickness=-1)\n img[..., 3] = alpha\n return img\n\ndef generate_example(img, sz=np.array([224, 224]),\n angle_margin=5,\n rotate_base=True, margin=0, center=False,\n dropout_chance=1):\n if rotate_base:\n base_in_angle = np.random.rand() * 360\n else:\n base_in_angle = 0\n base = rotator.rotate(img, 0, angle_in=base_in_angle, angle_post=0, fit_in=True)\n base_fitted = rotator.fit_in_size(base, sz, random_pad=False,\n margin=margin, center=center)\n base_raw = to_rgb(base_fitted).tostring()\n\n out_angle = np.random.rand() * (90 - angle_margin)\n in_angle = np.random.rand() * 360\n post_angle = np.random.rand() * 360\n\n rot = rotator.rotate(img,\n angle=out_angle, angle_in=in_angle, angle_post=post_angle,\n fit_in=True)\n rot_fitted = rotator.fit_in_size(rot, sz, random_pad=False,\n margin=margin, center=center)\n\n dropout_rand = np.random.rand()\n if dropout_rand < dropout_chance:\n rot_fitted = dropout(rot_fitted)\n rot_raw = to_rgb(rot_fitted).tostring()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(sz[0]),\n 'width': _int64_feature(sz[1]),\n 'base_raw': _bytes_feature(base_raw),\n 'rot_raw': _bytes_feature(rot_raw),\n 'rot_angle': _float_feature(out_angle)}))\n\n return example\n\ndef generate(images, output, N, max_entries=None,\n rotate_base=True, compress=False, margin=0, center=False,\n dropout_chance=1):\n if compress:\n options = tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.ZLIB)\n else:\n options = None\n\n writer = tf.python_io.TFRecordWriter(output, options=options)\n for i in range(N):\n if (i > 0) and (max_entries is not None) and (i%max_entries == 0):\n writer.close()\n shard = i/max_entries\n writer = tf.python_io.TFRecordWriter('{}-{}'.format(output, shard), options=options)\n print('generating {}/{}'.format(i+1, N))\n img = random.choice(images)\n example = generate_example(img, rotate_base=rotate_base,\n margin=margin, 
center=center,\n dropout_chance=dropout_chance)\n writer.write(example.SerializeToString())\n writer.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('output', help='output name (without .tfrecords)')\n parser.add_argument('--image', help='select one image in images/')\n parser.add_argument('--val', help='use validation image set', action='store_true')\n parser.add_argument('-N', help='number of generated examples', required=True, type=int)\n parser.add_argument('--max', help='max entries per tfrecords file', type=int)\n parser.add_argument('--compress', help='compress the outputs', action='store_true')\n parser.add_argument('--dropout', help='randomly drop a circular hole', action='store_true')\n args = vars(parser.parse_args())\n\n if args['val']:\n base_img_dir = 'val_images'\n else:\n base_img_dir = 'images'\n if args['image'] is not None:\n img_name = args['image']\n images = [cv2.imread(os.path.join(base_img_dir, img_name), cv2.IMREAD_UNCHANGED)]\n else:\n images = get_valid_images(base_img_dir)\n\n if args['dropout']:\n dropout_chance = 1\n else:\n dropout_chance = 0\n\n N = args['N']\n tfrecords_path = '{}.tfrecords'.format(args['output'])\n generate(images, tfrecords_path, N, args['max'], compress=args['compress'],\n margin=20, center=True, dropout_chance=dropout_chance)\n" ]
[ [ "tensorflow.python_io.TFRecordOptions", "numpy.array", "tensorflow.train.BytesList", "numpy.random.rand", "tensorflow.train.FloatList", "tensorflow.train.Int64List", "tensorflow.python_io.TFRecordWriter", "numpy.random.uniform" ] ]
danmalowany-allegro/trains
[ "10f11ba0383bc0d54c54a4f079f2385403bd8ad6" ]
[ "examples/frameworks/pytorch/pytorch_distributed_example.py" ]
[ "# TRAINS - example of TRAINS torch distributed support\n# notice all nodes will be reporting to the master Task (experiment)\n\nimport os\nimport subprocess\nimport sys\nfrom argparse import ArgumentParser\nfrom math import ceil\nfrom random import Random\n\nimport torch as th\nimport torch.nn as nn\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torchvision import datasets, transforms\n\nfrom trains import Task\n\n\nlocal_dataset_path = './MNIST_data'\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = th.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\nclass Partition(object):\n \"\"\" Dataset partitioning helper \"\"\"\n def __init__(self, data, index):\n self.data = data\n self.index = index\n\n def __len__(self):\n return len(self.index)\n\n def __getitem__(self, index):\n data_idx = self.index[index]\n return self.data[data_idx]\n\n\nclass DataPartitioner(object):\n def __init__(self, data, sizes=(0.7, 0.2, 0.1), seed=1234):\n self.data = data\n self.partitions = []\n rng = Random()\n rng.seed(seed)\n data_len = len(data)\n indexes = [x for x in range(0, data_len)]\n rng.shuffle(indexes)\n\n for frac in sizes:\n part_len = int(frac * data_len)\n self.partitions.append(indexes[0:part_len])\n indexes = indexes[part_len:]\n\n def use(self, partition):\n return Partition(self.data, self.partitions[partition])\n\n\ndef partition_dataset(num_workers=4):\n \"\"\" Partitioning MNIST \"\"\"\n dataset = datasets.MNIST(root=local_dataset_path, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n train_set = th.utils.data.DataLoader(\n partition, num_workers=num_workers, batch_size=bsz, shuffle=True)\n return train_set, bsz\n\n\ndef run(num_workers):\n \"\"\" Distributed Synchronous SGD Example \"\"\"\n th.manual_seed(1234)\n train_set, bsz = partition_dataset(num_workers)\n model = Net()\n optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)\n\n num_batches = ceil(len(train_set.dataset) / float(bsz))\n\n from random import randint\n param = {'worker_{}_stuff'.format(dist.get_rank()): 'some stuff ' + str(randint(0, 100))}\n Task.current_task().connect(param)\n Task.current_task().upload_artifact(\n 'temp {:02d}'.format(dist.get_rank()), artifact_object={'worker_rank': dist.get_rank()})\n\n for epoch in range(2):\n epoch_loss = 0.0\n for i, (data, target) in enumerate(train_set):\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n epoch_loss += loss.item()\n loss.backward()\n average_gradients(model)\n optimizer.step()\n if i % 10 == 0:\n print('{}] Train Epoch {} - {} \\tLoss {:.6f}'.format(dist.get_rank(), epoch, i, loss))\n Task.current_task().get_logger().report_scalar(\n 'loss', 'worker {:02d}'.format(dist.get_rank()), 
value=loss.item(), iteration=i)\n if i > 100:\n break\n print('Rank ', dist.get_rank(), ', epoch ',\n epoch, ': ', epoch_loss / num_batches)\n\n\ndef average_gradients(model):\n \"\"\" Gradient averaging. \"\"\"\n size = float(dist.get_world_size())\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)\n param.grad.data /= size\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('--nodes', help='number of nodes', type=int, default=10)\n parser.add_argument('--workers_in_node', help='number of workers per node', type=int, default=3)\n # this argument we will not be logging, see below Task.init\n parser.add_argument('--rank', help='current rank', type=int)\n\n args = parser.parse_args()\n\n # We have to initialize the task in the master process,\n # it will make sure that any sub-process calling Task.init will get the master task object\n # notice that we exclude the `rank` argument, so we can launch multiple sub-processes with trains-agent\n # otherwise, the `rank` will always be set to the original value.\n task = Task.init(\"examples\", \"test torch distributed\", auto_connect_arg_parser={'rank': False})\n\n if os.environ.get('MASTER_ADDR'):\n dist.init_process_group(backend='gloo', rank=args.rank, world_size=args.nodes)\n run(args.workers_in_node)\n else:\n # first let's download the dataset, if we have multiple machines,\n # they will take care of it when they get there\n datasets.MNIST(root=local_dataset_path, train=True, download=True)\n\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n\n print(os.getpid(), 'ARGS:', args)\n processes = []\n for rank in range(args.nodes):\n cmd = [sys.executable, sys.argv[0],\n '--nodes', str(args.nodes),\n '--workers_in_node', str(args.workers_in_node),\n '--rank', str(rank)]\n print(cmd)\n p = subprocess.Popen(cmd, cwd=os.getcwd(), pass_fds=[], close_fds=True)\n processes.append(p)\n\n for p in processes:\n p.wait()\n" ]
[ [ "torch.nn.Linear", "torch.distributed.get_world_size", "torch.nn.functional.nll_loss", "torch.flatten", "torch.distributed.init_process_group", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.distributed.all_reduce", "torch.nn.functional.relu", "torch.distributed.get_rank", "torch.nn.functional.max_pool2d", "torch.nn.Dropout2d" ] ]
jochenater/catboost
[ "de2786fbc633b0d6ea6a23b3862496c6151b95c2" ]
[ "contrib/python/numpy/py3/numpy/core/tests/test_print.py" ]
[ "import sys\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_, assert_equal\nfrom __tests__.core.tests._locales import CommaDecimalPointLocale\n\n\nfrom io import StringIO\n\n_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}\n\n\[email protected]('tp', [np.float32, np.double, np.longdouble])\ndef test_float_types(tp):\n \"\"\" Check formatting.\n\n This is only for the str function, and only for simple types.\n The precision of np.float32 and np.longdouble aren't the same as the\n python float precision.\n\n \"\"\"\n for x in [0, 1, -1, 1e20]:\n assert_equal(str(tp(x)), str(float(x)),\n err_msg='Failed str formatting for type %s' % tp)\n\n if tp(1e16).itemsize > 4:\n assert_equal(str(tp(1e16)), str(float('1e16')),\n err_msg='Failed str formatting for type %s' % tp)\n else:\n ref = '1e+16'\n assert_equal(str(tp(1e16)), ref,\n err_msg='Failed str formatting for type %s' % tp)\n\n\[email protected]('tp', [np.float32, np.double, np.longdouble])\ndef test_nan_inf_float(tp):\n \"\"\" Check formatting of nan & inf.\n\n This is only for the str function, and only for simple types.\n The precision of np.float32 and np.longdouble aren't the same as the\n python float precision.\n\n \"\"\"\n for x in [np.inf, -np.inf, np.nan]:\n assert_equal(str(tp(x)), _REF[x],\n err_msg='Failed str formatting for type %s' % tp)\n\n\[email protected]('tp', [np.complex64, np.cdouble, np.clongdouble])\ndef test_complex_types(tp):\n \"\"\"Check formatting of complex types.\n\n This is only for the str function, and only for simple types.\n The precision of np.float32 and np.longdouble aren't the same as the\n python float precision.\n\n \"\"\"\n for x in [0, 1, -1, 1e20]:\n assert_equal(str(tp(x)), str(complex(x)),\n err_msg='Failed str formatting for type %s' % tp)\n assert_equal(str(tp(x*1j)), str(complex(x*1j)),\n err_msg='Failed str formatting for type %s' % tp)\n assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),\n err_msg='Failed str formatting for type %s' % tp)\n\n if tp(1e16).itemsize > 8:\n assert_equal(str(tp(1e16)), str(complex(1e16)),\n err_msg='Failed str formatting for type %s' % tp)\n else:\n ref = '(1e+16+0j)'\n assert_equal(str(tp(1e16)), ref,\n err_msg='Failed str formatting for type %s' % tp)\n\n\[email protected]('dtype', [np.complex64, np.cdouble, np.clongdouble])\ndef test_complex_inf_nan(dtype):\n \"\"\"Check inf/nan formatting of complex types.\"\"\"\n TESTS = {\n complex(np.inf, 0): \"(inf+0j)\",\n complex(0, np.inf): \"infj\",\n complex(-np.inf, 0): \"(-inf+0j)\",\n complex(0, -np.inf): \"-infj\",\n complex(np.inf, 1): \"(inf+1j)\",\n complex(1, np.inf): \"(1+infj)\",\n complex(-np.inf, 1): \"(-inf+1j)\",\n complex(1, -np.inf): \"(1-infj)\",\n complex(np.nan, 0): \"(nan+0j)\",\n complex(0, np.nan): \"nanj\",\n complex(-np.nan, 0): \"(nan+0j)\",\n complex(0, -np.nan): \"nanj\",\n complex(np.nan, 1): \"(nan+1j)\",\n complex(1, np.nan): \"(1+nanj)\",\n complex(-np.nan, 1): \"(nan+1j)\",\n complex(1, -np.nan): \"(1+nanj)\",\n }\n for c, s in TESTS.items():\n assert_equal(str(dtype(c)), s)\n\n\n# print tests\ndef _test_redirected_print(x, tp, ref=None):\n file = StringIO()\n file_tp = StringIO()\n stdout = sys.stdout\n try:\n sys.stdout = file_tp\n print(tp(x))\n sys.stdout = file\n if ref:\n print(ref)\n else:\n print(x)\n finally:\n sys.stdout = stdout\n\n assert_equal(file.getvalue(), file_tp.getvalue(),\n err_msg='print failed for type%s' % tp)\n\n\[email protected]('tp', [np.float32, np.double, np.longdouble])\ndef test_float_type_print(tp):\n \"\"\"Check 
formatting when using print \"\"\"\n for x in [0, 1, -1, 1e20]:\n _test_redirected_print(float(x), tp)\n\n for x in [np.inf, -np.inf, np.nan]:\n _test_redirected_print(float(x), tp, _REF[x])\n\n if tp(1e16).itemsize > 4:\n _test_redirected_print(float(1e16), tp)\n else:\n ref = '1e+16'\n _test_redirected_print(float(1e16), tp, ref)\n\n\[email protected]('tp', [np.complex64, np.cdouble, np.clongdouble])\ndef test_complex_type_print(tp):\n \"\"\"Check formatting when using print \"\"\"\n # We do not create complex with inf/nan directly because the feature is\n # missing in python < 2.6\n for x in [0, 1, -1, 1e20]:\n _test_redirected_print(complex(x), tp)\n\n if tp(1e16).itemsize > 8:\n _test_redirected_print(complex(1e16), tp)\n else:\n ref = '(1e+16+0j)'\n _test_redirected_print(complex(1e16), tp, ref)\n\n _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')\n _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')\n _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')\n\n\ndef test_scalar_format():\n \"\"\"Test the str.format method with NumPy scalar types\"\"\"\n tests = [('{0}', True, np.bool_),\n ('{0}', False, np.bool_),\n ('{0:d}', 130, np.uint8),\n ('{0:d}', 50000, np.uint16),\n ('{0:d}', 3000000000, np.uint32),\n ('{0:d}', 15000000000000000000, np.uint64),\n ('{0:d}', -120, np.int8),\n ('{0:d}', -30000, np.int16),\n ('{0:d}', -2000000000, np.int32),\n ('{0:d}', -7000000000000000000, np.int64),\n ('{0:g}', 1.5, np.float16),\n ('{0:g}', 1.5, np.float32),\n ('{0:g}', 1.5, np.float64),\n ('{0:g}', 1.5, np.longdouble),\n ('{0:g}', 1.5+0.5j, np.complex64),\n ('{0:g}', 1.5+0.5j, np.complex128),\n ('{0:g}', 1.5+0.5j, np.clongdouble)]\n\n for (fmat, val, valtype) in tests:\n try:\n assert_equal(fmat.format(val), fmat.format(valtype(val)),\n \"failed with val %s, type %s\" % (val, valtype))\n except ValueError as e:\n assert_(False,\n \"format raised exception (fmt='%s', val=%s, type=%s, exc='%s')\" %\n (fmat, repr(val), repr(valtype), str(e)))\n\n\n#\n# Locale tests: scalar types formatting should be independent of the locale\n#\n\nclass TestCommaDecimalPointLocale(CommaDecimalPointLocale):\n\n def test_locale_single(self):\n assert_equal(str(np.float32(1.2)), str(float(1.2)))\n\n def test_locale_double(self):\n assert_equal(str(np.double(1.2)), str(float(1.2)))\n\n def test_locale_longdouble(self):\n assert_equal(str(np.longdouble('1.2')), str(float(1.2)))\n" ]
[ [ "numpy.float32", "numpy.longdouble", "numpy.double" ] ]
Tontolda/genui
[ "c5b7da7c5a99fc16d34878e2170145ac7c8e31c4" ]
[ "src/genui/generators/extensions/genuidrugex/genuimodels/builders.py" ]
[ "\"\"\"\nbuilders\n\nCreated by: Martin Sicho\nOn: 1/26/20, 6:27 PM\n\"\"\"\nfrom django.core.files.base import ContentFile\nfrom django.db import transaction\nfrom pandas import Series\nimport torch\n\nfrom drugex.api.corpus import Corpus, BasicCorpus\nfrom genui.utils import gpu\nfrom genui.generators.extensions.genuidrugex.genuimodels.corpus import CorpusFromDB\nfrom .monitors import DrugExNetMonitor, DrugExAgentMonitor\nfrom genui.models.genuimodels import bases\nfrom genui.models.models import ModelFile, Model\nfrom ..models import DrugExNet, DrugExAgent\nfrom ..torchutils import cleanup\n\nclass DrugExBuilderMixIn:\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.device = None\n self.allocateDevice()\n\n def __del__(self):\n if self.device:\n self.device = None\n cleanup()\n self.releaseDevice()\n\n def releaseDevice(self):\n if self.device and self.device != 'cpu':\n print(f'Releasing device: {self.device}')\n gpu.release(self.device)\n self.device = None\n else:\n self.device = None\n\n def allocateDevice(self):\n if not self.device:\n self.device = gpu.allocate() # TODO: wait for some time and try again if we get an allocation exception\n if not self.device:\n print('Failed to allocate GPU device. Using CPU...')\n self.device = 'cpu'\n torch.device(self.device)\n else:\n torch.device('cuda', int(self.device['index']))\n\nclass DrugExNetBuilder(bases.ProgressMixIn, DrugExBuilderMixIn, bases.ModelBuilder):\n\n def __init__(self, instance: DrugExNet, initial: DrugExNet =None, progress=None, onFit=None):\n super().__init__(instance, progress, onFit)\n self.corpus = instance.corpus\n self.initial = initial\n self.onFit = DrugExNetMonitor(self, onFit)\n\n if not self.corpus:\n self.progressStages.append(\"Creating Corpus\")\n\n @property\n def corePackage(self):\n from .. import genuimodels\n return genuimodels\n\n def createCorpus(self):\n if self.instance.molset:\n corpus = CorpusFromDB(self.instance.molset)\n corpus.updateData(update_voc=True)\n with transaction.atomic():\n if self.instance.corpus:\n self.instance.corpus = None\n corpus_file = ModelFile.create(\n self.instance,\n \"corpus.csv\",\n ContentFile('placeholder'),\n note=DrugExNet.CORPUS_FILE_NOTE\n )\n voc_file = ModelFile.create(\n self.instance,\n \"voc.txt\",\n ContentFile('placeholder'),\n note=DrugExNet.VOC_FILE_NOTE\n )\n corpus.saveVoc(voc_file.path)\n corpus.saveCorpus(corpus_file.path)\n self.corpus = self.instance.corpus\n elif self.instance.corpus:\n print(\"WARNING: No molset available to create corpus. Falling back to the original...\")\n self.corpus = self.instance.corpus\n else:\n Exception(\"Unable to create corpus. 
No molecule set is specified and no corpus found on model instance.\")\n\n def getY(self):\n return None\n\n def getX(self) -> Corpus:\n if not self.corpus:\n self.recordProgress()\n self.createCorpus()\n\n if self.initial:\n corpus_init = self.initial.corpus\n # voc_all = self.corpus.voc + corpus_init.voc\n # self.corpus.voc = voc_all\n # FIXME: add an error message if there are extra tokens in this vocabulary when compared to parent\n self.corpus.voc = corpus_init.voc\n self.corpus.saveVoc(self.instance.vocFile.path)\n\n return self.corpus\n\n def build(self) -> Model:\n if self.instance.molset and self.validation:\n return super().build()\n else:\n raise NotImplementedError(\"Building DrugEx network without molecule set and validation strategy is not allowed.\")\n\n def sample(self, n_samples):\n return self.model.sample(n_samples)\n\nclass DrugExAgentBuilder(bases.ProgressMixIn, DrugExBuilderMixIn, bases.ModelBuilder):\n\n def __init__(\n self,\n instance: DrugExAgent,\n progress=None,\n onFit=None\n ):\n super().__init__(instance, progress, onFit)\n self.onFit = DrugExAgentMonitor(self, self.onFit)\n self.exploitNet = self.instance.exploitationNet\n self.exploreNet = self.instance.explorationNet\n self.environ = self.instance.environment\n self.corpus = BasicCorpus(vocabulary=self.exploitNet.corpus.voc)\n\n @property\n def corePackage(self):\n from .. import genuimodels\n return genuimodels\n\n def getY(self) -> Series:\n pass\n\n def getX(self) -> Corpus:\n return self.corpus\n\n def sample(self, n_samples):\n return self.model.sample(n_samples)" ]
[ [ "torch.device" ] ]
toy270/Nara
[ "964ab5d7324b1d4d8670599402ea517af46e5637" ]
[ "narajangteo_crawling.py" ]
[ "\n# coding: utf-8\n\n# # <center> 나라장터 입찰공고 크롤링 with Python3</center>\n# \n# 나라장터에 올라오는 입찰공고를 모니터링하기 위해 개발된 간단한 프로그램으로, 검색어 리스트를 설정하면 그에 따라 최근 7일간 공고된 입찰공고 리스트를 가져와 엑셀파일로 정리해줍니다. 크롤링 프로그램이지만, BeautifulSoup을 사용하지 않습니다.\n\n# In[18]:\n\nimport pandas as pd\nimport numpy as np\nimport requests\nimport os\nimport datetime, time\nimport string\nfrom time import localtime, strftime\nfrom datetime import timedelta\nfrom tqdm import tqdm\nfrom xlsxwriter.utility import xl_col_to_name, xl_range\nfrom lxml import html\n\n\n# In[6]:\n\nclass KoreaPageScraper(object):\n def __init__(self):\n pass\n \n def request_url(self,cat):\n '''returns url for a category'''\n d = datetime.date.today()\n fromtd = d - timedelta(days=7)\n start_date = str(fromtd.strftime(\"%Y/%m/%d\"))\n end_date =str(d.strftime(\"%Y/%m/%d\"))\n fromBidDt = requests.utils.quote(start_date, safe='')\n toBidDt = requests.utils.quote(end_date, safe='')\n bidNm = requests.utils.quote(cat.encode('euc-kr'))\n url = \"http://www.g2b.go.kr:8101/ep/tbid/tbidList.do?taskClCds=&bidNm=\" + bidNm + \"&searchDtType=1&fromBidDt=\" + fromBidDt + \"&toBidDt=\" + toBidDt + \"&fromOpenBidDt=&toOpenBidDt=&radOrgan=1&instNm=&exceptEnd=Y&area=&regYn=Y&bidSearchType=1&searchType=1&recordCountPerPage=1000\"\n return url\n\n def scrape_cat(self,cat):\n '''searches for each category'''\n cat_url = self.request_url(cat)\n df = pd.read_html(cat_url)[0]\n df['search_term']=cat\n return df\n \n def get_bidurl(self,bidnum):\n '''gets the bid url based on the bid registration number \n (ones that do not have a proper bid registration number usually doesn't have a corresponding link and would ask the user to go to the organization website for more informatioin)'''\n num_split = str(bidnum).split(sep='-')\n bidno = num_split[0]\n if len(bidno) == 11:\n bidseq = num_split[-1]\n bidurl = \"http://www.g2b.go.kr:8081/ep/invitation/publish/bidInfoDtl.do?bidno=\"+bidno+\"&bidseq=\"+bidseq\n return bidurl\n else: \n return \"Check organization website (공고기관) for details\"\n bidseq = refnum_split[-1]\n bidurl = \"http://www.g2b.go.kr:8081/ep/invitation/publish/bidInfoDtl.do?bidno=\"+bidno+\"&bidseq=\"+bidseq\n return bidurl\n\n def scrape_categories(self, categories):\n '''scrapes each keyword and compiles it into a list. \n There is a 1 second delay between each search term to prevent getting blocked out of the site'''\n appended_df = []\n for category in tqdm(categories):\n one_df = self.scrape_cat(category)\n appended_df.append(one_df)\n time.sleep(1)\n appended_df = pd.concat(appended_df, axis = 0)\n urlist=[]\n for index,row in appended_df.iterrows():\n urlist.append(self.get_bidurl(row['공고번호-차수']))\n \n appended_df['url']=urlist\n return appended_df\n\n\n# In[7]:\n\n#function to read txt files and parse the list\ndef txt_reader(name):\n with open(name+\".txt\",'rb') as f:\n line = f.readline()\n return line.decode('utf-8').split('/')\n\n\n# In[8]:\n\n#load the categories with the txt_reader function\ncategory_list = txt_reader('category')\nprint(\"Getting the list of given keywords: \" +str(category_list).replace('[','').replace(']','').replace(\"'\",\"\"))\n\n#scrape with the \"KoreaPageScraper\" class\nmyscraper = KoreaPageScraper()\n\ndf = myscraper.scrape_categories(category_list)\n\n\n# In[42]:\n\nprint(str(len(df))+\" results have been found. 
\")\n\n\n# In[11]:\n\n#Load the excluding keywords\nwith open('exclude.txt','rb') as f:\n line = f.readline()\n contains_excluding = line.decode('utf-8').replace('/','|')\n\n\n# In[40]:\n\nprint(\"Excluding the list of given keywords: \"+str(txt_reader('exclude')).replace('[','').replace(']','').replace(\"'\",\"\"))\n\n\n# In[43]:\n\n#Deleting the excluding keywords and informing how many lines were deleted. \nog = len(df)\ndf = df[-df.공고명.str.contains(contains_excluding).fillna(True)]\nprint(\"Deleted \"+str(og-len(df))+\" entries with keywords to exclude. (Currently at \"+str(len(df))+\" entries)\")\n\n\n# In[53]:\n\ndef clean_up(df):\n #Delete duplicates (more than two keywords together)\n og2 = len(df)\n df = df[~df.duplicated(['공고명'])].copy()\n print(str(og2-len(df))+\" duplicates were found and deleted (Currently at \"+str(len(df))+\" entries)\")\n #Divide the register date and due date\n df['register_date'],df['duedate'] = df['입력일시(입찰마감일시)'].str.split('(', 1).str\n df['duedate']=df['duedate'].str.replace(')','').replace('-','')\n df = df.drop('입력일시(입찰마감일시)',axis=1)\n #Sort the values by duedate. To sort with a different value, change the following line's 'duedate' with the column name you desire to sort it by. \n column_sort = 'duedate'\n df = df.sort_values(by=column_sort,ascending=False)\n print(\"Values are sorted by the column '\"+column_sort+\"'. To change this, please talk to the tool owner. \")\n return df\n\n\n# In[45]:\n\ndef filter_prioritize(df,filter_list,column):\n new_df = df[df[column].isin(filter_list)].copy()\n new_df[str(column+\"_sorted\")] = pd.Categorical(new_df[column],categories=filter_list,ordered=True)\n new_df = new_df.sort_values(column+\"_sorted\")\n return new_df\n\n\n# In[54]:\n\n#Cleaning up the df to make more sense\nclean_df = clean_up(df)\n\n\n# In[55]:\n\n#Get the target organization list\norg_list = txt_reader('orgs')\nprint(\"Getting the entries from target organization list: \"+str(org_list).replace('[','').replace(']','').replace(\"'\",\"\"))\norg_df = filter_prioritize(clean_df,org_list,'공고기관')\n\n\n# In[56]:\n\nclass create_excel(object):\n def get_length(self,column):\n ##\n ##This line is the problem!!\n ##\n valueex = column[~column.isnull()].reset_index(drop=True)[0]\n if type(valueex) == str:\n if valueex.startswith('=HYPERLINK'):\n return len('Click link')\n else: \n len_list = list(column.dropna().apply(lambda x: len(str(x))))\n maxlen = max(len_list)\n medlen = np.median(len_list)\n meanlen = np.mean(len_list)\n diff = maxlen-medlen\n stdlen = np.std(len_list)\n #min(A,B+C*numchars)\n if maxlen < 10:\n return maxlen+5\n elif diff > 50:\n if medlen == 0:\n return min(55,meanlen+5)\n return medlen\n elif maxlen < 50:\n return meanlen+15\n else:\n return 50\n else:\n return 5\n\n def to_excel(self,df,name):\n #Next step, format the excel file\n print(\"saving the \"+name+\" list...\")\n docname = \"나라장터_입찰공고-\"+name+\"-\"+str(strftime(\"%y%m%d(%H%M%S)\", localtime()))+\".xlsx\"\n #make the destination directory, but guard against race condition\n if not os.path.exists(name):\n try:\n os.makedirs(name)\n except OSError as exc: \n print(exc)\n raise Exception('something failed')\n writer = pd.ExcelWriter(\"%s/%s\"%(name,docname), engine='xlsxwriter')\n df.to_excel(writer,index=False,sheet_name='Sheet1')\n workbook = writer.book\n worksheet = writer.sheets['Sheet1']\n tablerange = xl_range(0,0,len(df),len(df.columns)-1)\n headerrange = xl_range(0,0,0,len(df.columns)-1)\n contentrange = xl_range(1,0,len(df),len(df.columns)-1)\n\n #Formatting 
headers\n header_format = workbook.add_format({'bg_color':'black'})\n column_format = workbook.add_format({'bottom':True,'bg_color':'white'})\n link_format = workbook.add_format({'font_color':'#157993','underline':True})\n \n # Set the column width and format.\n columns = []\n widths = []\n for i in range(0,len(df.columns)):\n a = xl_col_to_name(i)+\":\"+xl_col_to_name(i)\n columns.append(a)\n widths.append(self.get_length(df[df.columns[i]])) \n \n for c,w in zip(columns,widths):\n worksheet.set_column(c, w)\n \n worksheet.conditional_format(contentrange,{'type':'no_errors',\n 'format':column_format})\n worksheet.conditional_format(headerrange,{'type':'no_errors',\n 'format':header_format})\n worksheet.conditional_format(tablerange,{'type':'text',\n 'criteria':'containing',\n 'value':'Click link',\n 'format':link_format})\n \n #Formatting for putting in the header titles\n table_headers = [{'header':c} for c in df.columns]\n #Create a table with the data\n worksheet.add_table(tablerange,{'columns' : table_headers}) \n \n writer.save()\n return\n\n\n# In[57]:\n\ngo_to_excel = create_excel()\n\n\n# In[58]:\n\ngo_to_excel.to_excel(clean_df,'full')\n\n\n# In[59]:\n\ngo_to_excel.to_excel(org_df,'orgs')\n\n\n# In[60]:\n\nprint ('All done! Please hit Enter to exit this command prompt. ')\ninput()\n\n\n# In[ ]:\n\n\n\n" ]
[ [ "numpy.median", "pandas.concat", "numpy.mean", "numpy.std", "pandas.Categorical", "pandas.ExcelWriter", "pandas.read_html" ] ]
santhoshkumarvs/spark
[ "074533334d01afdd7862a1ac6c5a7a672bcce3f8" ]
[ "python/pyspark/ml/tests/test_algorithms.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom shutil import rmtree\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, \\\n MultilayerPerceptronClassifier, OneVsRest\nfrom pyspark.ml.clustering import DistributedLDAModel, KMeans, LocalLDAModel, LDA, LDAModel\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark.ml.linalg import Matrices, Vectors\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression\nfrom pyspark.sql import Row\nfrom pyspark.testing.mlutils import SparkSessionTestCase\n\n\nclass LogisticRegressionTest(SparkSessionTestCase):\n\n def test_binomial_logistic_regression_with_bound(self):\n\n df = self.spark.createDataFrame(\n [(1.0, 1.0, Vectors.dense(0.0, 5.0)),\n (0.0, 2.0, Vectors.dense(1.0, 2.0)),\n (1.0, 3.0, Vectors.dense(2.0, 1.0)),\n (0.0, 4.0, Vectors.dense(3.0, 3.0)), ], [\"label\", \"weight\", \"features\"])\n\n lor = LogisticRegression(regParam=0.01, weightCol=\"weight\",\n lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),\n upperBoundsOnIntercepts=Vectors.dense(0.0))\n model = lor.fit(df)\n self.assertTrue(\n np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))\n\n def test_multinomial_logistic_regression_with_bound(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lor = LogisticRegression(regParam=0.01,\n lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),\n upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))\n model = lor.fit(df)\n expected = [[4.593, 4.5516, 9.0099, 12.2904],\n [1.0, 8.1093, 7.0, 10.0],\n [3.041, 5.0, 8.0, 11.0]]\n for i in range(0, len(expected)):\n self.assertTrue(\n np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))\n self.assertTrue(\n np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))\n\n\nclass MultilayerPerceptronClassifierTest(SparkSessionTestCase):\n\n def test_raw_and_probability_prediction(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],\n blockSize=128, seed=123)\n model = mlp.fit(df)\n test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()\n result = model.transform(test).head()\n expected_prediction = 2.0\n expected_probability = [0.0, 0.0, 1.0]\n expected_rawPrediction = [-11.6081922998, -8.15827998691, 22.17757045]\n self.assertTrue(result.prediction, expected_prediction)\n 
self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))\n self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))\n\n\nclass OneVsRestTests(SparkSessionTestCase):\n\n def test_copy(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr)\n ovr1 = ovr.copy({lr.maxIter: 10})\n self.assertEqual(ovr.getClassifier().getMaxIter(), 5)\n self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)\n model = ovr.fit(df)\n model1 = model.copy({model.predictionCol: \"indexed\"})\n self.assertEqual(model1.getPredictionCol(), \"indexed\")\n\n def test_output_columns(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, parallelism=1)\n model = ovr.fit(df)\n output = model.transform(df)\n self.assertEqual(output.columns, [\"label\", \"features\", \"rawPrediction\", \"prediction\"])\n\n def test_parallelism_doesnt_change_output(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5))],\n [\"label\", \"features\"])\n ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)\n modelPar1 = ovrPar1.fit(df)\n ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)\n modelPar2 = ovrPar2.fit(df)\n for i, model in enumerate(modelPar1.models):\n self.assertTrue(np.allclose(model.coefficients.toArray(),\n modelPar2.models[i].coefficients.toArray(), atol=1E-4))\n self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))\n\n def test_support_for_weightCol(self):\n df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),\n (1.0, Vectors.sparse(2, [], []), 1.0),\n (2.0, Vectors.dense(0.5, 0.5), 1.0)],\n [\"label\", \"features\", \"weight\"])\n # classifier inherits hasWeightCol\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, weightCol=\"weight\")\n self.assertIsNotNone(ovr.fit(df))\n # classifier doesn't inherit hasWeightCol\n dt = DecisionTreeClassifier()\n ovr2 = OneVsRest(classifier=dt, weightCol=\"weight\")\n self.assertIsNotNone(ovr2.fit(df))\n\n\nclass KMeansTests(SparkSessionTestCase):\n\n def test_kmeans_cosine_distance(self):\n data = [(Vectors.dense([1.0, 1.0]),), (Vectors.dense([10.0, 10.0]),),\n (Vectors.dense([1.0, 0.5]),), (Vectors.dense([10.0, 4.4]),),\n (Vectors.dense([-1.0, 1.0]),), (Vectors.dense([-100.0, 90.0]),)]\n df = self.spark.createDataFrame(data, [\"features\"])\n kmeans = KMeans(k=3, seed=1, distanceMeasure=\"cosine\")\n model = kmeans.fit(df)\n result = model.transform(df).collect()\n self.assertTrue(result[0].prediction == result[1].prediction)\n self.assertTrue(result[2].prediction == result[3].prediction)\n self.assertTrue(result[4].prediction == result[5].prediction)\n\n\nclass LDATest(SparkSessionTestCase):\n\n def _compare(self, m1, m2):\n \"\"\"\n Temp method for comparing instances.\n TODO: Replace with generic implementation once SPARK-14706 is merged.\n \"\"\"\n self.assertEqual(m1.uid, m2.uid)\n self.assertEqual(type(m1), type(m2))\n self.assertEqual(len(m1.params), len(m2.params))\n for p in 
m1.params:\n if m1.isDefined(p):\n self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))\n self.assertEqual(p.parent, m2.getParam(p.name).parent)\n if isinstance(m1, LDAModel):\n self.assertEqual(m1.vocabSize(), m2.vocabSize())\n self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())\n\n def test_persistence(self):\n # Test save/load for LDA, LocalLDAModel, DistributedLDAModel.\n df = self.spark.createDataFrame([\n [1, Vectors.dense([0.0, 1.0])],\n [2, Vectors.sparse(2, {0: 1.0})],\n ], [\"id\", \"features\"])\n # Fit model\n lda = LDA(k=2, seed=1, optimizer=\"em\")\n distributedModel = lda.fit(df)\n self.assertTrue(distributedModel.isDistributed())\n localModel = distributedModel.toLocal()\n self.assertFalse(localModel.isDistributed())\n # Define paths\n path = tempfile.mkdtemp()\n lda_path = path + \"/lda\"\n dist_model_path = path + \"/distLDAModel\"\n local_model_path = path + \"/localLDAModel\"\n # Test LDA\n lda.save(lda_path)\n lda2 = LDA.load(lda_path)\n self._compare(lda, lda2)\n # Test DistributedLDAModel\n distributedModel.save(dist_model_path)\n distributedModel2 = DistributedLDAModel.load(dist_model_path)\n self._compare(distributedModel, distributedModel2)\n # Test LocalLDAModel\n localModel.save(local_model_path)\n localModel2 = LocalLDAModel.load(local_model_path)\n self._compare(localModel, localModel2)\n # Clean up\n try:\n rmtree(path)\n except OSError:\n pass\n\n\nclass FPGrowthTests(SparkSessionTestCase):\n def setUp(self):\n super(FPGrowthTests, self).setUp()\n self.data = self.spark.createDataFrame(\n [([1, 2], ), ([1, 2], ), ([1, 2, 3], ), ([1, 3], )],\n [\"items\"])\n\n def test_association_rules(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_association_rules = self.spark.createDataFrame(\n [([3], [1], 1.0, 1.0), ([2], [1], 1.0, 1.0)],\n [\"antecedent\", \"consequent\", \"confidence\", \"lift\"]\n )\n actual_association_rules = fpm.associationRules\n\n self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)\n self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)\n\n def test_freq_itemsets(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_freq_itemsets = self.spark.createDataFrame(\n [([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],\n [\"items\", \"freq\"]\n )\n actual_freq_itemsets = fpm.freqItemsets\n\n self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)\n self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)\n\n def tearDown(self):\n del self.data\n\n\nclass ALSTest(SparkSessionTestCase):\n\n def test_storage_levels(self):\n df = self.spark.createDataFrame(\n [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],\n [\"user\", \"item\", \"rating\"])\n als = ALS().setMaxIter(1).setRank(1)\n # test default params\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n # test non-default params\n als.setIntermediateStorageLevel(\"MEMORY_ONLY_2\")\n als.setFinalStorageLevel(\"DISK_ONLY\")\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als.getFinalStorageLevel(), 
\"DISK_ONLY\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"DISK_ONLY\")\n\n\nclass GeneralizedLinearRegressionTest(SparkSessionTestCase):\n\n def test_tweedie_distribution(self):\n\n df = self.spark.createDataFrame(\n [(1.0, Vectors.dense(0.0, 0.0)),\n (1.0, Vectors.dense(1.0, 2.0)),\n (2.0, Vectors.dense(0.0, 0.0)),\n (2.0, Vectors.dense(1.0, 1.0)), ], [\"label\", \"features\"])\n\n glr = GeneralizedLinearRegression(family=\"tweedie\", variancePower=1.6)\n model = glr.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))\n\n model2 = glr.setLinkPower(-1.0).fit(df)\n self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))\n self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))\n\n def test_offset(self):\n\n df = self.spark.createDataFrame(\n [(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),\n (0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),\n (0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),\n (0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], [\"label\", \"weight\", \"offset\", \"features\"])\n\n glr = GeneralizedLinearRegression(family=\"poisson\", weightCol=\"weight\", offsetCol=\"offset\")\n model = glr.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],\n atol=1E-4))\n self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))\n\n\nclass LinearRegressionTest(SparkSessionTestCase):\n\n def test_linear_regression_with_huber_loss(self):\n\n data_path = \"data/mllib/sample_linear_regression_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lir = LinearRegression(loss=\"huber\", epsilon=2.0)\n model = lir.fit(df)\n\n expectedCoefficients = [0.136, 0.7648, -0.7761, 2.4236, 0.537,\n 1.2612, -0.333, -0.5694, -0.6311, 0.6053]\n expectedIntercept = 0.1607\n expectedScale = 9.758\n\n self.assertTrue(\n np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1E-3))\n self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1E-3))\n self.assertTrue(np.isclose(model.scale, expectedScale, atol=1E-3))\n\n\nif __name__ == \"__main__\":\n from pyspark.ml.tests.test_algorithms import *\n\n try:\n import xmlrunner\n testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n" ]
[ [ "numpy.allclose", "numpy.isclose" ] ]
pondbooks/CDQL_with_Sim
[ "cc5b8680653e12db6003b154c58d8033f5a103b8" ]
[ "section_5_3/5_to_50/dynamics.py" ]
[ "import numpy as np \n\ndef angle_normalize(theta):\n x_plot = np.cos(theta)\n y_plot = np.sin(theta)\n angle = np.arctan2(y_plot,x_plot)\n return angle\n\n\ndef reward_func(x,u): #rewardがnumpy.array(1)型になっていると学習がうまくいかないことに注意.\n cost = -x[0,0]**(2) - 0.1*x[0,1]**(2) - 10.0*u**(2)\n return cost[0]\n\ndef Dynamics(x, u, a_param, b_param, l=1.0, g=9.81, delta=2**(-4)):\n theta = x[0,0]\n omega = x[0,1]\n\n reward = reward_func(x, u)\n done = False\n\n new_theta = theta + delta*(omega)\n new_omega = omega + delta*(g/l*np.sin(theta) - a_param*omega + b_param*u[0])\n\n new_theta = angle_normalize(new_theta)\n new_x = np.array([[new_theta, new_omega]])\n\n return new_x, reward, done\n\ndef Initialize():\n theta = np.pi # Get the initial state s_0\n omega = 0.0 \n state = np.array([[theta, omega]]) #numpy array (1,2)\n\n return state\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.arctan2", "numpy.cos" ] ]
tamaslevente/trai
[ "4bf68463b941f305d9b25a9374b6c2a2d51a8046", "4bf68463b941f305d9b25a9374b6c2a2d51a8046" ]
[ "PC-NBV/generate_nbv_data.py", "camera_calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/main_fpn_curv_grad_t3_torch_old.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\nimport scipy.io as sio\nfrom open3d import *\nimport random\nfrom tf_ops.nn_distance import tf_nndistance \nimport time\nimport pdb\n\nif __name__ == '__main__':\n\n os.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\n \n # view num\n #view_num = 33\n view_num = 5\n nr_views_choose=1\n\n # path\n data_type = 'train/'\n ShapeNetv1_dir = '/home/cuda/Alex/trai/PC-NBV/Shapenet_v1/Trial_Test_Valid/' \n pc_dir = \"/home/cuda/Alex/trai/PC-NBV/Output_model_blender/\" + data_type + \"/pcd\"\n save_dir = \"/home/cuda/Alex/trai/PC-NBV/NBV_data/shapenet_33_views_640x480/\"+ data_type\n model_dir = '/home/cuda/Alex/trai/PC-NBV/Shapenet_v1/Trial_Test_Valid/' + data_type\n\n \n\n # for calculating surface coverage and register\n part_tensor = tf.placeholder(tf.float32, (1, None, 3))\n gt_tensor = tf.placeholder(tf.float32, (1, None, 3))\n sess = tf.Session()\n dist1, _, dist2, _ = tf_nndistance.nn_distance(part_tensor, gt_tensor)\n\n class_list = os.listdir(model_dir)\n\n f=open('generate_nbv.log', 'w+')\n\n for class_id in class_list:\n\n model_list = os.listdir(os.path.join(ShapeNetv1_dir, data_type, class_id))\n\n for model in model_list:\n save_model_path = os.path.join(save_dir, model)\n if os.path.exists(save_model_path):\n print(\"skip \" + save_model_path)\n continue\n\n # gt point cloud\n gt_points = sio.loadmat(os.path.join(ShapeNetv1_dir, data_type, class_id, model, 'model.mat'))\n gt_points = np.array(gt_points['points'])\n if not os.path.exists(os.path.join(save_dir, model)):\n os.makedirs(os.path.join(save_dir, model))\n\n np.savetxt(os.path.join(save_dir, model, \"gt.xyz\"), gt_points) \n\n # every view's partial point cloud\n part_points_list = []\n \n for i in range(view_num):\n pcd_path = os.path.join(pc_dir, model, str(i) + \".pcd\")\n if os.path.exists(pcd_path):\n cur_pc = open3d.io.read_point_cloud(pcd_path)\n cur_points = np.asarray(cur_pc.points) \n else:\n cur_points = np.zeros((1,3))\n\n part_points_list.append(cur_points)\n\n # reconstruct from different views 1 times\n selected_init_view = []\n for ex_index in range(1): \n #for ex_index in range(16): \n\n start = time.time() \n\n cur_ex_dir = os.path.join(save_dir, model, str(ex_index))\n if not os.path.exists(cur_ex_dir):\n os.makedirs(cur_ex_dir) \n\n # init view state\n view_state = np.zeros(view_num, dtype=np.int) # 0 unselected, 1 selected, 2 cur\n # init start view\n while (True):\n # cur_view = random.randint(0, view_num - 1)\n cur_view = random.randint(0, nr_views_choose-1)\n if not cur_view in selected_init_view:\n selected_init_view.append(cur_view)\n break \n\n view_state[cur_view] = 1\n #view_state[0]=1\n\n acc_pc_points = part_points_list[cur_view] \n\n # accumulate points coverage\n batch_acc = acc_pc_points[np.newaxis, :, :]\n batch_gt = gt_points[np.newaxis, :, :]\n\n dist2_new = sess.run(dist2, feed_dict={part_tensor:batch_acc, gt_tensor:batch_gt}) \n dis_flag_new = dist2_new < 0.00005\n cover_sum = np.sum(dis_flag_new == True)\n cur_cov = cover_sum / dis_flag_new.shape[1]\n\n \n\n # max scan 10 times\n for scan_index in range(1): \n\n print(\"coverage:\" + str(cur_cov) + \" in scan round \" + str(scan_index)) \n\n f.write(\"coverage:\" + str(cur_cov) + \" in scan round \" + str(scan_index) +'\\n')\n\n np.save(os.path.join(cur_ex_dir, str(scan_index) + \"_viewstate.npy\") ,view_state)\n np.save(os.path.join(cur_ex_dir, str(scan_index) + \"_acc_pc.npy\"), acc_pc_points)\n # np.savetxt(os.path.join(cur_ex_dir, str(scan_index) + \"_acc_pc.xyz\"), acc_pc_points) 
\n\n target_value = np.zeros((view_num, 1)) # surface coverage, register coverage, moving cost for each view \n\n max_view_index = 0\n max_view_cov = 0\n max_new_pc = np.zeros((1,3))\n\n # # accumulate points coverage\n batch_acc = acc_pc_points[np.newaxis, :, :]\n batch_gt = gt_points[np.newaxis, :, :]\n \n # evaluate all the views\n for i in range(view_num): \n\n # current evaluate view\n batch_part_cur = part_points_list[i][np.newaxis, :, :] \n\n # new pc\n dist1_new = sess.run(dist1, feed_dict={part_tensor:batch_part_cur, gt_tensor:batch_acc})\n dis_flag_new = dist1_new < 0.00005 \n\n pc_register = batch_part_cur[dis_flag_new]\n pc_new = batch_part_cur[~dis_flag_new]\n\n batch_new = pc_new[np.newaxis, :, :] \n\n # test new coverage\n if batch_new.shape[1] != 0:\n dist2_new = sess.run(dist2, feed_dict={part_tensor:batch_new, gt_tensor:batch_gt}) \n\n dis_flag_new = dist2_new < 0.00005\n cover_sum = np.sum(dis_flag_new == True)\n cover_new = cover_sum / dis_flag_new.shape[1]\n else:\n cover_new = 0 \n\n target_value[i, 0] = cover_new\n\n if ( target_value[i, 0] > max_view_cov ):\n max_view_index = i\n max_view_cov = target_value[i, 0]\n max_new_pc = pc_new \n\n np.save(os.path.join(cur_ex_dir, str(scan_index) + \"_target_value.npy\"), target_value) \n\n print(\"choose view:\" + str(max_view_index) + \" add coverage:\" + str(max_view_cov)) \n\n f.write(\"choose view:\" + str(max_view_index) + \" add coverage:\" + str(max_view_cov) +'\\n') \n\n cur_view = max_view_index\n cur_cov = cur_cov + target_value[max_view_index, 0]\n view_state[cur_view] = 1\n acc_pc_points = np.append(acc_pc_points, max_new_pc, axis=0) \n\n print('scan %s done, time=%.4f sec' % (scan_index, time.time() - start))\n\n f.write('scan %s done, time=%.4f sec' % (scan_index, time.time() - start) +'\\n')\n\n\n \n\n\n", "import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport random\nrandom.seed(10)\n\nfrom zmq import device\nfrom constants import *\nfrom model_fpn_curv_grad import I2D\nimport argparse\nimport time\n# from utils.net_utils import adjust_learning_rate\nfrom torch.autograd import Variable\n# from dataset.dataloader import DepthDataset\n# from dataset.nyuv2_dataset import NYUv2Dataset\nfrom torchvision.utils import save_image\nfrom dataset.nyuv2_dataset import MyCustomDataset\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom collections import Counter\nimport matplotlib\nimport open3d as o3d\nimport math\n\nmatplotlib.use('Agg')\n\nfrom torch.utils.tensorboard import SummaryWriter\nwriter = SummaryWriter()\n\n\nclass DDDDepthDiff(nn.Module):\n def __init__(self):\n super(DDDDepthDiff, self).__init__()\n\n def forward(self, fake, real,epoch,show_image):\n if not fake.shape == real.shape:\n _, _, H, W = real.shape\n fake = F.interpolate(fake, size=(H, W), mode='bilinear')\n eps = 1e-7\n # eps = 2\n\n batch_size = real.shape[0]\n \n real1 = real.clone() #real[0].cpu().detach().numpy()\n fake1 = fake.clone() #fake[0].cpu().detach().numpy()\n ###### debug purposes ########\n # fake1[real1==0] = 1.0\n # a = np.asarray(real1.cpu().detach()*7000)[0]\n # # a[a!=0.0] = 10000\n # b = np.asarray(fake1.cpu().detach()*7000)[0]\n # plt.imshow(np.uint16(b), vmin=0, vmax=7000)\n # plt.colorbar()\n # plt.savefig(save_dir +'fake1_'+str(epoch)+'.png',bbox_inches='tight')\n # plt.close()\n\n # plt.imshow(np.uint16(a), vmin=0, vmax=7000)\n # plt.colorbar()\n # plt.savefig(save_dir +'real1_'+str(epoch)+'.png',bbox_inches='tight')\n # 
plt.close()\n # b[b!=0.0] = 10000\n # c = np.abs(a-b)\n # cv2.imwrite(save_dir+'gttest_'+str(epoch)+'.png',np.uint16(a))\n # cv2.imwrite(save_dir+'faketest_'+str(epoch)+'.png',np.uint16(b))\n # cv2.imwrite(save_dir+'diff_test_'+str(epoch)+'.png',np.uint16(c))\n ####################################\n\n # real1[real1==0] = eps\n # fake1[fake1==0] = eps\n\n # for calculating the loss on all the images in the batch size (Thanks Szilard for telling me about this!!!)\n\n if epoch > 24:\n all_real_pcd = self.point_cloud(fake1[0]).clone() * 1000.0\n else: \n all_real_pcd = self.point_cloud(real1[0]).clone() * 1000.0\n\n all_fake_pcd = self.point_cloud(fake1[0]).clone() * 1000.0\n\n all_real_pcd[all_real_pcd==0] = eps\n all_fake_pcd[all_fake_pcd==0] = eps\n\n # real_pcd = nan_real_pcd[~torch.isnan(nan_real_pcd)]\n # fake_pcd = nan_fake_pcd[~torch.isnan(nan_real_pcd)]\n\n \n \n #######################\n # Take out nan points #\n # If this doesn't work replace the values with 2 or something\n ### loss17\n nan_z_real = all_real_pcd[:,2].clone()\n temp_z_real = nan_z_real[~torch.isnan(nan_z_real)]\n \n nan_z_fake = all_fake_pcd[:,2].clone()\n temp_z_fake = nan_z_fake[~torch.isnan(nan_z_real)]\n \n nan_x_real = all_real_pcd[:,0].clone()\n temp_x_real = nan_x_real[~torch.isnan(nan_x_real)]\n \n nan_x_fake = all_fake_pcd[:,0].clone()\n temp_x_fake = nan_x_fake[~torch.isnan(nan_x_real)]\n \n nan_y_real = all_real_pcd[:,1].clone()\n temp_y_real = nan_y_real[~torch.isnan(nan_y_real)]\n \n nan_y_fake = all_fake_pcd[:,1].clone()\n temp_y_fake = nan_y_fake[~torch.isnan(nan_y_real)]\n\n z_real = temp_z_real[~torch.isnan(temp_z_fake)]\n z_fake = temp_z_fake[~torch.isnan(temp_z_fake)]\n\n x_real = temp_x_real[~torch.isnan(temp_x_fake)]\n x_fake = temp_x_fake[~torch.isnan(temp_x_fake)]\n\n y_real = temp_y_real[~torch.isnan(temp_y_fake)]\n y_fake = temp_y_fake[~torch.isnan(temp_y_fake)]\n \n\n lossX = torch.sqrt(torch.mean(torch.abs(x_real-x_fake)**2))\n lossZ = torch.sqrt(torch.mean(torch.abs(z_real-z_fake)**2))\n lossY = torch.sqrt(torch.mean(torch.abs(y_real-y_fake)**2))\n \n \n RMSE_log = 10000* torch.sqrt(torch.mean(torch.abs(torch.log(torch.abs(z_real))-torch.log(torch.abs(z_fake)))**2))\n\n loss17 = RMSE_log * torch.abs(10*(3-torch.exp(1*lossX)-torch.exp(1*lossY)-torch.exp(1*lossZ)))\n \n # plane_mean_dist_grad = torch.tensor(0.0001).cuda() # ????????\n \n # ###############################\n # Curv loss\n # ################################\n nan_cloud = all_fake_pcd.clone()\n nan_cloud[torch.isnan(nan_cloud)] = 0\n\n norm_all_fake_pcd = nan_cloud/nan_cloud.max()\n # norm_o3d_pcd_fake = o3d.geometry.PointCloud()\n # norm_o3d_pcd_fake.points = o3d.utility.Vector3dVector(norm_all_fake_pcd.cpu().detach().numpy())\n \n \n # o3d.io.write_point_cloud(\"fake_norm_cloud\"+str(epoch)+\".pcd\", norm_o3d_pcd_fake)\n\n # ################# with Open3d\n # fake_plane_model, fake_inliers = norm_o3d_pcd_fake.segment_plane(distance_threshold=0.025,\n # ransac_n=3,\n # num_iterations=500)\n # [a, b, c, d] = fake_plane_model\n # a_torch = torch.from_numpy(np.array(a)).cuda()\n # b_torch = torch.from_numpy(np.array(b)).cuda()\n # c_torch = torch.from_numpy(np.array(c)).cuda()\n # d_torch = torch.from_numpy(np.array(d)).cuda()\n\n # fake_plane_pcd = norm_o3d_pcd_fake.select_by_index(fake_inliers)\n # fake_plane_pcd.paint_uniform_color([1.0, 0, 0])\n # ################################\n\n # if loss17<400:\n # np_plane_model, np_plane_inliers = self.oh_numpy_RANSAC_give_me_a_plane(np.asarray(norm_o3d_pcd_fake.points),thresh=0.025,\n 
# minPoints=5000,\n # maxIteration=1000)\n\n torch_plane_model, torch_plane_inliers = self.oh_torch_RANSAC_give_me_a_plane(norm_all_fake_pcd,thresh=0.025,\n minPoints=5000,\n maxIteration=1000)\n # o3d.io.write_point_cloud(\"fake_plane_o3d\"+str(epoch)+\".pcd\", fake_plane_pcd)\n # fake_plane_pcd = norm_o3d_pcd_fake.select_by_index(torch_plane_inliers.cpu().detach().numpy())\n # o3d.io.write_point_cloud(\"plane_torch\"+str(epoch)+\".pcd\", fake_plane_pcd)\n\n # torch_pcd.points = o3d.utility.Vector3dVector(norm_all_fake_pcd[fake_inliers].cpu().detach().numpy())\n # o3d.io.write_point_cloud(\"fake_plane_torch_\"+str(epoch)+\".pcd\", torch_pcd)\n \n # Open3D translation and rotation\n ################# removable stuff, just for testing\n # fake_plane_pcd = fake_plane_pcd.translate((0,0,d/c))\n # # o3d.io.write_point_cloud(\"fake_plane_o3d_trans_\"+str(epoch)+\".pcd\", fake_plane_pcd)\n\n # cos_theta = c / math.sqrt(a**2 + b**2 + c**2)\n # sin_theta = math.sqrt((a**2+b**2)/(a**2 + b**2 + c**2))\n # u_1 = b / math.sqrt(a**2 + b**2 )\n # u_2 = -a / math.sqrt(a**2 + b**2)\n # pred_rotation_matrix = np.array([[cos_theta + u_1**2 * (1-cos_theta), u_1*u_2*(1-cos_theta), u_2*sin_theta],\n # [u_1*u_2*(1-cos_theta), cos_theta + u_2**2*(1- cos_theta), -u_1*sin_theta],\n # [-u_2*sin_theta, u_1*sin_theta, cos_theta]])\n\n # center_before_rot = fake_plane_pcd.get_center()\n # fake_plane_pcd.rotate(pred_rotation_matrix)\n # ###############################\n # center_after_rot = fake_plane_pcd.get_center()\n\n # o3d.io.write_point_cloud(\"fake_plane_o3d_rot_\"+str(epoch)+\".pcd\", fake_plane_pcd)\n \n #####\n # fake_plane_pcd = fake_plane_pcd.translate((-center_before_rot[0],-center_before_rot[1],-center_before_rot[2]))\n # center_after_translate = fake_plane_pcd.get_center()\n # fake_plane_pcd.rotate(pred_rotation_matrix,center=(center_before_rot[0],center_before_rot[1],center_before_rot[2]))\n # fake_plane_pcd = fake_plane_pcd.translate((center_before_rot[0],center_before_rot[1],center_before_rot[2]))\n # o3d.io.write_point_cloud(\"fake_plane_o3d_rot_zero_c_and_t\"+str(epoch)+\".pcd\", fake_plane_pcd)\n ######\n \n ###################\n a_torch = torch_plane_model[0]\n b_torch = torch_plane_model[1]\n c_torch = torch_plane_model[2]\n d_torch = torch_plane_model[3]\n\n # torch translation and rotation\n torch_fake_plane = norm_all_fake_pcd[torch_plane_inliers]\n torch_translation = torch.tensor([0, 0, d_torch/c_torch]).cuda()\n torch_fake_plane += torch_translation\n\n # torch_pcd.points = o3d.utility.Vector3dVector(torch_fake_plane.cpu().detach().numpy())\n # o3d.io.write_point_cloud(\"fake_plane_torch_trans_\"+str(epoch)+\".pcd\", torch_pcd)\n\n cos_theta_torch = c_torch / torch.sqrt(a_torch**2 + b_torch**2 + c_torch**2)\n sin_theta_torch = torch.sqrt((a_torch**2+b_torch**2)/(a_torch**2 + b_torch**2 + c_torch**2))\n u_1_torch = b_torch / torch.sqrt(a_torch**2 + b_torch**2 )\n u_2_torch = -a_torch / torch.sqrt(a_torch**2 + b_torch**2)\n \n pred_rotation_matrix_torch = torch.tensor([[cos_theta_torch + u_1_torch**2 * (1-cos_theta_torch), u_1_torch*u_2_torch*(1-cos_theta_torch), u_2_torch*sin_theta_torch],\n [u_1_torch*u_2_torch*(1-cos_theta_torch), cos_theta_torch + u_2_torch**2*(1- cos_theta_torch), -u_1_torch*sin_theta_torch],\n [-u_2_torch*sin_theta_torch, u_1_torch*sin_theta_torch, cos_theta_torch]]).cuda()\n # torch_plane_center = torch.tensor([center_before_rot[0],center_before_rot[1],center_before_rot[2]]).cuda()\n # torch_fake_plane = torch_fake_plane - torch_plane_center\n 
pred_rotation_matrix_torch = pred_rotation_matrix_torch.transpose(0,1)\n torch_fake_plane = torch.matmul(torch_fake_plane,pred_rotation_matrix_torch)\n # torch_fake_plane += torch_plane_center\n\n # if loss17 < 400:\n # torch_pcd = o3d.geometry.PointCloud()\n # torch_pcd.points = o3d.utility.Vector3dVector(torch_fake_plane.cpu().detach().numpy())\n # o3d.io.write_point_cloud(\"fake_plane_torch_rot_\"+str(epoch)+\".pcd\", torch_pcd)\n\n #########\n # BELOW #\n #########\n # fake_plane_numpy = np.asarray(fake_plane_pcd.points)\n # fake_plane_numpy[:,2] = fake_plane_numpy[:,2] - fake_plane_numpy[:,2].max() \n # plane_mean_distance_below_XY_numpy = np.mean(abs(fake_plane_numpy[:,2]))\n # fake_plane_numpy[:,2] = fake_plane_numpy[:,2] - fake_plane_numpy[:,2].min() \n # plane_mean_distance_above_XY_numpy = np.mean(abs(fake_plane_numpy[:,2]))\n \n torch_fake_plane_dist_below = torch_fake_plane[:,2] - torch_fake_plane[:,2].max()\n plane_mean_distance_below_XY = torch.mean(abs(torch_fake_plane_dist_below))\n\n ##############\n # ABOVE #\n #########\n\n torch_fake_plane_dist_above = torch_fake_plane[:,2] - torch_fake_plane[:,2].min()\n plane_mean_distance_above_XY = torch.mean(abs(torch_fake_plane_dist_above))\n \n if plane_mean_distance_below_XY + plane_mean_distance_above_XY == 0.0: plane_mean_distance_below_XY = torch.tensor(0.001).cuda()\n plane_mean_dist_grad = 1000* plane_mean_distance_above_XY/(plane_mean_distance_below_XY + plane_mean_distance_above_XY)\n \n\n\n loss_curv = loss17 + plane_mean_dist_grad\n delta = [RMSE_log, lossX, lossY, lossZ, plane_mean_dist_grad, loss17]\n\n\n return delta, loss_curv\n\n def oh_numpy_RANSAC_give_me_a_plane(self, pts, thresh=0.05, minPoints=100, maxIteration=1000):\n \"\"\"\n Find the best equation for a plane.\n :param pts: 3D point cloud as a `np.array (N,3)`.\n :param thresh: Threshold distance from the plane which is considered inlier.\n :param maxIteration: Number of maximum iteration which RANSAC will loop over.\n :returns:\n - `self.equation`: Parameters of the plane using Ax+By+Cy+D `np.array (1, 4)`\n - `self.inliers`: points from the dataset considered inliers\n ---\n \"\"\"\n n_points = pts.shape[0]\n best_eq = []\n best_inliers = []\n\n for it in range(maxIteration):\n\n # Samples 3 random points\n id_samples = random.sample(range(0, n_points), 3)\n pt_samples = pts[id_samples]\n\n # We have to find the plane equation described by those 3 points\n # We find first 2 vectors that are part of this plane\n # A = pt2 - pt1\n # B = pt3 - pt1\n\n vecA = pt_samples[1, :] - pt_samples[0, :]\n vecB = pt_samples[2, :] - pt_samples[0, :]\n\n # Now we compute the cross product of vecA and vecB to get vecC which is normal to the plane\n vecC = np.cross(vecA, vecB)\n\n # The plane equation will be vecC[0]*x + vecC[1]*y + vecC[0]*z = -k\n # We have to use a point to find k\n vecC = vecC / np.linalg.norm(vecC)\n k = -np.sum(np.multiply(vecC, pt_samples[1, :]))\n plane_eq = [vecC[0], vecC[1], vecC[2], k]\n\n # Distance from a point to a plane\n # https://mathworld.wolfram.com/Point-PlaneDistance.html\n pt_id_inliers = [] # list of inliers ids\n dist_pt = (\n plane_eq[0] * pts[:, 0] + plane_eq[1] * pts[:, 1] + plane_eq[2] * pts[:, 2] + plane_eq[3]\n ) / np.sqrt(plane_eq[0] ** 2 + plane_eq[1] ** 2 + plane_eq[2] ** 2)\n\n # Select indexes where distance is biggers than the threshold\n pt_id_inliers = np.where(np.abs(dist_pt) <= thresh)[0]\n if len(pt_id_inliers) > len(best_inliers) & (len(pt_id_inliers) > minPoints):\n best_eq = plane_eq\n best_inliers = 
pt_id_inliers\n self.inliers = best_inliers\n self.equation = best_eq\n\n return self.equation, self.inliers\n\n def oh_torch_RANSAC_give_me_a_plane(self, pts, thresh=0.05, minPoints=100, maxIteration=1000):\n \"\"\"\n Find the best equation for a plane.\n :param pts: 3D point cloud as a `torch.tensor (N,3)`.\n :param thresh: Threshold distance from the plane which is considered inlier.\n :param maxIteration: Number of maximum iteration which RANSAC will loop over.\n :returns:\n - `self.equation`: Parameters of the plane using Ax+By+Cy+D `torch.tensor (1, 4)`\n - `self.inliers`: points from the dataset considered inliers\n ---\n \"\"\"\n n_points = pts.shape[0]\n best_eq = []\n best_inliers = []\n\n for it in range(maxIteration):\n\n # Samples 3 random points\n id_samples = random.sample(range(0, n_points), 3)\n pt_samples = pts[id_samples]\n\n # We have to find the plane equation described by those 3 points\n # We find first 2 vectors that are part of this plane\n # A = pt2 - pt1\n # B = pt3 - pt1\n\n vecA = pt_samples[1, :] - pt_samples[0, :]\n vecB = pt_samples[2, :] - pt_samples[0, :]\n\n # Now we compute the cross product of vecA and vecB to get vecC which is normal to the plane\n vecC = torch.cross(vecA, vecB)\n\n # The plane equation will be vecC[0]*x + vecC[1]*y + vecC[0]*z = -k\n # We have to use a point to find k\n vecC = vecC / torch.linalg.norm(vecC)\n k = -torch.sum(torch.multiply(vecC, pt_samples[1, :]))\n plane_eq = [vecC[0], vecC[1], vecC[2], k]\n\n # Distance from a point to a plane\n # https://mathworld.wolfram.com/Point-PlaneDistance.html\n pt_id_inliers = [] # list of inliers ids\n dist_pt = (\n plane_eq[0] * pts[:, 0] + plane_eq[1] * pts[:, 1] + plane_eq[2] * pts[:, 2] + plane_eq[3]\n ) / torch.sqrt(plane_eq[0] ** 2 + plane_eq[1] ** 2 + plane_eq[2] ** 2)\n\n # Select indexes where distance is biggers than the threshold\n pt_id_inliers = torch.where(torch.abs(dist_pt) <= thresh)[0]\n if (len(pt_id_inliers) > len(best_inliers)) & (len(pt_id_inliers) > minPoints):\n best_eq = plane_eq\n best_inliers = pt_id_inliers\n self.inliers = best_inliers\n self.equation = best_eq\n\n return self.equation, self.inliers\n\n def point_cloud(self, depth1):\n \"\"\"Transform a depth image into a point cloud with one point for each\n pixel in the image, using the camera transform for a camera\n centred at cx, cy with field of view fx, fy.\n\n depth is a 2-D ndarray with shape (rows, cols) containing\n depths from 1 to 254 inclusive. The result is a 3-D array with\n shape (rows, cols, 3). 
Pixels with invalid depth in the input have\n NaN for the z-coordinate in the result.\n\n \"\"\"\n # depth is of shape (1,480,640)\n cx = 334.081\n cy = 169.808\n fx = 460.585\n fy = 460.268\n\n if depth1.shape[0] == 3:\n depth = depth1[2].clone()\n else:\n depth = depth1[0].clone()\n # open3d_img = o3d.t.geometry.Image(depth[0])#/1000.0)\n # intrinsics = o3d.camera.PinholeCameraIntrinsic(640,360,fx,fy,cx,cy)\n # pcd = o3d.geometry.create_point_cloud_from_depth_image(open3d_img,intrinsic=intrinsics)\n \n rows, cols = depth.shape\n c, _ = torch.meshgrid(torch.arange(cols), torch.arange(cols))\n c = torch.meshgrid(torch.arange(cols))\n new_c = c[0].reshape([1,cols]).to('cuda')\n r = torch.meshgrid(torch.arange(rows))\n new_r = r[0].unsqueeze(-1).to('cuda')\n valid = (depth > 0) & (depth < 65535)\n nan_number = torch.tensor(np.nan).to('cuda')\n # zero_number = torch.tensor(0.).to('cuda')\n z = torch.where(valid, depth/1000.0, nan_number) # allways divide with 1000.0\n x = torch.where(valid, z * (new_c - cx) / fx, nan_number)\n y = torch.where(valid, z * (new_r - cy) / fy, nan_number)\n \n\n dimension = rows * cols\n z_ok = z.reshape(dimension)\n x_ok = x.reshape(dimension)\n y_ok = y.reshape(dimension)\n \n return torch.stack((x_ok,y_ok,z_ok),dim=1) \n\n def image_from_cloud(self, point_cloud):\n \n cx = 334.081\n cy = 169.808\n fx = 460.585\n fy = 460.268\n \n # point_cloud = point_cloud/1000.0\n np_image = np.tile(0,(360,640))\n\n z = point_cloud[:,2] * 1000.0\n x = point_cloud[:,0]\n y = point_cloud[:,1]\n\n valid = ~(np.isnan(z) | (z==0))\n z = np.where(valid, z*1000.0, 0)\n # z[np.isnan(z) | (z==0)] = 1e-7\n # pos_x = (point_cloud[:,0] * 1000.0 * fx)/ z + cx\n valid_x = ~(np.isnan(x) | np.isnan(z) | (z==0))\n pos_x = np.where(valid_x,(x * 1000.0 * fx)/ z + cx, 0)\n pos_x = pos_x.astype(np.int32)\n # pos_y = (point_cloud[:,1] * 1000.0 * fy)/ z + cy\n valid_y = ~(np.isnan(y) | np.isnan(z) | (z==0))\n pos_y = np.where(valid_y,(y * 1000.0 * fy)/z + cy, 0) \n pos_y = pos_y.astype(np.int32)\n \n pos_x[pos_x>639] = 639\n pos_x[pos_x<0] = 0\n pos_y[pos_y>359] =359\n pos_y[pos_y<0] = 0\n\n pos_x = pos_x.reshape(360,640)\n pos_y = pos_y.reshape(360,640)\n z = z.reshape(360,640)\n np_image[pos_y,pos_x] = z\n\n # depth = depth.cpu().detach().numpy()\n # rows, cols = depth[0].shape\n # c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)\n # valid = (depth[0] > 0) & (depth[0] < 65535)\n # z = np.where(valid, depth[0] / 1000.0, np.nan)\n # x = np.where(valid, z * (c - cx) / fx, 0)\n # y = np.where(valid, z * (r - cy) / fy, 0)\n return np_image\n \n\ndef adjust_learning_rate(optimizer, decay=0.1):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.5 every 20 epochs\"\"\"\n for param_group in optimizer.param_groups:\n param_group['lr'] = decay * param_group['lr']\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Single image depth estimation')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='nyuv2', type=str)\n parser.add_argument('--epochs', dest='max_epochs',\n help='number of epochs to train',\n default=10, type=int)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--bs', dest='bs',\n help='batch_size',\n default=1, type=int)\n parser.add_argument('--num_workers', dest='num_workers',\n help='num_workers',\n default=1, type=int)\n parser.add_argument('--disp_interval', dest='disp_interval',\n help='display 
interval',\n default=10, type=int)\n parser.add_argument('--output_dir', dest='output_dir',\n help='output directory',\n default='saved_models_t3', type=str)\n\n# config optimization\n parser.add_argument('--o', dest='optimizer',\n help='training optimizer',\n default=\"adam\", type=str)\n parser.add_argument('--lr', dest='lr',\n help='starting learning rate',\n default=1e-3, type=float)\n parser.add_argument('--lr_decay_step', dest='lr_decay_step',\n help='step to do learning rate decay, unit is epoch',\n default=5, type=int)\n parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',\n help='learning rate decay ratio',\n default=0.1, type=float)\n\n# set training session\n parser.add_argument('--s', dest='session',\n help='training session',\n default=1, type=int)\n parser.add_argument('--eval_epoch', dest='eval_epoch',\n help='number of epoch to evaluate',\n default=2, type=int)\n\n# resume trained model\n parser.add_argument('--r', dest='resume',\n help='resume checkpoint or not',\n default=False, type=bool)\n parser.add_argument('--start_at', dest='start_epoch',\n help='epoch to start with',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load model',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load model',\n default=0, type=int)\n\n# training parameters\n parser.add_argument('--gamma_sup', dest='gamma_sup',\n help='factor of supervised loss',\n default=1., type=float)\n parser.add_argument('--gamma_unsup', dest='gamma_unsup',\n help='factor of unsupervised loss',\n default=1., type=float)\n parser.add_argument('--gamma_reg', dest='gamma_reg',\n help='factor of regularization loss',\n default=10., type=float)\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You might want to run with --cuda\")\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n train_dataset = MyCustomDataset()\n train_size = len(train_dataset)\n eval_dataset = MyCustomDataset(train=False)\n eval_size = len(eval_dataset)\n print(train_size)\n print(eval_size)\n\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.bs,\n shuffle=True, num_workers=args.num_workers)\n\n \n eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.bs,\n shuffle=True, num_workers=args.num_workers) #maybe trying with False for shuffle, here?\n \n \n # network initialization\n print('Initializing model...')\n i2d = I2D(fixed_feature_weights=False)\n torch.cuda.empty_cache()\n if args.cuda:\n i2d = i2d.cuda()\n\n print('Done!')\n\n # hyperparams\n lr = args.lr\n bs = args.bs\n lr_decay_step = args.lr_decay_step\n lr_decay_gamma = args.lr_decay_gamma\n\n # params\n params = []\n for key, value in dict(i2d.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n DOUBLE_BIAS = 0\n WEIGHT_DECAY = 4e-5\n params += [{'params': [value], 'lr':lr*(DOUBLE_BIAS + 1),\n 'weight_decay': 4e-5 and WEIGHT_DECAY or 0}]\n else:\n params += [{'params': [value], 'lr':lr, 'weight_decay': 4e-5}]\n\n # optimizer\n if args.optimizer == \"adam\":\n optimizer = torch.optim.Adam(params, lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=4e-5)\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(params, lr=lr, 
momentum=0.9)\n\n dddDepth_criterion = DDDDepthDiff()\n\n # resume\n if args.resume:\n load_name = os.path.join(args.output_dir,\n 'i2d_1_{}.pth'.format(args.checkepoch))\n print(\"loading checkpoint %s\" % (load_name))\n state = i2d.state_dict()\n checkpoint = torch.load(load_name)\n args.start_epoch = checkpoint['epoch']\n checkpoint = {k: v for k,\n v in checkpoint['model'].items() if k in state}\n state.update(checkpoint)\n i2d.load_state_dict(state)\n # optimizer.load_state_dict(checkpoint['optimizer'])\n # lr = optimizer.param_groups[0]['lr']\n if 'pooling_mode' in checkpoint.keys():\n POOLING_MODE = checkpoint['pooling_mode']\n print(\"loaded checkpoint %s\" % (load_name))\n del checkpoint\n torch.cuda.empty_cache()\n\n # constants\n iters_per_epoch = int(train_size / args.bs)\n\n grad_factor = 10.\n normal_factor = 1.\n # max_depth = 6571\n max_depth = 11000\n \n #for visualizing the train and validation loss\n train_loss_arr = []\n val_loss_arr = []\n\n for epoch in range(args.start_epoch, args.max_epochs):\n\n train_loss = 0 \n val_loss = 0\n # setting to train mode\n i2d.train()\n start = time.time()\n if epoch % (args.lr_decay_step + 1) == 0:\n adjust_learning_rate(optimizer, args.lr_decay_gamma)\n lr *= args.lr_decay_gamma\n\n img = Variable(torch.FloatTensor(1))\n z = Variable(torch.FloatTensor(1))\n if args.cuda:\n img = img.cuda()\n z = z.cuda()\n\n train_data_iter = iter(train_dataloader)\n show_image = True\n # saving results in a txt file\n save_dir = '/home/marian/workspace/monodepth_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/training_data/training_process_t3/'\n\n \n for step in range(iters_per_epoch):\n start = time.time()\n data = train_data_iter.next()\n\n img.resize_(data[0].size()).copy_(data[0])#*max_depth)\n # z.resize_(data[1].size()).copy_(data[1])#*max_depth)\n # one channel depth image\n z.resize_(data[0][:,0].unsqueeze(0).size()).copy_(data[0][:,0].unsqueeze(0))\n\n # max_depth = data[1].max()\n\n optimizer.zero_grad()\n z_fake = i2d(img)#*max_depth # * 6000 #z.max()\n \n delta, dddDepth_loss = dddDepth_criterion(z_fake,z,epoch,show_image)#*max_depth,z*max_depth)\n\n \n torch.autograd.set_detect_anomaly(True)\n\n\n loss = 1*dddDepth_loss\n # loss *= 10\n loss.backward()\n optimizer.step()\n writer.add_scalar(\"Loss/train\",loss,step)\n train_loss += loss.item()\n end = time.time()\n \n if show_image:\n o3d_pcd = o3d.geometry.PointCloud()\n\n ##############################\n #####save input cloud#########\n input_img = img[0][2].cpu().numpy()\n rgbArray = input_img*max_depth # np.array(img[0].cpu()*max_depth,np.uint16).transpose((1,2,0))\n plt.imshow(rgbArray, vmin=0, vmax=max_depth)\n plt.colorbar()\n plt.savefig(save_dir+'depthCV_'+str(epoch)+'.png', bbox_inches='tight')\n plt.close()\n # cv2.imwrite(save_dir+'depthirCV_'+str(epoch)+'.png',rgbArray)\n \n input_depth = img.clone() \n input_pcd = dddDepth_criterion.point_cloud(input_depth[0]).cpu().detach().numpy()\n o3d_pcd.points = o3d.utility.Vector3dVector(input_pcd*max_depth)\n o3d.io.write_point_cloud(save_dir+\"input_cloud\"+str(epoch)+\".pcd\", o3d_pcd)\n # a = cv2.imread(save_dir+'depthirCV_'+str(epoch)+'.png', cv2.IMREAD_UNCHANGED)\n # vmin, vmax = 0, 10000/65536.\n\n ####################\n #depth ground truth#\n plt.imshow(z[0].cpu().numpy().transpose((1,2,0))*max_depth, vmin=0, vmax=max_depth)\n plt.colorbar()\n plt.savefig(save_dir +'gt_'+str(epoch)+'.png',bbox_inches='tight')\n plt.close()\n # plt.imshow(z[0].cpu().numpy().transpose((1,2,0)))#, vmin=vmin, vmax=vmax)\n # 
plt.colorbar()\n # plt.savefig(save_dir +'unscaled_gt_'+str(epoch)+'.png',bbox_inches='tight')\n # plt.close()\n z_pcd = dddDepth_criterion.point_cloud(z[0]).cpu().detach().numpy()\n o3d_pcd.points = o3d.utility.Vector3dVector(z_pcd*max_depth)\n o3d.io.write_point_cloud(save_dir+\"gt_cloud\"+str(epoch)+\".pcd\", o3d_pcd)\n \n ##################\n #depth prediction#\n plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0))*max_depth, vmin=0, vmax=max_depth)\n plt.colorbar()\n plt.savefig(save_dir +'pred_'+str(epoch)+'.png',bbox_inches='tight')\n plt.close()\n # plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0)))#, vmin=vmin, vmax=vmax)\n # plt.colorbar()\n # plt.savefig(save_dir +'unscaled_pred_'+str(epoch)+'.png',bbox_inches='tight')\n # plt.close()\n z_fake_pcd = dddDepth_criterion.point_cloud(z_fake[0]).cpu().detach().numpy()\n o3d_pcd.points = o3d.utility.Vector3dVector(z_fake_pcd*max_depth)\n o3d.io.write_point_cloud(save_dir+\"pred_cloud\"+str(epoch)+\".pcd\", o3d_pcd)\n \n #### save difference ####\n plt.imshow(np.abs(z[0].cpu().numpy().transpose((1,2,0)) - z_fake[0].cpu().detach().numpy().transpose((1,2,0)))*max_depth)\n plt.colorbar()\n plt.savefig(save_dir+'diff_'+str(epoch)+'.png', bbox_inches='tight')\n plt.close()\n\n pred_img = z_fake[0][0].cpu().detach().numpy()\n\n plt.imshow(np.abs(input_img - pred_img)*max_depth)\n plt.colorbar()\n plt.savefig(save_dir+'input_pred_diff_'+str(epoch)+'.png', bbox_inches='tight')\n plt.close()\n\n # z_diff = dddDepth_criterion.point_cloud(torch.abs(z[0]-z_fake[0])).cpu().detach().numpy()\n # o3d_pcd.points = o3d.utility.Vector3dVector(z_diff*max_depth)\n # o3d.io.write_point_cloud(save_dir+\"diff_cloud\"+str(epoch)+\".pcd\", o3d_pcd)\n \n\n show_image=False\n\n\n # info\n if step % args.disp_interval == 0:\n # file_object = open(\"/home/marian/workspace/monodepth_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/results.txt\", 'a')\n print(\"[epoch %2d][iter %4d] loss: %.4f 3DDepthLoss: %.4f RMSE: %.4f lossX: %.4f lossY: %.4f lossZ: %.4f curv_loss: %.4f loss17: %.4f\"#RMSElog: %.4f Grad: %.4f Normals diff: %.4f\"\n % (epoch, step, loss, dddDepth_loss, delta[0], delta[1], delta[2], delta[3], delta[4], delta[5]))\n\n\n if epoch%4 == 0:\n save_name = os.path.join(args.output_dir, 'i2d_{}_{}.pth'.format(args.session, epoch))\n \n torch.save({'epoch': epoch+1,\n 'model': i2d.state_dict(),\n # 'optimizer': optimizer.state_dict(),\n },\n save_name)\n \n\n print('save model: {}'.format(save_name))\n print('time elapsed: %fs' % (end - start))\n\n # if epoch % 1 == 0:\n with torch.no_grad():\n # setting to eval mode\n i2d.eval()\n\n # img = Variable(torch.FloatTensor(1), volatile=True)\n # img = Variable(torch.FloatTensor(1),requires_grad=False)\n img = Variable(torch.FloatTensor(1))\n # z = Variable(torch.FloatTensor(1), volatile=True)\n # z = Variable(torch.FloatTensor(1), requires_grad=False)\n z = Variable(torch.FloatTensor(1))\n if args.cuda:\n img = img.cuda()\n z = z.cuda()\n\n print('evaluating...')\n\n rmse_accum = 0\n count = 0\n eval_data_iter = iter(eval_dataloader)\n for i, data_eval in enumerate(eval_data_iter):\n print(i, '/', len(eval_data_iter)-1)\n\n img.resize_(data_eval[0].size()).copy_(data_eval[0])\n # z.resize_(data_eval[1].size()).copy_(data_eval[1])\n z.resize_(data_eval[0][:,0].unsqueeze(0).size()).copy_(data_eval[0][:,0].unsqueeze(0))\n\n z_fake = i2d(img)\n\n \n \n delta_val, dddDepth_loss_eval = dddDepth_criterion(z_fake,z,epoch,show_image)\n loss_val = 1*dddDepth_loss_eval\n 
writer.add_scalar(\"Loss/validation\",loss_val,i)\n\n val_loss += loss_val.item()\n # print(\"Loss on test_data: \",loss_eval)\n if i==337:\n plt.imshow(z[0].cpu().numpy().transpose((1,2,0))*max_depth)#, vmin=vmin, vmax=vmax)\n plt.colorbar()\n plt.savefig('/home/marian/workspace/monodepth_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/multiP_training_data/main_multiP/val_vis_images/gt_'+str(epoch)+'.png',bbox_inches='tight')\n plt.close()\n\n plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0))*max_depth)#, vmin=vmin, vmax=vmax)\n plt.colorbar()\n plt.savefig('/home/marian/workspace/monodepth_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/multiP_training_data/main_multiP/val_vis_images/pred_'+str(epoch)+'.png',bbox_inches='tight')\n plt.close()\n\n\n train_loss = train_loss/iters_per_epoch #len(train_dataloader)\n val_loss = val_loss/len(eval_dataloader)\n\n train_loss_arr.append(train_loss)\n val_loss_arr.append(val_loss)\n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(epoch, train_loss, val_loss))\n\n file_object = open(\"/home/marian/workspace/monodepth_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/results_t3.txt\", 'a')\n # print(\"[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f\"# grad_loss: %.4f\"# normal_loss: %.4f\"\n # % (epoch, step, loss, depth_loss))#, grad_loss))#, normal_loss))\n # print(\"[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f\"\n # % (epoch, step, loss, depth_loss))\n file_object.write('\\nEpoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(epoch, train_loss, val_loss)) #grad_loss: %.4f\" # normal_loss: %.4f\" \n # % (epoch, step, loss, depth_loss))#, grad_loss))#, normal_loss))\n file_object.close()\n writer.flush()\n # print(\"[epoch %2d] RMSE_log: %.4f RMSE: %.4f\"\n # % (epoch, torch.sqrt(eval_loss/count), torch.sqrt(rmse_accum/count)))\n # with open('val.txt', 'a') as f:\n # f.write(\"[epoch %2d] RMSE_log: %.4f RMSE: %.4f\\n\"\n # % (epoch, torch.sqrt(eval_loss/count), torch.sqrt(rmse_accum/count)))\n \n\n writer.close()\n epochs = range(args.start_epoch, args.max_epochs)\n plt.plot(epochs, train_loss_arr, '-g', label='Training loss')\n plt.plot(epochs, val_loss_arr, 'b', label='Validation loss')\n plt.title('Training and Validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig(save_dir+\"t100_losses.png\")\n plt.close()" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros", "numpy.sum", "tensorflow.Session", "tensorflow.placeholder", "numpy.append" ], [ "torch.stack", "torch.isnan", "numpy.tile", "numpy.multiply", "numpy.where", "torch.cuda.is_available", "torch.load", "torch.multiply", "torch.exp", "torch.where", "matplotlib.pyplot.colorbar", "torch.sqrt", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "torch.FloatTensor", "torch.abs", "torch.tensor", "torch.utils.data.DataLoader", "numpy.sqrt", "numpy.cross", "torch.utils.tensorboard.SummaryWriter", "matplotlib.use", "matplotlib.pyplot.title", "torch.optim.SGD", "matplotlib.pyplot.close", "torch.autograd.set_detect_anomaly", "torch.cuda.empty_cache", "torch.cross", "torch.matmul", "numpy.isnan", "torch.arange", "matplotlib.pyplot.xlabel", "torch.nn.functional.interpolate", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "torch.optim.Adam", "torch.no_grad", "matplotlib.pyplot.ylabel", "numpy.abs", "torch.linalg.norm", "matplotlib.pyplot.imshow" ] ]
mattpitkin/samplers-demo
[ "127c869b870ce3e6b96e59bda0999bf2cc7d3849" ]
[ "content/downloads/code/test_zeus.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExample of running Zeus to fit the parameters of a straight line.\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport os\nimport sys\nimport numpy as np\n\n# import zeus\nimport zeus\n\n# import model and data\nfrom createdata import *\n\ndef logposterior(theta, data, sigma, x):\n \"\"\"\n The natural logarithm of the joint posterior.\n \n Args:\n theta (tuple): a sample containing individual parameter values\n data (list): the set of data/observations\n sigma (float): the standard deviation of the data points\n x (list): the abscissa values at which the data/model is defined\n \"\"\"\n \n lp = logprior(theta) # get the prior\n \n # if the prior is not finite return a probability of zero (log probability of -inf)\n if not np.isfinite(lp):\n return -np.inf\n \n # return the likeihood times the prior (log likelihood plus the log prior)\n return lp + loglikelihood(theta, data, sigma, x)\n\n\ndef loglikelihood(theta, data, sigma, x):\n \"\"\"\n The natural logarithm of the joint likelihood.\n \n Args:\n theta (tuple): a sample containing individual parameter values\n data (list): the set of data/observations\n sigma (float): the standard deviation of the data points\n x (list): the abscissa values at which the data/model is defined\n \n Note:\n We do not include the normalisation constants (as discussed above).\n \"\"\"\n \n # unpack the model parameters from the tuple\n m, c = theta\n \n # evaluate the model (assumes that the straight_line model is defined as above)\n md = straight_line(x, m, c)\n \n # return the log likelihood\n return -0.5*np.sum(((md - data)/sigma)**2)\n\n\ndef logprior(theta):\n \"\"\"\n The natural logarithm of the prior probability.\n \n Args:\n theta (tuple): a sample containing individual parameter values\n \n Note:\n We can ignore the normalisations of the prior here.\n \"\"\"\n \n lp = 0.\n \n # unpack the model parameters from the tuple\n m, c = theta\n \n # uniform prior on c\n cmin = -10. # lower range of prior\n cmax = 10. # upper range of prior\n \n # set prior to 1 (log prior to 0) if in the range and zero (-inf) outside the range \n lp = 0. if cmin < c < cmax else -np.inf\n \n # Gaussian prior on m\n mmu = 0. # mean of the Gaussian prior\n msigma = 10. # standard deviation of the Gaussian prior\n lp -= 0.5*((m - mmu)/msigma)**2\n \n return lp\n\n\nNens = 100 # number of ensemble points\n\nmmu = 0. # mean of the Gaussian prior\nmsigma = 10. # standard deviation of the Gaussian prior\n\nmini = np.random.normal(mmu, msigma, Nens) # initial m points\n\ncmin = -10. # lower range of prior\ncmax = 10. # upper range of prior\n\ncini = np.random.uniform(cmin, cmax, Nens) # initial c points\n\ninisamples = np.array([mini, cini]).T # initial samples\n\nndims = inisamples.shape[1] # number of parameters/dimensions\n\nNburnin = 500 # number of burn-in samples\nNsamples = 500 # number of final posterior samples\n\n# set additional args for the posterior (the data, the noise std. 
dev., and the abscissa)\nargslist = (data, sigma, x)\n\n# set up the sampler\nsampler = zeus.EnsembleSampler(Nens, ndims, logposterior, args=argslist)\n\n# pass the initial samples and total number of samples required\nsampler.run_mcmc(inisamples, Nsamples+Nburnin);\n\n# extract the samples (removing the burn-in)\npostsamples = sampler.get_chain(flat=True, discrd=Nburnin)\n\n# plot posterior samples (if corner.py is installed)\ntry:\n import matplotlib as mpl\n mpl.use(\"Agg\") # force Matplotlib backend to Agg\n import corner # import corner.py\nexcept ImportError:\n sys.exit(1)\n\nprint('Number of posterior samples is {}'.format(postsamples.shape[0]))\n\nfig = corner.corner(postsamples, labels=[r\"$m$\", r\"$c$\"], truths=[m, c])\nfig.savefig('zeus.png')\n\n" ]
[ [ "matplotlib.use", "numpy.random.normal", "numpy.array", "numpy.sum", "numpy.random.uniform", "numpy.isfinite" ] ]
BigRedT/gpv-1
[ "6a0c2173b44961cb492d00f94864c461aa77641d" ]
[ "exp/gpv/models/detr.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport utils.box_ops as box_ops\nfrom utils.detr_misc import (NestedTensor, nested_tensor_from_tensor_list,\n accuracy, get_world_size, interpolate,\n is_dist_avail_and_initialized)\n\nfrom .backbone import build_backbone\nfrom .transformer import build_transformer\nfrom utils.matcher import build_matcher\n\n\nclass DETR(nn.Module):\n \"\"\" This is the DETR module that performs object detection \"\"\"\n def __init__(self, backbone, transformer, num_classes, num_queries, last_layer_only, aux_loss=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n DETR can detect in a single image. For COCO, we recommend 100 queries.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n \"\"\"\n super().__init__()\n self.num_queries = num_queries\n self.transformer = transformer\n hidden_dim = transformer.d_model\n self.class_embed = nn.Linear(hidden_dim, num_classes + 1)\n self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.query_embed = nn.Embedding(num_queries, hidden_dim)\n self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)\n self.backbone = backbone\n self.last_layer_only = last_layer_only\n self.aux_loss = aux_loss\n\n def forward(self, samples: NestedTensor):\n \"\"\" The forward expects a NestedTensor, which consists of:\n - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n It returns a dict with the following elements:\n - \"pred_relevance_logits\": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - \"aux_outputs\": Optional, only returned when auxilary losses are activated. 
It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n \n features, pos = self.backbone(samples)\n\n src, mask = features[-1].decompose()\n assert mask is not None\n hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]\n L,B,Q,D = hs.size()\n if (self.last_layer_only is True) or (self.training is not True):\n hs = hs[-1].view(1,B,Q,D)\n outputs_class = self.class_embed(hs)\n outputs_coord = self.bbox_embed(hs).sigmoid()\n out = {'pred_relevance_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1], 'detr_hs': hs}\n if self.aux_loss:\n out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_relevance_logits': a, 'pred_boxes': b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef create_detr(cfg):\n backbone = build_backbone(cfg)\n\n transformer = build_transformer(cfg)\n\n model = DETR(\n backbone,\n transformer,\n num_classes=cfg.num_classes,\n num_queries=cfg.num_queries,\n last_layer_only=cfg.last_layer_only,\n aux_loss=cfg.aux_loss)\n \n return model" ]
[ [ "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.Embedding" ] ]
NAshwinKumar/deep-read
[ "aae93c62a0c85ce31eb0e2d759d4d95d7d076c5d" ]
[ "src/app/deep-read/core/video_to_text/screenclassifier.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport os\nimport numpy as np\nimport pandas as pd\n\n\nclass ScreenClassifier:\n\n def __init__(self):\n self.model = tf.keras.models.load_model(\"data/training/model_v3_xception\")\n self.imagedatagen = ImageDataGenerator(rescale=1/255)\n\n def predict(self,directory):\n \"\"\"\n returns filenames that are slides\n \"\"\"\n\n filename = os.listdir(directory)\n dataframe = pd.DataFrame({'filename':filename})\n\n predict_generator = self.imagedatagen.flow_from_dataframe(\n dataframe,\n directory=directory,\n x_col=\"filename\",\n target_size=(299, 299),\n class_mode=None,\n batch_size=32,\n shuffle=False\n )\n\n prob = self.model.predict(predict_generator).flatten()\n slides_index = np.array(np.where(prob>0.7)).flatten()\n\n return dataframe.iloc[slides_index,0].tolist()\n " ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.where", "pandas.DataFrame" ] ]
udday2014/HebbianLearning
[ "e0d17e53e3db8ce54b8fdd901702d2d6e0633732" ]
[ "models/gdes/top_6l/top2.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom neurolab import params as P\nfrom neurolab import utils\nfrom neurolab.model import Model\n\n\nclass Net(Model):\n\t# Layer names\n\tCONV3 = 'conv3'\n\tRELU3 = 'relu3'\n\tPOOL3 = 'pool3'\n\tBN3 = 'bn3'\n\tCONV4 = 'conv4'\n\tRELU4 = 'relu4'\n\tBN4 = 'bn4'\n\tCONV_OUTPUT = BN4 # Symbolic name for the last convolutional layer providing extracted features\n\tFLAT = 'flat'\n\tFC5 = 'fc5'\n\tRELU5 = 'relu5'\n\tBN5 = 'bn5'\n\tFC6 = 'fc6'\n\tCLASS_SCORES = 'class_scores' # Name of the classification output providing the class scores\n\t\n\tdef __init__(self, config, input_shape=None):\n\t\tsuper(Net, self).__init__(config, input_shape)\n\t\t\n\t\tself.NUM_CLASSES = P.GLB_PARAMS[P.KEY_DATASET_METADATA][P.KEY_DS_NUM_CLASSES]\n\t\tself.DROPOUT_P = config.CONFIG_OPTIONS.get(P.KEY_DROPOUT_P, 0.5)\n\t\t\n\t\t# Here we define the layers of our network\n\t\t\n\t\t# Third convolutional layer\n\t\tself.conv3 = nn.Conv2d(self.get_input_shape()[0], 192, 3) # 128 x channels, 192 output channels, 3x3 convolutions\n\t\tself.bn3 = nn.BatchNorm2d(192) # Batch Norm layer\n\t\t# Fourth convolutional layer\n\t\tself.conv4 = nn.Conv2d(192, 256, 3) # 192 x channels, 256 output channels, 3x3 convolutions\n\t\tself.bn4 = nn.BatchNorm2d(256) # Batch Norm layer\n\t\t\n\t\tself.CONV_OUTPUT_SIZE = utils.shape2size(utils.tens2shape(self.get_dummy_fmap()[self.CONV_OUTPUT]))\n\t\t\n\t\t# FC Layers\n\t\tself.fc5 = nn.Linear(self.CONV_OUTPUT_SIZE, 4096) # conv_output_size-dimensional x, 4096-dimensional output\n\t\tself.bn5 = nn.BatchNorm1d(4096) # Batch Norm layer\n\t\tself.fc6 = nn.Linear(4096, self.NUM_CLASSES) # 4096-dimensional x, NUM_CLASSES-dimensional output (one per class)\n\t\n\tdef get_conv_output(self, x):\n\t\t# Layer 3: Convolutional + ReLU activations + 2x2 Max Pooling + Batch Norm\n\t\tconv3_out = self.conv3(x)\n\t\trelu3_out = F.relu(conv3_out)\n\t\tpool3_out = F.max_pool2d(relu3_out, 2)\n\t\tbn3_out = self.bn3(pool3_out)\n\t\t\n\t\t# Layer 4: Convolutional + ReLU activations + Batch Norm\n\t\tconv4_out = self.conv4(bn3_out)\n\t\trelu4_out = F.relu(conv4_out)\n\t\tbn4_out = self.bn4(relu4_out)\n\n\t\t# Build dictionary containing outputs of each layer\n\t\tconv_out = {\n\t\t\tself.CONV3: conv3_out,\n\t\t\tself.RELU3: relu3_out,\n\t\t\tself.POOL3: pool3_out,\n\t\t\tself.BN3: bn3_out,\n\t\t\tself.CONV4: conv4_out,\n\t\t\tself.RELU4: relu4_out,\n\t\t\tself.BN4: bn4_out\n\t\t}\n\t\treturn conv_out\n\t\n\t# Here we define the flow of information through the network\n\tdef forward(self, x):\n\t\t# Compute the output feature map from the convolutional layers\n\t\tout = self.get_conv_output(x)\n\t\t\n\t\t# Stretch out the feature map before feeding it to the FC layers\n\t\tflat = out[self.CONV_OUTPUT].view(-1, self.CONV_OUTPUT_SIZE)\n\t\t\n\t\t# Fifth Layer: FC with ReLU activations + Batch Norm\n\t\tfc5_out = self.fc5(flat)\n\t\trelu5_out = F.relu(fc5_out)\n\t\tbn5_out = self.bn5(relu5_out)\n\t\t\n\t\t# Sixth Layer: dropout + FC, outputs are the class scores\n\t\tfc6_out = self.fc6(F.dropout(bn5_out, p=self.DROPOUT_P, training=self.training))\n\t\t\n\t\t# Build dictionary containing outputs from convolutional and FC layers\n\t\tout[self.FLAT] = flat\n\t\tout[self.FC5] = fc5_out\n\t\tout[self.RELU5] = relu5_out\n\t\tout[self.BN5] = bn5_out\n\t\tout[self.FC6] = fc6_out\n\t\tout[self.CLASS_SCORES] = {P.KEY_CLASS_SCORES: fc6_out}\n\t\treturn out\n" ]
[ [ "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.functional.dropout", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.nn.functional.relu", "torch.nn.functional.max_pool2d" ] ]
AIandSocialGoodLab/FeatureDeceptionGame
[ "0225291dec268f15c94a366228c6741da03b00ab", "0225291dec268f15c94a366228c6741da03b00ab" ]
[ "1i/test.py", "1l/nnIpoptSolve.py" ]
[ "from game import Game\nfrom approxMILP import ApproxMILP\nfrom approxMILPBS import ApproxMILPBS\nfrom learn import LearnNN\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\n\nif __name__ == '__main__':\n seed = 99\n n = 5\n K = 12\n L = 100\n eps = 0.0001\n\n numExp = 20\n\n dList = np.linspace(120, 2280, 10, dtype=int)\n numD = dList.size\n\n meadResults = -1 * np.ones((numD,12))\n stdResults = -1 * np.ones((numD,12))\n for di in range(numD):\n numData = int(dList[di])\n dResults = -1 * np.ones((numExp, 12))\n for seed in np.arange(numExp):\n print('numData = ', numData, ', seed = ', seed)\n trueG = Game(seed, n, K)\n\n learnModel = LearnNN(trueG, numData)\n GD_model, CF_model = learnModel.learn()\n\n\n\n # Test closed form learning\n planG = copy.deepcopy(trueG)\n planG.setModel(CF_model)\n\n planModel = ApproxMILP(planG, int(L))\n planModel.solve()\n planModel.evaluateSolution(trueG)\n\n planModelBS = ApproxMILPBS(planG, int(L))\n planModelBS.solveBS()\n planModelBS.evaluateSolution(trueG)\n\n dResults[seed, 0] = planModel.trueobj\n dResults[seed, 1] = planModelBS.trueobj\n\n dResults[seed, 3] = np.abs(planModel.trueobj - planModel.obj) / planModel.trueobj\n dResults[seed, 4] = np.abs(planModelBS.trueobj - planModelBS.obj) / planModelBS.trueobj\n\n\n\n # Test gradient descent learning\n planG = copy.deepcopy(trueG)\n planG.setModel(GD_model)\n\n planModel = ApproxMILP(planG, int(L))\n planModel.solve()\n planModel.evaluateSolution(trueG)\n\n planModelBS = ApproxMILPBS(planG, int(L))\n planModelBS.solveBS()\n planModelBS.evaluateSolution(trueG)\n\n\n\n dResults[seed, 6] = planModel.trueobj\n dResults[seed, 7] = planModelBS.trueobj\n\n dResults[seed, 9] = np.abs(planModel.trueobj - planModel.obj) / planModel.trueobj\n dResults[seed, 10] = np.abs(planModelBS.trueobj - planModelBS.obj) / planModelBS.trueobj\n truePlanModelBS = ApproxMILPBS(trueG, int(L))\n truePlanModelBS.solveBS()\n\n dResults[seed, 0] = (dResults[seed, 0] - truePlanModelBS.obj) / truePlanModelBS.obj\n dResults[seed, 1] = (dResults[seed, 1] - truePlanModelBS.obj) / truePlanModelBS.obj\n dResults[seed, 6] = (dResults[seed, 6] - truePlanModelBS.obj) / truePlanModelBS.obj\n dResults[seed, 7] = (dResults[seed, 7] - truePlanModelBS.obj) / truePlanModelBS.obj\n\n print('True Optimal: ', truePlanModelBS.obj, 'CF Optimal: ', dResults[seed, 1], 'GD Optimal: ', dResults[seed, 7])\n\n dResults[seed, 11] = learnModel.getCFBound()\n np.savetxt(str(n)+'Results.csv', dResults, delimiter=',')\n\n meadResults[di,:] = np.mean(dResults, axis=0)\n stdResults[di,:] = np.std(dResults, axis=0)\n\n np.savetxt('meadResults.csv', meadResults, delimiter=',')\n np.savetxt('stdResults.csv', stdResults, delimiter=',')\n\n\n\n meadResults = np.loadtxt('meadResults.csv', delimiter=',')\n stdResults = np.loadtxt('stdResults.csv', delimiter=',')\n\n fig, (timeplt) = plt.subplots(1, 1)\n ind = np.arange(numD) # the x locations for the groups\n width = 0.2 # the width of the bars\n time1 = timeplt.bar(ind - 3 * width / 2, meadResults[:,0], width, yerr=stdResults[:,0],\n color='Blue', label='MILP-CF')\n time2 = timeplt.bar(ind - width / 2, meadResults[:,1], width, yerr=stdResults[:,1],\n color='Red', label='MILPBS-CF')\n time3 = timeplt.bar(ind + width / 2, meadResults[:,6], width, yerr=stdResults[:,6],\n color='Yellow', label='MILP-GD')\n time4 = timeplt.bar(ind + 3 * width / 2, meadResults[:,7], width, yerr=stdResults[:,7],\n color='Green', label='MILPBS-GD')\n\n timeplt.set_ylabel('Solution Gap')\n timeplt.set_xticks(ind)\n 
timeplt.set_xticklabels(dList)\n timeplt.set_xlabel('Number of Data Points')\n timeplt.legend()\n\n\n\n plt.show()\n", "# from __future__ import division\nfrom pyomo.environ import *\nfrom pyomo.opt import SolverStatus, TerminationCondition\nimport numpy as np\nimport time\nfrom nn import MyLossReg\nimport torch\n\nclass nnIpoptSolve:\n def __init__(self, game):\n self.initStart = time.time()\n self.game = game\n self.n = game.n\n self.K = game.K\n self.xhat = game.xhat\n self.us = game.us\n self.costs = game.costs\n self.budget = game.budget\n self.f = game.f\n self.layers = len(game.f.input_linear)\n self.weights = list()\n for i in range(self.layers):\n self.weights.append(np.array(game.f.input_linear[i].weight.data))\n self.bias = list()\n for i in range(self.layers-1):\n self.bias.append(np.array(game.f.input_linear[i].bias.data))\n self.generateNLP()\n self.f.train = False\n self.initTime = time.time() - self.initStart\n\n def generateNLP(self):\n self.model = ConcreteModel()\n self.model.xSet = Set(initialize=range(self.n)) * Set(initialize=range(self.K))\n self.model.nSet = Set(initialize=range(self.n))\n self.model.kSet = Set(initialize=range(self.K))\n\n def xb(model, i, k):\n return (0, 1)\n self.model.x = Var(self.model.nSet, self.model.kSet, domain=NonNegativeReals, bounds=xb)\n\n for i in range(self.n):\n for k in range(self.K):\n self.model.x[i,k].value = self.xhat[i,k]\n\n\n score = list()\n for i in range(self.n):\n lin = list()\n for kk in range(self.K):\n lin.append(self.model.x[i,kk])\n for l in range(self.layers):\n W = self.weights[l]\n\n lout = list()\n if l != 0:\n b = self.bias[l-1]\n for ii in range(len(lin)):\n lin[ii] = tanh(lin[ii] + b[ii])\n for k in range(W.shape[0]):\n lout.append(sum(W[k,j] * lin[j] for j in range(W.shape[1])))\n lin = lout.copy()\n score.append(lout[0])\n exprNum = sum(exp(score[i]) * self.us[i] for i in range(self.n))\n exprDen = sum(exp(score[i]) for i in range(self.n))\n\n\n expr = exprNum / exprDen\n self.model.obj = Objective(expr=expr, sense=minimize)\n self.model.h = Var(self.model.nSet, self.model.kSet, domain=NonNegativeReals)\n self.model.absConstraint = ConstraintList()\n for i in range(self.n):\n for k in range(self.K):\n self.model.absConstraint.add(abs(self.model.x[i,k] - self.xhat[i,k]) == self.model.h[i,k])\n self.model.budgetConstraint = Constraint(expr = sum(self.model.h[i,k] * self.costs[i,k] for i in range(self.n) for k in range(self.K)) <= self.budget)\n\n\n def solve(self):\n self.solveStart = time.time()\n solver = SolverFactory('ipopt') # SolverFactory comes from the pyomo.environ star import\n solver.options['print_level'] = 0\n solver.options['max_iter'] = int(100)\n solver.options['max_cpu_time'] = int(60)\n solver.options['warm_start_init_point'] = 'yes'\n result = solver.solve(self.model, tee = True)\n if (result.solver.status == SolverStatus.ok) and (result.solver.termination_condition == TerminationCondition.optimal):\n self.feasible = True\n self.extractSolution(result)\n self.solveTime = time.time() - self.solveStart\n return self.optVal, self.xsol\n else:\n self.feasible = False\n self.extractSolution(result)\n self.solveTime = time.time() - self.solveStart\n return self.optVal, self.xsol\n\n def extractSolution(self, solution):\n self.optVal = value(self.model.obj)\n self.xsol = [[value(self.model.x[i,k]) for k in range(self.K)] for i in range(self.n)]\n s = 0\n for i in range(self.n):\n for j in range(self.K):\n s = s + self.costs[i,j] * abs(self.xsol[i][j] - self.xhat[i,j])\n def evaluateSolution(self, trueGame):\n true_model = trueGame.f\n
self.regCoe = 1\n criterion = MyLossReg(self.regCoe, torch.tensor(self.costs, dtype=torch.float), torch.tensor(self.xhat, dtype=torch.float)\n , torch.tensor(self.budget, dtype=torch.float))\n self.xsol = torch.tensor(self.xsol, dtype=torch.float)\n y = true_model(self.xsol)\n y = y.squeeze()\n self.trueoptVal, self.trueActualCost = criterion(y, torch.tensor(self.us, dtype=torch.float), self.xsol)\n self.trueoptVal = self.trueoptVal.item()\n\n y = true_model(torch.tensor(self.xhat, dtype=torch.float))\n y = y.squeeze()\n self.trueoriVal, self.trueoriCost = criterion(y, torch.tensor(self.us, dtype=torch.float), torch.tensor(self.xhat, dtype=torch.float))\n self.trueoriVal = self.trueoriVal.item()\n print('ipopt optimized loss: ', self.trueoptVal, ', initial loss: ', self.trueoriVal)\n\n return self.trueoptVal\n" ]
[ [ "numpy.savetxt", "numpy.ones", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.std", "numpy.loadtxt", "numpy.arange", "numpy.abs", "matplotlib.pyplot.show", "numpy.linspace" ], [ "numpy.array", "torch.tensor" ] ]
hanshiyi/POSBERT
[ "4d290945371b0043b3370459b9436364311320f8" ]
[ "pytorch_pretrained_bert/origin_modeling.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport copy\nimport json\nimport math\nimport logging\nimport tarfile\nimport tempfile\nimport shutil\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .file_utils import cached_path\n\nlogger = logging.getLogger(__name__)\n\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz\",\n}\nCONFIG_NAME = 'bert_config.json'\nWEIGHTS_NAME = 'pytorch_model.bin'\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. 
If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n print(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.\")\n class BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n 
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n 
super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n self.intermediate_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n all_encoder_layers = []\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, 
self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.transform_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(bert_model_embedding_weights.size(1),\n bert_model_embedding_weights.size(0),\n bias=False)\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass PreTrainedBertModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n def __init__(self, config, *inputs, **kwargs):\n super(PreTrainedBertModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
\"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-large-cased`\n . `bert-base-multilingual-uncased`\n . `bert-base-multilingual-cased`\n . `bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]\n else:\n archive_file = pretrained_model_name\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except FileNotFoundError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n if resolved_archive_file == archive_file:\n logger.info(\"loading archive file {}\".format(archive_file))\n else:\n logger.info(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file):\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n logger.info(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n logger.info(\"config {}\".format(type(config)))\n logger.info(\"Model config {}\".format(config))\n test_config = copy.copy(config)\n test_config.max_position_embeddings = 128\n test_config.hidden_size = 256\n logger.info(\"Model pos config {}\".format(test_config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if state_dict is None:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path)\n\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n load(model, prefix='' if hasattr(model, 'bert') else 'bert.')\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n return model\n\n\nclass BertModel(PreTrainedBertModel):\n \"\"\"BERT model (\"Bidirectional Embedding Representations from a Transformer\").\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLF`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layers = 
self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\n\nclass BertForPreTraining(PreTrainedBertModel):\n \"\"\"BERT model with pre-training heads.\n This module comprises the BERT model followed by the two pre-training heads:\n - the masked language modeling head, and\n - the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `masked_lm_labels` and `next_sentence_label` are not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `masked_lm_labels` or `next_sentence_label` is `None`:\n Outputs a tuple comprising\n - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and\n - the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForPreTraining(config)\n masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):\n sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores, 
seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n return total_loss\n else:\n return prediction_scores, seq_relationship_score\n\n\nclass BertForMaskedLM(PreTrainedBertModel):\n \"\"\"BERT model with the masked language modeling head.\n This module comprises the BERT model followed by the masked language modeling head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. 
All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n\n Outputs:\n if `masked_lm_labels` is not `None`:\n Outputs the masked language modeling loss.\n if `masked_lm_labels` is `None`:\n Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForMaskedLM(config)\n masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores = self.cls(sequence_output)\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n return masked_lm_loss\n else:\n return prediction_scores\n\n\nclass BertForNextSentencePrediction(PreTrainedBertModel):\n \"\"\"BERT model with next sentence prediction head.\n This module comprises the BERT model followed by the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `next_sentence_label` is not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `next_sentence_label` is `None`:\n Outputs the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForNextSentencePrediction(config)\n seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n seq_relationship_score = self.cls( pooled_output)\n\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n return next_sentence_loss\n else:\n return seq_relationship_score\n\n\nclass BertForSequenceClassification(PreTrainedBertModel):\n \"\"\"BERT model for classification.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForSequenceClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForSequenceClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForMultipleChoice(PreTrainedBertModel):\n \"\"\"BERT model for multiple choice tasks.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_choices`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`\n and type 1 corresponds to a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_choices].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])\n input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])\n token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_choices = 2\n\n model = BertForMultipleChoice(config, num_choices)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_choices=2):\n super(BertForMultipleChoice, self).__init__(config)\n self.num_choices = num_choices\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))\n _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, self.num_choices)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n return loss\n else:\n return reshaped_logits\n\n\nclass BertForTokenClassification(PreTrainedBertModel):\n \"\"\"BERT model for token-level classification.\n This module is composed of the BERT model with a linear layer on top of\n the full hidden state of the last layer.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, sequence_length, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForTokenClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForQuestionAnswering(PreTrainedBertModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.bert = BertModel(config)\n # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version\n # self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.qa_outputs = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, args=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n def cornerNet_loss(start_logits, start_positions, end_logits, end_positions):\n '''\n start_logits [batch_size, sequence_length]\n start_positions [batch_size, sequence_length]\n end_logits [batch_size, sequence_length]\n end_positions [batch_size, sequence_length]\n '''\n t = torch.FloatTensor([0.7])\n # answers_len ()\n answers_len = (end_positions.argmax(1) - start_positions.argmax(1) + 1).float()\n # when sigma is a large num, the loss will cross entropy\n # [batch_size]\n sigma = (answers_len / t - answers_len) / args.theta\n #sigma = torch.ones(answers_len.size()).float() * 0.1\n def one_cornerNet_loss(logits, positions, alpha:float=2, beta:float=4):\n batch_size, length = logits.size()\n logits = logits.float()\n positions = positions.float()\n # position_is_zero [batch_size, sequence_length]\n position_is_zero = torch.arange(0, positions.shape[1]).float().view(1,-1).expand(batch_size,-1) \\\n - positions.argmax(1).float().view(-1,1).expand(-1,length)\n y = torch.exp(-position_is_zero**2/(sigma**2).view(-1,1).expand(-1,length)/2)\n p = torch.softmax(logits, dim=-1)\n # p = torch.exp(logits)/torch.sum(torch.exp(logits),1).view(-1,1).expand(-1,length)\n # the loss for positions = 1\n loss = 
torch.sum(positions.cuda() * torch.pow(1-p, alpha).cuda() * torch.log(p).cuda())\n # the loss for positions ~= 1\n loss += torch.sum(torch.pow(1-y, beta).cuda() * torch.pow(p, alpha).cuda() * torch.log(1-p).cuda())\n loss = -loss\n return loss\n return 0.5*(one_cornerNet_loss(start_logits, start_positions, args.alpha, args.beta) +\n one_cornerNet_loss(end_logits, end_positions, args.alpha, args.beta)) \n\n\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n if args:\n total_loss = cornerNet_loss(start_logits, torch.eye(start_logits.size(1))[start_positions],\n end_logits, torch.eye(end_logits.size(1))[end_positions])\n else:\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n return total_loss\n else:\n return start_logits, end_logits\n" ]
[ [ "torch.nn.Linear", "torch.ones", "torch.load", "torch.nn.CrossEntropyLoss", "torch.sigmoid", "torch.sqrt", "torch.nn.Softmax", "torch.FloatTensor", "torch.zeros_like", "torch.zeros", "torch.nn.Tanh", "torch.log", "torch.matmul", "torch.pow", "torch.nn.Dropout", "torch.arange", "torch.softmax", "torch.ones_like", "torch.nn.Embedding" ] ]
est271/geomstats
[ "225fd9ecf8c5c681cdee5cb9ce0951a9f613aa1e" ]
[ "examples/plot_geodesics_s2.py" ]
[ "\"\"\"\nPlot a geodesic on the sphere S2\n\"\"\"\n\nimport logging\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport geomstats.visualization as visualization\nfrom geomstats.geometry.hypersphere import Hypersphere\n\nSPHERE2 = Hypersphere(dimension=2)\nMETRIC = SPHERE2.metric\n\n\ndef main():\n initial_point = [1., 0., 0.]\n initial_tangent_vec = SPHERE2.projection_to_tangent_space(\n vector=[1., 2., 0.8], base_point=initial_point)\n geodesic = METRIC.geodesic(\n initial_point=initial_point,\n initial_tangent_vec=initial_tangent_vec)\n\n n_steps = 10\n t = np.linspace(0, 1, n_steps)\n\n points = geodesic(t)\n visualization.plot(points, space='S2')\n plt.show()\n\n\nif __name__ == \"__main__\":\n if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':\n logging.info('Examples with visualizations are only implemented '\n 'with numpy backend.\\n'\n 'To change backend, write: '\n 'export GEOMSTATS_BACKEND = \\'numpy\\'.')\n else:\n main()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.linspace" ] ]
imrachbini/Efficient-CapsNet
[ "82b852002e3d268030d89ccfec586c6f9e2b2b2e" ]
[ "models/efficient_capsnet_graph_mnist.py" ]
[ "# Copyright 2021 Vittorio Mazzia & Francesco Salvetti. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport tensorflow as tf\nfrom utils.layers import PrimaryCaps, FCCaps, Length, Mask\n\n\ndef efficient_capsnet_graph(input_shape):\n \"\"\"\n Efficient-CapsNet graph architecture.\n\n Parameters\n ---------- \n input_shape: list\n network input shape\n \"\"\"\n inputs = tf.keras.Input(input_shape)\n \n x = tf.keras.layers.Conv2D(32,5,activation=\"relu\", padding='valid', kernel_initializer='he_normal')(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Conv2D(64,3, activation='relu', padding='valid', kernel_initializer='he_normal')(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Conv2D(64,3, activation='relu', padding='valid', kernel_initializer='he_normal')(x) \n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Conv2D(128,3,2, activation='relu', padding='valid', kernel_initializer='he_normal')(x) \n x = tf.keras.layers.BatchNormalization()(x)\n x = PrimaryCaps(128, 9, 16, 8)(x)\n \n digit_caps = FCCaps(10,16)(x)\n \n digit_caps_len = Length(name='length_capsnet_output')(digit_caps)\n\n return tf.keras.Model(inputs=inputs,outputs=[digit_caps, digit_caps_len], name='Efficient_CapsNet')\n\n\ndef generator_graph(input_shape):\n \"\"\"\n Generator graph architecture.\n\n Parameters\n ---------- \n input_shape: list\n network input shape\n \"\"\"\n inputs = tf.keras.Input(16*10)\n \n x = tf.keras.layers.Dense(512, activation='relu', kernel_initializer='he_normal')(inputs)\n x = tf.keras.layers.Dense(1024, activation='relu', kernel_initializer='he_normal')(x)\n x = tf.keras.layers.Dense(np.prod(input_shape), activation='sigmoid', kernel_initializer='glorot_normal')(x)\n x = tf.keras.layers.Reshape(target_shape=input_shape, name='out_generator')(x)\n \n return tf.keras.Model(inputs=inputs, outputs=x, name='Generator')\n\n\ndef build_graph(input_shape, mode, verbose):\n \"\"\"\n Efficient-CapsNet graph architecture with reconstruction regularizer. 
The network can be initialize with different modalities.\n\n Parameters\n ---------- \n input_shape: list\n network input shape\n mode: str\n working mode ('train', 'test' & 'play')\n verbose: bool\n \"\"\"\n inputs = tf.keras.Input(input_shape)\n y_true = tf.keras.layers.Input(shape=(10,))\n noise = tf.keras.layers.Input(shape=(10, 16))\n\n efficient_capsnet = efficient_capsnet_graph(input_shape)\n\n if verbose:\n efficient_capsnet.summary()\n print(\"\\n\\n\")\n \n digit_caps, digit_caps_len = efficient_capsnet(inputs)\n noised_digitcaps = tf.keras.layers.Add()([digit_caps, noise]) # only if mode is play\n \n masked_by_y = Mask()([digit_caps, y_true]) \n masked = Mask()(digit_caps)\n masked_noised_y = Mask()([noised_digitcaps, y_true])\n \n generator = generator_graph(input_shape)\n\n if verbose:\n generator.summary()\n print(\"\\n\\n\")\n\n x_gen_train = generator(masked_by_y)\n x_gen_eval = generator(masked)\n x_gen_play = generator(masked_noised_y)\n\n if mode == 'train': \n return tf.keras.models.Model([inputs, y_true], [digit_caps_len, x_gen_train], name='Efficinet_CapsNet_Generator')\n elif mode == 'test':\n return tf.keras.models.Model(inputs, [digit_caps_len, x_gen_eval], name='Efficinet_CapsNet_Generator')\n elif mode == 'play':\n return tf.keras.models.Model([inputs, y_true, noise], [digit_caps_len, x_gen_play], name='Efficinet_CapsNet_Generator')\n else:\n raise RuntimeError('mode not recognized')\n" ]
[ [ "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.models.Model", "tensorflow.keras.Model", "numpy.prod", "tensorflow.keras.Input", "tensorflow.keras.layers.BatchNormalization" ] ]
FreemanTang/yolov3_tiny_tensorflow
[ "ea7a01e44db77e59669cf7ea6892ff5d5126461e" ]
[ "test_single_image_yolov3_tiny.py" ]
[ "####\n\n# edicted by Huangdebo\n# test the model using ckpt file\n\n# CMD:python test_single_image.py --input_image bird.jpg --class_name_path ./data/COCO.name --restore_path ./checkpoint/yolov3_tiny_COCO/model-step_30000_loss_0.075246_lr_0.0003061015\n# ***\n\nfrom __future__ import division, print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport cv2\n\nfrom utils.misc_utils import parse_anchors, read_class_names\nfrom utils.nms_utils import gpu_nms\nfrom utils.plot_utils import get_color_table, plot_one_box\n\nfrom model.yolov3 import yolov3\nfrom model.yolov3_tiny import yolov3_tiny\n# 相关参数\nnet_name = 'yolov3_tiny'\nbody_name = 'darknet19'\ndata_name = 'COCO'\nckpt_name = 'yolov3_tiny_my.cpkt'\nimg_path = \"imgs/person3.jpg\"\n# 解析器\nparser = argparse.ArgumentParser(description=\"%s test single image test procedure.\"%net_name)\nparser.add_argument(\"--input_image\", type=str, default=img_path,\n help=\"The path of the input image.\")\nparser.add_argument(\"--anchor_path\", type=str, default=\"./data/tiny_yolo_anchors.txt\",\n help=\"The path of the anchor txt file.\")\nparser.add_argument(\"--new_size\", nargs='*', type=int, default=[416, 416],\n help=\"Resize the input image with `new_size`, size format: [width, height]\")\nparser.add_argument(\"--class_name_path\", type=str, default=\"./data/%s.name\"%data_name,\n help=\"The path of the class names.\")\nparser.add_argument(\"--restore_path\", type=str, default=\"./checkpoint/yolov3_tiny_COCO/%s\"%(ckpt_name),\n help=\"The path of the weights to restore.\")\nargs = parser.parse_args()\n# 锚框\nargs.anchors = parse_anchors(args.anchor_path)\n# 类别名\nargs.classes = read_class_names(args.class_name_path)\n#类别数\nargs.num_class = len(args.classes)\n#得到框的颜色种类\ncolor_table = get_color_table(args.num_class)\n# 读取图片\nimg_ori = cv2.imread(args.input_image)\n# 得到图片大小(h*w)\nheight_ori, width_ori = img_ori.shape[:2]\n# 缩放图片(416*416)\nimg = cv2.resize(img_ori, tuple(args.new_size))\n# 转为rgb图片\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# 转化为float型\nimg = np.asarray(img, np.float32)\n# 归一化\nimg = img[np.newaxis, :] / 255.\n\nwith tf.Session() as sess:\n\t# 输入数据的占位符\n input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')\n # yolo_model = yolov3(args.num_class, args.anchors)\n # 得到训练模型 \n yolo_model = yolov3_tiny(args.num_class, args.anchors)\n with tf.variable_scope(net_name):\n\t\t# 得到多尺度框\n pred_feature_maps = yolo_model.forward(input_data, False)\n #得到预测值[边界框,置信度,类别]\n pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)\n \n \n # 预测的得分\n pred_scores = pred_confs * pred_probs\n # 用非极大值抑制,得到[边界框,置信度,类别] \n boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.4, iou_thresh=0.5)\n # 重载模型\n saver = tf.train.Saver()\n saver.restore(sess, args.restore_path)\n # 得到结果\n boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})\n\n # rescale the coordinates to the original image将坐标缩放到原始图像\n boxes_[:, 0] *= (width_ori/float(args.new_size[0]))\n boxes_[:, 2] *= (width_ori/float(args.new_size[0]))\n boxes_[:, 1] *= (height_ori/float(args.new_size[1]))\n boxes_[:, 3] *= (height_ori/float(args.new_size[1]))\n \n \n print(\"box coords:\")\n print(boxes_)\n print('*' * 30)\n print(\"scores:\")\n print(scores_)\n print('*' * 30)\n print(\"labels:\")\n print(labels_)\n # 得到所有边界框坐标\n for i in range(len(boxes_)):\n x0, y0, x1, y1 = boxes_[i] \n # 显示出来\n plot_one_box(img_ori, [x0, y0, 
x1, y1], label=args.classes[labels_[i]], color=color_table[labels_[i]])\n cv2.imshow('Detection result', img_ori)\n #cv2.imwrite('detection_result.jpg', img_ori)\n cv2.waitKey(0)\n" ]
[ [ "numpy.asarray", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.variable_scope", "tensorflow.placeholder" ] ]
NumesSanguis/MLTensor
[ "bd5b467f0567254843fd9f7729b65decaa672fed" ]
[ "tutorial/CNN/cifar10_train.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A binary to train CIFAR-10 using a single GPU.\n\nAccuracy:\ncifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of\ndata) as judged by cifar10_eval.py.\n\nSpeed: With batch_size 128.\n\nSystem | Step Time (sec/batch) | Accuracy\n------------------------------------------------------------------\n1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)\n1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)\n\nUsage:\nPlease see the tutorial and website for how to download the CIFAR-10\ndata set, compile the program and train the model.\n\nhttp://tensorflow.org/tutorials/deep_cnn/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\n\nimport tensorflow.python.platform\nfrom tensorflow.python.platform import gfile\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom tensorflow.models.image.cifar10 import cifar10\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', 'train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 1000000,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n\n\ndef train():\n \"\"\"Train CIFAR-10 for a number of steps.\"\"\"\n with tf.Graph().as_default():\n global_step = tf.Variable(0, trainable=False)\n\n # Get images and labels for CIFAR-10.\n images, labels = cifar10.distorted_inputs()\n\n # Build a Graph that computes the logits predictions from the\n # inference model.\n logits = cifar10.inference(images)\n\n # Calculate loss.\n loss = cifar10.loss(logits, labels)\n\n # Build a Graph that trains the model with one batch of examples and\n # updates the model parameters.\n train_op = cifar10.train(loss, global_step)\n\n # Create a saver.\n saver = tf.train.Saver(tf.all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Build an initialization operation to run below.\n init = tf.initialize_all_variables()\n\n # Start running operations on the Graph.\n sess = tf.Session(config=tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement))\n sess.run(init)\n\n # Start the queue runners.\n tf.train.start_queue_runners(sess=sess)\n\n summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,\n graph_def=sess.graph_def)\n\n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n _, loss_value = sess.run([train_op, loss])\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 10 == 0:\n num_examples_per_step = 
FLAGS.batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), step, loss_value,\n examples_per_sec, sec_per_batch))\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n if gfile.Exists(FLAGS.train_dir):\n gfile.DeleteRecursively(FLAGS.train_dir)\n gfile.MakeDirs(FLAGS.train_dir)\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.merge_all_summaries", "tensorflow.models.image.cifar10.cifar10.train", "tensorflow.python.platform.gfile.Exists", "tensorflow.models.image.cifar10.cifar10.inference", "tensorflow.models.image.cifar10.cifar10.distorted_inputs", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.train.SummaryWriter", "tensorflow.app.run", "tensorflow.all_variables", "tensorflow.models.image.cifar10.cifar10.loss", "tensorflow.initialize_all_variables", "tensorflow.python.platform.gfile.DeleteRecursively", "numpy.isnan", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.Graph", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.python.platform.gfile.MakeDirs" ] ]
SmirnovEgorRu/daal4py
[ "86ba419afe159edb3cb33e6b629f0800811853c8", "86ba419afe159edb3cb33e6b629f0800811853c8" ]
[ "examples/sycl/covariance_batch.py", "daal4py/sklearn/linear_model/_coordinate_descent_0_21.py" ]
[ "#*******************************************************************************\n# Copyright 2014-2020 Intel Corporation\n# All Rights Reserved.\n#\n# This software is licensed under the Apache License, Version 2.0 (the\n# \"License\"), the following terms apply:\n#\n# You may not use this file except in compliance with the License. You may\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#*******************************************************************************\n\n# daal4py covariance example for shared memory systems\n\nimport daal4py as d4p\nimport numpy as np\nimport os\nfrom daal4py.oneapi import sycl_buffer\n\n# let's try to use pandas' fast csv reader\ntry:\n import pandas\n read_csv = lambda f, c=None, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\nexcept:\n # fall back to numpy loadtxt\n read_csv = lambda f, c=None, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\n\ntry:\n from dpctx import device_context, device_type\n with device_context(device_type.gpu, 0):\n gpu_available=True\nexcept:\n try:\n from daal4py.oneapi import sycl_context\n with sycl_context('gpu'):\n gpu_available=True\n except:\n gpu_available=False\n\n# Common code for both CPU and GPU computations\ndef compute(data, method):\n # configure a covariance object\n algo = d4p.covariance(method=method)\n return algo.compute(data)\n\n\n# At this moment with sycl we are working only with numpy arrays\ndef to_numpy(data):\n try:\n from pandas import DataFrame\n if isinstance(data, DataFrame):\n return np.ascontiguousarray(data.values)\n except ImportError:\n pass\n try:\n from scipy.sparse import csr_matrix\n if isinstance(data, csr_matrix):\n return data.toarray()\n except ImportError:\n pass\n return data\n\n\ndef main(readcsv=read_csv, method='defaultDense'):\n infile = os.path.join('..', 'data', 'batch', 'covcormoments_dense.csv')\n\n # Load the data\n data = readcsv(infile, range(10))\n\n # Using of the classic way (computations on CPU)\n result_classic = compute(data, method)\n\n data = to_numpy(data)\n\n try:\n from dpctx import device_context, device_type\n gpu_context = lambda: device_context(device_type.gpu, 0)\n cpu_context = lambda: device_context(device_type.cpu, 0)\n except:\n from daal4py.oneapi import sycl_context\n gpu_context = lambda: sycl_context('gpu')\n cpu_context = lambda: sycl_context('cpu')\n\n # It is possible to specify to make the computations on GPU\n if gpu_available:\n with gpu_context():\n sycl_data = sycl_buffer(data)\n result_gpu = compute(sycl_data, 'defaultDense')\n\n assert np.allclose(result_classic.covariance, result_gpu.covariance)\n assert np.allclose(result_classic.mean, result_gpu.mean)\n assert np.allclose(result_classic.correlation, result_gpu.correlation)\n\n # It is possible to specify to make the computations on CPU\n with cpu_context():\n sycl_data = sycl_buffer(data)\n result_cpu = compute(sycl_data, 'defaultDense')\n\n # covariance result objects provide correlation, covariance and mean\n assert np.allclose(result_classic.covariance, result_cpu.covariance)\n assert np.allclose(result_classic.mean, result_cpu.mean)\n assert 
np.allclose(result_classic.correlation, result_cpu.correlation)\n\n return result_classic\n\n\nif __name__ == \"__main__\":\n res = main()\n print(\"Covariance matrix:\\n\", res.covariance)\n print(\"Mean vector:\\n\", res.mean)\n print('All looks good!')\n", "#\n#*******************************************************************************\n# Copyright 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#******************************************************************************/\n\nimport numpy as np\nimport numbers\nimport daal4py\nfrom scipy import sparse as sp\nfrom sklearn.utils import check_array, check_X_y\nfrom sklearn.linear_model import ElasticNet as ElasticNet_original\nfrom sklearn.linear_model import Lasso as Lasso_original\nfrom daal4py.sklearn._utils import (make2d, getFPType, get_patch_message)\nimport logging\n\n#only for compliance with Sklearn\nimport warnings\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.preprocessing import normalize\n\ndef _daal4py_check(self, X, y, check_input):\n _fptype = getFPType(X)\n\n #check alpha\n if self.alpha == 0:\n warnings.warn(\"With alpha=0, this algorithm does not converge \"\n \"well. You are advised to use the LinearRegression \"\n \"estimator\", stacklevel=2)\n\n #check l1_ratio\n if (not isinstance(self.l1_ratio, numbers.Number) or\n self.l1_ratio < 0 or self.l1_ratio > 1):\n raise ValueError(\"l1_ratio must be between 0 and 1; \"\n f\"got l1_ratio={self.l1_ratio}\")\n\n #check precompute\n if isinstance(self.precompute, np.ndarray):\n if check_input:\n check_array(self.precompute, dtype=_fptype)\n self.precompute = make2d(self.precompute)\n else:\n if self.precompute not in [False, True, 'auto']:\n raise ValueError(\"precompute should be one of True, False, \"\n \"'auto' or array-like. 
Got %r\" % self.precompute)\n\n #check selection\n if self.selection not in ['random', 'cyclic']:\n raise ValueError(\"selection should be either random or cyclic.\")\n\ndef _daal4py_fit_enet(self, X, y_, check_input):\n\n #appropriate checks\n _daal4py_check(self, X, y_, check_input)\n X = make2d(X)\n y = make2d(y_)\n _fptype = getFPType(X)\n\n #only for dual_gap computation, it is not required for Intel(R) oneAPI Data Analytics Library\n self._X = X\n self._y = y\n\n penalty_L1 = np.asarray(self.alpha*self.l1_ratio, dtype=X.dtype)\n penalty_L2 = np.asarray(self.alpha*(1.0 - self.l1_ratio), dtype=X.dtype)\n if (penalty_L1.size != 1 or penalty_L2.size != 1):\n raise ValueError(\"alpha or l1_ratio length is wrong\")\n penalty_L1 = penalty_L1.reshape((1,-1))\n penalty_L2 = penalty_L2.reshape((1,-1))\n\n #normalizing and centering\n X_offset = np.zeros(X.shape[1], dtype=X.dtype)\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n if y.ndim == 1:\n y_offset = X.dtype.type(0)\n else:\n y_offset = np.zeros(y.shape[1], dtype=X.dtype)\n\n if self.fit_intercept:\n X_offset = np.average(X, axis=0)\n if self.normalize:\n if self.copy_X:\n X = np.copy(X) - X_offset\n else:\n X -= X_offset\n X, X_scale = normalize(X, axis=0, copy=False, return_norm=True)\n y_offset = np.average(y, axis=0)\n y = y - y_offset\n\n #only for compliance with Sklearn\n if isinstance(self.precompute, np.ndarray) and (\n self.fit_intercept and not np.allclose(X_offset, np.zeros(X.shape[1])) or\n self.normalize and not np.allclose(X_scale, np.ones(X.shape[1]))):\n warnings.warn(\"Gram matrix was provided but X was centered\"\n \" to fit intercept, \"\n \"or X was normalized : recomputing Gram matrix.\",\n UserWarning)\n\n mse_alg = daal4py.optimization_solver_mse(\n numberOfTerms = X.shape[0],\n fptype = _fptype,\n method = 'defaultDense'\n )\n mse_alg.setup(X, y, None)\n\n cd_solver = daal4py.optimization_solver_coordinate_descent(\n function = mse_alg,\n fptype = _fptype,\n method = 'defaultDense',\n selection = self.selection,\n seed = 0 if (self.random_state is None) else self.random_state,\n nIterations = self.max_iter,\n positive = self.positive,\n accuracyThreshold = self.tol\n )\n\n #set warm_start\n if (self.warm_start and hasattr(self, \"coef_\") and isinstance(self.coef_, np.ndarray)):\n n_rows = y.shape[1]\n n_cols = X.shape[1] + 1\n inputArgument = np.zeros((n_rows, n_cols), dtype = _fptype)\n for i in range(n_rows):\n inputArgument[i][0] = self.intercept_ if (n_rows == 1) else self.intercept_[i]\n inputArgument[i][1:] = self.coef_[:].copy(order='C') if (n_rows == 1) else self.coef_[i,:].copy(order='C')\n cd_solver.setup(inputArgument)\n\n elastic_net_alg = daal4py.elastic_net_training(\n fptype = _fptype,\n method = 'defaultDense',\n interceptFlag = (self.fit_intercept is True),\n dataUseInComputation = 'doUse' if ((self.copy_X is False) or (self.fit_intercept and self.normalize and self.copy_X)) else 'doNotUse',\n penaltyL1 = penalty_L1,\n penaltyL2 = penalty_L2,\n optimizationSolver = cd_solver\n )\n try:\n if isinstance(self.precompute, np.ndarray):\n elastic_net_res = elastic_net_alg.compute(data=X, dependentVariables=y, gramMatrix=self.precompute)\n else:\n elastic_net_res = elastic_net_alg.compute(data=X, dependentVariables=y)\n except RuntimeError:\n return None\n\n #set coef_ and intersept_ results\n elastic_net_model = elastic_net_res.model\n self.daal_model_ = elastic_net_model\n\n #update coefficients if normalizing and centering\n if self.fit_intercept and self.normalize:\n elastic_net_model.Beta[:,1:] = 
elastic_net_model.Beta[:,1:] / X_scale\n elastic_net_model.Beta[:,0] = (y_offset - np.dot(X_offset, elastic_net_model.Beta[:,1:].T)).T\n\n coefs = elastic_net_model.Beta\n\n self.intercept_ = coefs[:,0].copy(order='C')\n self.coef_ = coefs[:,1:].copy(order='C')\n\n #only for compliance with Sklearn\n if y.shape[1] == 1:\n self.coef_ = np.ravel(self.coef_)\n self.intercept_ = np.ravel(self.intercept_)\n if self.intercept_.shape[0] == 1:\n self.intercept_ = self.intercept_[0]\n\n #set n_iter_\n n_iter = cd_solver.__get_result__().nIterations[0][0]\n if y.shape[1] == 1:\n self.n_iter_ = n_iter\n else:\n self.n_iter_ = np.full(y.shape[1], n_iter)\n\n #only for compliance with Sklearn\n if (self.max_iter == n_iter + 1):\n warnings.warn(\"Objective did not converge. You might want to \"\n \"increase the number of iterations.\", ConvergenceWarning)\n\n return self\n\ndef _daal4py_predict_enet(self, X):\n X = make2d(X)\n _fptype = getFPType(self.coef_)\n\n elastic_net_palg = daal4py.elastic_net_prediction(\n fptype=_fptype,\n method='defaultDense'\n )\n elastic_net_res = elastic_net_palg.compute(X, self.daal_model_)\n\n res = elastic_net_res.prediction\n\n if res.shape[1] == 1 and self.coef_.ndim == 1:\n res = np.ravel(res)\n return res\n\ndef _daal4py_fit_lasso(self, X, y_, check_input):\n\n #appropriate checks\n _daal4py_check(self, X, y_, check_input)\n X = make2d(X)\n y = make2d(y_)\n _fptype = getFPType(X)\n\n #only for dual_gap computation, it is not required for Intel(R) oneAPI Data Analytics Library\n self._X = X\n self._y = y\n\n #normalizing and centering\n X_offset = np.zeros(X.shape[1], dtype=X.dtype)\n X_scale = np.ones(X.shape[1], dtype=X.dtype)\n if y.ndim == 1:\n y_offset = X.dtype.type(0)\n else:\n y_offset = np.zeros(y.shape[1], dtype=X.dtype)\n\n if self.fit_intercept:\n X_offset = np.average(X, axis=0)\n if self.normalize:\n if self.copy_X:\n X = np.copy(X) - X_offset\n else:\n X -= X_offset\n X, X_scale = normalize(X, axis=0, copy=False, return_norm=True)\n y_offset = np.average(y, axis=0)\n y = y - y_offset\n\n #only for compliance with Sklearn\n if isinstance(self.precompute, np.ndarray) and (\n self.fit_intercept and not np.allclose(X_offset, np.zeros(X.shape[1])) or\n self.normalize and not np.allclose(X_scale, np.ones(X.shape[1]))):\n warnings.warn(\"Gram matrix was provided but X was centered\"\n \" to fit intercept, \"\n \"or X was normalized : recomputing Gram matrix.\",\n UserWarning)\n\n mse_alg = daal4py.optimization_solver_mse(\n numberOfTerms = X.shape[0],\n fptype = _fptype,\n method = 'defaultDense'\n )\n mse_alg.setup(X, y, None)\n\n cd_solver = daal4py.optimization_solver_coordinate_descent(\n function = mse_alg,\n fptype = _fptype,\n method = 'defaultDense',\n selection = self.selection,\n seed = 0 if (self.random_state is None) else self.random_state,\n nIterations = self.max_iter,\n positive = self.positive,\n accuracyThreshold = self.tol\n )\n\n #set warm_start\n if (self.warm_start and hasattr(self, \"coef_\") and isinstance(self.coef_, np.ndarray)):\n n_rows = y.shape[1]\n n_cols = X.shape[1] + 1\n inputArgument = np.zeros((n_rows, n_cols), dtype = _fptype)\n for i in range(n_rows):\n inputArgument[i][0] = self.intercept_ if (n_rows == 1) else self.intercept_[i]\n inputArgument[i][1:] = self.coef_[:].copy(order='C') if (n_rows == 1) else self.coef_[i,:].copy(order='C')\n cd_solver.setup(inputArgument)\n\n lasso_alg = daal4py.lasso_regression_training(\n fptype = _fptype,\n method = 'defaultDense',\n interceptFlag = (self.fit_intercept is True),\n 
dataUseInComputation = 'doUse' if ((self.copy_X is False) or (self.fit_intercept and self.normalize and self.copy_X)) else 'doNotUse',\n lassoParameters = np.asarray(self.alpha, dtype=X.dtype).reshape((1,-1)),\n optimizationSolver = cd_solver\n )\n try:\n if isinstance(self.precompute, np.ndarray):\n lasso_res = lasso_alg.compute(data=X, dependentVariables=y, gramMatrix=self.precompute)\n else:\n lasso_res = lasso_alg.compute(data=X, dependentVariables=y)\n except RuntimeError:\n return None\n\n #set coef_ and intersept_ results\n lasso_model = lasso_res.model\n self.daal_model_ = lasso_model\n\n #update coefficients if normalizing and centering\n if self.fit_intercept and self.normalize:\n lasso_model.Beta[:,1:] = lasso_model.Beta[:,1:] / X_scale\n lasso_model.Beta[:,0] = (y_offset - np.dot(X_offset, lasso_model.Beta[:,1:].T)).T\n\n coefs = lasso_model.Beta\n\n self.intercept_ = coefs[:,0].copy(order='C')\n self.coef_ = coefs[:,1:].copy(order='C')\n\n #only for compliance with Sklearn\n if y.shape[1] == 1:\n self.coef_ = np.ravel(self.coef_)\n self.intercept_ = np.ravel(self.intercept_)\n if self.intercept_.shape[0] == 1:\n self.intercept_ = self.intercept_[0]\n\n #set n_iter_\n n_iter = cd_solver.__get_result__().nIterations[0][0]\n if y.shape[1] == 1:\n self.n_iter_ = n_iter\n else:\n self.n_iter_ = np.full(y.shape[1], n_iter)\n\n #only for compliance with Sklearn\n if (self.max_iter == n_iter + 1):\n warnings.warn(\"Objective did not converge. You might want to \"\n \"increase the number of iterations.\", ConvergenceWarning)\n\n return self\n\ndef _daal4py_predict_lasso(self, X):\n X = make2d(X)\n _fptype = getFPType(self.coef_)\n\n lasso_palg = daal4py.lasso_regression_prediction(\n fptype=_fptype,\n method='defaultDense'\n )\n lasso_res = lasso_palg.compute(X, self.daal_model_)\n\n res = lasso_res.prediction\n\n if res.shape[1] == 1 and self.coef_.ndim == 1:\n res = np.ravel(res)\n return res\n\nclass ElasticNet(ElasticNet_original):\n __doc__ = ElasticNet_original.__doc__\n\n def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,\n normalize=False, precompute=False, max_iter=1000,\n copy_X=True, tol=1e-4, warm_start=False, positive=False,\n random_state=None, selection='cyclic'):\n super(ElasticNet, self).__init__(\n alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept,\n normalize=normalize, precompute=precompute, max_iter=max_iter,\n copy_X=copy_X, tol=tol, warm_start=warm_start,\n positive=positive, random_state=random_state, selection=selection)\n\n def fit(self, X, y, check_input=True):\n \"\"\"Fit model with coordinate descent.\n\n Parameters\n ----------\n X : ndarray or scipy.sparse matrix, (n_samples, n_features)\n Data\n\n y : ndarray, shape (n_samples,) or (n_samples, n_targets)\n Target. 
Will be cast to X's dtype if necessary\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Notes\n -----\n\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n \"\"\"\n #check X and y\n if check_input:\n X, y = check_X_y(X, y, copy=False, accept_sparse='csc', dtype=[np.float64, np.float32], multi_output=True, y_numeric=True)\n y = check_array(y, copy=False, dtype=X.dtype.type, ensure_2d=False)\n else:\n #only for compliance with Sklearn, this assert is not required for Intel(R) oneAPI Data Analytics Library\n if (isinstance(X, np.ndarray) and X.flags['F_CONTIGUOUS'] == False):\n raise ValueError(\"ndarray is not Fortran contiguous\")\n\n if isinstance(X, np.ndarray):\n self.fit_shape_good_for_daal_ = True if X.ndim <= 1 else True if X.shape[0] >= X.shape[1] else False \n else:\n self.fit_shape_good_for_daal_ = False\n\n if (sp.issparse(X) or\n not self.fit_shape_good_for_daal_ or\n not (X.dtype == np.float64 or X.dtype == np.float32)):\n if hasattr(self, 'daal_model_'):\n del self.daal_model_\n logging.info(\"sklearn.linear_model.ElasticNet.fit: \" + get_patch_message(\"sklearn\"))\n res_new = super(ElasticNet, self).fit(X, y, check_input=check_input)\n self._gap = res_new.dual_gap_\n return res_new\n self.n_iter_ = None\n self._gap = None\n #only for pass tests \"check_estimators_fit_returns_self(readonly_memmap=True) and check_regressors_train(readonly_memmap=True)\n if not (X.flags.writeable):\n X = np.copy(X)\n if not (y.flags.writeable):\n y = np.copy(y)\n logging.info(\"sklearn.linear_model.ElasticNet.fit: \" + get_patch_message(\"daal\"))\n res = _daal4py_fit_enet(self, X, y, check_input=check_input)\n if res is None:\n if hasattr(self, 'daal_model_'):\n del self.daal_model_\n logging.info(\"sklearn.linear_model.ElasticNet.fit: \" + get_patch_message(\"sklearn_after_daal\"))\n res_new = super(ElasticNet, self).fit(X, y, check_input=check_input)\n self._gap = res_new.dual_gap_\n return res_new\n return res\n\n\n def predict(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Returns predicted values.\n \"\"\"\n\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n good_shape_for_daal = True if X.ndim <= 1 else True if X.shape[0] >= X.shape[1] else False\n\n if (not hasattr(self, 'daal_model_') or\n sp.issparse(X) or\n not good_shape_for_daal or\n not (X.dtype == np.float64 or X.dtype == np.float32)):\n logging.info(\"sklearn.linear_model.ElasticNet.predict: \" + get_patch_message(\"sklearn\"))\n return self._decision_function(X)\n logging.info(\"sklearn.linear_model.ElasticNet.predict: \" + get_patch_message(\"daal\"))\n return _daal4py_predict_enet(self, X)\n\n\n @property\n def dual_gap_(self):\n if (self._gap is None):\n l1_reg = self.alpha * self.l1_ratio * self._X.shape[0]\n l2_reg = self.alpha * (1.0 - self.l1_ratio) * self._X.shape[0]\n n_targets = self._y.shape[1]\n\n if (n_targets == 1):\n self._gap = self.tol + 1.0\n X_offset = np.average(self._X, axis=0)\n y_offset = np.average(self._y, axis=0)\n coef = np.reshape(self.coef_, (self.coef_.shape[0], 1))\n R = (self._y - 
y_offset) - np.dot((self._X - X_offset), coef)\n XtA = np.dot((self._X - X_offset).T, R) - l2_reg * coef\n R_norm2 = np.dot(R.T, R)\n coef_norm2 = np.dot(self.coef_, self.coef_)\n dual_norm_XtA = np.max(XtA) if self.positive else np.max(np.abs(XtA))\n if dual_norm_XtA > l1_reg:\n const = l1_reg / dual_norm_XtA\n A_norm2 = R_norm2 * (const ** 2)\n self._gap = 0.5 * (R_norm2 + A_norm2)\n else:\n const = 1.0\n self._gap = R_norm2\n l1_norm = np.sum(np.abs(self.coef_))\n self._gap += (l1_reg * l1_norm - const * np.dot(R.T, (self._y - y_offset)) + 0.5 * l2_reg * (1 + const ** 2) * coef_norm2)\n self._gap = self._gap[0][0]\n else:\n self._gap = np.full(n_targets, self.tol + 1.0)\n X_offset = np.average(self._X, axis=0)\n y_offset = np.average(self._y, axis=0)\n for k in range(n_targets):\n R = (self._y[:, k] - y_offset[k]) - np.dot((self._X - X_offset), self.coef_[k, :].T)\n XtA = np.dot((self._X - X_offset).T, R) - l2_reg * self.coef_[k, :].T\n R_norm2 = np.dot(R.T, R)\n coef_norm2 = np.dot(self.coef_[k, :], self.coef_[k, :].T)\n dual_norm_XtA = np.max(XtA) if self.positive else np.max(np.abs(XtA))\n if dual_norm_XtA > l1_reg:\n const = l1_reg / dual_norm_XtA\n A_norm2 = R_norm2 * (const ** 2)\n self._gap[k] = 0.5 * (R_norm2 + A_norm2)\n else:\n const = 1.0\n self._gap[k] = R_norm2\n l1_norm = np.sum(np.abs(self.coef_[k, :]))\n self._gap[k] += (l1_reg * l1_norm - const * np.dot(R.T, (self._y[:, k] - y_offset[k])) + 0.5 * l2_reg * (1 + const ** 2) * coef_norm2)\n return self._gap\n\n @dual_gap_.setter\n def dual_gap_(self, value):\n self._gap = value\n\n @dual_gap_.deleter\n def dual_gap_(self):\n self._gap = None\n\nclass Lasso(ElasticNet):\n __doc__ = Lasso_original.__doc__\n\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n precompute=False, copy_X=True, max_iter=1000,\n tol=1e-4, warm_start=False, positive=False,\n random_state=None, selection='cyclic'):\n super().__init__(\n alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,\n normalize=normalize, precompute=precompute, copy_X=copy_X,\n max_iter=max_iter, tol=tol, warm_start=warm_start,\n positive=positive, random_state=random_state,\n selection=selection)\n\n def fit(self, X, y, check_input=True):\n \"\"\"Fit model with coordinate descent.\n\n Parameters\n ----------\n X : ndarray or scipy.sparse matrix, (n_samples, n_features)\n Data\n\n y : ndarray, shape (n_samples,) or (n_samples, n_targets)\n Target. 
Will be cast to X's dtype if necessary\n\n check_input : boolean, (default=True)\n Allow to bypass several input checking.\n Don't use this parameter unless you know what you do.\n\n Notes\n -----\n\n Coordinate descent is an algorithm that considers each column of\n data at a time hence it will automatically convert the X input\n as a Fortran-contiguous numpy array if necessary.\n\n To avoid memory re-allocation it is advised to allocate the\n initial data in memory directly using that format.\n \"\"\"\n #check X and y\n if check_input:\n X, y = check_X_y(X, y, copy=False, accept_sparse='csc', dtype=[np.float64, np.float32], multi_output=True, y_numeric=True)\n y = check_array(y, copy=False, dtype=X.dtype.type, ensure_2d=False)\n else:\n #only for compliance with Sklearn, this assert is not required for Intel(R) oneAPI Data\n #Analytics Library\n if (isinstance(X, np.ndarray) and X.flags['F_CONTIGUOUS'] == False):\n raise ValueError(\"ndarray is not Fortran contiguous\")\n\n if isinstance(X, np.ndarray):\n self.fit_shape_good_for_daal_ = True if X.ndim <= 1 else True if X.shape[0] >= X.shape[1] else False \n else:\n self.fit_shape_good_for_daal_ = False\n\n if (sp.issparse(X) or\n not self.fit_shape_good_for_daal_ or\n not (X.dtype == np.float64 or X.dtype == np.float32)):\n if hasattr(self, 'daal_model_'):\n del self.daal_model_\n logging.info(\"sklearn.linear_model.Lasso.fit: \" + get_patch_message(\"sklearn\"))\n res_new = super(ElasticNet, self).fit(X, y, check_input=check_input)\n self._gap = res_new.dual_gap_\n return res_new\n self.n_iter_ = None\n self._gap = None\n #only for pass tests \"check_estimators_fit_returns_self(readonly_memmap=True) and check_regressors_train(readonly_memmap=True)\n if not (X.flags.writeable):\n X = np.copy(X)\n if not (y.flags.writeable):\n y = np.copy(y)\n logging.info(\"sklearn.linear_model.Lasso.fit: \" + get_patch_message(\"daal\"))\n res = _daal4py_fit_lasso(self, X, y, check_input=check_input)\n if res is None:\n if hasattr(self, 'daal_model_'):\n del self.daal_model_\n logging.info(\"sklearn.linear_model.Lasso.fit: \" + get_patch_message(\"sklearn_after_daal\"))\n res_new = super(ElasticNet, self).fit(X, y, check_input=check_input)\n self._gap = res_new.dual_gap_\n return res_new\n return res\n\n\n def predict(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape = (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Returns predicted values.\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n good_shape_for_daal = True if X.ndim <= 1 else True if X.shape[0] >= X.shape[1] else False\n\n if (not hasattr(self, 'daal_model_') or\n sp.issparse(X) or\n not good_shape_for_daal or\n not (X.dtype == np.float64 or X.dtype == np.float32)):\n logging.info(\"sklearn.linear_model.Lasso.predict: \" + get_patch_message(\"sklearn\"))\n return self._decision_function(X)\n logging.info(\"sklearn.linear_model.Lasso.predict: \" + get_patch_message(\"daal\"))\n return _daal4py_predict_lasso(self, X)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.allclose", "pandas.read_csv", "numpy.loadtxt" ], [ "numpy.max", "numpy.full", "scipy.sparse.issparse", "numpy.dot", "sklearn.utils.check_X_y", "numpy.asarray", "numpy.zeros", "numpy.reshape", "numpy.copy", "numpy.ones", "numpy.ravel", "sklearn.preprocessing.normalize", "numpy.abs", "numpy.average", "sklearn.utils.check_array" ] ]
BBN-Q/libaps2
[ "7787c821279deeca14108d7ac9a49992c64fffe9" ]
[ "src/util/aps_debug_watcher.py" ]
[ "#!/usr/bin/env python\n\nimport sys\nimport socket\nimport struct\ntry:\n from QGL.drivers import APS2Pattern\n haveQGL = True\nexcept:\n warn(\"Could not load QGL. Will not be able to decode instructions.\")\n haveQGL = False\nimport numpy as np\nfrom ansicolor import * # from ansicolors\nimport argparse\n\nshort_time = True\n\nPORT = 0xbb50\nPACKET_SIZE=168//8+1\nif short_time:\n PACKET_SIZE -= 2\nMODE_UNKNOWN = 0\nMODE_SEQUENCER = 1\nMODE_RAM = 2\nMODE_TRIGGER = 3\n\ndef bittest(v, b):\n if ((v & (1<<b)) == (1<<b)):\n return red('T')\n else:\n return 'F'\n\ndef get_bytes(a, s, e):\n e += s\n return a[s:e], e\n\ndef formatHaltBits(haltBits):\n haltSync = bittest(haltBits,5)\n haltCustom = bittest(haltBits,4)\n haltValid = bittest(haltBits,3)\n haltWait = bittest(haltBits,2)\n haltStr = \"S:{} V:{} C:{} W:{}\".format(haltSync, haltValid, haltCustom, haltWait)\n return haltStr\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"ip\", help=\"IPv4 Address of APS2 to connect to.\", type=str)\n parser.add_argument(\"-f\", \"--file\", help=\"File to store log.\", type=str)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-r\", \"--raw\", help=\"Set raw mode.\", action=\"store_true\")\n group.add_argument(\"-t\", \"--tdm\", help=\"Decode as TDM\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--silent\", help=\"Silent mode (log only to file)\", action=\"store_true\")\n\n args = parser.parse_args()\n\n ip = args.ip\n port = 0xbb50\n\n raw_mode = args.raw\n tdm_mode = args.tdm\n\n print(\"Connecting to APS Debug Port at {0}:{1}\".format(ip,port))\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip,port))\n print(\"Connected!\")\n except Exception as e:\n print(f\"Could not connect to APS2 at {ip}.\")\n print(f\"Got error: {e.args[0]}\")\n sys.exit(0)\n\n if args.file:\n #Delete ansi escape codes\n #From: https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python\n import re\n ansi_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n strip = lambda x: ansi_escape.sub('', x)\n outfile = open(args.file, \"w\")\n\n if args.silent:\n output = lambda x: print(strip(x), file=outfile)\n else:\n def output(x):\n print(strip(x), file=outfile)\n print(x)\n else:\n outfile = None\n if args.silent:\n logger.error(\"No logging enabled.\")\n sys.exit(1)\n output = print\n\n\n extraData = b''\n\n try:\n while 1:\n data = extraData + s.recv(10*PACKET_SIZE)\n num_packets = len(data) // PACKET_SIZE\n num_excess = len(data) % PACKET_SIZE\n block_size = num_packets*PACKET_SIZE\n extraData = b''\n if num_excess > 0:\n extraData = data[block_size:]\n data = data[:block_size]\n\n for cnt in range(num_packets):\n start = cnt * PACKET_SIZE\n end = (cnt+1)*PACKET_SIZE\n packet = data[start:end]\n packet_bytes = bytearray(packet)\n if raw_mode:\n logging.info(packet_bytes)\n continue\n\n if packet[0] == 0x1:\n mode = MODE_SEQUENCER\n elif packet[0] in [0x2,0x3, 0x4, 0x5]:\n mode = MODE_RAM\n elif packet[0] == 0x6:\n mode = MODE_TRIGGER\n else:\n mode = MODE_UNKNOWN\n\n\n if short_time:\n uptime_seconds = packet_bytes[1:3]\n uptime_nanoseconds = packet_bytes[3:7]\n\n uptime_seconds = struct.unpack(\">H\", uptime_seconds)[0]\n else:\n uptime_seconds = packet_bytes[1:5]\n uptime_nanoseconds = packet_bytes[5:9]\n\n uptime_seconds = struct.unpack(\">I\", uptime_seconds)[0]\n\n uptime_nanoseconds = struct.unpack(\">I\", uptime_nanoseconds)[0]\n\n #print(uptime_seconds, 
uptime_nanoseconds)\n\n uptime_nanoseconds = uptime_nanoseconds/1e9\n uptime = uptime_seconds + uptime_nanoseconds\n\n if short_time:\n start = 8\n else:\n start = 10\n\n if mode == MODE_SEQUENCER:\n\n if short_time:\n triggerWord = packet[7]\n else:\n triggerWord = packet[9]\n\n haltBits = packet[start]\n\n haltStr = formatHaltBits(haltBits)\n\n instructionAddr, start = get_bytes(packet, start, 4)\n instructionAddr = bytearray(instructionAddr)\n # clear halt bits, only the first two bits are part of the Addr\n instructionAddr[0] = instructionAddr[0] & 0x3\n instructionAddr = struct.unpack(\">I\", instructionAddr)[0]\n\n seq_debug_data, start = get_bytes(packet, start, 8)\n seq_debug_data = np.fromstring(seq_debug_data[::-1], dtype=np.uint64)\n\n if haveQGL:\n instruction = APS2Pattern.Instruction.unflatten(seq_debug_data, decode_as_tdm = tdm_mode)\n else:\n instruction = ''\n\n h = \"{:016x}\".format(seq_debug_data[0]).upper()\n\n output(red(\"{:10}\".format(\"Sequencer\")), \\\n white(\": {:4}\".format(instructionAddr)),\\\n blue(\"0x{}\".format(h)), \\\n yellow(\" {:6.8f}\".format(uptime)), \\\n haltStr, \\\n \"T:0x{:x}\".format(triggerWord), \\\n \" {} \".format(instruction))\n # if instructionAddr > 100:\n # sys.exit()\n elif mode == MODE_RAM:\n\n zeros, start = get_bytes(packet, start, 4)\n vram_addr, start = get_bytes(packet, start, 4)\n vram_data, start = get_bytes(packet, start, 4)\n\n vram_header = struct.unpack(\">I\", zeros)[0]\n vram_addr = struct.unpack(\">I\", vram_addr)[0]\n vram_data = struct.unpack(\">I\", vram_data)[0]\n\n if packet[0] == 0x2:\n ram_mode = \"Valid\"\n\n if packet[0] == 0x3:\n ram_mode = \"Write\"\n\n if packet[0] == 0x4:\n ram_mode = \"Send\"\n\n if packet[0] == 0x5:\n ram_mode = \"Recv\"\n\n output(green(\"{:10}\".format(\"RAM\")), \\\n white(\": {:>23}\".format(ram_mode)),\\\n yellow(\" {:6.8f}\".format(uptime)), \\\n \"addr = 0x{:08X} data = 0x{:08X}\".format(vram_addr, vram_data))\n\n elif mode == MODE_TRIGGER:\n\n zeros, start = get_bytes(packet, start, 8)\n syncBits, start = get_bytes(packet,start,1)\n haltBits, start = get_bytes(packet, start, 1)\n triggers, start = get_bytes(packet, start, 1)\n triggerWord, start = get_bytes(packet,start,1)\n\n for z in zeros:\n if z != 0:\n print(\"Error expected 0 not\", z)\n\n haltBits = haltBits[0]\n syncBits = syncBits[0]\n triggers = triggers[0]\n triggerWord = triggerWord[0]\n\n haltStr = formatHaltBits(haltBits)\n\n syncWF = (syncBits >> 3) & 0xF\n syncMarker = (syncBits >> 1) & 0x3\n syncMod = syncBits & 0x1\n\n\n halt = bittest(syncBits,4)\n pc_jump = bittest(syncBits, 5)\n tready = bittest(syncBits, 6)\n tvalid = bittest(syncBits, 7)\n\n syncStr = \"TV: {} TR: {} PJ: {} H: {} SWF: 0x{} SMa: 0x{} SMo: 0x{}\".format(tvalid, tready, pc_jump, halt, syncWF, syncMarker, syncMod)\n\n trigger = bittest(triggers,1)\n triggerWordValid = bittest(triggers,0)\n\n triggerStr = \"t = {} tWV = {} tW = 0x{:0X}\".format(trigger,triggerWordValid, triggerWord)\n\n output(green(\"{:10}\".format(\"Trigger\")), \\\n white(\": {:>23}\".format('')),\\\n yellow(\" {:6.8f}\".format(uptime)), \\\n haltStr, \\\n syncStr, \\\n triggerStr)\n elif mode == MODE_UNKNOWN:\n output(green(\"{:10}\".format(\"Unknown\")),packet_bytes)\n except struct.error:\n print(\"Could not understand data from APS2. Likely lost sync?\")\n except KeyboardInterrupt:\n pass\n if outfile:\n close(outfile)\n s.close()\n print(\"Goodbye!\")\n sys.exit(0)\n" ]
[ [ "numpy.fromstring" ] ]
researchai/unsupervised_meta_rl
[ "9ca4b41438277ef6cfea047482b98de9da07815a", "9ca4b41438277ef6cfea047482b98de9da07815a" ]
[ "tests/garage/tf/regressors/test_bernoulli_mlp_regressor_with_model.py", "tests/garage/tf/policies/test_continuous_mlp_policy_with_model.py" ]
[ "import pickle\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer\nfrom garage.tf.regressors import BernoulliMLPRegressorWithModel\nfrom tests.fixtures import TfGraphTestCase\n\n\ndef get_labels(input_shape, xs, output_dim):\n if input_shape == (1, ):\n label = [0, 0]\n # [0, 1] if sign is positive else [1, 0]\n ys = 0 if np.sin(xs) <= 0 else 1\n label[ys] = 1\n elif input_shape == (2, ):\n ys = int(np.round(xs[0])) ^ int(np.round(xs[1]))\n if output_dim == 1:\n label = ys\n else:\n # [0, 1] if XOR is 1 else [1, 0]\n label = [0, 0]\n label[ys] = 1\n return label\n\n\ndef get_train_data(input_shape, output_dim):\n if input_shape == (1, ):\n # Sign of sin function\n data = np.linspace(-np.pi, np.pi, 1000)\n obs = [{\n 'observations': [[x]],\n 'returns': [get_labels(input_shape, x, output_dim)]\n } for x in data]\n elif input_shape == (2, ):\n # Generate 1000 points with coordinates in [0, 1] for XOR data\n x = np.linspace(0, 1, 100)\n y = np.linspace(0, 1, 10)\n data = np.dstack(np.meshgrid(x, y)).reshape(-1, 2)\n obs = [{\n 'observations': [x],\n 'returns': [get_labels(input_shape, x, output_dim)]\n } for x in data]\n observations = np.concatenate([p['observations'] for p in obs])\n returns = np.concatenate([p['returns'] for p in obs])\n returns = returns.reshape((-1, output_dim))\n return observations, returns\n\n\ndef get_test_data(input_shape, output_dim):\n if input_shape == (1, ):\n paths = {\n 'observations': [[-np.pi / 2], [-np.pi / 3], [-np.pi / 4],\n [np.pi / 4], [np.pi / 3], [np.pi / 4]]\n }\n expected = [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]\n elif input_shape == (2, ):\n paths = {'observations': [[0, 0], [0, 1], [1, 0], [1, 1]]}\n if output_dim == 1:\n expected = [[0], [1], [1], [0]]\n else:\n expected = [[1, 0], [0, 1], [0, 1], [1, 0]]\n return paths, expected\n\n\nclass TestBernoulliMLPRegressorWithModel(TfGraphTestCase):\n # yapf: disable\n @pytest.mark.parametrize('input_shape, output_dim', [\n ((1, ), 2),\n ((2, ), 1),\n ((2, ), 2),\n ])\n # yapf: enable\n def test_fit_normalized(self, input_shape, output_dim):\n bmr = BernoulliMLPRegressorWithModel(\n input_shape=input_shape, output_dim=output_dim)\n\n observations, returns = get_train_data(input_shape, output_dim)\n\n for _ in range(150):\n bmr.fit(observations, returns)\n\n paths, expected = get_test_data(input_shape, output_dim)\n\n prediction = np.cast['int'](bmr.predict(paths['observations']))\n assert np.allclose(prediction, expected, rtol=0, atol=0.1)\n\n x_mean = self.sess.run(bmr.model.networks['default'].x_mean)\n x_mean_expected = np.mean(observations, axis=0, keepdims=True)\n x_std = self.sess.run(bmr.model.networks['default'].x_std)\n x_std_expected = np.std(observations, axis=0, keepdims=True)\n\n assert np.allclose(x_mean, x_mean_expected)\n assert np.allclose(x_std, x_std_expected)\n\n # yapf: disable\n @pytest.mark.parametrize('input_shape, output_dim', [\n ((1, ), 2),\n ((2, ), 2),\n ((2, ), 1),\n ])\n # yapf: enable\n def test_fit_unnormalized(self, input_shape, output_dim):\n bmr = BernoulliMLPRegressorWithModel(\n input_shape=input_shape,\n output_dim=output_dim,\n normalize_inputs=False)\n\n observations, returns = get_train_data(input_shape, output_dim)\n\n for _ in range(150):\n bmr.fit(observations, returns)\n\n paths, expected = get_test_data(input_shape, output_dim)\n\n prediction = np.cast['int'](bmr.predict(paths['observations']))\n\n assert np.allclose(prediction, 
expected, rtol=0, atol=0.1)\n\n x_mean = self.sess.run(bmr.model.networks['default'].x_mean)\n x_mean_expected = np.zeros_like(x_mean)\n x_std = self.sess.run(bmr.model.networks['default'].x_std)\n x_std_expected = np.ones_like(x_std)\n\n assert np.allclose(x_mean, x_mean_expected)\n assert np.allclose(x_std, x_std_expected)\n\n # yapf: disable\n @pytest.mark.parametrize('input_shape, output_dim', [\n ((1, ), 2),\n ((2, ), 2),\n ((2, ), 1),\n ])\n # yapf: enable\n def test_fit_with_no_trust_region(self, input_shape, output_dim):\n bmr = BernoulliMLPRegressorWithModel(\n input_shape=input_shape,\n output_dim=output_dim,\n use_trust_region=False)\n\n observations, returns = get_train_data(input_shape, output_dim)\n\n for _ in range(150):\n bmr.fit(observations, returns)\n\n paths, expected = get_test_data(input_shape, output_dim)\n prediction = np.cast['int'](bmr.predict(paths['observations']))\n\n assert np.allclose(prediction, expected, rtol=0, atol=0.1)\n\n x_mean = self.sess.run(bmr.model.networks['default'].x_mean)\n x_mean_expected = np.mean(observations, axis=0, keepdims=True)\n x_std = self.sess.run(bmr.model.networks['default'].x_std)\n x_std_expected = np.std(observations, axis=0, keepdims=True)\n\n assert np.allclose(x_mean, x_mean_expected)\n assert np.allclose(x_std, x_std_expected)\n\n # yapf: disable\n @pytest.mark.parametrize('output_dim, input_shape', [\n (1, (1, 1)),\n (1, (2, 2)),\n (2, (3, 2)),\n (3, (2, 2)),\n ])\n # yapf: enable\n def test_log_likelihood_sym(self, output_dim, input_shape):\n bmr = BernoulliMLPRegressorWithModel(\n input_shape=(input_shape[1], ), output_dim=output_dim)\n\n new_xs_var = tf.compat.v1.placeholder(tf.float32, input_shape)\n new_ys_var = tf.compat.v1.placeholder(\n dtype=tf.float32, name='ys', shape=(None, output_dim))\n\n data = np.full(input_shape, 0.5)\n one_hot_label = np.zeros((input_shape[0], output_dim))\n one_hot_label[np.arange(input_shape[0]), 0] = 1\n\n p = bmr._f_prob(np.asarray(data))\n ll = bmr._dist.log_likelihood(np.asarray(one_hot_label), dict(p=p))\n\n outputs = bmr.log_likelihood_sym(new_xs_var, new_ys_var, name='ll_sym')\n\n ll_from_sym = self.sess.run(\n outputs, feed_dict={\n new_xs_var: data,\n new_ys_var: one_hot_label\n })\n\n assert np.allclose(ll, ll_from_sym, rtol=0, atol=1e-5)\n\n @mock.patch('tests.garage.tf.regressors.'\n 'test_bernoulli_mlp_regressor_with_model.'\n 'LbfgsOptimizer')\n @mock.patch('tests.garage.tf.regressors.'\n 'test_bernoulli_mlp_regressor_with_model.'\n 'ConjugateGradientOptimizer')\n def test_optimizer_args(self, mock_cg, mock_lbfgs):\n lbfgs_args = dict(max_opt_itr=25)\n cg_args = dict(cg_iters=15)\n bmr = BernoulliMLPRegressorWithModel(\n input_shape=(1, ),\n output_dim=2,\n optimizer=LbfgsOptimizer,\n optimizer_args=lbfgs_args,\n tr_optimizer=ConjugateGradientOptimizer,\n tr_optimizer_args=cg_args,\n use_trust_region=True)\n\n assert mock_lbfgs.return_value is bmr._optimizer\n assert mock_cg.return_value is bmr._tr_optimizer\n\n mock_lbfgs.assert_called_with(max_opt_itr=25)\n mock_cg.assert_called_with(cg_iters=15)\n\n def test_is_pickleable(self):\n bmr = BernoulliMLPRegressorWithModel(input_shape=(1, ), output_dim=2)\n\n with tf.compat.v1.variable_scope(\n 'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',\n reuse=True):\n bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')\n bias.load(tf.ones_like(bias).eval())\n bias1 = bias.eval()\n\n result1 = np.cast['int'](bmr.predict(np.ones((1, 1))))\n h = pickle.dumps(bmr)\n\n with tf.compat.v1.Session(graph=tf.Graph()):\n bmr_pickled 
= pickle.loads(h)\n result2 = np.cast['int'](bmr_pickled.predict(np.ones((1, 1))))\n assert np.array_equal(result1, result2)\n\n with tf.compat.v1.variable_scope(\n 'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',\n reuse=True):\n bias2 = tf.compat.v1.get_variable('mlp/hidden_0/bias').eval()\n\n assert np.array_equal(bias1, bias2)\n\n def test_is_pickleable2(self):\n bmr = BernoulliMLPRegressorWithModel(input_shape=(1, ), output_dim=2)\n\n with tf.compat.v1.variable_scope(\n 'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',\n reuse=True):\n x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')\n x_mean.load(tf.ones_like(x_mean).eval())\n x1 = bmr.model.networks['default'].x_mean.eval()\n h = pickle.dumps(bmr)\n with tf.compat.v1.Session(graph=tf.Graph()):\n bmr_pickled = pickle.loads(h)\n x2 = bmr_pickled.model.networks['default'].x_mean.eval()\n assert np.array_equal(x1, x2)\n", "import pickle\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom garage.tf.envs import TfEnv\nfrom garage.tf.policies import ContinuousMLPPolicyWithModel\nfrom tests.fixtures import TfGraphTestCase\nfrom tests.fixtures.envs.dummy import DummyBoxEnv\nfrom tests.fixtures.models import SimpleMLPModel\n\n\nclass TestContinuousMLPPolicyWithModel(TfGraphTestCase):\n @pytest.mark.parametrize('obs_dim, action_dim', [\n ((1, ), (1, )),\n ((1, ), (2, )),\n ((2, ), (2, )),\n ((1, 1), (1, 1)),\n ((1, 1), (2, 2)),\n ((2, 2), (2, 2)),\n ])\n def test_get_action(self, obs_dim, action_dim):\n env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))\n with mock.patch(('garage.tf.policies.'\n 'continuous_mlp_policy_with_model.MLPModel'),\n new=SimpleMLPModel):\n policy = ContinuousMLPPolicyWithModel(env_spec=env.spec)\n\n env.reset()\n obs, _, _, _ = env.step(1)\n\n action, _ = policy.get_action(obs)\n\n expected_action = np.full(action_dim, 0.5)\n\n assert env.action_space.contains(action)\n assert np.array_equal(action, expected_action)\n\n actions, _ = policy.get_actions([obs, obs, obs])\n for action in actions:\n assert env.action_space.contains(action)\n assert np.array_equal(action, expected_action)\n\n @pytest.mark.parametrize('obs_dim, action_dim', [\n ((1, ), (1, )),\n ((1, ), (2, )),\n ((2, ), (2, )),\n ((1, 1), (1, 1)),\n ((1, 1), (2, 2)),\n ((2, 2), (2, 2)),\n ])\n def test_get_action_sym(self, obs_dim, action_dim):\n env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))\n with mock.patch(('garage.tf.policies.'\n 'continuous_mlp_policy_with_model.MLPModel'),\n new=SimpleMLPModel):\n policy = ContinuousMLPPolicyWithModel(env_spec=env.spec)\n\n env.reset()\n obs, _, _, _ = env.step(1)\n\n obs_dim = env.spec.observation_space.flat_dim\n state_input = tf.compat.v1.placeholder(\n tf.float32, shape=(None, obs_dim))\n action_sym = policy.get_action_sym(state_input, name='action_sym')\n\n expected_action = np.full(action_dim, 0.5)\n\n action = self.sess.run(\n action_sym, feed_dict={state_input: [obs.flatten()]})\n action = policy.action_space.unflatten(action)\n\n assert np.array_equal(action, expected_action)\n assert env.action_space.contains(action)\n\n @pytest.mark.parametrize('obs_dim, action_dim', [\n ((1, ), (1, )),\n ((1, ), (2, )),\n ((2, ), (2, )),\n ((1, 1), (1, 1)),\n ((1, 1), (2, 2)),\n ((2, 2), (2, 2)),\n ])\n def test_is_pickleable(self, obs_dim, action_dim):\n env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))\n with mock.patch(('garage.tf.policies.'\n 'continuous_mlp_policy_with_model.MLPModel'),\n 
new=SimpleMLPModel):\n policy = ContinuousMLPPolicyWithModel(env_spec=env.spec)\n\n env.reset()\n obs, _, _, _ = env.step(1)\n\n with tf.compat.v1.variable_scope(\n 'ContinuousMLPPolicy/MLPModel', reuse=True):\n return_var = tf.compat.v1.get_variable('return_var')\n # assign it to all one\n return_var.load(tf.ones_like(return_var).eval())\n output1 = self.sess.run(\n policy.model.outputs,\n feed_dict={policy.model.input: [obs.flatten()]})\n\n p = pickle.dumps(policy)\n with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n policy_pickled = pickle.loads(p)\n output2 = sess.run(\n policy_pickled.model.outputs,\n feed_dict={policy_pickled.model.input: [obs.flatten()]})\n assert np.array_equal(output1, output2)\n" ]
[ [ "numpy.ones_like", "numpy.array_equal", "tensorflow.ones_like", "numpy.mean", "numpy.concatenate", "tensorflow.compat.v1.placeholder", "numpy.zeros_like", "numpy.full", "numpy.sin", "numpy.arange", "numpy.zeros", "numpy.round", "tensorflow.compat.v1.variable_scope", "numpy.allclose", "numpy.std", "tensorflow.compat.v1.get_variable", "numpy.asarray", "tensorflow.Graph", "numpy.ones", "numpy.linspace", "numpy.meshgrid" ], [ "tensorflow.compat.v1.placeholder", "numpy.full", "numpy.array_equal", "tensorflow.compat.v1.get_variable", "tensorflow.Graph", "tensorflow.ones_like", "tensorflow.compat.v1.variable_scope" ] ]
mfrigerio17/robot-model-tools
[ "97e25d5c4d1386c503d37a70b57400022c5b7ca0" ]
[ "src/robmodel/convert/urdf/imp.py" ]
[ "import xml.etree.ElementTree as ET\nfrom collections import OrderedDict as ODict\nimport logging, sys\n\nimport numpy as np\nimport math\n\nimport kgprim.core as primitives\nimport kgprim.motions as motions\nfrom kgprim.motions import MotionSequence\nfrom kgprim.motions import PoseSpec\nfrom kgprim.motions import MotionPath\n\nimport robmodel.connectivity\nimport robmodel.ordering\nimport robmodel.frames\nimport robmodel.geometry\nfrom robmodel.connectivity import JointKind\n\nlogger = logging.getLogger(__name__)\n\n'''\nSimply reads the XML file and stores the links/joints data, no conversions\n'''\nclass URDFWrapper :\n class Link:\n def __init__(self, name):\n self.name = name\n self.inertia = None\n self.parent = None\n self.supportingJoint = None\n class Joint:\n def __init__(self, name):\n self.name = name\n self.type = None\n self.frame = None\n self.parent= None\n self.child = None\n #self.predec_H_joint = np.identity(4)\n\n iMomentsLabels = ['ixx', 'iyy', 'izz', 'ixy', 'ixz', 'iyz']\n\n def __init__(self, urdfInFile):\n root = ET.parse(urdfInFile)\n self.robotName = root.getroot().get('name')\n\n linkNodes = root.findall(\"link\")\n jointNodes = root.findall(\"joint\")\n\n self.links = ODict()\n self.joints = ODict()\n self.frames = ODict()\n\n for nodelink in linkNodes:\n name = nodelink.get('name')\n link = URDFWrapper.Link( name )\n link.inertia = self.readInertialData(nodelink)\n self.links[name] = link\n\n for nodejoint in jointNodes:\n name = nodejoint.get('name')\n joint = URDFWrapper.Joint( name )\n joint.type = nodejoint.get('type')\n joint.frame = self.readJointFrameData( nodejoint )\n #joint.predec_H_joint[:3,:3] = getR_extrinsicXYZ( * joint.frame['rpy'] )\n #joint.predec_H_joint[:3,3] = np.array( joint.frame['xyz'] )\n joint.parent= nodejoint.find('parent').get('link')\n joint.child = nodejoint.find('child').get('link')\n\n # Note I keep URDF nomenclature (\"parent\" and \"child\") just to\n # stress the bond with the source URDF XML file. I will later use\n # the more appropriate terms (e.g. \"predecessor\")\n\n self.joints[name] = joint\n\n predecessor = self.links[ joint.parent ]\n successor = self.links[ joint.child ]\n successor.parent = predecessor # a Link instance, not a name\n successor.supportingJoint = joint\n\n def readInertialData(self, linkNode):\n params = dict()\n paramsNode = linkNode.find('inertial')\n\n # Default inertia parameters if the URDF does not have the data\n if paramsNode == None :\n params['mass'] = 0.0\n params['xyz'] = (0.0, 0.0, 0.0)\n for m in URDFWrapper.iMomentsLabels :\n params[m] = 0.0\n return params\n\n mass = float(paramsNode.find('mass').get('value'))\n\n xyz = (0.0, 0.0, 0.0)\n originNode = paramsNode.find('origin')\n if originNode != None :\n comstr = originNode.get('xyz')\n if(comstr != None) :\n xyz = tuple([float(x) for x in comstr.split()])\n\n # We cannot deal with non-zero values for the 'rpy' attribute\n rpystr = originNode.get('rpy')\n if(rpystr != None) :\n tmp = [float(x) for x in rpystr.split()]\n if(sum(tmp) != 0) :\n logger.warning('The rpy attribute in the inertial section is not yet supported (link ' + linkNode.get('name') + '). 
Ignoring it.')\n\n moments = paramsNode.find('inertia')\n for m in URDFWrapper.iMomentsLabels :\n params[m] = float(moments.get(m))\n\n params['mass'] = mass\n params['xyz'] = xyz\n return params\n\n\n def readJointFrameData(self, jointNode):\n params = dict()\n\n # URDF defaults:\n params['xyz'] = (0.,0.,0.)\n params['rpy'] = (0.,0.,0.)\n\n frameNode = jointNode.find('origin')\n if frameNode != None :\n xyz_node = frameNode.get('xyz')\n if xyz_node != None :\n params['xyz'] = tuple([float(x) for x in xyz_node.split()])\n rpy_node = frameNode.get('rpy')\n if rpy_node != None :\n params['rpy'] = tuple([float(x) for x in rpy_node.split()])\n\n axis_node = jointNode.find('axis')\n if axis_node != None :\n params['axis'] = tuple([float(x) for x in axis_node.get('xyz').split()])\n else :\n params['axis'] = (1.,0.,0.) # URDF default\n\n return params\n\n\n\ndef toValidID( name ) :\n return name.replace('-', '__')\n\n\n\ndef linkFrameToJointFrameInURDF(urdfjoint):\n '''\n Return the data about the location of the joint frame relative to the\n predecessor link frame.\n\n This function returns three values:\n - the `xyz` attribute as found in the source URDF\n - the `rpy` attribute as found in the source URDF\n - the rigid motion model representing the relative pose (this is the motion\n that the link frame should undergo to coincide with the joint frame)\n '''\n xyz = urdfjoint.frame['xyz']\n rpy = urdfjoint.frame['rpy']\n\n tr = [motions.translation(a, xyz[a.value]) for a in motions.Axis if round(xyz[a.value],5) != 0.0]\n rt = [motions.rotation (a, rpy[a.value]) for a in motions.Axis if round(rpy[a.value],5) != 0.0]\n\n motion__linkToJoint = MotionSequence(tr+rt, MotionSequence.Mode.fixedFrame)\n return xyz, rpy, motion__linkToJoint\n\ndef convert( urdf ) :\n '''\n Reads the model from a URDFWrapper instance, and construct the corresponding\n models in our format.\n '''\n\n robotName = urdf.robotName\n links = ODict()\n joints = ODict()\n pairs = []\n children = {}\n orphans = []\n\n for urdfname in urdf.links.keys() :\n name = toValidID( urdfname )\n link = robmodel.connectivity.Link(name)\n links[name] = link\n children[name] = []\n if urdf.links[urdfname].parent == None :\n orphans.append(link)\n\n if len(orphans)==0 :\n logger.fatal(\"Could not find any root link (i.e. a link without parent).\")\n logger.fatal(\"Check for kinematic loops.\")\n print(\"Error, no root link found. 
Aborting\", file=sys.stderr)\n sys.exit(-1)\n if len(orphans) > 1 :\n logger.warning(\"Found {0} links without parent, only one expected\".format(len(orphans)))\n logger.warning(\"Any robot model must have exactly one root element.\")\n logger.warning(\"This might lead to unexpected results.\")\n robotBase = orphans[0]\n\n for jname in urdf.joints.keys() :\n urdfjoint = urdf.joints[jname]\n name = toValidID( jname )\n jkind = urdfjoint.type\n if jkind in JointKind.__members__.keys() :\n jkind = JointKind[jkind] # otherwise, it stays a string (like 'fixed')\n joint = robmodel.connectivity.Joint(name, jkind)\n parent = links[ toValidID(urdfjoint.parent) ]\n child = links[ toValidID(urdfjoint.child) ]\n children[parent.name].append( child )\n\n joints[name] = joint\n pairs.append( robmodel.connectivity.KPair(joint, parent, child) )\n\n\n # CONNECTIVITY MODEL\n connectivityModel = robmodel.connectivity.Robot(\n urdf.robotName, links, joints, pairs)\n\n # REGULAR NUMBERING\n # There is no numbering scheme in the URDF format, so we arbitrarily\n # associate code to each link via a Depth-First-Traversal\n code = 0\n numbering = {}\n def setCode(currentLink):\n nonlocal code, numbering\n if currentLink == None : return\n numbering[currentLink.name] = code\n for child in children[currentLink.name] :\n code = code + 1\n setCode( child )\n\n setCode( robotBase )\n ordering = { 'robot': robotName, 'nums' : numbering }\n orderedModel = robmodel.ordering.Robot(connectivityModel, ordering)\n\n # FRAMES\n # The URDF does not have explicit frames, so there are no more frames than\n # joints and links; thus the second argument is always the empty list\n framesModel = robmodel.frames.RobotDefaultFrames(orderedModel, [])\n\n # GEOMETRY MEASUREMENTS\n poses = []\n axes = {}\n for joint in urdf.joints.values() :\n # Get the current joint and link (predecessor)\n myjoint = joints[ toValidID( joint.name ) ]\n mylink = orderedModel.predecessor(myjoint)\n\n logger.debug(\"Processing joint * {0} * and predecessor link * {1} *\".format(myjoint.name, mylink.name) )\n\n # The relative pose of the URDF joint frame relative to the URDF link frame\n xyz, rpy, motion_link_to_joint = linkFrameToJointFrameInURDF(joint)\n\n jaxis = np.round( np.array(joint.frame['axis']), 5)\n\n logger.debug(\"Joint axis in URDF coordinates : {0}\".format(jaxis) )\n logger.debug(\"URDF joint xyz and rpy attributes : {0} {1}\".format(xyz, rpy) )\n\n frame_joint = framesModel.framesByName[ myjoint.name ]\n frame_link = framesModel.framesByName[ mylink.name ]\n pose = primitives.Pose(target=frame_joint, reference=frame_link)\n poses.append( PoseSpec(pose, motion_link_to_joint) )\n axes[myjoint.name] = joint.frame['axis']\n\n posesModel = motions.PosesSpec(robotName, poses)\n\n geometryModel = robmodel.geometry.Geometry(orderedModel, framesModel, posesModel, axes)\n return connectivityModel, orderedModel, framesModel, geometryModel\n" ]
[ [ "numpy.array" ] ]
yiling-h/selective-inference
[ "dc66337b8dd72ed75ca09b4dcef8d6d814b5ed57" ]
[ "selectinf/randomized/paired_group_lasso_backup.py" ]
[ "from __future__ import print_function\nimport functools\nfrom copy import copy\n\nimport numpy as np\nfrom scipy.stats import norm as ndist\nfrom scipy.linalg import block_diag\n\nimport regreg.api as rr\n\nfrom .query import query, affine_gaussian_sampler\n\nfrom .randomization import randomization\nfrom .approx_reference_grouplasso import group_lasso\nfrom ..base import restricted_estimator\nfrom ..algorithms.debiased_lasso import (debiasing_matrix,\n pseudoinverse_debiasing_matrix)\n\nclass paired_group_lasso(query):\n def __init__(self,\n X,\n weights,\n ridge_term,\n randomizer_scale,\n perturb=None):\n r\"\"\"\n Create a new post-selection object for the paired group LASSO problem\n\n Parameters\n ----------\n\n weights : np.ndarray\n Feature weights for L-1 penalty. If a float,\n it is broadcast to all features.\n\n ridge_term : float\n How big a ridge term to add?\n\n randomizer : object\n Randomizer -- contains representation of randomization density.\n\n perturb : np.ndarray\n Random perturbation subtracted as a linear\n term in the objective function.\n \"\"\"\n\n self.X = X\n self.nfeature = p = self.X.shape[1]\n self.nobs = n = self.X.shape[0]\n self.X_aug = self.augment_X()\n self.Y_aug = self.augment_Y()\n\n self.groups, self.groups_to_vars, self.weights = self.create_groups(weights)\n\n # Optimization hyperparameters\n self.ridge_term = ridge_term\n self.penalty = rr.group_lasso(self.groups,\n weights=self.weights,\n lagrange=1.)\n self.perturb = perturb # random perturbation\n\n self.randomizer_scale = randomizer_scale\n\n # TESTED\n def augment_X(self):\n r\"\"\"\n Augment the matrix X to get a design matrix used for the group lasso solver.\n \"\"\"\n n = self.X.shape[0]\n p = self.X.shape[1]\n q = p - 1\n\n X_aug = np.zeros((p*n, q*p))\n for j in range(p):\n X_aug[(j*n):(j*n+n),(j*q):(j*q+q)] = np.delete(self.X, j, axis=1)\n\n return X_aug\n\n # TESTED\n def augment_Y(self):\n r\"\"\"\n Generate an augmented vector Y to get a response vector used for the group lasso solver.\n \"\"\"\n n = self.X.shape[0]\n p = self.X.shape[1]\n\n Y_aug = np.zeros((p*n,))\n for j in range(p):\n Y_aug[(j*n):(j*n+n),] = self.X[:,j]\n\n return Y_aug\n\n # TESTED\n def create_groups(self, weights):\n r\"\"\"\n 1. Generate an ndarray containing the appropriate grouping of parameters: (b_ij, b_ji)\n 2. Generate a dict that maps back from g to (i,j), i<j\n 3. Generate a dict for group weights that is interperatable by the group lasso solver,\n from a symmetric ndarray of weights\n \"\"\"\n n = self.X.shape[0]\n p = self.X.shape[1]\n\n # E.g. groups = [0, 0, 1, 1, 1]; start counting from 0\n groups = np.zeros((p * (p - 1),))\n g = 0\n groups_to_vars = dict()\n group_weights = dict()\n\n # indicator of whether weights is a real number\n is_singleton = (type(weights) == float or type(weights) == int)\n\n for i in range(p):\n for j in range(i + 1, p):\n # Assign b_ij and b_ji to be in the same group\n # Note that b_ji is mapped to an earlier dimension of the vectorized parameter\n groups[j * (p - 1) + i] = g\n groups[i * (p - 1) + j - 1] = g\n # Record this correspondence between g and i,j\n groups_to_vars[g] = [i,j]\n # Record the group weights accordingly\n if is_singleton:\n group_weights[g] = weights\n else:\n group_weights[g] = weights[i, j]\n\n g = g + 1\n\n # Cast the datatype\n groups = groups.tolist()\n groups = list(map(int, groups))\n\n return groups, groups_to_vars, group_weights\n\n # TESTED\n def undo_vectorize(self, k):\n r\"\"\"\n 1. 
Mapp the k-th entry of the vectorized parameter to its corresponding\n entry in the matrix parameter\n \"\"\"\n p = self.X.shape[1]\n j = k // (p-1)\n i = k % (p-1)\n if i >= j:\n i = i + 1\n\n return i,j\n\n # TESTED\n # Cast the vectorized parameters to a matrix with zero diagonals\n def vec_to_mat(self, p, vec):\n mat = np.zeros((p, p))\n for k in range(len(vec)):\n i,j = self.undo_vectorize(k)\n # print(k, 'mapped to', i, j)\n mat[i,j] = vec[k]\n return mat\n\n # TESTED\n # Given an index pair (i,j) of the B matrix,\n # this function returns the corresponding index in the vectorized parameter\n def mat_idx_to_vec_idx(self, i, j, p):\n if i < j:\n return (p-1)*j + i\n return (p-1)*j + i - 1\n\n # TESTED\n # The inverse of vec_to_mat()\n # This is the vectorization operator\n def mat_to_vec(self, p, mat):\n vec = np.zeros((p*(p-1),))\n for i in range(p):\n for j in range(p):\n if i != j:\n vec_idx = self.mat_idx_to_vec_idx(i,j,p)\n vec[vec_idx] = mat[i,j]\n return vec\n\n # REQUIRES: perturb is a p x p ndarray with the diagonal being zero\n def fit(self):\n ### SELECTION PART\n # Vectorize the perturbation\n if self.perturb is not None:\n perturb_vec = self.mat_to_vec(p=self.nfeature, mat=self.perturb)\n glsolver = group_lasso.gaussian(self.X_aug,\n self.Y_aug,\n np.array(self.groups),\n self.weights,\n randomizer_scale=self.randomizer_scale,\n perturb=perturb_vec)\n signs = glsolver.fit()\n else:\n glsolver = group_lasso.gaussian(self.X_aug,\n self.Y_aug,\n np.array(self.groups),\n self.weights,\n randomizer_scale=self.randomizer_scale)\n signs = glsolver.fit()\n # If perturbation not provided, stack the perturbation given by glslover into matrix\n perturb_vec = glsolver._initial_omega\n self.perturb = self.vec_to_mat(p=self.nfeature, vec=perturb_vec)\n\n # coeffs = signs['directions']\n # nonzero = glsolver.selection_variable['directions'].keys()\n\n # gammas negative in original implementation?\n gammas = glsolver.observed_opt_state\n # subgrad is the subgradient vector corresponding to beta,\n # with each of its entries scaled up by the correpsponding penalty weight\n subgrad = glsolver.initial_subgrad\n\n # Cast the subgradient vector into matrix\n subgrad_mat = self.vec_to_mat(p=self.nfeature, vec=subgrad)\n vectorized_beta = glsolver.initial_soln\n # Stack the parameters into a pxp matrix\n beta = self.vec_to_mat(p = self.nfeature, vec=vectorized_beta)\n\n # KKT map for the group lasso solver\n # LHS_gl = glsolver._initial_omega\n # RHS_gl = -(self.X_aug.T @ self.Y_aug) + self.X_aug.T @ self.X_aug @ vectorized_beta + subgrad\n # num_disagreement = np.abs(LHS_gl - RHS_gl) > 0.00000001\n # print(num_disagreement)\n # num_dis_mat = self.vec_to_mat(p = self.nfeature, vec=num_disagreement.astype(int)).astype(bool)\n # print('gl disagreement', num_dis_mat)\n\n # Calculate the KKT map for the paired group lasso\n # rhs = - self.X.T @ self.X + (self.X.T @ self.X) @ beta + subgrad_mat\n # np.fill_diagonal(rhs, 0)\n # lhs = self.perturb\n\n self.observed_opt_state = glsolver.observed_opt_state\n self.opt_linear = glsolver.opt_linear\n self.observed_score_state = glsolver.observed_score_state\n self.opt_offset = glsolver.opt_offset\n self._initial_omega = glsolver._initial_omega\n self.ordered_groups = glsolver._ordered_groups\n #print(\"sub\", glsolver.initial_subgrad)\n #print('vars', glsolver.ordered_vars)\n\n # -(self.X_aug.T @ self.Y_aug) == pgl.observed_score_state\n print(\"XY\",-(self.X_aug.T @ self.Y_aug))\n\n # self.opt_linear.dot(self.observed_opt_state) = X^T sum(X gamma u)\n # 
roughly the same\n # print(np.abs(self.opt_linear.dot(self.observed_opt_state) - (self.X_aug.T @ self.X_aug) @ vectorized_beta))\n\n self.beta = beta\n print(\"beta\", beta)\n\n ### INFERENCE PART\n\n ## TESTED\n ## X_ is the augmented design matrix\n ## Y_ is the augmented response\n ## t is the value of x_i^T x_j\n ## REQUIRES: i < j, i, j follows the natural enumeration (starting from 1),\n ## p is the dimension of data,\n def XY(t, i, j, p, X_, Y_):\n i = i - 1\n j = j - 1\n\n # the first appearance of x_i^T x_j\n idx_ij = i*(p-1) + j - 1\n idx_ji = j*(p-1) + i\n # the target object\n XY = X_.T @ Y_\n XY[idx_ij] = t\n XY[idx_ji] = t\n return XY\n\n ## TESTED\n ## NOTES: Assuming i < j, then b_ij comes after b_ji in the vectorized beta\n ## This implies when we order covariates according to groups,\n ## within the group g corresponding to i,j,\n ## the earlier one corresponds to b_ji, and the later one corresponds to b_ij.\n ## That is, the earlier column contains the observations x_j,\n ## and the later column contains the observations x_i.\n ## REQUIRES: i, j follows the python indexing (starting from 0),\n ## p is the dimension of data, g is the group label of i,j,\n ## Retrieval of g: g = groups[j * (p - 1) + i]\n def XXE(t, i, j, p, X_, XE):\n # Swap the value of i,j if i>j,\n # so that i always represents the lower value\n if i > j:\n k = i\n i = j\n j = k\n\n # the target object\n XXE_ = X_.T @ XE\n\n # identify x_i*x_j and x_j*x_i in the kth block\n for k in range(p):\n # when both x_i and x_j appear in the kth blcok\n if i != k and j != k:\n if i > k:\n i_idx = (p-1)*k + i - 1\n else:\n i_idx = (p-1)*k + i\n\n if j > k:\n j_idx = (p-1)*k + j - 1\n else:\n j_idx = (p-1)*k + j\n\n # identify x_i^T * x_j if b_jk != 0\n if self.groups[j_idx] in self.ordered_groups:\n # g_j is the index of x_j's group\n # in the list of ordered selected groups\n g_j = self.ordered_groups.index(self.groups[j_idx])\n\n # In our indexing rule that determines the group index\n # of each parameter, if two augmented vectors, one containing x_i,\n # one containing x_j, are in the same group, with i < j,\n # then in the truncated matrix ordered by groups,\n # the column containing x_j will be the to left of the other,\n # as explained in the comments above function definition\n if np.max(self.undo_vectorize(j_idx)) == j:\n j_idx_XE = 2 * g_j\n else:\n j_idx_XE = 2 * g_j + 1\n XXE_[i_idx, j_idx_XE] = t\n\n # identify x_j^T * x_i if b_ik != 0\n if self.groups[i_idx] in self.ordered_groups:\n # g_i is the index of x_i's group\n # in the list of ordered selected groups\n g_i = self.ordered_groups.index(self.groups[i_idx])\n if np.max(self.undo_vectorize(i_idx)) == i:\n i_idx_XE = 2 * g_i\n else:\n i_idx_XE = 2 * g_i + 1\n XXE_[j_idx, i_idx_XE] = t\n return XXE_\n\n ## TESTED\n ## NOTES: beta_grouped is the solution of beta ordered according to groups\n ## Retrieval of beta_grouped: beta_grouped = initial_soln[ordered_vars]\n ## prec:\n def quad_exp(t, X_, Y_, XE, p, prec,\n beta_grouped, subgradient, i, j):\n XY_ = XY(t=t, i=i, j=j, p=p, X_=X_, Y_=Y_)\n XXE_ = XXE(t=t, i=i, j=j, p=p, X_=X_, XE=XE)\n omega = -XY_ + XXE_ @ beta_grouped + subgradient\n return np.exp(omega.T @ prec @ omega)\n\n ## TESTED\n def Q(t, i, j, p, XE):\n # Swap the value of i,j if i>j,\n # so that i always represents the lower value\n if i > j:\n k = i\n i = j\n j = k\n\n # the target object\n XEXE = XE.T @ XE\n\n # identify x_i*x_j and x_j*x_i in the kth block\n for k in range(p):\n # when both x_i and x_j appear in the kth blcok\n if i 
!= k and j != k:\n if i > k:\n i_idx = (p - 1) * k + i - 1\n else:\n i_idx = (p - 1) * k + i\n\n if j > k:\n j_idx = (p - 1) * k + j - 1\n else:\n j_idx = (p - 1) * k + j\n\n # identify x_i^T * x_j if b_jk != 0 AND b_ik != 0\n if self.groups[j_idx] in self.ordered_groups and \\\n self.groups[i_idx] in self.ordered_groups:\n # g_j, g_i is the index of x_j, x_i's group\n # in the list of ordered selected groups\n g_j = self.ordered_groups.index(self.groups[j_idx])\n g_i = self.ordered_groups.index(self.groups[i_idx])\n\n # In our indexing rule that determines the group index\n # of each parameter, if two augmented vectors, one containing x_i,\n # one containing x_j, are in the same group, with i < j,\n # then in the truncated matrix ordered by groups,\n # the column containing x_j will be the to left of the other,\n # as explained in the comments above function definition\n if np.max(self.undo_vectorize(j_idx)) == j:\n j_idx_XE = 2 * g_j\n else:\n j_idx_XE = 2 * g_j + 1\n\n if np.max(self.undo_vectorize(i_idx)) == i:\n i_idx_XE = 2 * g_i\n else:\n i_idx_XE = 2 * g_i + 1\n\n XEXE[i_idx_XE, j_idx_XE] = t\n XEXE[j_idx_XE, i_idx_XE] = t\n return XEXE\n\n # print(Q(100, 0, 1, p=4, XE=glsolver.XE))\n # print(Q(100, 1, 2, p=4, XE=glsolver.XE))\n # print(Q(100, 1, 3, p=4, XE=glsolver.XE))\n\n # Calculate the Gamma matrix in the Jacobian\n def calc_GammaMinus(gamma, active_dirs):\n \"\"\"Calculate Gamma^minus (as a function of gamma vector, active directions)\n \"\"\"\n to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]\n return block_diag(*[i for gp in to_diag for i in gp])\n\n def calc_GammaBar(gamma, active_dirs):\n \"\"\"Calculate Gamma^minus (as a function of gamma vector, active directions)\n \"\"\"\n to_diag = [[g] * (ug.size) for (g, ug) in zip(gamma, active_dirs.values())]\n return block_diag(*[i for gp in to_diag for i in gp])\n\n ## UNTESTED\n ## Calculate the Jacobian as a function of t, and location parameters i,j\n def Jacobian(t, i, j):\n\n ## Tasks:\n ## 1. Compute Q(t) by replacing x_i*x_j with t\n Q_ = Q(t, i, j, p=self.nfeature, XE=glsolver.XE)\n ## 2. Compute U using GL file lines 179\n U_ = glsolver.U\n ## 3. Compute U_bar (V) using GL file lines 143-161\n V_ = glsolver.V\n ## 4. Compute Lambda using GL file compute_Lg()\n L_ = glsolver.L\n ## 5. Compute GammaBar using GL file calc_GammaBar()\n G_ = calc_GammaBar(glsolver.observed_opt_state, glsolver.active_dirs)\n\n J_ = np.block([(Q_ @ G_ + L_) @ V_, Q_ @ U_])\n return np.linalg.det(J_)\n \"\"\"\n ## Tasks:\n ## 1. Compute Q(t) by replacing x_i*x_j with t\n Q_ = Q(t, i, j, p = self.nfeature, XE=glsolver.XE)\n Q_inv = np.linalg.inv(Q_)\n ## 2. Compute U_bar (V) using GL file lines 143-161\n V_ = glsolver.V\n ## 3. Compute Lambda using GL file compute_Lg()\n L_ = glsolver.L\n ## 4. 
Compute Gamma using GL file calc_GammaMinus()\n G_ = calc_GammaMinus(glsolver.observed_opt_state, glsolver.active_dirs)\n\n return np.linalg.det(Q_) * np.linalg.det(G_ + V_.T @ Q_inv @ L_ @ V_)\n \"\"\"\n\n #print(Jacobian(10,1,2))\n\n \"\"\"\n # FOR TESTING\n def call_quad_exp(i,j):\n #t = self.X[:,i-1].T @ self.X[:,j-1]\n t = 100\n X_ = self.X_aug\n XE = glsolver.XE\n print(XE.shape)\n Y_ = self.Y_aug\n p = self.nfeature\n beta_grouped = glsolver.initial_soln[glsolver.ordered_vars]\n subgradient = glsolver.initial_subgrad\n quad_t = quad_exp(t=t, X_=X_, Y_=Y_, XE=XE, p=p, prec=np.identity(p*(p-1)),\n beta_grouped=beta_grouped, subgradient=subgradient, i=i, j=j)\n quad_original = np.exp(glsolver._initial_omega.T @ glsolver._initial_omega)\n \"\"\"\n" ]
[ [ "numpy.delete", "numpy.array", "numpy.zeros", "numpy.block", "scipy.linalg.block_diag", "numpy.exp", "numpy.linalg.det" ] ]
lucid281/pyEfi
[ "c8a5b69820a3c1e7c4c652f7e4194cd8ce8a6e18" ]
[ "app/ref/test-matlib.py" ]
[ "# this is just an example of using matplotlib with pyefi.\n# it's not an ideal because matplot uses many cpu cycles.\n\nimport redis\nfrom collections import deque\n\nimport matplotlib\n\n# ignore PEP E402\nmatplotlib.use('Qt5Agg') # needs to be here, before matplotlib.*\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nredisDb = redis.StrictRedis(unix_socket_path='/var/run/redis/redis.sock',\n decode_responses=True,\n db=0)\n\n\n# plot class\nclass AnalogPlot:\n # constr\n def __init__(self, maxLen):\n self.ax = deque([0.0] * maxLen)\n self.ay = deque([0.0] * maxLen)\n self.maxLen = maxLen\n\n # add to buffer\n def addToBuf(self, buf, val):\n if len(buf) < self.maxLen:\n buf.append(val)\n else:\n buf.pop()\n buf.appendleft(val)\n\n # add data\n def add(self, data):\n assert(len(data) == 2)\n self.addToBuf(self.ax, data[0])\n self.addToBuf(self.ay, data[1])\n\n # update plot\n def update(self, frameNum, a0, a1):\n try:\n top1 = redisDb.zrevrange('pyefi:timeindex', 0, 0, 'withscores')\n # print(top1)\n idnow = top1[0][0]\n rpm = redisDb.hmget(\"pyefi:events:%s\" % idnow, 'rpm')\n rpmdot = redisDb.hmget(\"pyefi:events:%s\" % idnow, 'rpmdot')\n data = []\n data.append(rpm[0])\n data.append(int(rpmdot[0]))\n\n if(len(data) == 2):\n self.add(data)\n a0.set_data(range(self.maxLen), self.ax)\n a1.set_data(range(self.maxLen), self.ay)\n except KeyboardInterrupt:\n print('exiting')\n # except IndexError:\n # pass\n return a0, a1\n\n\ndef main():\n print('plotting data...')\n analogPlot = AnalogPlot(200)\n\n # set up animation\n fig = plt.figure()\n ax = plt.axes(xlim=(0, 200), ylim=(-1000, 8000))\n a0, = ax.plot([], [])\n a1, = ax.plot([], [])\n anim = animation.FuncAnimation(fig, analogPlot.update,\n fargs=(a0, a1),\n interval=40,\n frames=300,\n blit=True)\n\n plt.show()\n # anim.save('animation.mp4', fps=30,\n # extra_args=['-vcodec', 'libx264'])\n\n print('exiting.')\n\n\n# call main\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.use", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.axes" ] ]
lars-frogner/GriSPy
[ "98a2448ffc1454476d58a66843a9922c36f0ffcc" ]
[ "grispy/validators.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of the\n# GriSPy Project (https://github.com/mchalela/GriSPy).\n# Copyright (c) 2019, Martin Chalela\n# License: MIT\n# Full Text: https://github.com/mchalela/GriSPy/blob/master/LICENSE\n\n\n\"\"\"Functions to validate GriSPy input parameters.\"\"\"\n\nimport numpy as np\n\n\n# ---------------------------------\n# Validators for method params\n# Meant to be called within each method\n# --------------------------------\n\n\ndef validate_centres(centres, data):\n \"\"\"Validate method params: centres.\"\"\"\n # Chek if numpy array\n if not isinstance(centres, np.ndarray):\n raise TypeError(\n \"Centres: Argument must be a numpy array.\"\n \"Got instead type {}\".format(type(centres))\n )\n\n # Check if data has the expected dimension\n if centres.ndim != 2 or centres.shape[1] != data.shape[1]:\n raise ValueError(\n \"Centres: Array has the wrong shape. Expected shape of (n, {}), \"\n \"got instead {}\".format(data.ndim, centres.shape)\n )\n # Check if data has the expected dimension\n if len(centres.flatten()) == 0:\n raise ValueError(\"Centres: Array must have at least 1 point\")\n\n # Check if every data point is valid\n if not np.isfinite(centres).all():\n raise ValueError(\"Centres: Array must have real numbers\")\n\n\ndef validate_equalsize(a, b):\n \"\"\"Check if two arrays have the same lenght.\"\"\"\n if len(a) != len(b):\n raise ValueError(\"Arrays must have the same lenght.\")\n\n\ndef validate_distance_bound(distance, periodic):\n \"\"\"Distance bounds, upper and lower, can be scalar or numpy array.\"\"\"\n # Check if type is valid\n if not (np.isscalar(distance) or isinstance(distance, np.ndarray)):\n raise TypeError(\n \"Distance: Must be either a scalar or a numpy array.\"\n \"Got instead type {}\".format(type(distance))\n )\n\n # Check if value is valid\n if not np.all(distance >= 0):\n raise ValueError(\"Distance: Must be positive.\")\n\n # Check distance is not larger than periodic range\n for v in periodic.values():\n if v is None:\n continue\n if np.any(distance > (v[1] - v[0])):\n raise ValueError(\n \"Distance can not be higher than the periodicity range\"\n )\n\n\ndef validate_shell_distances(lower_bound, upper_bound, periodic):\n \"\"\"Distance bounds, upper and lower, can be scalar or numpy array.\"\"\"\n validate_distance_bound(lower_bound, periodic)\n validate_distance_bound(upper_bound, periodic)\n\n # Check that lower_bound is lower than upper_bound\n if not np.all(lower_bound <= upper_bound):\n raise ValueError(\n \"Distance: Lower bound must be lower than higher bound.\"\n )\n\n\ndef validate_bool(flag):\n \"\"\"Check if bool.\"\"\"\n if not isinstance(flag, bool):\n raise TypeError(\n \"Flag: Expected boolean. \" \"Got instead type {}\".format(type(flag))\n )\n\n\ndef validate_sortkind(kind):\n \"\"\"Define valid sorting algorithm names.\"\"\"\n valid_kind_names = [\"quicksort\", \"mergesort\", \"heapsort\", \"stable\"]\n\n # Chek if string\n if not isinstance(kind, str):\n raise TypeError(\n \"Kind: Sorting name must be a string. \"\n \"Got instead type {}\".format(type(kind))\n )\n\n # Check if name is valid\n if kind not in valid_kind_names:\n raise ValueError(\n \"Kind: Got an invalid name: '{}'. \"\n \"Options are: {}\".format(kind, valid_kind_names)\n )\n\n\ndef validate_n_nearest(n, data, periodic):\n \"\"\"Validate method params: n_nearest.\"\"\"\n # Chek if int\n if not isinstance(n, int):\n raise TypeError(\n \"Nth-nearest: Argument must be an integer. 
\"\n \"Got instead type {}\".format(type(n))\n )\n # Check if number is valid, i.e. higher than 1\n if n < 1:\n raise ValueError(\n \"Nth-nearest: Argument must be higher than 1. \"\n \"Got instead {}\".format(n)\n )\n\n # check that n is not larger than the number of data points\n # within 1 periodic range\n Np = len(data)\n valid_axis = len([v for v in periodic.values() if v is not None])\n Nvalid = Np * 3**valid_axis\n if n > Nvalid:\n raise ValueError(\n \"Nth-nearest: Argument must be lower than the number of \"\n \"available data points within 1 periodic range, {}. \"\n \"Got instead {}\".format(Nvalid, n)\n )\n" ]
[ [ "numpy.all", "numpy.any", "numpy.isscalar", "numpy.isfinite" ] ]
lUllLabs/tensorlayer
[ "fb5b94715ca258cc699ef63d3ba8566ebad2a0cb" ]
[ "tests/test_models.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\n\ntry:\n from tests.unittests_helper import CustomTestCase\nexcept ImportError:\n from unittests_helper import CustomTestCase\n\nimport tensorflow as tf\nimport tensorlayer as tl\n\n\nclass VGG_Model_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n\n with tf.Graph().as_default():\n # - Classify ImageNet classes with VGG16, see `tutorial_models_vgg16.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_vgg16.py>__`\n x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n # get the whole model\n vgg1 = tl.models.VGG16(x)\n # restore pre-trained VGG parameters\n # sess = tf.InteractiveSession()\n # vgg.restore_params(sess)\n # use for inferencing\n # probs = tf.nn.softmax(vgg1.outputs)\n\n cls.vgg1_layers = vgg1.all_layers\n cls.vgg1_params = vgg1.all_params\n\n with tf.Graph().as_default():\n # - Extract features with VGG16 and Train a classifier with 100 classes\n x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n # get VGG without the last layer\n vgg2 = tl.models.VGG16(x, end_with='fc2_relu')\n\n cls.vgg2_layers = vgg2.all_layers\n cls.vgg2_params = vgg2.all_params\n\n print(\"TYPE:\", type(vgg2))\n\n # add one more layer\n _ = tl.layers.DenseLayer(vgg2, n_units=100, name='out')\n # initialize all parameters\n # sess = tf.InteractiveSession()\n # tl.layers.initialize_global_variables(sess)\n # restore pre-trained VGG parameters\n # vgg.restore_params(sess)\n # train your own classifier (only update the last layer)\n\n cls.vgg2_train_params = tl.layers.get_variables_with_name('out')\n\n with tf.Graph().as_default() as graph:\n # - Reuse model\n x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n # get VGG without the last layer\n vgg3 = tl.models.VGG16(x, end_with='fc2_relu')\n # reuse the parameters of vgg1 with different input\n # restore pre-trained VGG parameters (as they share parameters, we don’t need to restore vgg2)\n # sess = tf.InteractiveSession()\n # vgg1.restore_params(sess)\n\n cls.vgg3_layers = vgg3.all_layers\n cls.vgg3_params = vgg3.all_params\n cls.vgg3_graph = graph\n\n @classmethod\n def tearDownClass(cls):\n tf.reset_default_graph()\n\n def test_vgg1_layers(self):\n self.assertEqual(len(self.vgg1_layers), 23)\n\n def test_vgg2_layers(self):\n self.assertEqual(len(self.vgg2_layers), 22)\n\n def test_vgg3_layers(self):\n self.assertEqual(len(self.vgg3_layers), 22)\n\n def test_vgg1_params(self):\n self.assertEqual(len(self.vgg1_params), 32)\n\n def test_vgg2_params(self):\n self.assertEqual(len(self.vgg2_params), 30)\n\n def test_vgg3_params(self):\n self.assertEqual(len(self.vgg3_params), 30)\n\n def test_vgg2_train_params(self):\n self.assertEqual(len(self.vgg2_train_params), 2)\n\n def test_reuse_vgg(self):\n\n with self.assertNotRaises(Exception):\n with self.vgg3_graph.as_default():\n x = tf.placeholder(tf.float32, [None, 224, 224, 3])\n _ = tl.models.VGG16(x, end_with='fc2_relu', reuse=True)\n\n\nif __name__ == '__main__':\n\n tf.logging.set_verbosity(tf.logging.DEBUG)\n tl.logging.set_verbosity(tl.logging.DEBUG)\n\n unittest.main()\n" ]
[ [ "tensorflow.logging.set_verbosity", "tensorflow.reset_default_graph", "tensorflow.placeholder", "tensorflow.Graph" ] ]
OpenMeasurement/virtual_society_modeling_framework
[ "709d23da1f4e4c010b4995d00f413dfacb477ad2" ]
[ "audience_modeling_toolbox/plotting.py" ]
[ "# MIT License\n\n# Copyright (c) 2021 OpenMeasurement\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef _log_colormap(x, vmin, vmax, cmap=matplotlib.cm.viridis) :\n norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)\n return cmap(norm(x))\n\ndef _freq_ticks(max_freq=20, jump=2) :\n tickrange = range(0, max_freq+1, jump)\n ticklabels = [str(i) for i in range(0, max_freq, jump)]\n ticklabels[-1] = ticklabels[-1] + \"+\"\n return tickrange, ticklabels\n\ndef _plot_1d_reach(data, dim, ax) :\n if ax is None:\n fig, ax = plt.subplots()\n\n data_size = len(data)\n tickrange, ticklabels = _freq_ticks(max_freq=data_size, jump=2)\n\n ax.bar(np.arange(data_size), data,\n color=[_log_colormap(v, vmin=1, vmax=np.max(data)) for v in data])\n ax.set_xlabel(dim)\n ax.set_ylabel(\"Reach\")\n ax.set_yscale(\"log\")\n ax.set_xticks(tickrange)\n ax.set_xticklabels(ticklabels)\n\n return ax\n\ndef _plot_2d_reach(data, dims, ax=None) :\n\n if ax is None:\n fig, ax = plt.subplots()\n\n data_size = data.shape[0]\n tickrange, ticklabels = _freq_ticks(max_freq=data_size, jump=2)\n\n vmax = np.max(data)\n im = ax.imshow(data,\n norm=matplotlib.colors.LogNorm(\n vmin=1,\n vmax=vmax\n ),\n origin=\"lower\"\n )\n ax.set_ylabel(dims[0])\n ax.set_xlabel(dims[1])\n\n ax.set_xticks(tickrange)\n ax.set_xticklabels(ticklabels)\n ax.set_yticks(tickrange)\n ax.set_yticklabels(ticklabels)\n\n plt.gcf().colorbar(im, ax=ax)\n\n return ax\n" ]
[ [ "numpy.max", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.gcf", "matplotlib.colors.LogNorm" ] ]
hirayamy/nngen
[ "63f72be83e4bb1a697a969fb6a14d0335ec0316f" ]
[ "nngen/onnx/slice_.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\n\nimport nngen.operator as operator\n\nfrom . import util\n\n\ndef Slice(visitor, node):\n\n srcs = []\n\n for src in node.input:\n src_obj = visitor.visit(src)\n srcs.append(src_obj)\n\n srcs = [util.optimize_to_raw_value(src) for src in srcs]\n\n input = srcs[0]\n if isinstance(input, (tuple, list)):\n input = np.array(input)\n\n shape = input.shape\n\n starts = [0 for s in shape]\n ends = [s for s in shape]\n axes = [i for i in range(len(shape))]\n steps = [1 for s in shape]\n\n # for Slice-1 (Deprecated)\n for attribute in node.attribute:\n if attribute.name == 'starts':\n starts = [v for v in attribute.ints]\n\n elif attribute.name == 'ends':\n ends = [v for v in attribute.ints]\n\n elif attribute.name == 'axes':\n axes = [v for v in attribute.ints]\n\n if len(srcs) > 1:\n starts = srcs[1]\n ends = srcs[2]\n axes = srcs[3]\n steps = srcs[4]\n\n if isinstance(input, (tuple, list, np.ndarray)):\n input = np.array(input)\n v = get_sliced_value(input, starts, ends, axes, steps)\n return v\n\n starts, ends, steps = extract_slices(input, starts, ends, axes, steps)\n return operator.slice_(input, starts, ends, steps)\n\n\ndef get_sliced_value(input, starts, ends, axes, steps):\n\n slices = to_slices(input, starts, ends, axes, steps)\n return input[slices]\n\n\ndef to_slices(input, starts, ends, axes, steps):\n slices = []\n index = 0\n\n for start, end, axis, step in sorted(zip(starts, ends, axes, steps),\n key=lambda x: x[2]):\n\n while index < axis:\n slices.append(slice(0, input.shape[index]))\n index += 1\n\n slices.append(slice(start, end, step))\n index += 1\n\n return tuple(slices)\n\n\ndef extract_slices(input, starts, ends, axes, steps):\n ret_starts = []\n ret_ends = []\n ret_steps = []\n index = 0\n\n for start, end, axis, step in sorted(zip(starts, ends, axes, steps),\n key=lambda x: x[2]):\n\n while index < axis:\n ret_starts.append(0)\n ret_ends.append(input.shape[index])\n ret_steps.append(1)\n index += 1\n\n ret_starts.append(start)\n ret_ends.append(end)\n ret_steps.append(step)\n index += 1\n\n return tuple(ret_starts), tuple(ret_ends), tuple(ret_steps)\n" ]
[ [ "numpy.array" ] ]
gost-gk/turel
[ "1a1164dae80d3ea8f53f966da42b384ed4f63daa" ]
[ "turel.py" ]
[ "from argparse import ArgumentParser\nimport random\nimport string\nimport sys\nimport time\nfrom typing import Dict, List, Tuple, Sequence, Optional, Union, Set, TypeVar\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport keras\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nimport numpy as np\n\nfrom wildcard_trie import Trie\n\n\nImageSize = Tuple[int, int]\nBaseImages = Dict[str, Image.Image]\nKerasSet = Tuple[np.ndarray, np.ndarray]\n\nCHARS_ENG: str = string.ascii_letters\n\nCHARS_RUS: str = 'йцукенгшщзхъфывапролджэячсмитьбюё'\nCHARS_RUS += CHARS_RUS.upper()\n\nCHARS_SPECIAL: str = string.punctuation\nCHARS_SPECIAL += '«»'\nCHARS_SPECIAL += string.digits\n\nCHARS_ALL: str = CHARS_ENG + CHARS_RUS + CHARS_SPECIAL\n\nCHARS_NUM: int = len(CHARS_ALL)\n\n_CHAR_TO_ONEHOT: Dict[str, np.ndarray] = dict((\n (char, np.array([0] * idx + [1] + [0] * (max(CHARS_NUM - idx - 1, 0))))\n for\n idx, char\n in\n enumerate(CHARS_ALL)\n))\n\n_GLYPH_OFFSET_X = 2\n_GLYPH_OFFSET_Y = 2\n\ndef char_to_onehot(char: str) -> np.ndarray:\n return np.copy(_CHAR_TO_ONEHOT[char])\n\n\ndef onehot_to_char(onehot: np.ndarray) -> str:\n idx = max(enumerate(onehot), key=lambda x: x[1])[0]\n return CHARS_ALL[idx]\n\n\ndef onehot_to_char_gauss(onehot: np.ndarray, sigma: float = 1.5) -> Tuple[str, float]:\n indexes_desc = sorted(((idx, weight) for idx, weight in enumerate(onehot)), key=lambda x: -x[1])\n indexes_idx = min(abs(int(random.normalvariate(0, sigma))), len(indexes_desc) - 1)\n return CHARS_ALL[indexes_desc[indexes_idx][0]], 1\n\n\ndef onehot_to_char_confidence(onehot: np.ndarray) -> Tuple[str, float]:\n idx, confidence = max(enumerate(onehot), key=lambda x: x[1])\n return CHARS_ALL[idx], confidence\n\n\ndef calculate_max_size(chars: str, font: ImageFont) -> ImageSize:\n sizes = []\n for ch in chars:\n sizes.append(font.font.getsize(ch)[0])\n return (max(sizes, key=lambda x: x[0])[0] + _GLYPH_OFFSET_X * 2, max(sizes, key=lambda x: x[1])[1] + _GLYPH_OFFSET_Y * 2)\n\n\ndef generate_base_images(chars: str, font: ImageFont, max_size: ImageSize) -> BaseImages:\n img_dict: BaseImages = dict()\n for char in chars:\n img = Image.new('LA', max_size, (0, 0))\n draw = ImageDraw.Draw(img)\n draw.text((_GLYPH_OFFSET_X, _GLYPH_OFFSET_Y), char, (0,), font=font)\n img_dict[char] = img\n return img_dict\n\n\n# Copyright https://github.com/kuszaj/claptcha\ndef random_transform_image(image: Image.Image) -> Image:\n w, h = image.size\n\n dx = w * random.uniform(0.1, 0.3)\n dy = h * random.uniform(0.1, 0.3)\n\n x1, y1 = _random_point_disposition(dx, dy)\n x2, y2 = _random_point_disposition(dx, dy)\n\n w += abs(x1) + abs(x2)\n h += abs(x1) + abs(x2)\n\n quad = _quad_points((w, h), (x1, y1), (x2, y2))\n\n return image.transform(image.size, Image.QUAD,\n data=quad, resample=Image.BILINEAR)\n\n\ndef _random_point_disposition(dx, dy):\n x = int(random.uniform(-dx, dx))\n y = int(random.uniform(-dy, dy))\n return (x, y)\n\n\ndef _quad_points(size, disp1, disp2):\n w, h = size\n x1, y1 = disp1\n x2, y2 = disp2\n\n return (\n x1, -y1,\n -x1, h + y2,\n w + x2, h - y2,\n w - x2, y1\n )\n\n\ndef generate_distorted_sample(img_LA: Image.Image) -> np.ndarray:\n img_arr = np.array(random_transform_image(img_LA))\n shape = img_arr.shape\n img_arr_solid = np.zeros((shape[0], shape[1]), dtype='float')\n for i in range(shape[0]):\n for j in range(shape[1]):\n if img_arr[i, j][1] == 255:\n img_arr_solid[i, j] = 1\n else:\n img_arr_solid[i, j] = 0\n return img_arr_solid.reshape(img_arr.shape[0] * img_arr.shape[1])\n\n\ndef 
shuffle_sets_equally(x_set: np.ndarray, y_set: np.ndarray):\n # LOL\n rng_state = np.random.get_state()\n np.random.shuffle(x_set)\n np.random.set_state(rng_state)\n np.random.shuffle(y_set)\n\n\ndef generate_set(max_size: ImageSize, base_images: BaseImages, samples_per_char: int) -> KerasSet:\n input_vec_len = max_size[0] * max_size[1]\n output_vec_len = CHARS_NUM\n set_size = samples_per_char * len(base_images)\n\n x_values: List[np.ndarray] = []\n y_values: List[np.ndarray] = []\n for c, img in base_images.items():\n for _ in range(samples_per_char):\n x_values.append(generate_distorted_sample(img))\n y_values.append(char_to_onehot(c))\n\n x_set: np.ndarray = np.concatenate(x_values).reshape((set_size, input_vec_len))\n y_set: np.ndarray = np.concatenate(y_values).reshape((set_size, output_vec_len))\n shuffle_sets_equally(x_set, y_set)\n\n return x_set, y_set\n\n\ndef generate_set_biased(max_size: ImageSize, base_images_list: Sequence[BaseImages], base_images_samples: Sequence[int]) -> KerasSet:\n assert len(base_images_list) == len(base_images_samples)\n res_x = []\n res_y = []\n for base_images, samples_per_image in zip(base_images_list, base_images_samples):\n x_set, y_set = generate_set(max_size, base_images, samples_per_image)\n res_x.append(x_set)\n res_y.append(y_set)\n x_set = np.concatenate(res_x)\n y_set = np.concatenate(res_y)\n\n shuffle_sets_equally(x_set, y_set)\n\n return x_set, y_set\n\n\ndef generate_batch_from_string(max_size: ImageSize, base_images: BaseImages, target_string: str) -> np.ndarray:\n input_vec_len = max_size[0] * max_size[1]\n batch_size = len(target_string)\n x_set = np.empty(shape=(batch_size, input_vec_len))\n\n sample_num = 0\n for c in target_string:\n x_set[sample_num] = generate_distorted_sample(base_images[c])\n sample_num += 1\n return x_set\n\n\nSamplesCntTuple = Optional[Tuple[int, int, int]]\ndef generate_sets(max_size: ImageSize,\n base_images_rus: Optional[BaseImages],\n base_images_eng: Optional[BaseImages],\n base_images_special: Optional[BaseImages],\n rus_samples_per_img: SamplesCntTuple,\n eng_samples_per_img: SamplesCntTuple,\n special_samples_per_img: SamplesCntTuple) -> Tuple[KerasSet, KerasSet, KerasSet]:\n assert((base_images_rus is None) == (rus_samples_per_img is None))\n assert((base_images_eng is None) == (eng_samples_per_img is None))\n assert((base_images_special is None) == (special_samples_per_img is None))\n \n base_images_list = tuple(x for x in (base_images_rus, base_images_special, base_images_eng) if x is not None)\n samples_counts = np.array([\n cnt for cnt\n in (rus_samples_per_img, eng_samples_per_img, special_samples_per_img)\n if cnt is not None\n ]).transpose()\n\n x_train, y_train = generate_set_biased(max_size, base_images_list, samples_counts[0])\n x_valid, y_valid = generate_set_biased(max_size, base_images_list, samples_counts[1])\n x_test, y_test = generate_set_biased(max_size, base_images_list, samples_counts[2])\n return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)\n\n\ndef save_sets(file_prefix: str,\n x_train: np.ndarray, y_train: np.ndarray,\n x_valid: np.ndarray, y_valid: np.ndarray,\n x_test: np.ndarray, y_test: np.ndarray):\n np.save(file_prefix + '_x_train', x_train)\n np.save(file_prefix + '_y_train', y_train)\n np.save(file_prefix + '_x_valid', x_valid)\n np.save(file_prefix + '_y_valid', y_valid)\n np.save(file_prefix + '_x_test', x_test)\n np.save(file_prefix + '_y_test', y_test)\n\n\ndef load_sets(file_prefix: str) -> Tuple[KerasSet, KerasSet, KerasSet]:\n x_train, y_train = 
np.load(file_prefix + '_x_train.npy'), np.load(file_prefix + '_y_train.npy')\n x_valid, y_valid = np.load(file_prefix + '_x_valid.npy'), np.load(file_prefix + '_y_valid.npy')\n x_test, y_test = np.load(file_prefix + '_x_test.npy'), np.load(file_prefix + '_y_test.npy')\n return (x_train, y_train), (x_valid, y_valid), (x_test, y_test)\n\n\nFilteredCharsList = List[Tuple[int, str]]\ndef filter_out_chars(string: str, whitelist: Union[str, Set[str]]) -> Tuple[List[str], FilteredCharsList]:\n whitelist = set(whitelist)\n res: List[str] = []\n filtered_chars: FilteredCharsList = []\n for idx, c in enumerate(string):\n if c in whitelist:\n res.append(c)\n else:\n filtered_chars.append((idx, c))\n return res, filtered_chars[::-1]\n\n\nT, F = TypeVar('T'), TypeVar('F')\ndef restore_chars(string: Sequence[T], filtered_chars: List[Tuple[int, F]]) -> List[Union[T, F]]:\n idx: int = 0\n res: List[T] = []\n for c in string:\n while len(filtered_chars) > 0:\n if filtered_chars[-1][0] == idx:\n res.append(filtered_chars[-1][1])\n filtered_chars.pop()\n idx += 1\n else:\n break\n res.append(c)\n idx += 1\n return res\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('model', choices=('load-model', 'train-model'))\n parser.add_argument('--sets', choices=('load', 'gen'), default=None)\n parser.add_argument('--sets-prefix', type=str, dest='sets_prefix', default='turel')\n parser.add_argument('--model-filename', type=str, dest='model_filename', default='turel-model.keras')\n parser.add_argument('--save-model', action='store_true', dest='save_model', default=False)\n parser.add_argument('--train-epochs', type=int, dest='train_epochs', default=10)\n parser.add_argument('--save-sets', action='store_true', dest='save_sets', default=False)\n parser.add_argument('--font-filename', type=str, dest='font_filename', default='DroidSansMono.ttf')\n parser.add_argument('--font-size', type=int, dest='font_size', default=64)\n parser.add_argument('--use-gauss', action='store_true', dest='use_gauss', default=False, help='Manually emulate model errors')\n parser.add_argument('--gauss-sigma', type=float, dest='gauss_sigma', default=0.8)\n parser.add_argument('--use-substitution', action='store_true', dest='use_substitution', help='Substitute words that have low confidence')\n parser.add_argument('--confidence-threshold', type=float, dest='confidence_threshold', default=0.9)\n parser.add_argument('--input', type=str, dest='input_filename', default='input.txt', help='Input filename')\n parser.add_argument('--output', type=str, dest='output_filename', default='output.txt', help='Output filename')\n args = parser.parse_args()\n\n args.load_model = args.model == 'load-model'\n args.train_model = args.model == 'train-model'\n\n FONT_SIZE = args.font_size\n FONT_FILENAME = args.font_filename\n \n trie = Trie()\n\n font = ImageFont.truetype(FONT_FILENAME, FONT_SIZE)\n max_size = calculate_max_size(CHARS_ALL, font)\n \n base_images_rus = generate_base_images(CHARS_RUS, font, max_size)\n base_images_eng = generate_base_images(CHARS_ENG, font, max_size)\n base_images_special = generate_base_images(CHARS_SPECIAL, font, max_size)\n base_images_all = generate_base_images(CHARS_ALL, font, max_size)\n\n input_vec_len = max_size[0] * max_size[1]\n\n if args.train_model and args.sets is None:\n print('To train the model you must specify either \"--sets load\" or \"--sets gen\".')\n sys.exit(1)\n\n if args.save_sets and args.sets != 'gen':\n print('To save sets you must generate them with \"--sets gen\".')\n sys.exit(1)\n\n if 
args.use_substitution and args.use_gauss:\n print('Cannot use --use-substitution together with --use-gauss.')\n sys.exit(1)\n\n if args.use_substitution:\n start_time = time.time()\n print('Loading words...')\n with open('russian-words/russian.txt', 'r', encoding='cp1251') as f:\n words = [w for w in (w.strip().lower() for w in f.readlines()) if len(w) > 0]\n print(f'{len(words)} words loaded in {(time.time() - start_time) * 1000} ms.')\n\n start_time = time.time()\n print('Parsing words...')\n for w in words:\n trie.add(w, w)\n print(f'Parsed in {(time.time() - start_time) * 1000} ms.')\n\n x_train, y_train, x_valid, y_valid, x_test, y_test = (None,) * 6\n if args.sets == 'gen':\n print('Generating sets, this may take several minutes...')\n start_time = time.time()\n sets = generate_sets(max_size,\n base_images_rus,\n None,\n #base_images_eng,\n base_images_special,\n (90, 20, 7),\n None,\n #(3, 1, 2),\n (7, 4, 1))\n (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = sets \n print(f'Sets generated in {(time.time() - start_time) * 1000} ms.')\n elif args.sets == 'load':\n print('Loading sets...')\n start_time = time.time()\n (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = load_sets(args.sets_prefix)\n print(f'Sets loaded in {(time.time() - start_time) * 1000} ms.')\n\n if args.save_sets:\n print('Saving sets...')\n start_time = time.time()\n save_sets(args.sets_prefix, x_train, y_train, x_valid, y_valid, x_test, y_test)\n print(f'Sets saved in {(time.time() - start_time) * 1000} ms.')\n\n print('Creating the model...')\n optimizer = keras.optimizers.Adagrad(learning_rate=0.02)\n model = Sequential()\n model.add(Dense(units=CHARS_NUM * 2, activation='relu', input_dim=input_vec_len))\n model.add(Dense(units=CHARS_NUM, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n print('Model created.')\n\n if args.load_model:\n print('Loading the model...')\n model.load_weights(args.model_filename)\n print('Model loaded.')\n\n if args.train_model:\n print('Training the model...')\n model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=args.train_epochs, batch_size=32)\n loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)\n print(model.metrics_names)\n print(loss_and_metrics)\n print('Model trained.')\n\n if args.save_model:\n print('Saving the model to', args.model_filename)\n model.save(args.model_filename)\n print('Model saved.')\n\n with open(args.input_filename, 'r', encoding='utf-8') as f:\n text = f.read()\n\n start_time = time.time()\n print('Generating input batch...')\n filtered_text_list, filtered_chars = filter_out_chars(text, CHARS_ALL)\n x_manual = generate_batch_from_string(max_size, base_images_all, ''.join(filtered_text_list))\n print(f'Input batch generated in {(time.time() - start_time) * 1000} ms.')\n\n print(f'Cobenizing {args.input_filename}...')\n model_prediction = model.predict(x_manual)\n predicted_chars = []\n for predicted_one_hot in model_prediction:\n if args.use_gauss:\n predicted_chars.append(onehot_to_char_gauss(predicted_one_hot, args.gauss_sigma))\n else:\n predicted_chars.append(onehot_to_char_confidence(predicted_one_hot))\n if not args.use_substitution:\n predict = ''.join((c[0] for c in predicted_chars))\n restored_text = ''.join(restore_chars(predict, filtered_chars))\n else:\n _CHARS_RUS_SET = set(CHARS_RUS)\n # filtered characters have confidence = 1\n restored_list = restore_chars(predicted_chars, [(c[0], (c[1], 1)) for c in filtered_chars])\n 
text_list = []\n word = []\n word_orig = []\n restored_list.append((' ', 1)) # to parse the last word\n for char, confidence in restored_list:\n if confidence < args.confidence_threshold:\n word.append('?')\n word_orig.append(char)\n else:\n if char in _CHARS_RUS_SET:\n word.append(char)\n word_orig.append(char)\n else:\n if '?' in word:\n if len(word) > 4:\n case_mask = [c.isupper() for c in word]\n words = list(trie.get_wildcard(''.join(word).lower())) + [''.join(word_orig).lower()]\n chosen_one = [c.upper() if isup else c for c, isup in zip(random.choice(words), case_mask)]\n text_list.extend(chosen_one)\n else:\n text_list.extend(word_orig)\n else:\n text_list.extend(word)\n word.clear()\n word_orig.clear()\n text_list.append(char)\n restored_text = ''.join(text_list)\n\n print(restored_text)\n\n with open(args.output_filename, 'w', encoding='utf-8') as f:\n f.write(restored_text)\n\n # show_batch(x_manual, max_size)\n\n\ndef show_batch(x_values, max_size, max_count=-1):\n imgs = []\n for i in range(x_values.shape[0] if max_count < 0 else max_count):\n img_arr = x_values[i].reshape((max_size[1], max_size[0])) * 255\n imgs.append(Image.fromarray(img_arr))\n show_combined_image(imgs, max_size)\n\n\ndef show_combined_image(images, max_size):\n img_combined = Image.new('L', (max_size[0] * len(images), max_size[1]), (255,))\n offset_x = 0\n for img in images:\n img_combined.paste(img, (offset_x, 0))\n offset_x += max_size[0]\n img_combined.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.empty", "numpy.zeros", "numpy.copy", "numpy.load", "numpy.random.shuffle", "numpy.save", "numpy.random.get_state", "numpy.random.set_state" ] ]
chenxran/coqa-roberta-baselines
[ "c0eeaa809f9e8e963f24d66e2f6e131c995dd1ed", "c0eeaa809f9e8e963f24d66e2f6e131c995dd1ed" ]
[ "rc/utils/graph_utils.py", "rc/utils/data_utils.py" ]
[ "import matplotlib\nmatplotlib.use('agg')\n\nimport matplotlib.pyplot as plt\n\n\n################################################################################\n# Graphing Functions #\n################################################################################\n\ndef plot_learn(values, yAxis, xAxis, title=None, saveTo=None):\n \"\"\"\n Plots the learning curve with train/val for all values. Limited to\n 7 learning curves on the same graph as we only have 7 colours.\n\n Args:\n 1. values: Dictionary of tuples of lists, where the tuple is ([train values], [dev values])\n and the key is the name of the model for the graph label.\n 2. yAxis: Either 'Loss', 'F1' or 'Exact Match'\n 3. xAxis: 'Epochs' or 'Iterations'\n 4. title: optional title to the graph\n 5. saveTo: save location for graph\n \"\"\"\n colours = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n\n for i, (k, (train_values, dev_values)) in enumerate(values.items()):\n\n plt.plot(map(float, train_values), linewidth=2, color=colours[i],\n linestyle='--', label=\"Train {} {}\".format(yAxis, k))\n if dev_values:\n plt.plot(map(float, dev_values), linewidth=2, color=colours[i],\n linestyle='-', label=\"Dev {} {}\".format(yAxis, k))\n\n plt.xlabel(xAxis)\n plt.ylabel(yAxis)\n if title:\n plt.title(title)\n\n if yAxis == \"Loss\":\n plt.legend(loc='upper right', shadow=True, prop={'size': 6})\n else:\n plt.legend(loc='upper left', shadow=True, prop={'size': 6})\n\n assert saveTo\n plt.savefig(\"{}\".format(saveTo))\n plt.cla()\n plt.clf()\n plt.close()\n\n\ndef plot_metrics(values, yAxis, xAxis, title=None, saveTo=None):\n colours = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n\n for i, (train_values, dev_values, metric) in enumerate(values):\n plt.plot(map(float, train_values), linewidth=2, color=colours[i],\n linestyle='-', label=\"Train {}\".format(metric))\n if dev_values:\n plt.plot(map(float, dev_values), linewidth=2, color=colours[i],\n linestyle='--', label=\"Dev {}\".format(metric))\n\n plt.xlabel(xAxis)\n plt.ylabel(yAxis)\n if title:\n plt.title(title)\n\n if yAxis == \"Loss\":\n plt.legend(loc='upper right', shadow=True, prop={'size': 6})\n else:\n plt.legend(loc='upper left', shadow=True, prop={'size': 6})\n\n assert saveTo\n plt.savefig(\"{}\".format(saveTo))\n plt.cla()\n plt.clf()\n plt.close()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nModule to handle getting data loading classes and helper functions.\n\"\"\"\n\nimport json\nimport io\nimport torch\nimport numpy as np\n\nfrom collections import Counter, defaultdict\nfrom torch.utils.data import Dataset\nfrom . 
import constants as Constants\nfrom .timer import Timer\n\n\n################################################################################\n# Dataset Prep #\n################################################################################\n\ndef prepare_datasets(config):\n train_set = None if config['trainset'] is None else CoQADataset(config['trainset'], config)\n dev_set = None if config['devset'] is None else CoQADataset(config['devset'], config)\n test_set = None if config['testset'] is None else CoQADataset(config['testset'], config)\n return {'train': train_set, 'dev': dev_set, 'test': test_set}\n\n################################################################################\n# Dataset Classes #\n################################################################################\n\n\nclass CoQADataset(Dataset):\n \"\"\"SQuAD dataset.\"\"\"\n\n def __init__(self, filename, config):\n timer = Timer('Load %s' % filename)\n self.filename = filename\n self.config = config\n paragraph_lens = []\n question_lens = []\n self.paragraphs = []\n self.examples = []\n self.vocab = Counter()\n dataset = read_json(filename)\n for paragraph in dataset['data']:\n history = []\n for qas in paragraph['qas']:\n qas['paragraph_id'] = len(self.paragraphs)\n temp = []\n n_history = len(history) if config['n_history'] < 0 else min(config['n_history'], len(history))\n if n_history > 0:\n for i, (q, a) in enumerate(history[-n_history:]):\n d = n_history - i\n temp.append('<Q{}>'.format(d))\n temp.extend(q)\n temp.append('<A{}>'.format(d))\n temp.extend(a)\n temp.append('<Q>')\n temp.extend(qas['annotated_question']['word'])\n history.append((qas['annotated_question']['word'], qas['annotated_answer']['word']))\n qas['annotated_question']['word'] = temp\n self.examples.append(qas)\n question_lens.append(len(qas['annotated_question']['word']))\n paragraph_lens.append(len(paragraph['annotated_context']['word']))\n for w in qas['annotated_question']['word']:\n self.vocab[w] += 1\n for w in paragraph['annotated_context']['word']:\n self.vocab[w] += 1\n for w in qas['annotated_answer']['word']:\n self.vocab[w] += 1\n self.paragraphs.append(paragraph)\n print('Load {} paragraphs, {} examples.'.format(len(self.paragraphs), len(self.examples)))\n print('Paragraph length: avg = %.1f, max = %d' % (np.average(paragraph_lens), np.max(paragraph_lens)))\n print('Question length: avg = %.1f, max = %d' % (np.average(question_lens), np.max(question_lens)))\n timer.finish()\n\n def __len__(self):\n return 50 if self.config['debug'] else len(self.examples)\n\n def __getitem__(self, idx):\n qas = self.examples[idx]\n paragraph = self.paragraphs[qas['paragraph_id']]\n question = qas['annotated_question']\n answers = [qas['answer']]\n if 'additional_answers' in qas:\n answers = answers + qas['additional_answers']\n\n sample = {'id': (paragraph['id'], qas['turn_id']),\n 'question': question,\n 'answers': answers,\n 'evidence': paragraph['annotated_context'],\n 'targets': qas['answer_span']}\n\n if self.config['predict_raw_text']:\n sample['raw_evidence'] = paragraph['context']\n return sample\n\n\n################################################################################\n# Read & Write Helper Functions #\n################################################################################\n\n\ndef write_json_to_file(json_object, json_file, mode='w', encoding='utf-8'):\n with io.open(json_file, mode, encoding=encoding) as outfile:\n json.dump(json_object, outfile, indent=4, sort_keys=True, ensure_ascii=False)\n\n\ndef 
log_json(data, filename, mode='w', encoding='utf-8'):\n with io.open(filename, mode, encoding=encoding) as outfile:\n outfile.write(json.dumps(data, indent=4, ensure_ascii=False))\n\n\ndef get_file_contents(filename, encoding='utf-8'):\n with io.open(filename, encoding=encoding) as f:\n content = f.read()\n f.close()\n return content\n\n\ndef read_json(filename, encoding='utf-8'):\n contents = get_file_contents(filename, encoding=encoding)\n return json.loads(contents)\n\n\ndef get_processed_file_contents(file_path, encoding=\"utf-8\"):\n contents = get_file_contents(file_path, encoding=encoding)\n return contents.strip()\n\n################################################################################\n# DataLoader Helper Functions #\n################################################################################\n\n\ndef sanitize_input(sample_batch, config, vocab, feature_dict, training=True):\n \"\"\"\n Reformats sample_batch for easy vectorization.\n Args:\n sample_batch: the sampled batch, yet to be sanitized or vectorized.\n vocab: word embedding dictionary.\n feature_dict: the features we want to concatenate to our embeddings.\n train: train or test?\n \"\"\"\n sanitized_batch = defaultdict(list)\n for ex in sample_batch:\n question = ex['question']['word']\n evidence = ex['evidence']['word']\n offsets = ex['evidence']['offsets']\n\n processed_q, processed_e = [], []\n for w in question:\n processed_q.append(vocab[w] if w in vocab else vocab[Constants._UNK_TOKEN])\n for w in evidence:\n processed_e.append(vocab[w] if w in vocab else vocab[Constants._UNK_TOKEN])\n\n # Append relevant index-structures to batch\n sanitized_batch['question'].append(processed_q)\n sanitized_batch['evidence'].append(processed_e)\n\n if config['predict_raw_text']:\n sanitized_batch['raw_evidence_text'].append(ex['raw_evidence'])\n sanitized_batch['offsets'].append(offsets)\n else:\n sanitized_batch['evidence_text'].append(evidence)\n\n # featurize evidence document:\n sanitized_batch['features'].append(featurize(ex['question'], ex['evidence'], feature_dict))\n sanitized_batch['targets'].append(ex['targets'])\n sanitized_batch['answers'].append(ex['answers'])\n if 'id' in ex:\n sanitized_batch['id'].append(ex['id'])\n return sanitized_batch\n\n\ndef vectorize_input(batch, config, training=True, device=None):\n \"\"\"\n - Vectorize question and question mask\n - Vectorize evidence documents, mask and features\n - Vectorize target representations\n \"\"\"\n # Check there is at least one valid example in batch (containing targets):\n if not batch:\n return None\n\n # Relevant parameters:\n batch_size = len(batch['question'])\n\n # Initialize all relevant parameters to None:\n targets = None\n\n # Part 1: Question Words\n # Batch questions ( sum_bs(n_sect), len_q)\n max_q_len = max([len(q) for q in batch['question']])\n xq = torch.LongTensor(batch_size, max_q_len).fill_(0)\n xq_mask = torch.ByteTensor(batch_size, max_q_len).fill_(1)\n for i, q in enumerate(batch['question']):\n xq[i, :len(q)].copy_(torch.LongTensor(q))\n xq_mask[i, :len(q)].fill_(0)\n\n # Part 2: Document Words\n max_d_len = max([len(d) for d in batch['evidence']])\n xd = torch.LongTensor(batch_size, max_d_len).fill_(0)\n xd_mask = torch.ByteTensor(batch_size, max_d_len).fill_(1)\n xd_f = torch.zeros(batch_size, max_d_len, config['num_features']) if config['num_features'] > 0 else None\n\n # 2(a): fill up DrQA section variables\n for i, d in enumerate(batch['evidence']):\n xd[i, :len(d)].copy_(torch.LongTensor(d))\n xd_mask[i, 
:len(d)].fill_(0)\n if config['num_features'] > 0:\n xd_f[i, :len(d)].copy_(batch['features'][i])\n\n # Part 3: Target representations\n if config['sum_loss']: # For sum_loss \"targets\" acts as a mask rather than indices.\n targets = torch.ByteTensor(batch_size, max_d_len, 2).fill_(0)\n for i, _targets in enumerate(batch['targets']):\n for s, e in _targets:\n targets[i, s, 0] = 1\n targets[i, e, 1] = 1\n else:\n targets = torch.LongTensor(batch_size, 2)\n for i, _target in enumerate(batch['targets']):\n targets[i][0] = _target[0]\n targets[i][1] = _target[1]\n\n torch.set_grad_enabled(training)\n example = {'batch_size': batch_size,\n 'answers': batch['answers'],\n 'xq': xq.to(device) if device else xq,\n 'xq_mask': xq_mask.to(device) if device else xq_mask,\n 'xd': xd.to(device) if device else xd,\n 'xd_mask': xd_mask.to(device) if device else xd_mask,\n 'xd_f': xd_f.to(device) if device else xd_f,\n 'targets': targets.to(device) if device else targets}\n\n if config['predict_raw_text']:\n example['raw_evidence_text'] = batch['raw_evidence_text']\n example['offsets'] = batch['offsets']\n else:\n example['evidence_text'] = batch['evidence_text']\n return example\n\n\ndef featurize(question, document, feature_dict):\n doc_len = len(document['word'])\n features = torch.zeros(doc_len, len(feature_dict))\n q_cased_words = set([w for w in question['word']])\n q_uncased_words = set([w.lower() for w in question['word']])\n for i in range(doc_len):\n d_word = document['word'][i]\n if 'f_qem_cased' in feature_dict and d_word in q_cased_words:\n features[i][feature_dict['f_qem_cased']] = 1.0\n if 'f_qem_uncased' in feature_dict and d_word.lower() in q_uncased_words:\n features[i][feature_dict['f_qem_uncased']] = 1.0\n if 'pos' in document:\n f_pos = 'f_pos={}'.format(document['pos'][i])\n if f_pos in feature_dict:\n features[i][feature_dict[f_pos]] = 1.0\n if 'ner' in document:\n f_ner = 'f_ner={}'.format(document['ner'][i])\n if f_ner in feature_dict:\n features[i][feature_dict[f_ner]] = 1.0\n return features\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.cla", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf" ], [ "torch.zeros", "numpy.max", "torch.ByteTensor", "torch.LongTensor", "numpy.average", "torch.set_grad_enabled" ] ]
statphysandml/pystatplottools
[ "b1dec1f057b4757b7ed969d56a3905ce904e19d0" ]
[ "pystatplottools/distributions/distributionDD.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\ndef transform_log10(data, columns):\n # Compute log10 of data\n log_data = data[columns].apply([\"log10\"])\n log_data.columns = log_data.columns.droplevel(1) + \"_log10\"\n cols_to_use = data.columns.difference(log_data.columns)\n data = pd.concat([data[cols_to_use], log_data], axis=1, verify_integrity=True)\n return data, [col + '_log10' for col in columns]\n\n\nclass DistributionDD:\n\n def __init__(self, **kwargs):\n self.data = kwargs.pop(\"data\", None)\n self.name = kwargs.pop(\"name\", \"Unknown\")\n self._distribution = None\n self._axes_indices = None\n\n @property\n def distribution(self):\n return self._distribution\n\n @staticmethod\n def transpose_linearized_statistics(axes_indices, data):\n # Verify if bins of axes_indices are the same\n if data.columns.names[0] == \"axes_and_statistics\":\n new_data_columns_names = [\"axes_and_dfs\"]\n new_data_index_names = [\"statistics\", \"idx\"]\n\n else:\n new_data_columns_names = [\"axes_and_statistics\"]\n new_data_index_names = ['dfs', 'idx']\n\n non_axes_cols = [item for item in data.columns if item not in axes_indices]\n upper_level_row_indices = list(data.index.unique(level=0))\n bins = data.loc[upper_level_row_indices[0]][axes_indices].values\n num_data_per_upper_level_row = len(bins)\n for key in upper_level_row_indices:\n upper_level_df_bins = data.loc[key][axes_indices].values\n assert np.array_equal(bins, upper_level_df_bins),\\\n \"Bins of the different upper level row dataframes do not coincide. Transposing is not possible.\"\n\n data = data[non_axes_cols].values.transpose().reshape(len(upper_level_row_indices), -1)\n bins = np.tile(bins.transpose(), (1, len(non_axes_cols)))\n index = pd.MultiIndex.from_product([non_axes_cols, np.arange(num_data_per_upper_level_row)], names=new_data_index_names)\n data = pd.DataFrame(np.concatenate([bins, data], axis=0).transpose(), index=index, columns=axes_indices + upper_level_row_indices)\n data.columns.names = new_data_columns_names\n return data\n\n @staticmethod\n def marginalize(initial_axes_indices, remaining_axes_indices, data):\n non_axes_cols = [item for item in data.columns if item not in initial_axes_indices]\n upper_row_level_name = data.index.names[0]\n data = data.reset_index(level=0).groupby(by=[upper_row_level_name] + remaining_axes_indices)[non_axes_cols].agg(\"sum\").reset_index()\n data = data.set_index(upper_row_level_name)\n data = data.groupby(upper_row_level_name).apply(lambda x: x.reset_index(drop=True))\n data.index.names = [upper_row_level_name, \"idx\"]\n return data\n\n @staticmethod\n def drop_zero_statistics(axes_indices, data):\n non_axes_cols = [item for item in data.columns if item not in axes_indices]\n data_index_names = data.index.names\n data = data[np.any(data[non_axes_cols].values != 0, axis=1)].groupby(data_index_names[0]).apply(lambda x: x.reset_index(drop=True))\n data.index.names = data_index_names\n return data\n\n @staticmethod\n def compute_multi_index_bin(linearized_sparse_distribution, bin_information, bin_alignment=\"center\"):\n bins = bin_information[\"bins\"]\n shape = tuple([len(bin) - 1 for bin in bins])\n if \"bin\" in linearized_sparse_distribution.keys():\n multi_dimensionsal_indices = np.unravel_index(indices=linearized_sparse_distribution.bin.values, shape=shape)\n else:\n multi_dimensionsal_indices = linearized_sparse_distribution[[\"bin_\" + str(bin) for bin in bin_information[\"binnames\"]]].values.transpose()\n\n binedges = np.zeros((len(linearized_sparse_distribution), 
len(bins)))\n for dim, (bin, dim_index) in enumerate(zip(bins, multi_dimensionsal_indices)):\n if bin_alignment == \"center\":\n aligned_bins = ((bin[:-1] + bin[1:]) / 2.0)[dim_index]\n elif bin_alignment == \"right\":\n aligned_bins = bin[1:][dim_index]\n else:\n aligned_bins = bin[:-1][dim_index]\n binedges[:, dim] = aligned_bins\n\n return binedges, bin_information[\"binnames\"]\n\n def extract_min_max_range_values(self, columns=None):\n if columns is None:\n range_min, range_max = list(map(list, self.data.agg([\"min\", \"max\"]).values))\n else:\n range_min, range_max = list(map(list, self.data[columns].agg([\"min\", \"max\"]).values))\n return range_min, range_max\n\n # Private methods\n\n def _compute_binned_statistics(self, axes_indices=None,\n columns=None,\n statistic='count',\n transform='lin',\n nbins=[],\n range_min=None,\n range_max=None,\n bin_scales='linear',\n with_binnumber=False,\n expand_binnumbers=False):\n \"\"\"\n Computes a joint probability distribution/histogram of axes_indices over binned values of the data frame\n self.data or computes binned statistics over binned samples. The function makes use of scipy's binned_statistic_dd\n function\n :param axes_indices: list of column indices - random variables of the joint distribution / the binned dimensions\n :param columns: the statistic is computed over these values\n :param statistic: statistic to be computed\n :param transform: lin or log10 - possibility to transform the column data before the computation of the statistics\n :param nbins: scalar or list - number of bins in each dimension\n :param range_min: scalar or list - minimum of the ranges of the bins in the different dimension of axes_indices\n :param range_max: scalar or list - maximum of ranges of the bins in the different dimension of axes_indices\n :param bin_scales: \"linear\" or \"logarithmic\" - possibility to introduce another scale for the bins\n :param with_binnumber: if True, the bin numbers are also returned for each value in self.data\n :return: dictionary that contains for each dataframe and each column a histogram. 
For statistic equal to\n \"probability\" or \"count\", the histograms can be accessed via [df][statistic] for columns via [df][column]\n \"\"\"\n \n if axes_indices is None:\n # Compute 1D statistics (marginal distribution) separately for each column\n axes_indices = columns # To generate the correct bins\n seperate_statistics = True\n else:\n seperate_statistics = False\n\n if columns is None:\n # Probability distribution or standard Histogram\n columns = [axes_indices[0]]\n\n columns, row_values, histogram_prep = self._prepare(\n axes_indices=axes_indices, columns=columns, transform=transform, nbins=nbins,\n range_min=range_min, range_max=range_max, bin_scales=bin_scales\n )\n\n from scipy.stats import binned_statistic_dd\n\n bin_statistic = statistic\n\n sample_indices = axes_indices # Default assumes computation with axes indices\n binned_statistics = dict()\n for row in row_values:\n binned_statistics[row] = dict()\n for col in columns:\n if statistic == \"probability\":\n bin_statistic = lambda x: len(x) * 1.0 / len(self.data.loc[row])\n if seperate_statistics: # For 1D histrograms\n sample_indices = [col]\n data_in_range_mask = DistributionDD.compute_data_mask_based_on_ranges(\n data=self.data.loc[row][sample_indices].values,\n ranges_min=[histogram_prep[ax_idx][row]['range_min'] for ax_idx in sample_indices],\n ranges_max=[histogram_prep[ax_idx][row]['range_max'] for ax_idx in sample_indices]\n )\n assert np.sum(data_in_range_mask) != 0, \"No data point is in the given range in at least one dimension\"\n if np.sum(data_in_range_mask) < len(self.data.loc[row][sample_indices]):\n print(\"Not all data points are in the given ranges\")\n hist, binedges, binnumber = binned_statistic_dd(\n sample=self.data.loc[row][sample_indices].values[data_in_range_mask], values=self.data.loc[row][col].values[data_in_range_mask], statistic=bin_statistic,\n bins=[histogram_prep[ax_idx][row]['binedges'] for ax_idx in sample_indices], expand_binnumbers=expand_binnumbers)\n binscale = [histogram_prep[ax_idx][row]['binscale'] for ax_idx in sample_indices]\n\n out_key = col\n if (statistic == \"probability\" or statistic == \"count\") and seperate_statistics is False:\n out_key = statistic\n\n if with_binnumber:\n binned_statistics[row][out_key] = {'hist': hist, 'binedges': binedges, 'binedgesindex': sample_indices, 'binnumber': binnumber, 'binscale': binscale}\n else:\n binned_statistics[row][out_key] = {'hist': hist, 'binedges': binedges, 'binedgesindex': sample_indices, 'binscale': binscale}\n\n return binned_statistics\n\n def _prepare(self, axes_indices=None, columns=None, transform='lin', nbins=[], range_min=None, range_max=None,\n bin_scales='linear'):\n\n if transform == \"log10\":\n # Compute log10 of data\n data, columns = transform_log10(data=self.data, columns=columns)\n\n # includes the indices of the different considered datasets\n row_values = list(self.data.index.unique(0))\n\n if range_min is None:\n ranges_min = self.data[axes_indices].groupby(level=0).agg(['min'])\n else:\n ranges_min = DistributionDD._tile_scalar(val=range_min, row_values=row_values, axes_indices=axes_indices, identifier='min')\n\n if range_max is None:\n ranges_max = self.data[axes_indices].groupby(level=0).agg(['max'])\n else:\n ranges_max = DistributionDD._tile_scalar(val=range_max, row_values=row_values, axes_indices=axes_indices, identifier='max')\n\n nb = DistributionDD._tile_scalar(val=nbins, row_values=row_values, axes_indices=axes_indices, identifier='nbins')\n binscales = 
DistributionDD._tile_scalar(val=bin_scales, row_values=row_values, axes_indices=axes_indices, identifier='binscale')\n\n histogram_prep = pd.concat([ranges_min, ranges_max, nb, binscales], axis=1, sort=True)\n histogram_prep = histogram_prep.groupby(level=0, axis=1).apply(\n lambda x: DistributionDD._get_bin_properties_of_collection(x))\n\n return columns, row_values, histogram_prep\n\n # The operation assumes that the order of the columns in data are the same as in histogram_prep\n @staticmethod\n def compute_data_mask_based_on_ranges(data, ranges_min, ranges_max):\n if len(data) == data.size:\n # Data is one dimensional\n data = data.reshape(len(data), 1)\n mask1 = data <= np.tile(np.array(ranges_max), (len(data), 1))\n mask2 = data >= np.tile(np.array(ranges_min), (len(data), 1))\n return np.all(mask1 & mask2, axis=1)\n\n @staticmethod\n def _linearize_histograms(histogram_data, order_by_bin=False, bin_alignment=\"center\"):\n \"\"\"\n Generates a linearized dataframe from histogram_data\n :param histogram_data: ToDo\n :param order_by_bin: True/False\n :return: If order_by_bin = True: A multiindex dataframe with index (col, bin_idx) and columns (binedges, df...).\n The binedges refer to col. The binedges refer to ravelled indices of\n a higher-dimensional histogram if histogram_data has been generated by histrogramDD.\n if order_by_bin = False: A multiindex dataframe with index (df, col, bin_idx) and columns (binedges, data),\n where data contains the counts for probabilities for each col\n \"\"\"\n keys = list(histogram_data.keys())\n columns = list(histogram_data[keys[0]].keys())\n\n linearized_data = []\n\n if order_by_bin:\n for col in columns:\n\n # Compute union of all bins of all dataframe\n\n # Only has an effect if binedges do not coincide for all keys\n shape = list(histogram_data[keys[0]][col][\"binedges\"][0].shape)\n shape[0] = 0\n # Multidimensional bin index\n if len(shape) > 1:\n bins = np.empty(shape=shape, dtype=np.float32)\n for key in keys:\n bins = [x for x in set(tuple(x) for x in bins) | set(\n tuple(x) for x in histogram_data[key][col][\"binedges\"][0][:-1])]\n bins.sort()\n binnames = histogram_data[keys[0]][col]['binnames']\n # One-dimensional bin index\n else:\n from functools import reduce\n binarrays = [histogram_data[key][col][\"binedges\"][0][:-1] for key in keys]\n bins = reduce(np.union1d, binarrays)\n\n # Generate a pandas dataframe for each dataset (row_value)\n\n results = {}\n # Same bins for each dataframe\n if np.array_equal(np.array(bins), histogram_data[keys[0]][col]['binedges'][0][:-1]):\n from pystatplottools.utils.bins_and_alignment import align_bins\n bins = align_bins(bins=histogram_data[keys[0]][col]['binedges'][0],\n bin_alignment=bin_alignment,\n bin_scale=histogram_data[keys[0]][col]['binscale'][0])\n if len(shape) > 1:\n results = {\"bin_\" + str(bin): bins[:, idx] for idx, bin in enumerate(binnames)}\n else:\n results = {\"bin\": bins}\n for key in keys:\n results[key] = histogram_data[key][col]['hist'].reshape(-1)\n # Different bins for each dataframe\n else:\n assert bin_alignment == \"left\", \\\n \"For irregular bins or bins that refer to indices, only left alignment is reasonable.\"\n if len(shape) > 1: # -> bins\n for key in keys:\n result = np.zeros(len(bins))\n considered_bins = [tuple(x) for x in histogram_data[key][col]['binedges'][0][:-1]]\n result[np.array([item in considered_bins for item in bins]).nonzero()[0]] = histogram_data[key][col]['hist'].reshape(-1)\n results[key] = result\n\n bins = np.array(bins)\n for idx, bin 
in enumerate(binnames):\n results[\"bin_\" + str(bin)] = bins[:, idx]\n results = {k: results[k] for k in [\"bin_\" + str(bin) for bin in binnames] + keys}\n else:\n results = {\"bin\": bins}\n for key in keys:\n result = np.zeros(len(bins))\n result[np.in1d(bins, histogram_data[key][col]['binedges'][0][:-1]).nonzero()[0]] = histogram_data[key][col]['hist'].reshape(-1)\n results[key] = result\n\n linearized_data.append(pd.DataFrame(results))\n linearized_data = pd.concat(linearized_data, keys=columns)\n linearized_data.columns.names = [\"bin_num_and_dfs\"]\n linearized_data.index.names = [\"statistics\", \"idx\"]\n return linearized_data\n else:\n for key in keys:\n results = []\n for idx, col in enumerate(columns):\n from pystatplottools.utils.bins_and_alignment import align_bins\n bins = align_bins(bins=histogram_data[key][col]['binedges'][0],\n bin_alignment=bin_alignment,\n bin_scale=histogram_data[key][col]['binscale'][0])\n if len(list(bins.shape)) > 1:\n binnames = histogram_data[keys[0]][col]['binnames']\n result = {\"bin_\" + str(bin): bins[:, idx] for idx, bin in enumerate(binnames)}\n result[\"data\"] = histogram_data[key][col]['hist'].reshape(-1)\n results.append(pd.DataFrame(result))\n else:\n results.append(pd.DataFrame({\"bin\": bins, \"data\": histogram_data[key][col]['hist'].reshape(-1)}))\n linearized_data.append(pd.concat(results, keys=columns))\n linearized_data = pd.concat(linearized_data, keys=keys)\n linearized_data.index.names = [\"dfs\", \"statistics\", \"idx\"]\n linearized_data.columns.names = [\"bin_num_and_statistics\"]\n return linearized_data\n\n @staticmethod\n def _linearize_binned_statistics(axes_indices, binned_statistics, output_statistics_names=None,\n dataframes_as_columns=True, bin_alignment=\"center\"):\n keys = list(binned_statistics.keys())\n columns = list(binned_statistics[keys[0]].keys())\n\n if isinstance(output_statistics_names, str):\n output_statistics_names = [output_statistics_names]\n\n if output_statistics_names is not None:\n assert len(columns) == len(output_statistics_names), \\\n \"Number of columns and number of output_statistics_names do not coincide\"\n\n binedges = binned_statistics[keys[0]][columns[0]]['binedges']\n\n # Check whether all rows and columns have the same rel_bin\n for key in keys:\n for col in columns:\n binn = binned_statistics[key][col]['binedges']\n assert False not in [np.array_equal(bis, bi) for bis, bi in zip(binedges, binn)], \\\n \"Rows and columns do not share the same bins - linearisation is not possible\"\n\n bins = binned_statistics[keys[0]][columns[0]]\n bins[\"nbins\"] = np.array([len(bins['binedges'][i])-1 for i in range(len(axes_indices))])\n\n resulting_bins = []\n for i in range(len(axes_indices)):\n from pystatplottools.utils.bins_and_alignment import align_bins\n ax_bins = align_bins(bins=bins[\"binedges\"][i], bin_alignment=bin_alignment, bin_scale=bins[\"binscale\"][i])\n resulting_bins.append(np.tile(np.repeat(ax_bins, np.prod(bins[\"nbins\"][i + 1:])), np.prod(bins[\"nbins\"][:i])))\n\n # Determine columns names\n cols_out = []\n if output_statistics_names is None:\n for col in columns:\n if col in axes_indices:\n col_out = str(col) + \"_ext\"\n else:\n col_out = col\n cols_out.append(col_out)\n else:\n cols_out = output_statistics_names\n\n if dataframes_as_columns:\n linearized_data = []\n for idx, col in enumerate(columns):\n results = {ax_idx: resulting_bin for ax_idx, resulting_bin in zip(axes_indices, resulting_bins)}\n for key in keys:\n results[key] = 
binned_statistics[key][col]['hist'].reshape(-1)\n linearized_data.append(pd.DataFrame(results))\n linearized_data = pd.concat(linearized_data, keys=cols_out)\n linearized_data.columns.names = [\"axes_and_dfs\"]\n linearized_data.index.names = [\"statistics\", \"idx\"]\n return linearized_data\n else:\n linearized_data = []\n for key in keys:\n results = {ax_idx: resulting_bin for ax_idx, resulting_bin in zip(axes_indices, resulting_bins)}\n for idx, col in enumerate(columns):\n results[cols_out[idx]] = binned_statistics[key][col]['hist'].reshape(-1)\n linearized_data.append(pd.DataFrame(results))\n linearized_data = pd.concat(linearized_data, keys=keys)\n linearized_data.columns.names = [\"axes_and_statistics\"]\n linearized_data.index.names = [\"dfs\", \"idx\"]\n return linearized_data\n\n ''' Further Helper Functions '''\n\n @staticmethod\n def _tile_scalar(val, row_values, axes_indices, identifier='nbins'):\n n = len(axes_indices)\n if hasattr(val, \"__len__\") and type(val) != str:\n assert len(val) == n, \"Number of \" + identifier + \" and dimension \" + str(n) + \" do not coincide.\"\n scalars = val\n else:\n scalars = [val for _ in range(n)]\n nb = [scalars for _ in range(len(row_values))]\n nb_tuples = list(zip(axes_indices, [identifier for _ in range(n)]))\n nb_col_index = pd.MultiIndex.from_tuples(tuples=nb_tuples)\n nb = pd.DataFrame(nb, index=row_values, columns=nb_col_index)\n return nb\n\n @staticmethod\n def _get_bin_properties_of_collection(x):\n col = x.columns.get_level_values(0).values[0]\n results = dict()\n for idx in x.index.values:\n assert x.loc[idx][col, 'min'] != x.loc[idx][col, 'max'] or x.loc[idx][col, 'nbins'] == 1,\\\n \"Computation of a histogram/distribution for \" + str(col) + \" is currently not possible since all values are equal. \" \\\n \"Consider taking out \" + str(col) + \" from your distribution.\"\n results[idx] = DistributionDD._get_bin_properties(range_min=x.loc[idx][col, 'min'],\n range_max=x.loc[idx][col, 'max'],\n nbins=x.loc[idx][col, 'nbins'],\n binscale=x.loc[idx][col, 'binscale'])\n return results\n\n @staticmethod\n def _get_bin_properties(range_min, range_max, nbins, binscale='linear'):\n if binscale == \"linear\":\n binedges = np.linspace(range_min, range_max, nbins + 1)\n elif binscale == \"logarithmic\":\n binedges = np.logspace(np.log10(range_min), np.log10(range_max), nbins + 1)\n else:\n assert False, 'No scale given in _get_bin_properties'\n return {'range_max': range_max,\n 'range_min': range_min,\n 'nbins': nbins,\n 'binedges': binedges,\n 'binscale': binscale}\n" ]
[ [ "numpy.concatenate", "numpy.log10", "numpy.array", "numpy.array_equal", "numpy.empty", "scipy.stats.binned_statistic_dd", "pandas.DataFrame", "numpy.sum", "pandas.MultiIndex.from_tuples", "numpy.unravel_index", "numpy.any", "numpy.prod", "numpy.arange", "pandas.concat", "numpy.all", "numpy.in1d", "numpy.linspace" ] ]
BonizzoniLab/SVD
[ "95ed967ae385ed0a339030763a07ea7acfa0c1d3" ]
[ "Grouping.py" ]
[ "import argparse\nimport collections \nimport numpy as np\n\nclass LOCUS():\n locus_name=''\n Supercontig=''\n start=''\n end=''\n groupDict={}\n alleleDict={}\n pass\n \ndef GroupsEvaluation(allelesfile, group_locus_dict):\n ''' Attribution of a variation group to an allele'''\n \n #initializations\n delta='\\x94'\n iota='\\x99'\n\n \n #read allele file and assign a code: 0 for absense, A for alleles equal to the annotation, B:Z for allele with only a portion of the annotated sequence\n for line in allelesfile:\n line.rstrip()\n if line.startswith('Locus_name'):\n continue\n else:\n parts = line.split(\"\\t\")\n locus_name=parts[0]\n locus_start=parts[1]\n locus_stop=parts[2]\n d={}\n a={}\n \n for part in parts:\n if ':' in list(str(part)):\n startallele=part.split(':')[0]\n stopallele=part.split(':')[1].split(delta)[0].split(iota)[0]\n \n #case absence\n if int(startallele) == 0 and int(stopallele) == 0:\n samples=parts[parts.index(part)+1].split('|')\n for s in samples:\n if s is not '':\n d[s.split('\\n')[0]]='0'\n \n #case no variability, that is equal to annotation\n elif startallele == locus_start and stopallele == locus_stop and delta not in list(str(part)) and iota not in list(str(part)):\n samples=parts[parts.index(part)+1].split('|')\n for s in samples:\n if s is not '':\n d[s.split('\\n')[0]]='A'\n a['A']=str(part)\n elif d.values()==[] or str(chr(ord(max(d.values())))) == '0':\n new_letter='B'\n samples=parts[parts.index(part)+1].split('|')\n for s in samples:\n if s is not '':\n d[s.split('\\n')[0]]=new_letter\n a[str(new_letter)]=str(part)\n else:\n new_letter=chr(ord(max(d.values()))+1)\n samples=parts[parts.index(part)+1].split('|')\n for s in samples:\n if s is not '':\n d[s.split('\\n')[0]]=new_letter\n a[str(new_letter)]=str(part)\n\n group_locus_dict[locus_name].groupDict=d\n group_locus_dict[locus_name].alleleDict=a\n\n return group_locus_dict\n\n\ndef WriteGroups(samplesName, group_locus_dict, snp_indel_pol_dict):\n ''' Write the output file '''\n \n outFile=open(opts.outputPath+'/SummaryOf_StructuralVariability_InSamples.txt','w')\n \n header='Locus_name\\t'\n for s in sorted(samplesName):\n header=header+str(s)+'\\t'\n header=header+'\\t\\tAllele_Legend'\n outFile.write(header+'\\n')\n \n # for each locus\n for k,v in group_locus_dict.iteritems():\n # find several allele\n unique_str_allele = np.unique(v.groupDict.values())\n \n for sa in unique_str_allele:\n if str(sa) is not '0': # if locus is not always absent\n sample_with_same_str_allele=[]\n count=1\n \n # find samples supporting the same str_allele\n for sample, letter in v.groupDict.iteritems():\n if letter == sa:\n sample_with_same_str_allele.append(sample.split('\\n')[0])\n \n for pol, samples in snp_indel_pol_dict[k].iteritems():\n\n # in loci with polymorphisms but in which samples have polymorphisms sequence like '000..00' the pedix is 0\n sample_same_str_pol_allele=[]\n for s_str_all in sample_with_same_str_allele:\n if s_str_all in samples:\n sample_same_str_pol_allele.append(s_str_all)\n \n if sample_same_str_pol_allele: \n if str(1) in list(pol): \n for s in sample_same_str_pol_allele:\n old_letter=v.groupDict[s]\n new_letter=old_letter+str(count)\n v.groupDict[s]=new_letter\n count=count+1\n \n else:\n for s in sample_same_str_pol_allele:\n old_letter=v.groupDict[s]\n new_letter=old_letter+str(0)\n v.groupDict[s]=new_letter\n \n string_to_print=''\n od_sample = collections.OrderedDict(sorted(v.groupDict.items()))\n for s in od_sample:\n string_to_print=string_to_print+str(od_sample[s])+'\\t'\n 
\n string_to_print=string_to_print+'\\t\\t'\n \n od_allele = collections.OrderedDict(sorted(v.alleleDict.items()))\n for a in od_allele:\n string_to_print=string_to_print+str(a)+':'+str(od_allele[a])+'\\t'\n \n outFile.write(k+'\\t'+string_to_print+'\\n')\n \n outFile.close() \n\n\ndef read_locus_info(fileinfo):\n '''method to read the RegionsInfo file containing: Supercontig start end locus_name'''\n dictionary={}\n \n for line in fileinfo:\n line.rstrip()\n if line.startswith('locus_name'):\n continue\n else:\n parts = line.split(\"\\t\")\n singleLocus=LOCUS()\n singleLocus.Supercontig=parts[0].strip()\n singleLocus.start=parts[1].strip()\n singleLocus.end=parts[2].strip()\n singleLocus.locus_name=parts[3].split('\\n')[0].strip()\n \n dictionary[singleLocus.locus_name]=singleLocus \n \n return dictionary;\n\n\ndef read_variants(in_file):\n '''method to read the indel or snp file, save variants in dictionary'''\n \n variants_dictionary={}\n \n with in_file as f:\n lis = [x.split() for x in f]\n first_line=lis[0]\n sample_list = first_line[5:-2]\n \n j=0\n for x in zip(*lis):\n i=0\n row=[]\n \n for y in x:\n if (i==0):\n i=i+1\n continue\n \n else:\n if not y in first_line:\n row.append(y)\n i=i+1 \n \n variants_dictionary[first_line[j]]=row\n j=j+1\n \n in_file.close() \n return sample_list, variants_dictionary;\n\n\ndef load_variants(samples_list, group_locus_dict, variants_dictionary):\n '''method to group, for each locus, the samples by their polymorphism string'''\n snp_indel_polimorphisms={}\n \n all_loci_var = np.array(variants_dictionary['LOCUS_NAME'])\n for locus in group_locus_dict.iterkeys():\n\n indicies_locus = np.where(locus == all_loci_var)\n d={}\n \n for sample in samples_list:\n all_var_sample = np.array(variants_dictionary[sample])\n locus_var_sample = all_var_sample[indicies_locus]\n \n if str(group_locus_dict[locus].groupDict[sample]) == str(0) and str(1) in locus_var_sample:\n print('Variants in 0 locus: '+str(locus)+' '+str(sample))\n print(locus_var_sample)\n locus_var_sample[np.where(locus_var_sample == '1')]='0'\n print(locus_var_sample)\n \n single_pol = ''.join(locus_var_sample)\n \n if not single_pol == '': # an empty string means no polymorphisms were called for this locus\n if single_pol in d.keys():\n temp=d[single_pol]\n temp.append(sample)\n d[single_pol]=temp\n else:\n d[single_pol]=[sample]\n \n snp_indel_polimorphisms[locus]=d\n\n return snp_indel_polimorphisms\n \n \ndef main():\n #read parameters of the script\n parser = argparse.ArgumentParser('Locus alleles grouped by the variations found (INDEL, cropped left, cropped right and cropped central). 
Output is to stdout.')\n parser.add_argument('-RegionsInfo', '--RegionsInfo', help=\"input file containing regions locus name, viral family, Supercontig, Start and End positions tab delimited\")\n parser.add_argument('-Variants', '--VariantsList', help=\"file of the variants\")\n parser.add_argument('-AlleleFile', '--AllelesFile', help=\"input file containing the alleles found for each locus of interest\")\n parser.add_argument('-outputPath', '--outputPath', help=\"Path of the output file\")\n \n global opts \n opts = parser.parse_args()\n \n # open file in input\n VarFileList=open(opts.VariantsList)\n alleles=open(opts.AllelesFile)\n in_info=open(opts.RegionsInfo)\n \n # read locus info and variants and load them in structure\n group_locus_dict=read_locus_info(in_info)\n \n # read structural different alleles and load them in structure\n group_locus_dict=GroupsEvaluation(alleles, group_locus_dict)\n \n # read variants and load them in structure\n [samplesName, variants_dictionary] = read_variants(VarFileList)\n snp_indel_pol_dict=load_variants(samplesName, group_locus_dict, variants_dictionary)\n \n # write the output file with the structural variability group\n WriteGroups(samplesName, group_locus_dict, snp_indel_pol_dict)\n \n # close file in input\n alleles.close()\n VarFileList.close()\n in_info.close()\n \nmain()" ]
[ [ "numpy.where", "numpy.array" ] ]
csdongxian/skip-connections-matter
[ "9b2e5cca9b673efcac253e16b2f55f6cda1a8692" ]
[ "utils_data.py" ]
[ "import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image\n\n\nclass SubsetImageNet(Dataset):\n def __init__(self, root, class_to_idx='./imagenet_class_to_idx.npy', transform=None):\n super(SubsetImageNet, self).__init__()\n self.root = root\n self.transform = transform\n img_path = os.listdir(root)\n img_path = sorted(img_path)\n self.img_path = [item for item in img_path if 'png' in item]\n self.class_to_idx = np.load(class_to_idx, allow_pickle=True)[()]\n\n def __getitem__(self, item):\n filepath = os.path.join(self.root, self.img_path[item])\n sample = Image.open(filepath, mode='r')\n\n if self.transform:\n sample = self.transform(sample)\n\n class_name = self.img_path[item].split('_')[0]\n label = self.class_to_idx[class_name]\n\n return sample, label, item\n\n def __len__(self):\n return len(self.img_path)\n\n\ndef save_images(images, img_list, idx, output_dir):\n \"\"\"Saves images to the output directory.\n Args:\n images: tensor with minibatch of images\n img_list: list of filenames without path\n If number of file names in this list less than number of images in\n the minibatch then only first len(filenames) images will be saved.\n output_dir: directory where to save images\n \"\"\"\n for i, sample_idx in enumerate(idx.numpy()):\n # Images for inception classifier are normalized to be in [-1, 1] interval,\n # so rescale them back to [0, 1].\n filename = img_list[sample_idx]\n cur_images = (images[i, :, :, :].transpose(1, 2, 0) * 255).astype(np.uint8)\n\n im = Image.fromarray(cur_images)\n im.save('{}.png'.format(os.path.join(output_dir, filename)))\n" ]
[ [ "numpy.load" ] ]
SrikrishnaBhat/google-research
[ "46d08e390cffa1b94160de4cff60efaf4e7a421c" ]
[ "dreg_estimators/model.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic IWAE setup with DReGs estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\ntfd = tfp.distributions\nFLAGS = tf.flags.FLAGS\n\nDEFAULT_INITIALIZERS = {\n \"w\": tf.contrib.layers.xavier_initializer(),\n \"b\": tf.zeros_initializer()\n}\n\n\nclass ConditionalBernoulli(object):\n \"\"\"A Bernoulli distribution conditioned on Tensor inputs via a fc net.\"\"\"\n\n def __init__(self,\n size,\n hidden_layer_sizes,\n hidden_activation_fn=tf.nn.tanh,\n initializers=None,\n bias_init=0.0,\n name=\"conditional_bernoulli\"):\n \"\"\"Creates a conditional Bernoulli distribution.\n\n Args:\n size: The dimension of the random variable.\n hidden_layer_sizes: The sizes of the hidden layers of the fully connected\n network used to condition the distribution on the inputs.\n hidden_activation_fn: The activation function to use on the hidden layers\n of the fully connected network.\n initializers: The variable intiializers to use for the fully connected\n network. The network is implemented using snt.nets.MLP so it must be a\n dictionary mapping the keys 'w' and 'b' to the initializers for the\n weights and biases. 
Defaults to xavier for the weights and zeros for the\n biases when initializers is None.\n bias_init: A scalar or vector Tensor that is added to the output of the\n fully-connected network that parameterizes the mean of this\n distribution.\n name: The name of this distribution, used for sonnet scoping.\n \"\"\"\n self.bias_init = bias_init\n self.size = size\n if initializers is None:\n initializers = DEFAULT_INITIALIZERS\n self.fcnet = snt.nets.MLP(\n output_sizes=hidden_layer_sizes + [size],\n activation=hidden_activation_fn,\n initializers=initializers,\n activate_final=False,\n use_bias=True,\n name=name + \"_fcnet\")\n\n def condition(self, tensor_list):\n \"\"\"Computes the p parameter of the Bernoulli distribution.\"\"\"\n # Remove None's from tensor_list\n tensor_list = [t for t in tensor_list if t is not None]\n concatted_inputs = tf.concat(tensor_list, axis=-1)\n input_dim = concatted_inputs.get_shape().as_list()[-1]\n raw_input_shape = tf.shape(concatted_inputs)[:-1]\n fcnet_input_shape = [tf.reduce_prod(raw_input_shape), input_dim]\n fcnet_inputs = tf.reshape(concatted_inputs, fcnet_input_shape)\n outs = self.fcnet(fcnet_inputs) + self.bias_init\n # Reshape outputs to the original shape.\n output_size = tf.concat([raw_input_shape, [self.size]], axis=0)\n return tf.reshape(outs, output_size)\n\n def __call__(self, *args, **kwargs):\n p = self.condition(args)\n if kwargs.get(\"stop_gradient\", False):\n p = tf.stop_gradient(p)\n return tfd.Bernoulli(logits=p)\n\n\nclass ConditionalNormal(object):\n \"\"\"A Normal distribution conditioned on Tensor inputs via a fc network.\"\"\"\n\n def __init__(self,\n size,\n hidden_layer_sizes,\n mean_center=None,\n sigma_min=0.0,\n raw_sigma_bias=0.25,\n hidden_activation_fn=tf.nn.tanh,\n initializers=None,\n name=\"conditional_normal\"):\n \"\"\"Creates a conditional Normal distribution.\n\n Args:\n size: The dimension of the random variable.\n hidden_layer_sizes: The sizes of the hidden layers of the fully connected\n network used to condition the distribution on the inputs.\n mean_center: Optionally, mean center the data using this Tensor as the\n mean.\n sigma_min: The minimum standard deviation allowed, a scalar.\n raw_sigma_bias: A scalar that is added to the raw standard deviation\n output from the fully connected network. Set to 0.25 by default to\n prevent standard deviations close to 0.\n hidden_activation_fn: The activation function to use on the hidden layers\n of the fully connected network.\n initializers: The variable intitializers to use for the fully connected\n network. The network is implemented using snt.nets.MLP so it must be a\n dictionary mapping the keys 'w' and 'b' to the initializers for the\n weights and biases. 
Defaults to xavier for the weights and zeros for the\n biases when initializers is None.\n name: The name of this distribution, used for sonnet scoping.\n \"\"\"\n self.sigma_min = sigma_min\n self.raw_sigma_bias = raw_sigma_bias\n self.name = name\n self.mean_center = mean_center\n if initializers is None:\n initializers = DEFAULT_INITIALIZERS\n self.fcnet = snt.nets.MLP(\n output_sizes=hidden_layer_sizes + [2 * size],\n activation=hidden_activation_fn,\n initializers=initializers,\n activate_final=False,\n use_bias=True,\n name=name + \"_fcnet\")\n\n def condition(self, tensor_list):\n \"\"\"Computes the parameters of a normal distribution based on the inputs.\"\"\"\n # Remove None's from tensor_list\n tensor_list = [t for t in tensor_list if t is not None]\n inputs = tf.concat(tensor_list, axis=1)\n if self.mean_center is not None:\n inputs -= self.mean_center\n outs = self.fcnet(inputs)\n mu, sigma = tf.split(outs, 2, axis=1)\n sigma = tf.maximum(\n tf.nn.softplus(sigma + self.raw_sigma_bias), self.sigma_min)\n return mu, sigma\n\n def __call__(self, *args, **kwargs):\n \"\"\"Creates a normal distribution conditioned on the inputs.\"\"\"\n mu, sigma = self.condition(args)\n\n # Optional stop_gradient argument stops the parameters of the distribution.\n # TODO(gjt): This only works for 1 latent layer networks.\n if kwargs.get(\"stop_gradient\", False):\n mu = tf.stop_gradient(mu)\n sigma = tf.stop_gradient(sigma)\n return tfd.Normal(loc=mu, scale=sigma)\n\n\ndef iwae(p_z,\n p_x_given_z,\n q_z,\n observations,\n num_samples,\n cvs,\n contexts=None,\n antithetic=False):\n \"\"\"Computes a gradient of the IWAE estimator.\n\n Args:\n p_z: The prior. Should be a callable that optionally accepts a conditioning\n context and returns a tfp.distributions.Distribution which has the\n log_prob and sample methods implemented. The distribution should be over a\n [batch_size, latent_dim] space.\n p_x_given_z: The likelihood. Should be a callable that accepts as input a\n tensor of shape [num_samples, batch_size, latent_size + context_size] and\n returns a tfd.Distribution over a [num_samples, batch_size, data_dim]\n space.\n q_z: The proposal, should be a callable which accepts a batch of\n observations of shape [batch_size, data_dim] and returns a distribution\n over [batch_size, latent_dim].\n observations: A float Tensor of shape [batch_size, data_dim] containing the\n observations.\n num_samples: The number of samples for the IWAE estimator.\n cvs: Control variate variables.\n contexts: A float Tensor of shape [batch_size, context_dim] containing the\n contexts. 
(Optionally, none)\n antithetic: Whether to use antithetic sampling.\n\n Returns:\n estimators: Dictionary of tuples (objective, neg_model_loss,\n neg_inference_network_loss).\n \"\"\"\n alpha, beta, gamma, delta = cvs\n batch_size = tf.shape(observations)[0]\n proposal = q_z(observations, contexts, stop_gradient=False)\n # [num_samples, batch_size, latent_size]\n\n # If antithetic sampling, draw half of the samples and use the antithetics\n # for the other half.\n if antithetic:\n z_pos = proposal.sample(sample_shape=[num_samples // 2])\n z_neg = 2 * proposal.loc - z_pos\n z = tf.concat((z_pos, z_neg), axis=0)\n else:\n z = proposal.sample(sample_shape=[num_samples])\n\n tiled_contexts = None\n if contexts is not None:\n tiled_contexts = tf.tile(tf.expand_dims(contexts, 0), [num_samples, 1, 1])\n likelihood = p_x_given_z(z, tiled_contexts)\n # Before reduce_sum is [num_samples, batch_size, latent_dim].\n # Sum over the latent dim.\n log_q_z = tf.reduce_sum(proposal.log_prob(z), axis=-1)\n # Before reduce_sum is [num_samples, batch_size, latent_dim].\n # Sum over latent dim.\n prior = p_z(contexts)\n log_p_z = tf.reduce_sum(prior.log_prob(z), axis=-1)\n # Before reduce_sum is [num_samples, batch_size, data_dim]\n log_p_x_given_z = tf.reduce_sum(likelihood.log_prob(observations), axis=-1)\n\n log_weights = log_p_z + log_p_x_given_z - log_q_z\n log_sum_weight = tf.reduce_logsumexp(log_weights, axis=0)\n log_avg_weight = log_sum_weight - tf.log(tf.to_float(num_samples))\n normalized_weights = tf.stop_gradient(tf.nn.softmax(log_weights, axis=0))\n\n if FLAGS.image_summary:\n best_index = tf.to_int32(tf.argmax(normalized_weights, axis=0))\n indices = tf.stack((best_index, tf.range(0, batch_size)), axis=-1)\n best_images = tf.gather_nd(likelihood.probs_parameter(), indices)\n\n if FLAGS.dataset == \"struct_mnist\":\n tf.summary.image(\"bottom_half\",\n tf.reshape(best_images, [batch_size, -1, 28, 1]))\n else:\n tf.summary.image(\"output\", tf.reshape(best_images,\n [batch_size, -1, 28, 1]))\n tf.summary.image(\"input\", tf.reshape(observations, [batch_size, -1, 28, 1]))\n\n # Compute gradient estimators\n model_loss = log_avg_weight\n estimators = {}\n\n estimators[\"iwae\"] = (log_avg_weight, log_avg_weight, log_avg_weight)\n\n stopped_z_log_q_z = tf.reduce_sum(\n proposal.log_prob(tf.stop_gradient(z)), axis=-1)\n estimators[\"rws\"] = (log_avg_weight, model_loss,\n tf.reduce_sum(\n normalized_weights * stopped_z_log_q_z, axis=0))\n\n # Doubly reparameterized\n stopped_proposal = q_z(observations, contexts, stop_gradient=True)\n stopped_log_q_z = tf.reduce_sum(stopped_proposal.log_prob(z), axis=-1)\n stopped_log_weights = log_p_z + log_p_x_given_z - stopped_log_q_z\n sq_normalized_weights = tf.square(normalized_weights)\n\n estimators[\"stl\"] = (log_avg_weight, model_loss,\n tf.reduce_sum(\n normalized_weights * stopped_log_weights, axis=0))\n estimators[\"dreg\"] = (log_avg_weight, model_loss,\n tf.reduce_sum(\n sq_normalized_weights * stopped_log_weights,\n axis=0))\n estimators[\"rws-dreg\"] = (\n log_avg_weight, model_loss,\n tf.reduce_sum(\n (normalized_weights - sq_normalized_weights) * stopped_log_weights,\n axis=0))\n\n # Add normed versions\n normalized_sq_normalized_weights = (\n sq_normalized_weights / tf.reduce_sum(\n sq_normalized_weights, axis=0, keepdims=True))\n estimators[\"dreg-norm\"] = (\n log_avg_weight, model_loss,\n tf.reduce_sum(\n normalized_sq_normalized_weights * stopped_log_weights, axis=0))\n\n rws_dregs_weights = normalized_weights - sq_normalized_weights\n 
normalized_rws_dregs_weights = rws_dregs_weights / tf.reduce_sum(\n rws_dregs_weights, axis=0, keepdims=True)\n estimators[\"rws-dreg-norm\"] = (\n log_avg_weight, model_loss,\n tf.reduce_sum(normalized_rws_dregs_weights * stopped_log_weights, axis=0))\n\n estimators[\"dreg-alpha\"] = (log_avg_weight, model_loss,\n (1 - FLAGS.alpha) * estimators[\"dreg\"][-1] +\n FLAGS.alpha * estimators[\"rws-dreg\"][-1])\n\n # Jackknife\n loo_log_weights = tf.tile(\n tf.expand_dims(tf.transpose(log_weights), -1), [1, 1, num_samples])\n loo_log_weights = tf.matrix_set_diag(\n loo_log_weights, -np.inf * tf.ones([batch_size, num_samples]))\n loo_log_avg_weight = tf.reduce_mean(\n tf.reduce_logsumexp(loo_log_weights, axis=1) - tf.log(\n tf.to_float(num_samples - 1)),\n axis=-1)\n jk_model_loss = num_samples * log_avg_weight - (\n num_samples - 1) * loo_log_avg_weight\n\n estimators[\"jk\"] = (jk_model_loss, jk_model_loss, jk_model_loss)\n\n # Compute JK w/ DReG for the inference network\n loo_normalized_weights = tf.reduce_mean(\n tf.square(tf.stop_gradient(tf.nn.softmax(loo_log_weights, axis=1))),\n axis=-1)\n estimators[\"jk-dreg\"] = (\n jk_model_loss, jk_model_loss, num_samples * tf.reduce_sum(\n sq_normalized_weights * stopped_log_weights, axis=0) -\n (num_samples - 1) * tf.reduce_sum(\n tf.transpose(loo_normalized_weights) * stopped_log_weights, axis=0))\n\n # Compute control variates\n loo_baseline = tf.expand_dims(tf.transpose(log_weights), -1)\n loo_baseline = tf.tile(loo_baseline, [1, 1, num_samples])\n loo_baseline = tf.matrix_set_diag(\n loo_baseline, -np.inf * tf.ones_like(tf.transpose(log_weights)))\n loo_baseline = tf.reduce_logsumexp(loo_baseline, axis=1)\n loo_baseline = tf.transpose(loo_baseline)\n\n learning_signal = tf.stop_gradient(tf.expand_dims(\n log_avg_weight, 0)) - (1 - gamma) * tf.stop_gradient(loo_baseline)\n vimco = tf.reduce_sum(learning_signal * stopped_z_log_q_z, axis=0)\n\n first_part = alpha * vimco + (1 - alpha) * tf.reduce_sum(\n normalized_weights * stopped_log_weights, axis=0)\n second_part = ((1 - beta) * (tf.reduce_sum(\n ((1 - delta) / tf.to_float(num_samples) - normalized_weights) *\n stopped_z_log_q_z,\n axis=0)) + beta * tf.reduce_sum(\n (sq_normalized_weights - normalized_weights) * stopped_log_weights,\n axis=0))\n estimators[\"dreg-cv\"] = (log_avg_weight, model_loss, first_part + second_part)\n\n return estimators\n" ]
[ [ "tensorflow.contrib.layers.xavier_initializer", "tensorflow.ones", "tensorflow.reshape", "tensorflow.to_float", "tensorflow.tile", "tensorflow.nn.softmax", "tensorflow.square", "tensorflow.shape", "tensorflow.concat", "tensorflow.argmax", "tensorflow.transpose", "tensorflow.nn.softplus", "tensorflow.split", "tensorflow.reduce_logsumexp", "tensorflow.range", "tensorflow.expand_dims", "tensorflow.reduce_prod", "tensorflow.reduce_sum", "tensorflow.zeros_initializer", "tensorflow.stop_gradient" ] ]
thepinetree/noisepage-pilot
[ "97ab95d2458fe3974aac13935094be17fca69522" ]
[ "evaluation/utils.py" ]
[ "import numpy as np\nimport xml.etree.ElementTree as ET\nfrom psycopg import cursor\nimport yaml\n\ndef param_sweep_space(ps_dist):\n '''Construct parameter sweep space from configuration.\n \n Parameters:\n ------------\n ps_dist : Dict[str, List[Any]]\n Contains parameter name to candidate value lists.\n\n Returns:\n ---------\n ps_space : List[Tuple(List[str], List[Any])]\n Return parameter sweeping space as a list of tuples of pairs of list.\n Parse name as name level, handle linear range scales.\n '''\n ps_space = []\n assert ps_dist is not None and len(ps_dist) > 0, 'Parameter space should not be empty.\\nCheck the configuration file.'\n for name in ps_dist:\n val_list = ps_dist[name]\n assert val_list is not None and len(val_list) > 0, 'Parameter space should not be empty.\\nCheck the configuration file.'\n \n xml_level = list(name.strip().split('.'))\n\n # Linear scale range\n if (val_list[0] == '.range'):\n def convert_range(range_list):\n '''Convert a list starting with .range to linear range.\n\n Parameters:\n ------------\n range_list : List[Any]\n The range statement. First should be .range; should be 3 or 4 in length.\n \n Returns:\n -----------\n values : List[Num]\n The list of numeric values generated.\n '''\n LENGTH_NO_STEP = 3\n LENGTH_WITH_STEP = 4\n assert len(range_list) in (LENGTH_NO_STEP, LENGTH_WITH_STEP), 'Incorrect argument number for linear scale.\\nCheck the configuration file.'\n if (len(range_list) == LENGTH_NO_STEP):\n start, end, step = range_list[1], range_list[2], 1\n else:\n start, end, step = range_list[1:]\n\n values = np.arange(start, end, step).tolist()\n return values\n\n val_list = convert_range(val_list)\n\n ps_space.append((xml_level, val_list))\n\n return ps_space\n\ndef inject_param_xml(file_path, parameters):\n '''Inject and re-write XML file with given parameters.\n\n Parameters:\n -----------\n file_path : str\n XML file path to inject.\n parameters : List[Tuple(List[str], Any)]\n The list of parameter names and values to inject.\n '''\n conf_etree = ET.parse(file_path)\n root = conf_etree.getroot()\n for name_level, val in parameters:\n cursor = root\n # Traverse XML name levels.\n for key in name_level:\n cursor = cursor.find(key)\n if cursor is None:\n break\n\n assert cursor is not None, 'Fail to inject parameter in conf file,' + str(name_level) + '\\nCheck the format of target XML file.'\n cursor.text = str(val)\n\n conf_etree.write(file_path)\n\ndef parameter_sweep(ps_space, f, closure=None):\n '''Recursive calling routine on parameter space.\n\n Parameters:\n ------------\n ps_space : List[Tuple(List[str], List[Any])]\n Parameter space to sweep. each element is parameter name level + value list.\n f : (List[Tuple(List[str], Any)], Dict[str, Any])->Any\n Callback function to be executed in the sweep, takes parameter combination\n and closure dict.\n closure : Dict[str, Any]\n Closure environment passed from caller.\n '''\n assert(len(ps_space) > 0), 'Parameter space should not be empty.\\nCheck the configuration file.'\n\n if closure is None:\n closure = {}\n\n # Maintain the traverse states. 
Each cursor starts at -1 so the first increment selects index 0 of its level.\n cursor_stack = [-1]\n parameters = []\n while len(cursor_stack) > 0:\n depth = len(cursor_stack) - 1\n cursor_stack[depth] += 1\n name_level, val_list = ps_space[depth]\n if (cursor_stack[depth] >= len(val_list)):\n # Backtrack.\n del cursor_stack[-1]\n if len(parameters) > 0:\n del parameters[-1]\n continue\n\n name_level = ps_space[depth][0]\n value = ps_space[depth][1][cursor_stack[depth]]\n parameters.append((name_level, value))\n\n if len(cursor_stack) >= len(ps_space):\n f(parameters, closure)\n del parameters[-1]\n else:\n cursor_stack.append(-1)\n\nif __name__ == '__main__':\n # This standalone block of code is only used for tests.\n config = yaml.load(open(\"config/behavior/default.yaml\", \"r\", encoding=\"utf-8\"), Loader=yaml.FullLoader)\n ps = param_sweep_space(config['datagen']['param_sweep'])\n print(ps)\n\n def f(params, closure):\n print('_'.join([x[0][-1] + '_' + str(x[1]) for x in params]))\n \n parameter_sweep(ps, f)\n\n\n " ]
[ [ "numpy.arange" ] ]
prasoonpatidar/multiagentRL-resource-sharing
[ "e63ba7fc3c7ab019e9fd109cd45b739e3322152f" ]
[ "evaluation/RL_performance.py" ]
[ "'''\nInput:\n--priceHistory = {}, each value of the dic is a N len 1D list\n--purchaseHistroy = {}, each value of the dic is a M len 1D list, X_ij\n--providedResourceHistory ={}, each value of the dic is 2D array, size-M*N, Z_ij\n--sellersUtilityHistory = {}, each value of the dic is a N len 1D list\n--buyersUtilityHistory = {}, each value of the dic is a M len 1D list\n-- time--training steps\n'''\n\n# import python libraries\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\n\n# import custom libraries\n# from training_plot import , getX, sliceList\n\ntrain_dir = '../results/training'\neval_dir = '../results/evaluation'\nsup_plot_dir = '../results/plots/10k_run3am'\n\nmarket_configs = ['tightMarket','looseMarket','distMarket','monoMarket']\ntrain_configs = ['q_r1','wolf_r1','dqn_r2','ddqn_r2','dqn_duel_r2']\nenv_ids = [0,1]\nsliceCompare = 100\n# slice = 200\n\nlabel_dict = {\n 'q_r1':'Q-Learning',\n 'wolf_r1': 'WoLF-PHC',\n 'dqn_r2':'Deep Q-Learning',\n 'ddqn_r2':'Double Q-Learning',\n 'dqn_duel_r2':'Dueling Networks'\n}\n\ncolor_label = {\n 'q_r1':'r',\n 'wolf_r1':'b',\n 'dqn_r2':'c',\n 'ddqn_r2':'m',\n 'dqn_duel_r2':'g'\n}\n\nfor market_config in market_configs:\n for seller_id in range(5):\n for env_id in env_ids:\n # load compare results\n compare_dir = '../results/compare'\n compare_name = f'compare_{market_config}_seller_{seller_id}_env_{env_id}'\n print(f\"Plotting {compare_name}\")\n compare_results = pickle.load(open(f'{compare_dir}/{compare_name}.pb','rb'))\n results_plot_dir =f\"{compare_dir}/plots_10k_5pm/{compare_name}\"\n if not os.path.exists(results_plot_dir):\n os.makedirs((results_plot_dir))\n def get_performance(results):\n pricesHistory = results['price_history'] # P_ij\n purchasesHistory = results['demand_history'] # X_ij\n providedResourcesHistory = results['supply_history'] # Z_ij\n sellerUtilitiesHistory = results['seller_utilties'] # fi_j\n buyerUtilitiesHistory = results['buyer_utilties'] # fi_i\n return pricesHistory, purchasesHistory, providedResourcesHistory, sellerUtilitiesHistory, buyerUtilitiesHistory\n\n class performance():\n def __init__(self, pricesHistory, purchasesHistory, providedResourcesHistory, sellerUtilitiesHistory, buyerUtilitiesHistory):\n self.__pricesHistory = pricesHistory\n self.__purchasesHistory = purchasesHistory\n self.__providedResourcesHistory = providedResourcesHistory\n self.__sellerUtilitiesHistory = sellerUtilitiesHistory\n self.__buyerUtilitiesHistory = buyerUtilitiesHistory\n self.__times = len(self.buyerUtilitiesHistory)\n\n def trading(self):\n # axis = 0, get each buyer/devices total provided resource, sum z_ij over j\n # axis = 1, get each seller/providers total provided resource, sum z_ij over i\n provided_buyer = []\n provided_seller = []\n for values in self.__providedResourcesHistory.values():\n tem_buyer = values.sum(axis=0)\n temp_seller = values.sum(axis=1)\n provided_buyer.append(tem_buyer)\n provided_seller.append(temp_seller)\n return provided_seller, provided_buyer\n\n def get_mean_min_max(self, data, varName):\n # get market mean, min, and max of the variable at each step\n mean = []\n min = []\n max = []\n mean_min_max = {}\n if isinstance(data, dict):\n for values in data.values():\n mean.append(values.mean())\n min.append(values.min())\n max.append(values.max())\n else:\n for values in data:\n mean.append(values.mean())\n min.append(values.min())\n max.append(values.max())\n mean_min_max[varName] = varName\n mean_min_max['mean'] = mean\n mean_min_max['max'] 
= max\n mean_min_max['min'] = min\n return mean_min_max\n\n def plot_mean_min_max(self, data, title, num=None):\n # num --is the latest number of points to be plotted\n fontsize = 12\n if num == None:\n mean = data['mean']\n max = data['max']\n min = data['min']\n else:\n mean = data['mean'][-num:]\n max = data['max'][-num:]\n min = data['min'][-num:]\n x = [*range(len(mean))]\n plt.figure()\n plt.plot(x, mean, c='r')\n plt.fill_between(x, min, max, alpha=0.3)\n plt.xlabel('iterations', fontsize=fontsize)\n plt.ylabel('Resources', fontsize=fontsize)\n plt.title(title, fontsize=fontsize)\n plt.savefig(f'{results_plot_dir}/{title}.png', dpi=150)\n plt.close()\n\n def provided_buyer_plot(self, num=None):\n provided_seller, provided_buyer = self.trading()\n data = self.get_mean_min_max(provided_buyer, 'provided_buyers')\n self.plot_mean_min_max(data, 'provided_buyers', num=num)\n\n def provided_seller_plot(self, num=None):\n provided_seller, provided_buyer = self.trading()\n data = self.get_mean_min_max(provided_seller, 'provided_seller')\n self.plot_mean_min_max(data, 'provided_seller', num=num)\n\n def priceHistroy_plot(self, num=None):\n name = 'prices History'\n data = self.get_mean_min_max(self.__pricesHistory, name)\n self.plot_mean_min_max(data, name, num=num)\n\n def purchaseHistory_plot(self, num=None):\n name = ' purchase History'\n data = self.get_mean_min_max(self.__purchasesHistory, name)\n self.plot_mean_min_max(data, name, num=num)\n\n def sellerUtilitiesHistory_plot(self, num=None):\n name = ' seller Utilities History '\n data = self.get_mean_min_max(self.__purchasesHistory, name)\n self.plot_mean_min_max(data, name, num=num)\n\n def buyerUtilitiesHistory_plot(self, num=None):\n name = ' buyer Utilities History '\n data = self.get_mean_min_max(self.__purchasesHistory, name)\n self.plot_mean_min_max(data, name, num=num)\n\n def socialWelfare_plot(self, num=None):\n socialWelfare = []\n for t in range(self.__times):\n buyerUtility = self.__buyerUtilitiesHistory[t]\n sellerUtility = self.__sellerUtilitiesHistory[t]\n socialWelfare.append(sum(buyerUtility) + sum(sellerUtility))\n fontsize = 12\n title = 'socialWelfare'\n x = [*range(self.__times)]\n plt.figure()\n plt.plot(x, socialWelfare)\n plt.xlabel('iterations', fontsize=fontsize)\n plt.ylabel('Social welfare', fontsize=fontsize)\n plt.title(title, fontsize=fontsize)\n plt.savefig(f'{results_plot_dir}/{title}.png', dpi=150)\n plt.close()\n\n\n def slice_params(compare_seller_id, params):\n env_params = [np.concatenate((val[:compare_seller_id],val[compare_seller_id+1:])) for val in params]\n mean = []\n for values in env_params:\n mean.append(sum(values) / len(values))\n # list(itertools.chain.from_iterable(mean))\n t_params = [val[compare_seller_id] for val in params]\n return mean, t_params\n\n def get_socialWelfare(iterates, buyerUtilitiesHistory, sellerUtilitiesHistory):\n socialWelfare = []\n for t in range(iterates):\n buyU = list(buyerUtilitiesHistory[t])\n # list(itertools.chain.from_iterable(buyU))\n sellerU = sellerUtilitiesHistory[t]\n socialWelfare.append(np.sum(buyU) + np.sum(sellerU))\n return socialWelfare\n\n\n def data_slice(compare_results):\n # container for separated data\n socialWelfares = []\n prices = []\n seller_utilities = []\n labels = ['market average']\n i = -1\n for results in compare_results:\n i += 1\n compare_seller_id = results['compare_seller_id']\n agent_name = results['compared_agents'][compare_seller_id]\n labels.append(agent_name)\n\n pricesHistory, purchasesHistory, \\\n 
providedResourcesHistory, \\\n sellerUtilitiesHistory, buyerUtilitiesHistory = get_performance(results)\n\n # get the average market price, and target agent's price\n\n env_price, t_price = slice_params(compare_seller_id, pricesHistory)\n # get the average market seller utility and target agent's utility\n env_utility, t_utility = slice_params(compare_seller_id, sellerUtilitiesHistory)\n\n if i==0:\n # only append the env parameters at the first time\n prices.append(env_price)\n seller_utilities.append(env_utility)\n prices.append(t_price)\n seller_utilities.append(t_utility)\n\n iterates = len(purchasesHistory)\n socialWelfare = get_socialWelfare(iterates, buyerUtilitiesHistory, sellerUtilitiesHistory)\n socialWelfares.append(socialWelfare)\n return prices, seller_utilities, labels, socialWelfares\n\n def data_slice(compare_results):\n target_utilities = []\n target_prices = []\n env_utilities = None\n env_prices = None\n social_welfares = []\n labels = []\n for results in compare_results:\n compare_seller_id = results['compare_seller_id']\n agent_name = results['compared_agents'][compare_seller_id]\n labels.append(agent_name)\n\n pricesHistory, purchasesHistory, \\\n providedResourcesHistory, \\\n sellerUtilitiesHistory, buyerUtilitiesHistory = get_performance(results)\n\n # get the average market price, and target agent's price\n\n env_price, t_price = slice_params(compare_seller_id, pricesHistory)\n # get the average market seller utility and target agent's utility\n env_utility, t_utility = slice_params(compare_seller_id, sellerUtilitiesHistory)\n\n target_utilities.append(t_utility)\n target_prices.append(t_price)\n if env_utilities is None:\n env_utilities = np.array(env_utility)\n env_prices = np.array(env_price)\n else:\n env_utilities += np.array(env_utility)\n env_prices += np.array(env_price)\n\n iterates = len(purchasesHistory)\n socialWelfare = get_socialWelfare(iterates, buyerUtilitiesHistory, sellerUtilitiesHistory)\n social_welfares.append(socialWelfare)\n\n # get average over env utilities and prices:\n env_utilities = list(env_utilities/len(compare_results))\n env_prices = list(env_prices/len(compare_results))\n labels.insert(0,'market average')\n target_utilities.insert(0,env_utilities)\n target_prices.insert(0,env_prices)\n social_welfares.insert(0,None)\n return target_prices, target_utilities, labels, social_welfares\n\n def get_average(segment):\n average = sum(segment)/len(segment)\n return average\n\n\n def slice_data(data, slice):\n container = []\n for iter in range(1, len(data)):\n if iter % slice == 0:\n ndata = data[:iter]\n segment = ndata[-slice:]\n container.append(get_average(segment))\n return container\n\n def sliceList(dataList, slice):\n container = []\n for data in dataList:\n if data is not None:\n dataS = slice_data(data, slice)\n container.append(dataS)\n else:\n container.append(None)\n return container\n\n def getX(sl, l):\n x = []\n for iter in range(1, len(l)):\n if iter % sl == 0:\n x.append(iter)\n return x\n\n def plot_sellers(x, ys, labels, title, n_fig=None):\n # x = [*range(len(ys[0]))]\n # x = getX(sliceCompare, ys[0])\n plt.figure(n_fig)\n for id in range(len(ys)):\n if ys[id] is None:\n continue\n if labels[id]=='market average':\n plt.plot(x, ys[id], label='Market Average',linewidth=5,alpha=0.5,color='k')\n else:\n plt.plot(x, ys[id], label=label_dict[labels[id]].split(\"_\")[0],color=color_label[labels[id]])\n plt.legend(loc=\"upper left\")\n plt.title(title)\n if 'Prices' in title:\n plt.ylim(20,50)\n 
plt.savefig(f'{results_plot_dir}/{title}.png', dpi=150)\n plt.close()\n\n\n prices, seller_utilities, labels, socialWelfares = data_slice(compare_results)\n prices = [list(p) for p in prices]\n x = getX(sliceCompare, prices[0])\n plot_sellers(x, sliceList(prices, sliceCompare), labels, 'Seller Prices', n_fig=0)\n plot_sellers(x,sliceList(seller_utilities, sliceCompare), labels, 'Seller Utilities', n_fig=1)\n plot_sellers(x,sliceList(socialWelfares, sliceCompare), labels, 'Social Welfares', n_fig=2)" ]
[ [ "numpy.concatenate", "numpy.array", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "numpy.sum", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylabel" ] ]
tsingcbx99/TorchSSL
[ "2011241f57ed60000902adf0acf09786429a8c21" ]
[ "models/meanteacher/meanteacher.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport os\nimport contextlib\nfrom train_utils import AverageMeter\n\nfrom .meanteacher_utils import consistency_loss, Get_Scalar\nfrom train_utils import ce_loss, wd_loss, EMA, Bn_Controller\n\nfrom sklearn.metrics import *\nimport numpy as np\nfrom copy import deepcopy\n\n\nclass MeanTeacher:\n def __init__(self, net_builder, num_classes, ema_m, lambda_u, \\\n it=0, num_eval_iter=1000, tb_log=None, logger=None):\n \"\"\"\n class Fixmatch contains setter of data_loader, optimizer, and model update methods.\n Args:\n net_builder: backbone network class (see net_builder in utils.py)\n num_classes: # of label classes \n ema_m: momentum of exponential moving average for eval_model\n lambda_u: ratio of unsupervised loss to supervised loss\n it: initial iteration count\n num_eval_iter: freqeuncy of iteration (after 500,000 iters)\n tb_log: tensorboard writer (see train_utils.py)\n logger: logger (see utils.py)\n \"\"\"\n\n super(MeanTeacher, self).__init__()\n\n # momentum update param\n self.loader = {}\n self.num_classes = num_classes\n self.ema_m = ema_m\n\n # create the encoders\n # network is builded only by num_classes,\n # other configs are covered in main.py\n\n self.model = net_builder(num_classes=num_classes)\n self.ema_model = deepcopy(self.model)\n\n self.num_eval_iter = num_eval_iter\n self.lambda_u = lambda_u\n self.tb_log = tb_log\n\n self.optimizer = None\n self.scheduler = None\n\n self.it = 0\n\n self.logger = logger\n self.print_fn = print if logger is None else logger.info\n\n self.bn_controller = Bn_Controller()\n\n def set_data_loader(self, loader_dict):\n self.loader_dict = loader_dict\n self.print_fn(f'[!] 
data loader keys: {self.loader_dict.keys()}')\n\n def set_optimizer(self, optimizer, scheduler=None):\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n def train(self, args, logger=None):\n\n ngpus_per_node = torch.cuda.device_count()\n\n # lb: labeled, ulb: unlabeled\n self.model.train()\n\n # EMA\n self.ema = EMA(self.model, self.ema_m)\n self.ema.register()\n if args.resume == True:\n self.ema.load(self.ema_model)\n\n # for gpu profiling\n start_batch = torch.cuda.Event(enable_timing=True)\n end_batch = torch.cuda.Event(enable_timing=True)\n start_run = torch.cuda.Event(enable_timing=True)\n end_run = torch.cuda.Event(enable_timing=True)\n\n start_batch.record()\n best_eval_acc, best_it = 0.0, 0\n\n scaler = GradScaler()\n amp_cm = autocast if args.amp else contextlib.nullcontext\n\n # evaluate once to verify that the checkpoint was loaded correctly\n if args.resume == True:\n eval_dict = self.evaluate(args=args)\n print(eval_dict)\n\n for (_, x_lb, y_lb), (_, x_ulb_w1, x_ulb_w2) in zip(self.loader_dict['train_lb'],\n self.loader_dict['train_ulb']):\n\n # prevent the training iterations from exceeding args.num_train_iter\n if self.it > args.num_train_iter:\n break\n\n end_batch.record()\n torch.cuda.synchronize()\n start_run.record()\n\n # to CUDA\n x_lb, x_ulb_w1, x_ulb_w2 = x_lb.cuda(args.gpu), x_ulb_w1.cuda(args.gpu), x_ulb_w2.cuda(args.gpu)\n y_lb = y_lb.cuda(args.gpu)\n\n # inference and calculate sup/unsup losses\n with amp_cm():\n # TODO: note that mean teacher feeds the batches through the model in two separate passes\n logits_x_lb = self.model(x_lb)\n\n self.bn_controller.freeze_bn(self.model)\n logits_x_ulb_w2 = self.model(x_ulb_w2)\n self.bn_controller.unfreeze_bn(self.model)\n\n self.ema.apply_shadow()\n with torch.no_grad():\n self.bn_controller.freeze_bn(self.model)\n logits_x_ulb_w1 = self.model(x_ulb_w1)\n self.bn_controller.unfreeze_bn(self.model)\n self.ema.restore()\n\n sup_loss = ce_loss(logits_x_lb, y_lb, reduction='mean') # CE_loss for labeled data\n\n warm_up = float(np.clip((self.it) / (args.unsup_warm_up * args.num_train_iter), 0., 1.))\n unsup_loss = consistency_loss(logits_x_ulb_w2, logits_x_ulb_w1) # MSE loss for unlabeled data\n total_loss = sup_loss + warm_up * self.lambda_u * unsup_loss\n\n # parameter updates\n if args.amp:\n scaler.scale(total_loss).backward()\n if (args.clip > 0):\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.clip)\n scaler.step(self.optimizer)\n scaler.update()\n else:\n total_loss.backward()\n if (args.clip > 0):\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.clip)\n self.optimizer.step()\n\n self.scheduler.step()\n self.ema.update()\n self.model.zero_grad()\n\n end_run.record()\n torch.cuda.synchronize()\n\n # tensorboard_dict update\n tb_dict = {}\n tb_dict['train/sup_loss'] = sup_loss.detach()\n tb_dict['train/unsup_loss'] = unsup_loss.detach()\n tb_dict['train/total_loss'] = total_loss.detach()\n tb_dict['lr'] = self.optimizer.param_groups[0]['lr']\n tb_dict['train/prefetch_time'] = start_batch.elapsed_time(end_batch) / 1000.\n tb_dict['train/run_time'] = start_run.elapsed_time(end_run) / 1000.\n\n # save the latest model every 10K steps and the best model every num_eval_iter steps\n if self.it % 10000 == 0:\n save_path = os.path.join(args.save_dir, args.save_name)\n if not args.multiprocessing_distributed or \\\n (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):\n self.save_model('latest_model.pth', save_path)\n\n if self.it % self.num_eval_iter == 0:\n eval_dict = self.evaluate(args=args)\n tb_dict.update(eval_dict)\n\n save_path = 
os.path.join(args.save_dir, args.save_name)\n\n if tb_dict['eval/top-1-acc'] > best_eval_acc:\n best_eval_acc = tb_dict['eval/top-1-acc']\n best_it = self.it\n\n self.print_fn(\n f\"{self.it} iteration, USE_EMA: {self.ema_m != 0}, {tb_dict}, BEST_EVAL_ACC: {best_eval_acc}, at {best_it} iters\")\n\n if not args.multiprocessing_distributed or \\\n (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):\n\n if self.it == best_it:\n self.save_model('model_best.pth', save_path)\n\n if not self.tb_log is None:\n self.tb_log.update(tb_dict, self.it)\n\n self.it += 1\n del tb_dict\n start_batch.record()\n if self.it > 0.8 * args.num_train_iter:\n self.num_eval_iter = 1000\n\n eval_dict = self.evaluate(args=args)\n eval_dict.update({'eval/best_acc': best_eval_acc, 'eval/best_it': best_it})\n return eval_dict\n\n @torch.no_grad()\n def evaluate(self, eval_loader=None, args=None):\n self.model.eval()\n self.ema.apply_shadow()\n if eval_loader is None:\n eval_loader = self.loader_dict['eval']\n total_loss = 0.0\n total_num = 0.0\n y_true = []\n y_pred = []\n y_logits = []\n for _, x, y in eval_loader:\n x, y = x.cuda(args.gpu), y.cuda(args.gpu)\n num_batch = x.shape[0]\n total_num += num_batch\n logits = self.model(x)\n loss = F.cross_entropy(logits, y, reduction='mean')\n y_true.extend(y.cpu().tolist())\n y_pred.extend(torch.max(logits, dim=-1)[1].cpu().tolist())\n y_logits.extend(torch.softmax(logits, dim=-1).cpu().tolist())\n total_loss += loss.detach() * num_batch\n top1 = accuracy_score(y_true, y_pred)\n top5 = top_k_accuracy_score(y_true, y_logits, k=5)\n cf_mat = confusion_matrix(y_true, y_pred, normalize='true')\n self.print_fn('confusion matrix:\\n' + np.array_str(cf_mat))\n self.ema.restore()\n self.model.train()\n return {'eval/loss': total_loss / total_num, 'eval/top-1-acc': top1, 'eval/top-5-acc': top5}\n\n def save_model(self, save_name, save_path):\n if self.it < 1000000:\n return\n save_filename = os.path.join(save_path, save_name)\n # copy EMA parameters to ema_model for saving with model as temp\n self.model.eval()\n self.ema.apply_shadow()\n ema_model = deepcopy(self.model)\n self.ema.restore()\n self.model.train()\n\n torch.save({'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n 'it': self.it,\n 'ema_model': ema_model.state_dict()},\n save_filename)\n\n self.print_fn(f\"model saved: {save_filename}\")\n\n def load_model(self, load_path):\n checkpoint = torch.load(load_path)\n\n self.model.load_state_dict(checkpoint['model'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n self.it = checkpoint['it']\n self.ema_model.load_state_dict(checkpoint['ema_model'])\n self.print_fn('model loaded')\n\n # Abandoned in Meanteacher\n def interleave_offsets(self, batch, nu):\n groups = [batch // (nu + 1)] * (nu + 1)\n for x in range(batch - sum(groups)):\n groups[-x - 1] += 1\n offsets = [0]\n for g in groups:\n offsets.append(offsets[-1] + g)\n assert offsets[-1] == batch\n return offsets\n\n def interleave(self, xy, batch):\n nu = len(xy) - 1\n offsets = self.interleave_offsets(batch, nu)\n xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]\n for i in range(1, nu + 1):\n xy[0][i], xy[i][i] = xy[i][i], xy[0][i]\n return [torch.cat(v, dim=0) for v in xy]\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "torch.cat", "torch.cuda.synchronize", "torch.cuda.Event", "torch.max", "torch.no_grad", "numpy.array_str", "torch.softmax", "torch.cuda.device_count", "torch.nn.functional.cross_entropy", "torch.load", "numpy.clip", "torch.cuda.amp.GradScaler" ] ]
pooja-kabra/A_star
[ "a3bda60b735e993b85991d38eb03002a8cf4f4e8" ]
[ "A_star.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 7 19:01:51 2021\r\n\r\n@author: Paras Savnani and Pooja Kabra\r\n\"\"\"\r\nimport time\r\nimport pygame\r\nimport sys\r\nimport math\r\nimport copy\r\nfrom collections import deque\r\n# from queue import PriorityQueue\r\nimport numpy as np\r\n\r\n# Note: Input nodes after approximating only\r\n\r\n#-----------------------------------------------------------------------------\r\n# Optimization results\r\n#-----------------------------------------------------------------------------\r\n# The deque class from collections module is used as the data structure to store the nodes because it is faster compared to the python's inbuilt list.\r\n# String is fast in comparion but searching and conversion from list to string is time consuming(experimentally verified)\r\n# Cannot implement binay search to improve searching speeds because we have to search in an unsorted array.\r\n \r\n#-----------------------------------------------------------------------------\r\n# Node Class\r\n#-----------------------------------------------------------------------------\r\nclass Node:\r\n \"\"\" This class stores the attributes(like state and parent node) and methods(like check_obstacle_space, actions, generate children etc.) \r\n for the each Node in the Map.\"\"\" \r\n \r\n #-------------------------------------------------------------------------\r\n \r\n def __init__(self, state, parent, cost_to_come, cost_to_go, cost, distance_step, theta_step_rad):\r\n \r\n self.state = state # state is the list of x,y, theta coordinates of the node\r\n self.parent = parent # parent attribute stores the parent node of current node\r\n self.cost_to_come = cost_to_come\r\n self.cost_to_go = cost_to_go\r\n self.cost = cost\r\n self.distance_step = distance_step\r\n self.theta_step_rad = theta_step_rad\r\n\r\n #-------------------------------------------------------------------------\r\n \r\n def __repr__(self):\r\n return str(self.state) # This method returns the state information of the node\r\n \r\n #-------------------------------------------------------------------------\r\n \r\n def check_obstacle_space(self, pot_node):\r\n \"\"\"Defining obstacle space constraints using half plane equations.\r\n Furthermore obstcales are inflated by 15 units(radius + clearance) to incorporate the mobile robot\r\n IMP_NOTE: For concave obstacles divide the obstacles into smaller convex obstacles and take 'OR' between them to find the constraints.\r\n Example obstacle 3 and 5.\"\"\"\r\n\r\n x, y = pot_node[0], pot_node[1]\r\n\r\n # Boundary condition\r\n if (x < 0) or (x > 400) or (y < 0) or (y > 300): \r\n return True\r\n \r\n # Obstacle 1 (Circle)\r\n elif (x-90)**2 + (y-70)**2 - 2500 <= 0: \r\n return True\r\n \r\n # Obstacle 2 (Rectangle) \r\n elif (y- 150.727 + 1.4377*x >= 0) and (y - 117.176 - 0.6960*x <= 0) and (y - 466.181 + 1.4419*x <= 0) and (y - 56.308 - 0.6959*x >= 0): \r\n return True\r\n \r\n # Obstacle 3 (C section)\r\n elif (x >= 185 and x <= 225 and y <= 295 and y >= 215) or (x >= 225 and x <= 245 and y <= 295 and y >= 255) or (x >= 225 and x <= 245 and y <=245 and y >= 215): \r\n return True\r\n \r\n # Obstacle 4 (Ellipse)\r\n elif ((x-246)**2)/75**2 + ((y-145)**2)/45**2 - 1 <= 0: \r\n return True\r\n \r\n else:\r\n # Node in Freespace\r\n return False \r\n \r\n #-------------------------------------------------------------------------\r\n \r\n def generate_child(self, dir):\r\n # This method performs the up action on the current node\r\n # Pot node in not an 
instance of the Node class, it is just a tuple of x,y,theta values (i.e. similar to the state of a node)\r\n pot_node = (self.state[0] + distance_step*(math.cos(self.state[2]+ dir*theta_step_rad)), self.state[1] + distance_step*(math.sin(self.state[2]+ dir*theta_step_rad)), self.state[2]+ dir*theta_step_rad)\r\n pot_node = self.approximate_node(pot_node)\r\n if not self.check_obstacle_space(pot_node):\r\n cost_to_come = self.cost_to_come + distance_step\r\n cost_to_go = ((self.state[0] - goal.state[0])**2 + (self.state[1] - goal.state[1])**2)**0.5\r\n cost = cost_to_come + cost_to_go\r\n new_node = Node(copy.deepcopy(self.state), Node(self.state, self.parent, self.cost_to_come, self.cost_to_go, self.cost, self.distance_step, self.theta_step_rad), cost_to_come, cost_to_go, cost, self.distance_step, self.theta_step_rad)\r\n new_node.state[0] = pot_node[0]\r\n new_node.state[1] = pot_node[1]\r\n new_node.state[2] = pot_node[2]\r\n return new_node\r\n return None\r\n\r\n #-------------------------------------------------------------------------\r\n \r\n def approximate_node(self, pot_node):\r\n x, y, theta_rad = pot_node[0], pot_node[1], pot_node[2]\r\n theta_deg = (theta_rad*180)/math.pi\r\n dec_x, dec_y = math.modf(x)[0], math.modf(y)[0]\r\n \r\n # X approximation\r\n if dec_x < 0.25:\r\n x = math.floor(x)\r\n elif 0.25 <= dec_x and dec_x <= 0.5:\r\n x = math.floor(x) + 0.5\r\n elif 0.5 < dec_x and dec_x < 0.75:\r\n x = math.floor(x) + 0.5\r\n else:\r\n x = math.ceil(x)\r\n \r\n # Y approximation\r\n if dec_y < 0.25:\r\n y = math.floor(y)\r\n elif 0.25 <= dec_y and dec_y <= 0.5:\r\n y = math.floor(y) + 0.5\r\n elif 0.5 < dec_y and dec_y < 0.75:\r\n y = math.floor(y) + 0.5\r\n else:\r\n y = math.ceil(y)\r\n\r\n # theta approximation\r\n theta_deg_norm = theta_deg/30\r\n theta_dec, theta_int = math.modf(theta_deg_norm)[0], math.modf(theta_deg_norm)[1]\r\n print(theta_int, theta_dec)\r\n if theta_dec < 0.5:\r\n theta_deg = theta_int*30\r\n else:\r\n theta_deg = (theta_int+1)*30\r\n \r\n theta_deg = theta_deg % 360\r\n theta_rad = (theta_deg*math.pi)/180\r\n return x, y, theta_rad\r\n\r\n #-------------------------------------------------------------------------\r\n \r\n def goal_threshold(self):\r\n # This method is used to check if the robot is within the goal radius, because it may not reach the exact location due to the limited action set\r\n x_goal = goal.state[0]\r\n y_goal = goal.state[1]\r\n \r\n if (self.state[0]-x_goal)**2 + (self.state[1]-y_goal)**2 - 100 <= 0: # 10 pixel radius\r\n return True\r\n else:\r\n return False\r\n #-------------------------------------------------------------------------\r\n \r\n def generate_children(self):\r\n # This method applies the action functions to generate the children nodes of the current node \r\n \r\n children = []\r\n \r\n for direction in [0, 1, 2, -1, -2]:\r\n child = self.generate_child(direction)\r\n if child:\r\n children.append(child)\r\n \r\n return children\r\n \r\n #-------------------------------------------------------------------------\r\n \r\n def find_path(self, goal_node):\r\n print(\"Shortest Path: \")\r\n # Backtracking the parent node to find the shortest path\r\n # Print sequence GOAL node to START node\r\n current_node = goal_node\r\n path = []\r\n\r\n while(current_node.state[0:2] != self.state[0:2]):\r\n path.append(current_node.state)\r\n current_node = current_node.parent\r\n print(current_node)\r\n \r\n return path\r\n\r\n#-----------------------------------------------------------------------------\r\n# 
Main\r\n#-----------------------------------------------------------------------------\r\n\r\nif __name__== \"__main__\":\r\n\r\n global goal\r\n\r\n while(1):\r\n \r\n #---------------------------------------------------------------------\r\n # User Input\r\n #---------------------------------------------------------------------\r\n # Start node\r\n x1, y1, theta_s = map(int, input(\"Please input the X, Y, theta(in degrees) coordinates of the start node!\\n\").split())\r\n # Goal node\r\n x2, y2, theta_g = map(int, input(\"Please input the X, Y, theta(in degrees) coordinates of the goal node!\\n\").split())\r\n # Theta\r\n theta_step = int(input(\"Please enter the theta(in degrees) between consecutive actions\\n\")) ## 30\r\n theta_step_rad = math.radians(theta_step)\r\n # Step size\r\n distance_step = int(input(\"Please input the step size\\n\"))\r\n \r\n input_node = Node([x1, y1, math.radians(theta_s)] , None, 0, ((x1 - x2)**2 + (y1 - y2)**2)**0.5, ((x1 - x2)**2 + (y1 - y2)**2)**0.5, \r\n distance_step, theta_step_rad)\r\n \r\n goal = Node([x2, y2, math.radians(theta_g)] , None, 9999999, 0, 9999999, \r\n distance_step, theta_step_rad)\r\n \r\n # obstacle check\r\n if goal.check_obstacle_space(goal.state) or input_node.check_obstacle_space(input_node.state):\r\n print(\"Input Coordinates are in obstacle space!\")\r\n else:\r\n break \r\n\r\n # A plain list is used as the open list here and re-sorted by cost each iteration; the deque alternative is kept commented out below\r\n # queue = deque()\r\n queue = []\r\n queue.append(input_node)\r\n\r\n # array used to check for visited states\r\n visited_states = np.zeros((601,801,12))\r\n \r\n # maintained for visualizing exploration in pygame\r\n visited_states1 = []\r\n \r\n if input_node.parent != None:\r\n visited_states1.append([input_node.state, input_node.parent.state])\r\n else:\r\n visited_states1.append([input_node.state, None])\r\n \r\n \r\n t = time.time()\r\n\r\n # A* implementation\r\n while(1):\r\n\r\n queue.sort(key = lambda x: x.cost)\r\n current_node = queue.pop(0)\r\n # print(current_node, end = '\\n')\r\n \r\n #---------------------------------------------------------------------\r\n # Goal found!\r\n #---------------------------------------------------------------------\r\n if current_node.goal_threshold():\r\n print(\"Goal Found\\n\") \r\n shortest = input_node.find_path(current_node)\r\n break\r\n\r\n #---------------------------------------------------------------------\r\n # Goal not found yet, explore on\r\n #---------------------------------------------------------------------\r\n children = current_node.generate_children() \r\n\r\n for child in children:\r\n r = int(2 * child.state[1]) # node coordinates even after approximation will be\r\n c = int(2 * child.state[0]) # multiples of 0.5, to use as array indices, multiply by 2\r\n ang = int(math.degrees(child.state[2])/30)\r\n \r\n # if child.state not in visited_states:\r\n if visited_states[r][c][ang] == 0:\r\n visited_states[r][c][ang] = 1 # mark visited\r\n visited_states1.append([child.state, child.parent.state])\r\n queue.append(child)\r\n \r\n else: \r\n parent_node = current_node.parent \r\n if current_node.cost > parent_node.cost_to_come + current_node.distance_step + current_node.cost_to_go: # new cost is less\r\n current_node.cost_to_come = parent_node.cost_to_come + current_node.distance_step # update cost to come\r\n current_node.cost = current_node.cost_to_come + current_node.cost_to_go # update cost\r\n \r\n print(\"Execution time\", 
time.time()-t)\r\n\r\n#______________________Pygame Animation_______________________________________ \r\n\r\n print(\"Running pygame animation..................\")\r\n pygame.init()\r\n screen = pygame.display.set_mode((800, 600))\r\n counter = 0\r\n while True:\r\n\r\n # Map Generation in pygame\r\n screen.fill((0,0,0))\r\n \r\n # Inflated obstacles\r\n pygame.draw.circle(screen, (255,0,0), (180, 600-140), 100)\r\n pygame.draw.polygon(screen, (255,0,0), ((88.50, 600-174.214), (31.45, 600-256.24),(326.48 ,600-461.586), (383.43, 600-379.468)))\r\n pygame.draw.polygon(screen, (255,0,0), ((370,10), (490,10), (490, 170), (370, 170)))\r\n pygame.draw.ellipse(screen, (255,0,0), ((342,220, 300, 180)))\r\n\r\n # Goal threshold\r\n pygame.draw.circle(screen, (0,255,0), (2*goal.state[0], 600-2*goal.state[1]), 10)\r\n \r\n #Old obstacles\r\n pygame.draw.circle(screen, (0,0, 255), (180, 600-140), 70)\r\n pygame.draw.polygon(screen, (0,0, 255), ((96, 600-216), (73.06, 600-248.76),(318.8, 600-420.832), (341.74, 600-388.072)))\r\n pygame.draw.polygon(screen, (0,0, 255), ((400,40),(460,40),(460,60),(420,60),(420,120), (460,120), (460, 140), (400, 140)))\r\n pygame.draw.ellipse(screen, (0,0, 255), ((372,250, 240, 120)))\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if counter ==0:\r\n \r\n #-----------------------------------------------------------------\r\n # Draw Exploration\r\n #-----------------------------------------------------------------\r\n for node in visited_states1:\r\n # time.sleep(0.1)\r\n print(node)\r\n child = node[0]\r\n parent = node[1]\r\n\r\n if parent != None: \r\n pygame.draw.line(screen, (255,255,255), (child[0]*2, 600-child[1]*2), (parent[0]*2, 600-parent[1]*2)) \r\n \r\n # pygame.draw.circle(screen, (255,255,255), (state[0]*2, 600-state[1]*2), 1) \r\n pygame.display.update()\r\n \r\n \r\n #-----------------------------------------------------------------\r\n # Draw Shortest Path\r\n #-----------------------------------------------------------------\r\n for state in shortest:\r\n pygame.draw.circle(screen, (255, 0, 0), (state[0]*2, 600-state[1]*2), 5)\r\n pygame.display.update()\r\n \r\n counter +=1 \r\n" ]
[ [ "numpy.zeros" ] ]
Amorteza1376/ML-Regression-Analysis
[ "d5c2eed721eaf9647a813b4f970b92248973c559" ]
[ "src/Q3-(4-5).py" ]
[ "import sys\nimport scipy.io as sio\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport openpyxl\nfrom pathlib import Path\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import r2_score\n\n\n\nxlsx_file = Path('ENB2012_data.xlsx')\nwb_obj = openpyxl.load_workbook(xlsx_file) \nsheet = wb_obj.active\n\nX = []\nfor row in sheet.iter_rows(min_row=2, max_row=769, min_col=1, max_col=8):\n data = []\n for cell in row:\n data.append(cell.value)\n X.append(data)\nX = np.array(X)\ny = []\nfor row in sheet.iter_rows(min_row=2, max_row=769, min_col=9, max_col=10):\n data = []\n for cell in row:\n data.append(cell.value)\n y.append(data)\ny = np.array(y)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size=600, test_size=168, shuffle=False)\n\n\ndef cv_10folds(X):\n\tn = X.shape[0]\n\ti = 1\n\twhile i <= 10:\n\t\tidx = np.arange(n * (i - 1) / 10, n * i / 10, dtype=int)\n\t\tyield idx, idx\n\t\ti += 1\n\n\nmulti_output_regr = MultiOutputRegressor(GradientBoostingRegressor(alpha=0.007, validation_fraction=0.1, n_iter_no_change=45))\n\ncv = cv_10folds(X)\ncross_val_score = cross_val_score(multi_output_regr, X, y, cv=cv)\nprint(\"cross_val_score:\", cross_val_score)\n\nmulti_output_regr.fit(X_train, y_train)\n\n# Predict on new data\ny_multi_gd = multi_output_regr.predict(X_test)\n\nplt.figure()\nplt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k', c=\"r\", label=\"Data\")\nplt.scatter(y_multi_gd[:, 0], y_multi_gd[:, 1], edgecolor='w', c=\"b\", marker=\"s\", label=\"Multi GD score=%.2f\" % multi_output_regr.score(X_test, y_test))\nplt.xlabel(\"target 1\")\nplt.ylabel(\"target 2\")\nplt.title(\"MultiOutputRegressor\")\nplt.legend()\nplt.show()" ]
[ [ "numpy.array", "sklearn.ensemble.GradientBoostingRegressor", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.arange", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter" ] ]
calebkoy/pulse-check
[ "38c1b6c5b5bf5e264b69ef0cfe6498cf474a3f0b" ]
[ "app.py" ]
[ "import pickle\nimport re\n\nimport emoji\nimport flask\nimport numpy as np\nimport requests\nimport twitter\nimport yaml\n\nfrom sentiment import Sentiment\n\napp = flask.Flask(__name__, template_folder='templates')\n\ndef process_yaml():\n with open(\"config.yaml\") as file:\n return yaml.safe_load(file)\n\ndef create_bearer_token(data):\n return data[\"search_tweets_api\"][\"bearer_token\"]\n\ndef make_request(bearer_token, url):\n headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n response = requests.request(\"GET\", url, headers=headers)\n return response.json()\n\ndef get_tweet_data(topic, max_results): \n base_url = 'https://api.twitter.com/2/tweets/search/recent' \n parameters = f'query={topic}&tweet.fields=created_at,lang,author_id&max_results={max_results}&expansions=referenced_tweets.id'\n endpoint_url = f'{base_url}?{parameters}' \n bearer_token = create_bearer_token(process_yaml())\n return make_request(bearer_token, endpoint_url)\n\ndef remove_html_character_entities(tweet):\n tweet['text'] = re.sub(r'&[a-zA-Z]+;', '', tweet['text'])\n\ndef remove_urls(tweet):\n tweet['text'] = re.sub(r'www\\.\\S+|https?://\\S+', '', tweet['text'])\n\ndef remove_emoji(tweet):\n tweet['text'] = emoji.get_emoji_regexp().sub(u'', tweet['text'])\n\ndef remove_at_mentions(tweet):\n tweet['text'] = re.sub(r'@\\S+', '', tweet['text'])\n\ndef remove_non_alpha_or_space_characters(tweet): \n tweet['text'] = re.sub(r'[^a-zA-Z\\s]', '', tweet['text'])\n\ndef remove_many_consecutive_repeated_characters(tweet):\n tweet['text'] = re.sub(r'([a-zA-Z])\\1{3,}', r'\\1\\1', tweet['text'])\n\ndef remove_short_words(tweet):\n tweet['text'] = re.sub(r'\\b\\w{1,2}\\b', '', tweet['text'])\n\ndef process_tweets(response_json):\n ellipsis_unicode = '\\u2026' \n retweet_abbreviation = 'RT' \n retweeted_tweets = {}\n for tweet in response_json['data']: \n if (tweet['text'].startswith(retweet_abbreviation) and \n tweet['text'].endswith(ellipsis_unicode)): \n \n for tweet_reference in tweet['referenced_tweets']:\n if tweet_reference['type'] == 'retweeted':\n retweeted_tweet_id = tweet_reference['id']\n break \n if retweeted_tweet_id in retweeted_tweets:\n tweet['text'] = retweeted_tweets[retweeted_tweet_id]\n elif 'includes' in response_json:\n for referenced_tweet in response_json['includes']['tweets']:\n if referenced_tweet['id'] == retweeted_tweet_id:\n full_tweet = referenced_tweet['text']\n retweeted_tweets[retweeted_tweet_id] = full_tweet\n tweet['text'] = full_tweet\n break \n remove_urls(tweet)\n remove_html_character_entities(tweet) \n remove_emoji(tweet)\n remove_at_mentions(tweet) \n remove_non_alpha_or_space_characters(tweet)\n remove_many_consecutive_repeated_characters(tweet)\n remove_short_words(tweet)\n\ndef compute_sentiment_percentages(predictions):\n total_predictions = len(predictions) \n percent_positive = (sum((predictions == Sentiment.POSITIVE.value).astype(int)) / \n total_predictions)\n percent_negative = (sum((predictions == Sentiment.NEGATIVE.value).astype(int)) / \n total_predictions)\n return {\"positive\": round(percent_positive * 100, 1), \n \"negative\": round(percent_negative * 100, 1)}\n\ndef get_ids_by_sentiment(predictions, tweet_ids, author_ids):\n positive_indices = np.asarray(predictions == Sentiment.POSITIVE.value).nonzero()\n negative_indices = np.asarray(predictions == Sentiment.NEGATIVE.value).nonzero()\n positive_tweet_ids = tweet_ids[positive_indices]\n negative_tweet_ids = tweet_ids[negative_indices]\n positive_author_ids = author_ids[positive_indices]\n 
negative_author_ids = author_ids[negative_indices]\n positive_ids = list(zip(positive_author_ids, positive_tweet_ids))\n negative_ids = list(zip(negative_author_ids, negative_tweet_ids))\n return {\"positive\": positive_ids, \n \"negative\": negative_ids}\n\[email protected](404)\ndef page_not_found(e):\n return flask.render_template('404.html'), 404\n\[email protected]('/')\ndef main():\n if not 'topic' in flask.request.args:\n return flask.render_template('main.html') \n else:\n max_results = 20 \n topic = flask.request.args['topic'].strip()\n topic = re.sub(r'[^a-zA-Z\\s]', '', topic)\n response_json = get_tweet_data(topic, max_results) \n if 'data' in response_json: \n response_json['data'] = ([tweet for tweet in response_json['data'] \n if tweet['lang'] == \"en\" \n or tweet['lang'] == \"en-gb\"]) \n \n if (not response_json['data']):\n return flask.render_template('main.html', no_show=True)\n \n process_tweets(response_json) \n tweets = []\n tweet_ids = []\n author_ids = [] \n for tweet in response_json['data']:\n if tweet['text'] not in tweets: \n tweets.append(tweet['text']) \n tweet_ids.append(tweet['id']) \n author_ids.append(tweet['author_id'])\n with open('grid_search_NB_clf_sentiment140.pkl', 'rb') as f:\n classifier = pickle.load(f)\n predictions = classifier.predict(tweets)\n sentiment_percentages = compute_sentiment_percentages(predictions)\n tweet_ids_by_sentiment = get_ids_by_sentiment(predictions, \n np.array(tweet_ids),\n np.array(author_ids)) \n return flask.render_template('main.html', \n original_topic=topic, \n sentiment_percentages=sentiment_percentages,\n tweet_ids=tweet_ids_by_sentiment, \n total_tweets=len(tweets)) \n else:\n return flask.render_template('main.html', no_show=True)\n\nif __name__ == \"__main__\":\n app.run()\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
sdi1100041/SLEIPNIR
[ "02dd3eca8574899fd3f0e287b1a050e76e5ba0de" ]
[ "ODIN_interface.py" ]
[ "from odin.core.GP_risk_minimization import GPRiskMinimization\nfrom odin.core.GP_approx_risk_minimization import GPApproxRiskMinimization\nfrom odin.core.ODE_risk_minimization import ODERiskMinimization\nfrom odin.core.ODE_approx_risk_minimization import ODEApproxRiskMinimization\nimport numpy as np\nimport tensorflow as tf\n\nimport argparse\nparser = argparse.ArgumentParser()\n\nparser.add_argument( \"--gp_npoints\", type=int, default=-1, help=\"number of points to train the Gaussian Process\")\nparser.add_argument( \"--gp_sampling\", type=str, default=\"first\", choices={\"random\",\"uniform\",\"first\"}, help=\"method of sampling for the Gaussian Process\")\nparser.add_argument( \"--profiling\", action='store_true', help=\"profiling\")\nparser.add_argument( \"--tensorboard\", action='store_true', help=\"tensorboard profiling\")\nparser.add_argument( \"--load_hyperparameters\", action='store_true',help=\"load hyperparameters from file hyperparams\")\nparser.add_argument( \"--GP_approx_method\", type=str, default=\"QFF\",choices={\"QFF\",\"RFF\",\"RFF_bias\"}, help=\"approximation method for Gaussian Process\")\nparser.add_argument( \"--Risk_min_approx_method\", type=str, default=\"QFF\",choices={\"QFF\",\"RFF\",\"RFF_bias\"}, help=\"approximation method for Risk Minimization\")\nparser.add_argument( \"--n_features\", type=int, default=-1, help=\"number of features for QFF approximation\")\nparser.add_argument( \"--state_normalization\", help=\"normalize the states\", action='store_true')\nparser.add_argument( \"--train_gamma\", help=\"gamma training\", action='store_true')\nparser.add_argument(\"--model\", type=str, required = True, choices={\"LV\",\"Lorenz63\",\"PT\",\"QUADRO\",\"FHN\",\"Glucose\"}, help=\"ODEs model\")\nargs = parser.parse_args()\n\nsystem_obs = np.load(\"state_observations.npy\")\nt_obs = np.load(\"time_points.npy\")\n\nt_obs = np.array(t_obs).reshape(-1, 1)\nstate_bounds = None\n\nprofiling_dir = None\ntensorboard_dir = None\nlogging_dir= args.model + \"_\" + str(t_obs.shape[0]-1) + \"_\" + args.Risk_min_approx_method + \"_\"+ str(args.n_features)\nif args.profiling:\n profiling_dir = logging_dir\nif args.tensorboard:\n tensorboard_dir = logging_dir\n\nif args.gp_npoints == -1:\n args.gp_npoints = t_obs.shape[0]\n\nif (args.n_features != -1) and ((args.n_features %2) == 1):\n args.n_features+=1\n\nif not args.load_hyperparameters:\n if args.gp_sampling == \"first\":\n sample_indices=np.arange(np.min([t_obs.shape[0],args.gp_npoints]))\n elif args.gp_sampling == \"uniform\":\n sample_indices=np.arange(0,t_obs.shape[0],np.int16(np.ceil(t_obs.shape[0]/args.gp_npoints)))\n else:\n sample_indices=np.random.choice(t_obs.shape[0], np.min([t_obs.shape[0],args.gp_npoints]), replace=False)\n\n if args.n_features != -1 :\n gp_risk_minimizer = GPApproxRiskMinimization(system_obs[:,sample_indices],\n t_obs[sample_indices,:], gp_kernel='RBF',\n single_gp=False,\n time_normalization=False,\n state_normalization=args.state_normalization,\n QFF_approx = args.n_features,\n Approx_method = args.GP_approx_method)\n else:\n gp_risk_minimizer = GPRiskMinimization(system_obs[:,sample_indices],\n t_obs[sample_indices,:], gp_kernel='RBF',\n single_gp=False,\n time_normalization=False,\n state_normalization=args.state_normalization)\n\n gp_risk_minimizer.build_model()\n gp_parameters=gp_risk_minimizer.train()\n hyperparameter_training_time = gp_parameters[0]\n gp_parameters = gp_parameters[1:]\nelse:\n hyperparameter_training_time = 0\n gp_parameters = np.expand_dims( np.expand_dims( 
np.loadtxt(\"hyperparams\"),-1 ), -1)\n\nif args.model == \"LV\":\n from odin.utils.trainable_models import TrainableLotkaVolterra\n theta_bounds = np.array([[0.0, 100.0], [0.0, 100.0], [0.0, 100.0], [0.0, 100.0]])\n trainable_model = TrainableLotkaVolterra(system_obs, t_obs, bounds=theta_bounds)\n state_bounds = np.array([[0.0, 10.0], [0.0, 10.0]])\nelif args.model == \"Lorenz63\" :\n from odin.utils.trainable_models import TrainableLorenz63\n theta_bounds = np.array([[0.0, 100.0], [0.0, 100.0], [0.0, 10.0]])# Trainable object\n trainable_model = TrainableLorenz63(system_obs, t_obs, bounds=theta_bounds)\nelif args.model == \"PT\":\n from odin.utils.trainable_models import TrainableProteinTransduction\n theta_bounds = np.array([[1e-8, 10.0], [1e-8, 10.0], [1e-8, 10.0], [1e-8, 10.0],[1e-8, 10.0], [1e-8, 10.0]])\n trainable_model = TrainableProteinTransduction(system_obs, t_obs, bounds=theta_bounds)\n state_bounds = np.array([[0.0, 2.0], [0.0, 2.0], [0.0, 2.0], [0.0, 2.0], [0.0, 2.0]])\nelif args.model == \"QUADRO\":\n from odin.utils.trainable_models import TrainableQuadrocopter\n theta_bounds = np.array([[1e-8, 10.0]]*7)\n trainable_model = TrainableQuadrocopter(system_obs, t_obs, bounds=theta_bounds)\n state_bounds = np.array([[-200, 150], [-200, 150], [-200, 150], [-2, 2], [-2, 2], [-2, 2], [-20, 20], [-20, 20], [-20, 20],\n [-800, 100], [-800, 100], [-800, 100]])\nelif args.model == \"FHN\":\n from odin.utils.trainable_models import TrainableFitzHughNagumo\n theta_bounds = np.array([[0.0, 100.0], [0.0, 100.0], [0.0, 100.0]])# Trainable object\n trainable_model = TrainableFitzHughNagumo(system_obs, t_obs, bounds=theta_bounds)\nelif args.model == \"Glucose\":\n from odin.utils.trainable_models import TrainableGlucose\n theta_bounds = np.array([[-1, 5]]*10)\n trainable_model = TrainableGlucose(system_obs, t_obs, bounds=theta_bounds)\n\nif args.n_features == -1 :\n test_risk_minimizer = ODERiskMinimization(trainable_model, system_obs,\n t_obs, gp_kernel='RBF',\n optimizer='L-BFGS-B',\n initial_gamma=0.3,\n train_gamma=args.train_gamma,\n state_bounds=state_bounds,\n single_gp=False,\n basinhopping=False,\n time_normalization=False,\n state_normalization=args.state_normalization,\n runtime_prof_dir=profiling_dir,\n\t\t\t\t tensorboard_summary_dir=tensorboard_dir)\n test_risk_minimizer.build_model()\n theta, secs = test_risk_minimizer.train(gp_parameters=gp_parameters)\nelse:\n test_approx_risk_minimizer = ODEApproxRiskMinimization(trainable_model, system_obs,\n t_obs, gp_kernel='RBF',\n optimizer='L-BFGS-B',\n initial_gamma=0.3,\n train_gamma=args.train_gamma,\n state_bounds=state_bounds,\n single_gp=False,\n basinhopping=False,\n time_normalization=False,\n state_normalization=args.state_normalization,\n QFF_features = args.n_features,\n Approx_method = args.Risk_min_approx_method, \n runtime_prof_dir=profiling_dir,\n tensorboard_summary_dir=tensorboard_dir)\n test_approx_risk_minimizer.build_model()\n theta, secs = test_approx_risk_minimizer.train(gp_parameters=gp_parameters)\n\nnp.savetxt(\"hyperparameter_training_times.csv\",np.array([hyperparameter_training_time]))\nnp.savetxt(\"optimization_times.csv\",np.array([secs]))\nnp.savetxt(\"thetas.csv\",theta,delimiter=',')\n" ]
[ [ "numpy.array", "numpy.ceil", "numpy.savetxt", "numpy.load", "numpy.min", "numpy.loadtxt" ] ]
SamrahSyed/Visionet_ML_Project_Web_App
[ "a792dcbabf2e112836d87698bc423531c6c18e77" ]
[ "run3.py" ]
[ "from mylib.centroidtracker import CentroidTracker\nfrom mylib.trackableobject import TrackableObject\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nfrom mylib.mailer import Mailer\nfrom mylib import config, thread\nimport time, schedule, csv\nimport numpy as np\nimport argparse, imutils\nimport time, dlib, cv2, datetime\nfrom itertools import zip_longest\nimport os\n\n\nt0 = time.time()\n\nprint(os.getcwd())\ndef run(input,output):\n prototxt= \"mobilenet_ssd/MobileNetSSD_deploy.prototxt\"\n model= \"mobilenet_ssd/MobileNetSSD_deploy.caffemodel\"\n default_confidence=0.4\n skip_frames=30\n\n CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n net = cv2.dnn.readNetFromCaffe(prototxt, model)\n\n # if a video path was not supplied, grab a reference to the ip camer\n if not input:\n print(\"[INFO] Starting the live stream..\")\n vs = VideoStream(config.url).start()\n time.sleep(2.0)\n\n # otherwise, grab a reference to the video file\n else:\n print(\"[INFO] Starting the video..\")\n vs = cv2.VideoCapture(input)\n\n # initialize the video writer (we'll instantiate later if need be)\n writer = None\n# fourcc = cv2.VideoWriter_fourcc(*'MP42') #(*'MP42')\n# out = cv2.VideoWriter('output0.mp4', fourcc, 20.0, (402, 300))\n \n# Below VideoWriter object will create\n# a frame of above defined The output \n# is stored in 'filename.avi' file.\n# result = cv2.VideoWriter('filename1.mp4', \n# cv2.VideoWriter_fourcc(*'MJPG'),\n# 10, size)\n # initialize the frame dimensions (we'll set them as soon as we read\n # the first frame from the video)\n W = None\n H = None\n\n # instantiate our centroid tracker, then initialize a list to store\n # each of our dlib correlation trackers, followed by a dictionary to\n # map each unique object ID to a TrackableObject\n ct = CentroidTracker(maxDisappeared=40, maxDistance=50)\n trackers = []\n trackableObjects = {}\n\n # initialize the total number of frames processed thus far, along\n # with the total number of objects that have moved either up or down\n totalFrames = 0\n totalDown = 0\n totalUp = 0\n x = []\n empty=[]\n empty1=[]\n\n # start the frames per second throughput estimator\n fps = FPS().start()\n\n if config.Thread:\n vs = thread.ThreadingClass(config.url)\n\n # loop over frames from the video stream\n while True:\n # grab the next frame and handle if we are reading from either\n # VideoCapture or VideoStream\n frame = vs.read()\n frame = frame[1] if input else frame\n\n # if we are viewing a video and we did not grab a frame then we\n # have reached the end of the video\n if input is not None and frame is None:\n break\n\n # resize the frame to have a maximum width of 500 pixels (the\n # less data we have, the faster we can process it), then convert\n # the frame from BGR to RGB for dlib\n frame = imutils.resize(frame, width = 500)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # if the frame dimensions are empty, set them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # if we are supposed to be writing a video to disk, initialize\n # the writer\n if output is not None and writer is None:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(output, fourcc, 30,\n (W, H), True)\n print(output)\n\n # initialize the current status along with our list of bounding\n # box rectangles returned 
by either (1) our object detector or\n # (2) the correlation trackers\n status = \"Waiting\"\n rects = []\n\n # check to see if we should run a more computationally expensive\n # object detection method to aid our tracker\n if totalFrames % skip_frames == 0:\n # set the status and initialize our new set of object trackers\n status = \"Detecting\"\n trackers = []\n\n # convert the frame to a blob and pass the blob through the\n # network and obtain the detections\n blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated\n # with the prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by requiring a minimum\n # confidence\n if confidence > default_confidence:\n # extract the index of the class label from the\n # detections list\n idx = int(detections[0, 0, i, 1])\n\n # if the class label is not a person, ignore it\n if CLASSES[idx] != \"person\":\n continue\n\n # compute the (x, y)-coordinates of the bounding box\n # for the object\n box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n\n # construct a dlib rectangle object from the bounding\n # box coordinates and then start the dlib correlation\n # tracker\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(startX, startY, endX, endY)\n tracker.start_track(rgb, rect)\n\n # add the tracker to our list of trackers so we can\n # utilize it during skip frames\n trackers.append(tracker)\n\n # otherwise, we should utilize our object *trackers* rather than\n # object *detectors* to obtain a higher frame processing throughput\n else:\n # loop over the trackers\n for tracker in trackers:\n # set the status of our system to be 'tracking' rather\n # than 'waiting' or 'detecting'\n status = \"Tracking\"\n\n # update the tracker and grab the updated position\n tracker.update(rgb)\n pos = tracker.get_position()\n\n # unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n\n # add the bounding box coordinates to the rectangles list\n rects.append((startX, startY, endX, endY))\n\n # draw a horizontal line in the center of the frame -- once an\n # object crosses this line we will determine whether they were\n # moving 'up' or 'down'\n cv2.line(frame, (0, H // 2), (W, H // 2), (0, 0, 0), 3)\n cv2.putText(frame, \"-Prediction border - Entrance-\", (10, H - ((i * 20) + 200)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n\n # use the centroid tracker to associate the (1) old object\n # centroids with (2) the newly computed object centroids\n objects = ct.update(rects)\n\n # loop over the tracked objects\n for (objectID, centroid) in objects.items():\n # check to see if a trackable object exists for the current\n # object ID\n to = trackableObjects.get(objectID, None)\n\n # if there is no existing trackable object, create one\n if to is None:\n to = TrackableObject(objectID, centroid)\n\n # otherwise, there is a trackable object so we can utilize it\n # to determine direction\n else:\n # the difference between the y-coordinate of the *current*\n # centroid and the mean of *previous* centroids will tell\n # us in which direction the object is moving (negative for\n # 'up' and positive for 'down')\n y = [c[1] for c in to.centroids]\n direction = centroid[1] - np.mean(y)\n 
to.centroids.append(centroid)\n\n # check to see if the object has been counted or not\n if not to.counted:\n # if the direction is negative (indicating the object\n # is moving up) AND the centroid is above the center\n # line, count the object\n if direction < 0 and centroid[1] < H // 2:\n totalUp += 1\n empty.append(totalUp)\n to.counted = True\n\n # if the direction is positive (indicating the object\n # is moving down) AND the centroid is below the\n # center line, count the object\n elif direction > 0 and centroid[1] > H // 2:\n totalDown += 1\n empty1.append(totalDown)\n #print(empty1[-1])\n # if the people limit exceeds over threshold, send an email alert\n if sum(x) >= config.Threshold:\n cv2.putText(frame, \"-ALERT: People limit exceeded-\", (10, frame.shape[0] - 80),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)\n if config.ALERT:\n print(\"[INFO] Sending email alert..\")\n Mailer().send(config.MAIL)\n print(\"[INFO] Alert sent\")\n\n to.counted = True\n \n x = []\n # compute the sum of total people inside\n x.append(len(empty1)-len(empty))\n #print(\"Total people inside:\", x)\n\n\n # store the trackable object in our dictionary\n trackableObjects[objectID] = to\n\n # draw both the ID of the object and the centroid of the\n # object on the output frame\n text = \"ID {}\".format(objectID)\n cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)\n\n # construct a tuple of information we will be displaying on the\n info = [\n (\"Exit\", totalUp),\n (\"Enter\", totalDown),\n (\"Status\", status),\n ]\n\n info2 = [\n (\"Total people inside\", x),\n ]\n\n # Display the output\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)\n\n for (i, (k, v)) in enumerate(info2):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (265, H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)\n\n # Initiate a simple log to save data at end of the day\n if config.Log:\n datetimee = [datetime.datetime.now()]\n d = [datetimee, empty1, empty, x]\n export_data = zip_longest(*d, fillvalue = '')\n\n with open('Log.csv', 'w', newline='') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow((\"End Time\", \"In\", \"Out\", \"Total Inside\"))\n wr.writerows(export_data)\n\n\n # show the output frame\n# frame = cv2.resize(frame, (640, 480))\n# out.write(frame)\n cv2.imshow(\"Real-Time Monitoring/Analysis Window\", frame)\n# writer.write(output)\n # frame = cv2.resize(frame, (402, 300))\n writer.write(frame)\n key = cv2.waitKey(1) & 0xFF\n\n# frame_width = int(vs.get(3))\n# frame_height = int(vs.get(4))\n# \n# size = (frame_width, frame_height)\n \n # Below VideoWriter object will create\n # a frame of above defined The output \n # is stored in 'filename.avi' file.\n# result = cv2.VideoWriter(\n# os.path.join('static/uploads','filename.mp4'), \n# cv2.VideoWriter_fourcc(*'MJPG'),\n# 10, size)\n# out = cv2.VideoWriter(\n# os.path.join('static/uploads', 'depth72.mp4'), fourcc, 20.0, (width, height))\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # increment the total number of frames processed thus far and\n # then update the FPS counter\n totalFrames += 1\n fps.update()\n\n if config.Timer:\n # Automatic timer to stop the live stream. 
Set to 8 hours (28800s).\n t1 = time.time()\n num_seconds=(t1-t0)\n if num_seconds > 28800:\n break\n\n # stop the timer and display FPS information\n fps.stop()\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n# return result\n # # if we are not using a video file, stop the camera video stream\n # if not args.get(\"input\", False):\n # vs.stop()\n #\n # # otherwise, release the video file pointer\n # else:\n # vs.release()\n \n # issue 15\n\n# if config.Thread:\n# vs.release()\n#\n# # close any open windows\n# cv2.destroyAllWindows()\n#\n#\n###learn more about different schedules here: https://pypi.org/project/schedule/\n#if config.Scheduler:\n# ##Runs for every 1 second\n# #schedule.every(1).seconds.do(run)\n# ##Runs at every day (9:00 am). You can change it.\n# schedule.every().day.at(\"9:00\").do(run)\n#\n# while 1:\n# schedule.run_pending()\n#\n#else:\n#run(\"videos/example_01.mp4\", \"./static/demo12_output.mp4\")\n#run(\"videos/example_01.mp4\", \"videos/example_01.mp4\")" ]
[ [ "numpy.array", "numpy.arange", "numpy.mean" ] ]
JaroslavHron/PyOP2
[ "3c88f9e7ef572387d8a78ee863b79349bc918272" ]
[ "test/unit/test_matrices.py" ]
[ "# This file is part of PyOP2\n#\n# PyOP2 is Copyright (c) 2012, Imperial College London and\n# others. Please see the AUTHORS file in the main source directory for\n# a full list of copyright holders. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The name of Imperial College London or that of other\n# contributors may not be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS\n# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n# OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom pyop2 import op2\nfrom pyop2.exceptions import MapValueError, ModeValueError\n\nfrom coffee.base import *\n\nfrom petsc4py.PETSc import ScalarType\n\n# Data type\nvaluetype = ScalarType\n\n# Constants\nNUM_ELE = 2\nNUM_NODES = 4\nNUM_DIMS = 2\nlayers = 11\n\nelem_node_map = np.asarray([0, 1, 3, 2, 3, 1], dtype=np.uint32)\n\nxtr_elem_node_map = np.asarray([0, 1, 11, 12, 33, 34, 22, 23, 33, 34, 11, 12], dtype=np.uint32)\n\n\[email protected](scope='module')\ndef nodes():\n return op2.Set(NUM_NODES, \"nodes\")\n\n\[email protected](scope='module')\ndef elements():\n return op2.Set(NUM_ELE, \"elements\")\n\n\[email protected](scope='module')\ndef dnodes(nodes):\n return op2.DataSet(nodes, 1, \"dnodes\")\n\n\[email protected](scope='module')\ndef dvnodes(nodes):\n return op2.DataSet(nodes, 2, \"dvnodes\")\n\n\[email protected](scope='module')\ndef delements(elements):\n return op2.DataSet(elements, 1, \"delements\")\n\n\[email protected](scope='module')\ndef elem_node(elements, nodes):\n return op2.Map(elements, nodes, 3, elem_node_map, \"elem_node\")\n\n\[email protected](scope='module')\ndef mat(elem_node, dnodes):\n sparsity = op2.Sparsity((dnodes, dnodes), (elem_node, elem_node), name=\"sparsity\")\n return op2.Mat(sparsity, valuetype, \"mat\")\n\n\[email protected]\ndef coords(dvnodes):\n coord_vals = np.asarray([(0.0, 0.0), (2.0, 0.0),\n (1.0, 1.0), (0.0, 1.5)],\n dtype=valuetype)\n return op2.Dat(dvnodes, coord_vals, valuetype, \"coords\")\n\n\[email protected](scope='module')\ndef g(request):\n return op2.Global(1, 1.0, np.float64, \"g\")\n\n\[email protected]\ndef f(dnodes):\n f_vals = np.asarray([1.0, 2.0, 3.0, 4.0], dtype=valuetype)\n return op2.Dat(dnodes, f_vals, valuetype, \"f\")\n\n\[email 
\ndef f_vec(dvnodes):\n    f_vals = np.asarray([(1.0, 2.0)] * 4, dtype=valuetype)\n    return op2.Dat(dvnodes, f_vals, valuetype, \"f\")\n\n\n@pytest.fixture(scope='module')\ndef b(dnodes):\n    b_vals = np.zeros(NUM_NODES, dtype=valuetype)\n    return op2.Dat(dnodes, b_vals, valuetype, \"b\")\n\n\n@pytest.fixture(scope='module')\ndef b_vec(dvnodes):\n    b_vals = np.zeros(NUM_NODES * 2, dtype=valuetype)\n    return op2.Dat(dvnodes, b_vals, valuetype, \"b\")\n\n\n@pytest.fixture\ndef x(dnodes):\n    x_vals = np.zeros(NUM_NODES, dtype=valuetype)\n    return op2.Dat(dnodes, x_vals, valuetype, \"x\")\n\n\n@pytest.fixture\ndef x_vec(dvnodes):\n    x_vals = np.zeros(NUM_NODES * 2, dtype=valuetype)\n    return op2.Dat(dvnodes, x_vals, valuetype, \"x\")\n\n\n@pytest.fixture\ndef mass():\n    init = FlatBlock(\"\"\"\n  double CG1[3][6] = { { 0.09157621, 0.09157621, 0.81684757,\n                         0.44594849, 0.44594849, 0.10810302 },\n                       { 0.09157621, 0.81684757, 0.09157621,\n                         0.44594849, 0.10810302, 0.44594849 },\n                       { 0.81684757, 0.09157621, 0.09157621,\n                         0.10810302, 0.44594849, 0.44594849 } };\n  double d_CG1[3][6][2] = { { { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. } },\n                            { { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. } },\n                            { { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. } } };\n  double w[6] = { 0.05497587, 0.05497587, 0.05497587, 0.11169079,\n                  0.11169079, 0.11169079 };\n  double c_q0[6][2][2];\n  for(int i_g = 0; i_g < 6; i_g++)\n  {\n    for(int i_d_0 = 0; i_d_0 < 2; i_d_0++)\n    {\n      for(int i_d_1 = 0; i_d_1 < 2; i_d_1++)\n      {\n        c_q0[i_g][i_d_0][i_d_1] = 0.0;\n        for(int q_r_0 = 0; q_r_0 < 3; q_r_0++)\n        {\n          c_q0[i_g][i_d_0][i_d_1] += c0[q_r_0][i_d_0] * d_CG1[q_r_0][i_g][i_d_1];\n        };\n      };\n    };\n  };\n  for(int i_g = 0; i_g < 6; i_g++)\n\"\"\")\n    assembly = Incr(Symbol(\"localTensor\", (\"i_r_0\", \"i_r_1\")),\n                    FlatBlock(\"ST0 * w[i_g]\"))\n    assembly = Block([FlatBlock(\"double ST0 = 0.0;\\nST0 += CG1[i_r_0][i_g] * CG1[i_r_1][i_g] * (c_q0[i_g][0][0] * \\\n                    c_q0[i_g][1][1] + -1 * c_q0[i_g][0][1] * c_q0[i_g][1][0]);\\n\"), assembly], open_scope=True)\n    assembly = c_for(\"i_r_0\", 3, c_for(\"i_r_1\", 3, assembly))\n\n    kernel_code = FunDecl(\"void\", \"mass\",\n                          [Decl(\"double\", Symbol(\"localTensor\", (3, 3))),\n                           Decl(\"double\", Symbol(\"c0\", (3, 2)))],\n                          Block([init, assembly], open_scope=False),\n                          pred=[\"static\"])\n\n    return op2.Kernel(kernel_code.gencode(), \"mass\")\n\n\n@pytest.fixture\ndef rhs():\n    kernel_code = FlatBlock(\"\"\"\nstatic void rhs(double* localTensor, double c0[3][2], double* c1)\n{\n  double CG1[3][6] = { { 0.09157621, 0.09157621, 0.81684757,\n                         0.44594849, 0.44594849, 0.10810302 },\n                       { 0.09157621, 0.81684757, 0.09157621,\n                         0.44594849, 0.10810302, 0.44594849 },\n                       { 0.81684757, 0.09157621, 0.09157621,\n                         0.10810302, 0.44594849, 0.44594849 } };\n  double d_CG1[3][6][2] = { { { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. },\n                              { 1., 0. } },\n                            { { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. },\n                              { 0., 1. } },\n                            { { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n                              { -1.,-1. },\n
                              { -1.,-1. } } };\n  double w[6] = { 0.05497587, 0.05497587, 0.05497587, 0.11169079,\n                  0.11169079, 0.11169079 };\n  double c_q1[6];\n  double c_q0[6][2][2];\n  for(int i_g = 0; i_g < 6; i_g++)\n  {\n    c_q1[i_g] = 0.0;\n    for(int q_r_0 = 0; q_r_0 < 3; q_r_0++)\n    {\n      c_q1[i_g] += c1[q_r_0] * CG1[q_r_0][i_g];\n    };\n    for(int i_d_0 = 0; i_d_0 < 2; i_d_0++)\n    {\n      for(int i_d_1 = 0; i_d_1 < 2; i_d_1++)\n      {\n        c_q0[i_g][i_d_0][i_d_1] = 0.0;\n        for(int q_r_0 = 0; q_r_0 < 3; q_r_0++)\n        {\n          c_q0[i_g][i_d_0][i_d_1] += c0[q_r_0][i_d_0] * d_CG1[q_r_0][i_g][i_d_1];\n        };\n      };\n    };\n  };\n  for(int i_r_0 = 0; i_r_0 < 3; i_r_0++)\n  {\n    for(int i_g = 0; i_g < 6; i_g++)\n    {\n      double ST1 = 0.0;\n      ST1 += CG1[i_r_0][i_g] * c_q1[i_g] * (c_q0[i_g][0][0] * c_q0[i_g][1][1] + -1 * c_q0[i_g][0][1] * c_q0[i_g][1][0]);\n      localTensor[i_r_0] += ST1 * w[i_g];\n    };\n  };\n}\"\"\")\n    return op2.Kernel(kernel_code.gencode(), \"rhs\")\n\n\n@pytest.fixture\ndef mass_ffc():\n    init = FlatBlock(\"\"\"\ndouble J_00 = x[1][0] - x[0][0];\ndouble J_01 = x[2][0] - x[0][0];\ndouble J_10 = x[1][1] - x[0][1];\ndouble J_11 = x[2][1] - x[0][1];\n\ndouble detJ = J_00*J_11 - J_01*J_10;\ndouble det = fabs(detJ);\n\ndouble W3[3] = {0.166666666666667, 0.166666666666667, 0.166666666666667};\ndouble FE0[3][3] = \\\n{{0.666666666666667, 0.166666666666667, 0.166666666666667},\n{0.166666666666667, 0.166666666666667, 0.666666666666667},\n{0.166666666666667, 0.666666666666667, 0.166666666666667}};\n\nfor (unsigned int ip = 0; ip < 3; ip++)\n\"\"\")\n    assembly = Incr(Symbol(\"A\", (\"j\", \"k\")),\n                    FlatBlock(\"FE0[ip][j]*FE0[ip][k]*W3[ip]*det\"))\n    assembly = c_for(\"j\", 3, c_for(\"k\", 3, assembly))\n\n    kernel_code = FunDecl(\"void\", \"mass_ffc\",\n                          [Decl(\"double\", Symbol(\"A\", (3, 3))),\n                           Decl(\"double\", Symbol(\"x\", (3, 2)))],\n                          Block([init, assembly], open_scope=False),\n                          pred=[\"static\"])\n\n    return op2.Kernel(kernel_code.gencode(), \"mass_ffc\")\n\n\n@pytest.fixture\ndef rhs_ffc():\n    kernel_code = FlatBlock(\"\"\"\nstatic void rhs_ffc(double *A, double x[3][2], double *w0)\n{\n    double J_00 = x[1][0] - x[0][0];\n    double J_01 = x[2][0] - x[0][0];\n    double J_10 = x[1][1] - x[0][1];\n    double J_11 = x[2][1] - x[0][1];\n\n    double detJ = J_00*J_11 - J_01*J_10;\n\n    double det = fabs(detJ);\n\n    double W3[3] = {0.166666666666667, 0.166666666666667, 0.166666666666667};\n    double FE0[3][3] = \\\n    {{0.666666666666667, 0.166666666666667, 0.166666666666667},\n    {0.166666666666667, 0.166666666666667, 0.666666666666667},\n    {0.166666666666667, 0.666666666666667, 0.166666666666667}};\n\n    for (unsigned int ip = 0; ip < 3; ip++)\n    {\n      double F0 = 0.0;\n\n      for (unsigned int r = 0; r < 3; r++)\n      {\n        F0 += FE0[ip][r]*w0[r];\n      }\n\n      for (unsigned int j = 0; j < 3; j++)\n      {\n        A[j] += FE0[ip][j]*F0*W3[ip]*det;\n      }\n    }\n}\n\"\"\")\n    return op2.Kernel(kernel_code.gencode(), \"rhs_ffc\")\n\n\n@pytest.fixture\ndef rhs_ffc_itspace():\n    init = FlatBlock(\"\"\"\ndouble J_00 = x[1][0] - x[0][0];\ndouble J_01 = x[2][0] - x[0][0];\ndouble J_10 = x[1][1] - x[0][1];\ndouble J_11 = x[2][1] - x[0][1];\n\ndouble detJ = J_00*J_11 - J_01*J_10;\ndouble det = fabs(detJ);\n\ndouble W3[3] = {0.166666666666667, 0.166666666666667, 0.166666666666667};\ndouble FE0[3][3] = \\\n{{0.666666666666667, 0.166666666666667, 0.166666666666667},\n{0.166666666666667, 0.166666666666667, 0.666666666666667},\n{0.166666666666667, 0.666666666666667, 0.166666666666667}};\n\nfor (unsigned int ip = 0; ip < 3; ip++)\n{\n  double F0 = 0.0;\n\n  for (unsigned int r = 0; r < 3; r++)\n  {\n    F0 += FE0[ip][r]*w0[r];\n  }\n\n\"\"\")\n
assembly = Incr(Symbol(\"A\", (\"j\",)), FlatBlock(\"FE0[ip][j]*F0*W3[ip]*det\"))\n assembly = c_for(\"j\", 3, assembly)\n end = FlatBlock(\"}\")\n\n kernel_code = FunDecl(\"void\", \"rhs_ffc_itspace\",\n [Decl(\"double\", Symbol(\"A\", (3,))),\n Decl(\"double\", Symbol(\"x\", (3, 2))),\n Decl(\"double*\", Symbol(\"w0\"))],\n Block([init, assembly, end], open_scope=False),\n pred=[\"static\"])\n\n return op2.Kernel(kernel_code.gencode(), \"rhs_ffc_itspace\")\n\n\[email protected]\ndef zero_dat():\n kernel_code = \"\"\"\nstatic void zero_dat(double *dat)\n{\n *dat = 0.0;\n}\n\"\"\"\n return op2.Kernel(kernel_code, \"zero_dat\")\n\n\[email protected]\ndef zero_vec_dat():\n kernel_code = \"\"\"\nstatic void zero_vec_dat(double *dat)\n{\n dat[0] = 0.0; dat[1] = 0.0;\n}\n\"\"\"\n return op2.Kernel(kernel_code, \"zero_vec_dat\")\n\n\[email protected]\ndef kernel_inc():\n code = c_for(\"i\", 3,\n c_for(\"j\", 3,\n Incr(Symbol(\"entry\", (\"i\", \"j\")), c_sym(\"*g\"))))\n\n kernel_code = FunDecl(\"void\", \"inc\",\n [Decl(\"double\", Symbol(\"entry\", (3, 3))),\n Decl(\"double*\", c_sym(\"g\"))],\n Block([code], open_scope=False),\n pred=[\"static\"])\n\n return op2.Kernel(kernel_code.gencode(), \"inc\")\n\n\[email protected]\ndef kernel_set():\n code = c_for(\"i\", 3,\n c_for(\"j\", 3,\n Assign(Symbol(\"entry\", (\"i\", \"j\")), c_sym(\"*g\"))))\n\n kernel_code = FunDecl(\"void\", \"set\",\n [Decl(\"double\", Symbol(\"entry\", (3, 3))),\n Decl(\"double*\", c_sym(\"g\"))],\n Block([code], open_scope=False),\n pred=[\"static\"])\n\n return op2.Kernel(kernel_code.gencode(), \"set\")\n\n\[email protected]\ndef kernel_inc_vec():\n kernel_code = \"\"\"\nstatic void inc_vec(double entry[2][2], double* g, int i, int j)\n{\n entry[0][0] += *g;\n entry[0][1] += *g;\n entry[1][0] += *g;\n entry[1][1] += *g;\n}\n\"\"\"\n return op2.Kernel(kernel_code, \"inc_vec\")\n\n\[email protected]\ndef kernel_set_vec():\n kernel_code = \"\"\"\nstatic void set_vec(double entry[2][2], double* g, int i, int j)\n{\n entry[0][0] = *g;\n entry[0][1] = *g;\n entry[1][0] = *g;\n entry[1][1] = *g;\n}\n\"\"\"\n return op2.Kernel(kernel_code, \"set_vec\")\n\n\[email protected]\ndef expected_matrix():\n expected_vals = [(0.25, 0.125, 0.0, 0.125),\n (0.125, 0.291667, 0.0208333, 0.145833),\n (0.0, 0.0208333, 0.0416667, 0.0208333),\n (0.125, 0.145833, 0.0208333, 0.291667)]\n return np.asarray(expected_vals, dtype=valuetype)\n\n\[email protected]\ndef expected_vector_matrix():\n expected_vals = [(0.25, 0., 0.125, 0., 0., 0., 0.125, 0.),\n (0., 0.25, 0., 0.125, 0., 0., 0., 0.125),\n (0.125, 0., 0.29166667, 0.,\n 0.02083333, 0., 0.14583333, 0.),\n (0., 0.125, 0., 0.29166667, 0.,\n 0.02083333, 0., 0.14583333),\n (0., 0., 0.02083333, 0.,\n 0.04166667, 0., 0.02083333, 0.),\n (0., 0., 0., 0.02083333, 0.,\n 0.04166667, 0., 0.02083333),\n (0.125, 0., 0.14583333, 0.,\n 0.02083333, 0., 0.29166667, 0.),\n (0., 0.125, 0., 0.14583333, 0., 0.02083333, 0., 0.29166667)]\n return np.asarray(expected_vals, dtype=valuetype)\n\n\[email protected]\ndef expected_rhs():\n return np.asarray([0.9999999523522115, 1.3541666031724144,\n 0.2499999883507239, 1.6458332580869566],\n dtype=valuetype)\n\n\[email protected]\ndef expected_vec_rhs():\n return np.asarray([[0.5, 1.0], [0.58333333, 1.16666667],\n [0.08333333, 0.16666667], [0.58333333, 1.16666667]],\n dtype=valuetype)\n\n\[email protected]\ndef mset():\n return op2.MixedSet((op2.Set(3), op2.Set(4)))\n\n\nrdata = lambda s: np.arange(1, s + 1, dtype=np.float64)\n\n\[email protected]\ndef mdat(mset):\n return 
    return op2.MixedDat(op2.Dat(s, rdata(s.size)) for s in mset)\n\n\n@pytest.fixture\ndef mvdat(mset):\n    return op2.MixedDat(op2.Dat(s ** 2, list(zip(rdata(s.size), rdata(s.size)))) for s in mset)\n\n\n@pytest.fixture\ndef mmap(mset):\n    elem, node = mset\n    return op2.MixedMap((op2.Map(elem, elem, 1, [0, 1, 2]),\n                         op2.Map(elem, node, 2, [0, 1, 1, 2, 2, 3])))\n\n\n@pytest.fixture\ndef msparsity(mset, mmap):\n    return op2.Sparsity(mset, mmap)\n\n\n@pytest.fixture\ndef non_nest_mixed_sparsity(mset, mmap):\n    return op2.Sparsity(mset, mmap, nest=False)\n\n\n@pytest.fixture\ndef mvsparsity(mset, mmap):\n    return op2.Sparsity(mset ** 2, mmap)\n\n\nclass TestSparsity:\n\n    \"\"\"\n    Sparsity tests\n    \"\"\"\n\n    def test_sparsity_null_maps(self):\n        \"\"\"Building sparsity from a pair of non-initialized maps should fail.\"\"\"\n        s = op2.Set(5)\n        with pytest.raises(MapValueError):\n            m = op2.Map(s, s, 1)\n            op2.Sparsity((s, s), (m, m))\n\n    def test_sparsity_has_diagonal_space(self):\n        # A sparsity should have space for diagonal entries if rmap==cmap\n        s = op2.Set(1)\n        d = op2.Set(4)\n        m = op2.Map(s, d, 2, [1, 3])\n        d2 = op2.Set(4)\n        m2 = op2.Map(s, d2, 3, [1, 2, 3])\n        sparsity = op2.Sparsity((d, d), (m, m))\n        sparsity2 = op2.Sparsity((d, d2), (m, m2))\n\n        assert all(sparsity.nnz == [1, 2, 1, 2])\n        assert all(sparsity2.nnz == [0, 3, 0, 3])\n\n\nclass TestMatrices:\n\n    \"\"\"\n    Matrix tests\n    \"\"\"\n\n    @pytest.mark.parametrize(\"mode\", [op2.READ, op2.RW, op2.MAX, op2.MIN])\n    def test_invalid_mode(self, elements, elem_node, mat, mode):\n        \"\"\"Mat args can only have modes WRITE and INC.\"\"\"\n        with pytest.raises(ModeValueError):\n            op2.par_loop(op2.Kernel(\"\", \"dummy\"), elements,\n                         mat(mode, (elem_node, elem_node)))\n\n    @pytest.mark.parametrize('n', [1, 2])\n    def test_mat_set_diagonal(self, nodes, elem_node, n):\n        \"Set the diagonal of the entire matrix to 1.0\"\n        mat = op2.Mat(op2.Sparsity(nodes**n, elem_node), valuetype)\n        nrows = mat.sparsity.nrows\n        mat.set_local_diagonal_entries(list(range(nrows)))\n        mat.assemble()\n        assert (mat.values == np.identity(nrows * n)).all()\n\n    @pytest.mark.parametrize('n', [1, 2])\n    def test_mat_repeated_set_diagonal(self, nodes, elem_node, n):\n        \"Set the diagonal of the entire matrix to 1.0\"\n        mat = op2.Mat(op2.Sparsity(nodes**n, elem_node), valuetype)\n        nrows = mat.sparsity.nrows\n        mat.set_local_diagonal_entries(list(range(nrows)))\n        mat.assemble()\n        assert (mat.values == np.identity(nrows * n)).all()\n        mat.set_local_diagonal_entries(list(range(nrows)))\n        mat.assemble()\n        assert (mat.values == np.identity(nrows * n)).all()\n\n    def test_mat_always_has_diagonal_space(self):\n        # A sparsity should always have space for diagonal entries\n        s = op2.Set(1)\n        d = op2.Set(4)\n        m = op2.Map(s, d, 1, [2])\n        d2 = op2.Set(3)\n        m2 = op2.Map(s, d2, 1, [1])\n        sparsity = op2.Sparsity((d, d2), (m, m2))\n\n        from petsc4py import PETSc\n        # petsc4py default error handler swallows SETERRQ, so just\n        # install the abort handler to notice an error.\n        PETSc.Sys.pushErrorHandler(\"abort\")\n        mat = op2.Mat(sparsity)\n        PETSc.Sys.popErrorHandler()\n\n        assert np.allclose(mat.handle.getDiagonal().array, 0.0)\n\n    def test_minimal_zero_mat(self):\n        \"\"\"Assemble a matrix that is all zeros.\"\"\"\n\n        code = c_for(\"i\", 1,\n                     c_for(\"j\", 1,\n                           Assign(Symbol(\"local_mat\", (\"i\", \"j\")), c_sym(\"0.0\"))))\n        zero_mat_code = FunDecl(\"void\", \"zero_mat\",\n                                [Decl(\"double\", Symbol(\"local_mat\", (1, 1)))],\n                                Block([code], open_scope=False))\n\n        nelems = 128\n        set = op2.Set(nelems)\n        map = op2.Map(set, set, 1, 
np.array(list(range(nelems)), np.uint32))\n sparsity = op2.Sparsity((set, set), (map, map))\n mat = op2.Mat(sparsity, np.float64)\n kernel = op2.Kernel(zero_mat_code.gencode(), \"zero_mat\")\n op2.par_loop(kernel, set,\n mat(op2.WRITE, (map, map)))\n\n mat.assemble()\n expected_matrix = np.zeros((nelems, nelems), dtype=np.float64)\n eps = 1.e-12\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_assemble_mat(self, mass, mat, coords, elements,\n elem_node, expected_matrix):\n \"\"\"Assemble a simple finite-element matrix and check the result.\"\"\"\n mat.zero()\n op2.par_loop(mass, elements,\n mat(op2.INC, (elem_node, elem_node)),\n coords(op2.READ, elem_node))\n mat.assemble()\n eps = 1.e-5\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_assemble_rhs(self, rhs, elements, b, coords, f,\n elem_node, expected_rhs):\n \"\"\"Assemble a simple finite-element right-hand side and check result.\"\"\"\n b.zero()\n op2.par_loop(rhs, elements,\n b(op2.INC, elem_node),\n coords(op2.READ, elem_node),\n f(op2.READ, elem_node))\n\n eps = 1.e-12\n assert_allclose(b.data, expected_rhs, eps)\n\n def test_solve(self, mat, b, x, f):\n \"\"\"Solve a linear system where the solution is equal to the right-hand\n side and check the result.\"\"\"\n mat.assemble()\n x = np.linalg.solve(mat.values, b.data)\n eps = 1.e-8\n assert_allclose(x, f.data, eps)\n\n def test_zero_matrix(self, mat):\n \"\"\"Test that the matrix is zeroed correctly.\"\"\"\n mat.zero()\n expected_matrix = np.zeros((4, 4), dtype=valuetype)\n eps = 1.e-14\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_set_matrix(self, mat, elements, elem_node,\n kernel_inc, kernel_set, g):\n \"\"\"Test accessing a scalar matrix with the WRITE access by adding some\n non-zero values into the matrix, then setting them back to zero with a\n kernel using op2.WRITE\"\"\"\n mat.zero()\n op2.par_loop(kernel_inc, elements,\n mat(op2.INC, (elem_node, elem_node)),\n g(op2.READ))\n mat.assemble()\n # Check we have ones in the matrix\n assert mat.values.sum() == 3 * 3 * elements.size\n op2.par_loop(kernel_set, elements,\n mat(op2.WRITE, (elem_node, elem_node)),\n g(op2.READ))\n mat.assemble()\n assert mat.values.sum() == (3 * 3 - 2) * elements.size\n mat.zero()\n\n def test_zero_rhs(self, b, zero_dat, nodes):\n \"\"\"Test that the RHS is zeroed correctly.\"\"\"\n op2.par_loop(zero_dat, nodes,\n b(op2.WRITE))\n assert all(b.data == np.zeros_like(b.data))\n\n def test_assemble_ffc(self, mass_ffc, mat, coords, elements,\n elem_node, expected_matrix):\n \"\"\"Test that the FFC mass assembly assembles the correct values.\"\"\"\n op2.par_loop(mass_ffc, elements,\n mat(op2.INC, (elem_node, elem_node)),\n coords(op2.READ, elem_node))\n mat.assemble()\n eps = 1.e-5\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_rhs_ffc(self, rhs_ffc, elements, b, coords, f,\n elem_node, expected_rhs):\n \"\"\"Test that the FFC rhs assembly assembles the correct values.\"\"\"\n op2.par_loop(rhs_ffc, elements,\n b(op2.INC, elem_node),\n coords(op2.READ, elem_node),\n f(op2.READ, elem_node))\n\n eps = 1.e-6\n assert_allclose(b.data, expected_rhs, eps)\n\n def test_rhs_ffc_itspace(self, rhs_ffc_itspace, elements, b,\n coords, f, elem_node, expected_rhs,\n zero_dat, nodes):\n \"\"\"Test that the FFC right-hand side assembly using iteration spaces\n assembles the correct values.\"\"\"\n # Zero the RHS first\n op2.par_loop(zero_dat, nodes,\n b(op2.WRITE))\n op2.par_loop(rhs_ffc_itspace, elements,\n b(op2.INC, elem_node),\n coords(op2.READ, 
elem_node),\n f(op2.READ, elem_node))\n eps = 1.e-6\n assert_allclose(b.data, expected_rhs, eps)\n\n def test_zero_rows(self, mat, expected_matrix):\n \"\"\"Zeroing a row in the matrix should set the diagonal to the given\n value and all other values to 0.\"\"\"\n expected_matrix[0] = [12.0, 0.0, 0.0, 0.0]\n mat.zero_rows([0], 12.0)\n eps = 1.e-5\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_zero_rows_subset(self, nodes, mat, expected_matrix):\n \"\"\"Zeroing rows in the matrix given by a :class:`op2.Subset` should\n set the diagonal to the given value and all other values to 0.\"\"\"\n expected_matrix[0] = [12.0, 0.0, 0.0, 0.0]\n ss = op2.Subset(nodes, [0])\n mat.zero_rows(ss, 12.0)\n assert_allclose(mat.values, expected_matrix, 1e-5)\n\n def test_zero_last_row(self, mat, expected_matrix):\n \"\"\"Zeroing a row in the matrix should set the diagonal to the given\n value and all other values to 0.\"\"\"\n which = NUM_NODES - 1\n # because the previous test zeroed the first row\n expected_matrix[0] = [12.0, 0.0, 0.0, 0.0]\n expected_matrix[which] = [0.0, 0.0, 0.0, 4.0]\n mat.zero_rows([which], 4.0)\n eps = 1.e-5\n assert_allclose(mat.values, expected_matrix, eps)\n\n def test_mat_nbytes(self, mat):\n \"\"\"Check that the matrix uses the amount of memory we expect.\"\"\"\n assert mat.nbytes == 14 * 8\n\n\nclass TestMatrixStateChanges:\n\n \"\"\"\n Test that matrix state changes are correctly tracked.\n \"\"\"\n\n @pytest.fixture(params=[False, True],\n ids=[\"Non-nested\", \"Nested\"])\n def mat(self, request, msparsity, non_nest_mixed_sparsity):\n if request.param:\n mat = op2.Mat(msparsity)\n else:\n mat = op2.Mat(non_nest_mixed_sparsity)\n\n opt = mat.handle.Option.NEW_NONZERO_ALLOCATION_ERR\n opt2 = mat.handle.Option.UNUSED_NONZERO_LOCATION_ERR\n mat.handle.setOption(opt, False)\n mat.handle.setOption(opt2, False)\n for m in mat:\n m.handle.setOption(opt, False)\n m.handle.setOption(opt2, False)\n return mat\n\n def test_mat_starts_assembled(self, mat):\n assert mat.assembly_state is op2.Mat.ASSEMBLED\n for m in mat:\n assert mat.assembly_state is op2.Mat.ASSEMBLED\n\n def test_after_set_local_state_is_insert(self, mat):\n mat[0, 0].set_local_diagonal_entries([0])\n mat._force_evaluation()\n assert mat[0, 0].assembly_state is op2.Mat.INSERT_VALUES\n if not mat.sparsity.nested:\n assert mat.assembly_state is op2.Mat.INSERT_VALUES\n if mat.sparsity.nested:\n assert mat[1, 1].assembly_state is op2.Mat.ASSEMBLED\n\n def test_after_addto_state_is_add(self, mat):\n mat[0, 0].addto_values(0, 0, [1])\n mat._force_evaluation()\n assert mat[0, 0].assembly_state is op2.Mat.ADD_VALUES\n if not mat.sparsity.nested:\n assert mat.assembly_state is op2.Mat.ADD_VALUES\n if mat.sparsity.nested:\n assert mat[1, 1].assembly_state is op2.Mat.ASSEMBLED\n\n def test_matblock_assemble_runtimeerror(self, mat):\n if mat.sparsity.nested:\n return\n with pytest.raises(RuntimeError):\n mat[0, 0].assemble()\n\n with pytest.raises(RuntimeError):\n mat[0, 0]._assemble()\n\n def test_mixing_insert_and_add_works(self, mat):\n mat[0, 0].addto_values(0, 0, [1])\n mat[1, 1].addto_values(1, 1, [3])\n mat[1, 1].set_values(0, 0, [2])\n mat[0, 0].set_values(1, 1, [4])\n mat[1, 1].addto_values(0, 0, [3])\n mat.assemble()\n\n assert np.allclose(mat[0, 0].values, np.diag([1, 4, 0]))\n assert np.allclose(mat[1, 1].values, np.diag([5, 3, 0, 0]))\n\n assert np.allclose(mat[0, 1].values, 0)\n assert np.allclose(mat[1, 0].values, 0)\n\n def test_assembly_flushed_between_insert_and_add(self, mat):\n import types\n 
flush_counter = [0]\n\n def make_flush(old_flush):\n def flush(self):\n old_flush()\n flush_counter[0] += 1\n return flush\n\n oflush = mat._flush_assembly\n mat._flush_assembly = types.MethodType(make_flush(oflush), mat)\n if mat.sparsity.nested:\n for m in mat:\n oflush = m._flush_assembly\n m._flush_assembly = types.MethodType(make_flush(oflush), m)\n\n mat[0, 0].addto_values(0, 0, [1])\n mat._force_evaluation()\n assert flush_counter[0] == 0\n mat[0, 0].set_values(1, 0, [2])\n mat._force_evaluation()\n assert flush_counter[0] == 1\n mat.assemble()\n mat._force_evaluation()\n assert flush_counter[0] == 1\n\n\nclass TestMixedMatrices:\n \"\"\"\n Matrix tests for mixed spaces\n \"\"\"\n\n # off-diagonal blocks\n od = np.array([[1.0, 2.0, 0.0, 0.0],\n [0.0, 4.0, 6.0, 0.0],\n [0.0, 0.0, 9.0, 12.0]])\n # lower left block\n ll = (np.diag([1.0, 8.0, 18.0, 16.0])\n + np.diag([2.0, 6.0, 12.0], -1)\n + np.diag([2.0, 6.0, 12.0], 1))\n\n @pytest.fixture\n def mat(self, msparsity, mmap, mdat):\n mat = op2.Mat(msparsity)\n\n code = c_for(\"i\", 3,\n c_for(\"j\", 3,\n Incr(Symbol(\"v\", (\"i\", \"j\")), FlatBlock(\"d[i][0] * d[j][0]\"))))\n addone = FunDecl(\"void\", \"addone_mat\",\n [Decl(\"double\", Symbol(\"v\", (3, 3))),\n Decl(\"double\", c_sym(\"**d\"))],\n Block([code], open_scope=False),\n pred=[\"static\"])\n\n addone = op2.Kernel(addone, \"addone_mat\")\n op2.par_loop(addone, mmap.iterset,\n mat(op2.INC, (mmap, mmap)),\n mdat(op2.READ, mmap))\n mat.assemble()\n mat._force_evaluation()\n return mat\n\n @pytest.fixture\n def dat(self, mset, mmap, mdat):\n dat = op2.MixedDat(mset)\n kernel_code = FunDecl(\"void\", \"addone_rhs\",\n [Decl(\"double\", Symbol(\"v\", (3,))),\n Decl(\"double\", Symbol(\"d\", (3,)))],\n c_for(\"i\", 3, Incr(Symbol(\"v\", (\"i\")), FlatBlock(\"d[i]\"))),\n pred=[\"static\"])\n addone = op2.Kernel(kernel_code.gencode(), \"addone_rhs\")\n op2.par_loop(addone, mmap.iterset,\n dat(op2.INC, mmap),\n mdat(op2.READ, mmap))\n return dat\n\n @pytest.mark.xfail(reason=\"Assembling directly into mixed mats unsupported\")\n def test_assemble_mixed_mat(self, mat):\n \"\"\"Assemble into a matrix declared on a mixed sparsity.\"\"\"\n eps = 1.e-12\n assert_allclose(mat[0, 0].values, np.diag([1.0, 4.0, 9.0]), eps)\n assert_allclose(mat[0, 1].values, self.od, eps)\n assert_allclose(mat[1, 0].values, self.od.T, eps)\n assert_allclose(mat[1, 1].values, self.ll, eps)\n\n def test_assemble_mixed_rhs(self, dat):\n \"\"\"Assemble a simple right-hand side over a mixed space and check result.\"\"\"\n eps = 1.e-12\n assert_allclose(dat[0].data_ro, rdata(3), eps)\n assert_allclose(dat[1].data_ro, [1.0, 4.0, 6.0, 4.0], eps)\n\n def test_assemble_mixed_rhs_vector(self, mset, mmap, mvdat):\n \"\"\"Assemble a simple right-hand side over a mixed space and check result.\"\"\"\n dat = op2.MixedDat(mset ** 2)\n assembly = Block(\n [Incr(Symbol(\"v\", (\"i\"), ((2, 0),)), FlatBlock(\"d[i][0]\")),\n Incr(Symbol(\"v\", (\"i\"), ((2, 1),)), FlatBlock(\"d[i][1]\"))], open_scope=True)\n kernel_code = FunDecl(\"void\", \"addone_rhs_vec\",\n [Decl(\"double\", Symbol(\"v\", (6,))),\n Decl(\"double\", Symbol(\"d\", (3, 2)))],\n c_for(\"i\", 3, assembly),\n pred=[\"static\"])\n addone = op2.Kernel(kernel_code.gencode(), \"addone_rhs_vec\")\n op2.par_loop(addone, mmap.iterset,\n dat(op2.INC, mmap),\n mvdat(op2.READ, mmap))\n eps = 1.e-12\n exp = np.kron(list(zip([1.0, 4.0, 6.0, 4.0])), np.ones(2))\n assert_allclose(dat[0].data_ro, np.kron(list(zip(rdata(3))), np.ones(2)), eps)\n assert_allclose(dat[1].data_ro, 
exp, eps)\n\n @pytest.mark.xfail(reason=\"Assembling directly into mixed mats unsupported\")\n def test_solve_mixed(self, mat, dat):\n x = op2.MixedDat(dat.dataset)\n op2.solve(mat, x, dat)\n b = mat * x\n eps = 1.e-12\n assert_allclose(dat[0].data_ro, b[0].data_ro, eps)\n assert_allclose(dat[1].data_ro, b[1].data_ro, eps)\n\n\nif __name__ == '__main__':\n import os\n pytest.main(os.path.abspath(__file__))\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros_like", "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.identity", "numpy.allclose", "numpy.arange", "numpy.linalg.solve", "numpy.diag" ] ]
ThomasProctor/pandas
[ "9c202a1b7e2418ca53ac42179d2499223a21c710" ]
[ "pandas/tests/window/test_grouper.py" ]
[ "import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport pandas._testing as tm\nfrom pandas.core.groupby.groupby import get_groupby\n\n\nclass TestGrouperGrouping:\n def setup_method(self):\n self.series = Series(np.arange(10))\n self.frame = DataFrame({\"A\": [1] * 20 + [2] * 12 + [3] * 8, \"B\": np.arange(40)})\n\n def test_mutated(self):\n\n msg = r\"groupby\\(\\) got an unexpected keyword argument 'foo'\"\n with pytest.raises(TypeError, match=msg):\n self.frame.groupby(\"A\", foo=1)\n\n g = self.frame.groupby(\"A\")\n assert not g.mutated\n g = get_groupby(self.frame, by=\"A\", mutated=True)\n assert g.mutated\n\n def test_getitem(self):\n g = self.frame.groupby(\"A\")\n g_mutated = get_groupby(self.frame, by=\"A\", mutated=True)\n\n expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())\n\n result = g.rolling(2).mean().B\n tm.assert_series_equal(result, expected)\n\n result = g.rolling(2).B.mean()\n tm.assert_series_equal(result, expected)\n\n result = g.B.rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n result = self.frame.B.groupby(self.frame.A).rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n def test_getitem_multiple(self):\n\n # GH 13174\n g = self.frame.groupby(\"A\")\n r = g.rolling(2, min_periods=0)\n g_mutated = get_groupby(self.frame, by=\"A\", mutated=True)\n expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count())\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"f\",\n [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n pytest.param(\n \"count\",\n marks=pytest.mark.filterwarnings(\"ignore:min_periods:FutureWarning\"),\n ),\n \"kurt\",\n \"skew\",\n ],\n )\n def test_rolling(self, f):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.rolling(4), f)())\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"std\", \"var\"])\n def test_rolling_ddof(self, f):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)(ddof=1)\n expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"interpolation\", [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]\n )\n def test_rolling_quantile(self, interpolation):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n result = r.quantile(0.4, interpolation=interpolation)\n expected = g.apply(\n lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"corr\", \"cov\"])\n def test_rolling_corr_cov(self, f):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.rolling(4), f)(self.frame)\n\n expected = g.apply(func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.rolling(4), f)(pairwise=True)\n\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_rolling_apply(self, raw):\n g = self.frame.groupby(\"A\")\n r = g.rolling(window=4)\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))\n tm.assert_frame_equal(result, expected)\n\n def 
test_rolling_apply_mutability(self):\n # GH 14013\n df = pd.DataFrame({\"A\": [\"foo\"] * 3 + [\"bar\"] * 3, \"B\": [1] * 6})\n g = df.groupby(\"A\")\n\n mi = pd.MultiIndex.from_tuples(\n [(\"bar\", 3), (\"bar\", 4), (\"bar\", 5), (\"foo\", 0), (\"foo\", 1), (\"foo\", 2)]\n )\n\n mi.names = [\"A\", None]\n # Grouped column should not be a part of the output\n expected = pd.DataFrame([np.nan, 2.0, 2.0] * 2, columns=[\"B\"], index=mi)\n\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n # Call an arbitrary function on the groupby\n g.sum()\n\n # Make sure nothing has been mutated\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"f\", [\"sum\", \"mean\", \"min\", \"max\", \"count\", \"kurt\", \"skew\"]\n )\n def test_expanding(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.expanding(), f)())\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"std\", \"var\"])\n def test_expanding_ddof(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)(ddof=0)\n expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"interpolation\", [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]\n )\n def test_expanding_quantile(self, interpolation):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n result = r.quantile(0.4, interpolation=interpolation)\n expected = g.apply(\n lambda x: x.expanding().quantile(0.4, interpolation=interpolation)\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"f\", [\"corr\", \"cov\"])\n def test_expanding_corr_cov(self, f):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n result = getattr(r, f)(self.frame)\n\n def func(x):\n return getattr(x.expanding(), f)(self.frame)\n\n expected = g.apply(func)\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.expanding(), f)(pairwise=True)\n\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n def test_expanding_apply(self, raw):\n g = self.frame.groupby(\"A\")\n r = g.expanding()\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"expected_value,raw_value\", [[1.0, True], [0.0, False]])\n def test_groupby_rolling(self, expected_value, raw_value):\n # GH 31754\n\n def foo(x):\n return int(isinstance(x, np.ndarray))\n\n df = pd.DataFrame({\"id\": [1, 1, 1], \"value\": [1, 2, 3]})\n result = df.groupby(\"id\").value.rolling(1).apply(foo, raw=raw_value)\n expected = Series(\n [expected_value] * 3,\n index=pd.MultiIndex.from_tuples(\n ((1, 0), (1, 1), (1, 2)), names=[\"id\", None]\n ),\n name=\"value\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_center_center(self):\n # GH 35552\n series = Series(range(1, 6))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 5,\n index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),\n )\n tm.assert_series_equal(result, expected)\n\n series = Series(range(1, 5))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 4,\n 
index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),\n )\n tm.assert_series_equal(result, expected)\n\n df = pd.DataFrame({\"a\": [\"a\"] * 5 + [\"b\"] * 6, \"b\": range(11)})\n result = df.groupby(\"a\").rolling(center=True, window=3).mean()\n expected = pd.DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],\n index=pd.MultiIndex.from_tuples(\n (\n (\"a\", 0),\n (\"a\", 1),\n (\"a\", 2),\n (\"a\", 3),\n (\"a\", 4),\n (\"b\", 5),\n (\"b\", 6),\n (\"b\", 7),\n (\"b\", 8),\n (\"b\", 9),\n (\"b\", 10),\n ),\n names=[\"a\", None],\n ),\n columns=[\"b\"],\n )\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({\"a\": [\"a\"] * 5 + [\"b\"] * 5, \"b\": range(10)})\n result = df.groupby(\"a\").rolling(center=True, window=3).mean()\n expected = pd.DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],\n index=pd.MultiIndex.from_tuples(\n (\n (\"a\", 0),\n (\"a\", 1),\n (\"a\", 2),\n (\"a\", 3),\n (\"a\", 4),\n (\"b\", 5),\n (\"b\", 6),\n (\"b\", 7),\n (\"b\", 8),\n (\"b\", 9),\n ),\n names=[\"a\", None],\n ),\n columns=[\"b\"],\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"min_periods\", [5, 4, 3])\n def test_groupby_rolling_center_min_periods(self, min_periods):\n # GH 36040\n df = pd.DataFrame({\"group\": [\"A\"] * 10 + [\"B\"] * 10, \"data\": range(20)})\n\n window_size = 5\n result = (\n df.groupby(\"group\")\n .rolling(window_size, center=True, min_periods=min_periods)\n .mean()\n )\n result = result.reset_index()[[\"group\", \"data\"]]\n\n grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]\n grp_B_mean = [x + 10.0 for x in grp_A_mean]\n\n num_nans = max(0, min_periods - 3) # For window_size of 5\n nans = [np.nan] * num_nans\n grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans\n grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans\n\n expected = pd.DataFrame(\n {\"group\": [\"A\"] * 10 + [\"B\"] * 10, \"data\": grp_A_expected + grp_B_expected}\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_subselect_rolling(self):\n # GH 35486\n df = DataFrame(\n {\"a\": [1, 2, 3, 2], \"b\": [4.0, 2.0, 3.0, 1.0], \"c\": [10, 20, 30, 20]}\n )\n result = df.groupby(\"a\")[[\"b\"]].rolling(2).max()\n expected = DataFrame(\n [np.nan, np.nan, 2.0, np.nan],\n columns=[\"b\"],\n index=pd.MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=[\"a\", None]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.groupby(\"a\")[\"b\"].rolling(2).max()\n expected = Series(\n [np.nan, np.nan, 2.0, np.nan],\n index=pd.MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=[\"a\", None]\n ),\n name=\"b\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_custom_indexer(self):\n # GH 35557\n class SimpleIndexer(pd.api.indexers.BaseIndexer):\n def get_window_bounds(\n self, num_values=0, min_periods=None, center=None, closed=None\n ):\n min_periods = self.window_size if min_periods is None else 0\n end = np.arange(num_values, dtype=np.int64) + 1\n start = end.copy() - self.window_size\n start[start < 0] = min_periods\n return start, end\n\n df = pd.DataFrame(\n {\"a\": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5\n )\n result = (\n df.groupby(df.index)\n .rolling(SimpleIndexer(window_size=3), min_periods=1)\n .sum()\n )\n expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_subset_with_closed(self):\n # GH 
35549\n df = pd.DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": [pd.Timestamp(\"2019-01-01\")] * 6,\n }\n )\n result = (\n df.groupby(\"group\").rolling(\"1D\", on=\"date\", closed=\"left\")[\"column1\"].sum()\n )\n expected = Series(\n [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],\n index=pd.MultiIndex.from_tuples(\n [(\"A\", pd.Timestamp(\"2019-01-01\"))] * 3\n + [(\"B\", pd.Timestamp(\"2019-01-01\"))] * 3,\n names=[\"group\", \"date\"],\n ),\n name=\"column1\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_subset_rolling_subset_with_closed(self):\n # GH 35549\n df = pd.DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": [pd.Timestamp(\"2019-01-01\")] * 6,\n }\n )\n\n result = (\n df.groupby(\"group\")[[\"column1\", \"date\"]]\n .rolling(\"1D\", on=\"date\", closed=\"left\")[\"column1\"]\n .sum()\n )\n expected = Series(\n [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],\n index=pd.MultiIndex.from_tuples(\n [(\"A\", pd.Timestamp(\"2019-01-01\"))] * 3\n + [(\"B\", pd.Timestamp(\"2019-01-01\"))] * 3,\n names=[\"group\", \"date\"],\n ),\n name=\"column1\",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"func\", [\"max\", \"min\"])\n def test_groupby_rolling_index_changed(self, func):\n # GH: #36018 nlevels of MultiIndex changed\n ds = Series(\n [1, 2, 2],\n index=pd.MultiIndex.from_tuples(\n [(\"a\", \"x\"), (\"a\", \"y\"), (\"c\", \"z\")], names=[\"1\", \"2\"]\n ),\n name=\"a\",\n )\n\n result = getattr(ds.groupby(ds).rolling(2), func)()\n expected = Series(\n [np.nan, np.nan, 2.0],\n index=pd.MultiIndex.from_tuples(\n [(1, \"a\", \"x\"), (2, \"a\", \"y\"), (2, \"c\", \"z\")], names=[\"a\", \"1\", \"2\"]\n ),\n name=\"a\",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_empty_frame(self):\n # GH 36197\n expected = pd.DataFrame({\"s1\": []})\n result = expected.groupby(\"s1\").rolling(window=1).sum()\n expected.index = pd.MultiIndex.from_tuples([], names=[\"s1\", None])\n tm.assert_frame_equal(result, expected)\n\n expected = pd.DataFrame({\"s1\": [], \"s2\": []})\n result = expected.groupby([\"s1\", \"s2\"]).rolling(window=1).sum()\n expected.index = pd.MultiIndex.from_tuples([], names=[\"s1\", \"s2\", None])\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_string_index(self):\n # GH: 36727\n df = pd.DataFrame(\n [\n [\"A\", \"group_1\", pd.Timestamp(2019, 1, 1, 9)],\n [\"B\", \"group_1\", pd.Timestamp(2019, 1, 2, 9)],\n [\"Z\", \"group_2\", pd.Timestamp(2019, 1, 3, 9)],\n [\"H\", \"group_1\", pd.Timestamp(2019, 1, 6, 9)],\n [\"E\", \"group_2\", pd.Timestamp(2019, 1, 20, 9)],\n ],\n columns=[\"index\", \"group\", \"eventTime\"],\n ).set_index(\"index\")\n\n groups = df.groupby(\"group\")\n df[\"count_to_date\"] = groups.cumcount()\n rolling_groups = groups.rolling(\"10d\", on=\"eventTime\")\n result = rolling_groups.apply(lambda df: df.shape[0])\n expected = pd.DataFrame(\n [\n [\"A\", \"group_1\", pd.Timestamp(2019, 1, 1, 9), 1.0],\n [\"B\", \"group_1\", pd.Timestamp(2019, 1, 2, 9), 2.0],\n [\"H\", \"group_1\", pd.Timestamp(2019, 1, 6, 9), 3.0],\n [\"Z\", \"group_2\", pd.Timestamp(2019, 1, 3, 9), 1.0],\n [\"E\", \"group_2\", pd.Timestamp(2019, 1, 20, 9), 1.0],\n ],\n columns=[\"index\", \"group\", \"eventTime\", \"count_to_date\"],\n ).set_index([\"group\", \"index\"])\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_no_sort(self):\n # GH 36889\n result = (\n 
pd.DataFrame({\"foo\": [2, 1], \"bar\": [2, 1]})\n .groupby(\"foo\", sort=False)\n .rolling(1)\n .min()\n )\n expected = pd.DataFrame(\n np.array([[2.0, 2.0], [1.0, 1.0]]),\n columns=[\"foo\", \"bar\"],\n index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=[\"foo\", None]),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_count_closed_on(self):\n # GH 35869\n df = pd.DataFrame(\n {\n \"column1\": range(6),\n \"column2\": range(6),\n \"group\": 3 * [\"A\", \"B\"],\n \"date\": pd.date_range(end=\"20190101\", periods=6),\n }\n )\n result = (\n df.groupby(\"group\")\n .rolling(\"3d\", on=\"date\", closed=\"left\")[\"column1\"]\n .count()\n )\n expected = pd.Series(\n [np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],\n name=\"column1\",\n index=pd.MultiIndex.from_tuples(\n [\n (\"A\", pd.Timestamp(\"2018-12-27\")),\n (\"A\", pd.Timestamp(\"2018-12-29\")),\n (\"A\", pd.Timestamp(\"2018-12-31\")),\n (\"B\", pd.Timestamp(\"2018-12-28\")),\n (\"B\", pd.Timestamp(\"2018-12-30\")),\n (\"B\", pd.Timestamp(\"2019-01-01\")),\n ],\n names=[\"group\", \"date\"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"kwargs\"),\n [(\"rolling\", {\"window\": 2, \"min_periods\": 1}), (\"expanding\", {})],\n )\n def test_groupby_rolling_sem(self, func, kwargs):\n # GH: 26476\n df = pd.DataFrame(\n [[\"a\", 1], [\"a\", 2], [\"b\", 1], [\"b\", 2], [\"b\", 3]], columns=[\"a\", \"b\"]\n )\n result = getattr(df.groupby(\"a\"), func)(**kwargs).sem()\n expected = pd.DataFrame(\n {\"a\": [np.nan] * 5, \"b\": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},\n index=pd.MultiIndex.from_tuples(\n [(\"a\", 0), (\"a\", 1), (\"b\", 2), (\"b\", 3), (\"b\", 4)], names=[\"a\", None]\n ),\n )\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "numpy.array", "pandas.DataFrame", "pandas.date_range", "pandas.MultiIndex.from_tuples", "pandas.core.groupby.groupby.get_groupby", "pandas._testing.assert_frame_equal", "pandas.Timestamp", "numpy.arange", "pandas._testing.assert_series_equal" ] ]
nutszebra/adversarial-robustness-toolbox
[ "5d03b3c8164e912f76c2ac4778cea3f071ac115f" ]
[ "art/metrics_unittest.py" ]
[ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport keras\nimport keras.backend as k\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, MaxPooling2D\nimport numpy as np\nimport tensorflow as tf\nimport torch.nn as nn\nimport torch.nn.functional as f\nimport torch.optim as optim\n\nfrom art.classifiers import KerasClassifier, PyTorchClassifier, TFClassifier\nfrom art.metrics import empirical_robustness, clever_t, clever_u, clever, loss_sensitivity\nfrom art.utils import load_mnist\n\n\nBATCH_SIZE = 10\nNB_TRAIN = 100\nNB_TEST = 100\n\nr_l1 = 40\nr_l2 = 2\nr_li = 0.1\n\n\nclass TestMetrics(unittest.TestCase):\n def test_emp_robustness_mnist(self):\n # Get MNIST\n (x_train, y_train), (_, _), _, _ = load_mnist()\n x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]\n\n # Get classifier\n classifier = self._cnn_mnist_k([28, 28, 1])\n classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)\n\n # Compute minimal perturbations\n params = {\"eps_step\": 1.1}\n emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)\n self.assertEqual(emp_robust, 0.)\n\n params = {\"eps_step\": 1.,\n \"eps_max\": 1.}\n emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)\n self.assertAlmostEqual(emp_robust, 1., 3)\n\n params = {\"eps_step\": 0.1,\n \"eps_max\": 0.2}\n emp_robust = empirical_robustness(classifier, x_train, str('fgsm'), params)\n self.assertLessEqual(emp_robust, 0.21)\n\n def test_loss_sensitivity(self):\n # Get MNIST\n (x_train, y_train), (_, _), _, _ = load_mnist()\n x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]\n\n # Get classifier\n classifier = self._cnn_mnist_k([28, 28, 1])\n classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)\n\n l = loss_sensitivity(classifier, x_train, y_train)\n self.assertGreaterEqual(l, 0)\n\n # def testNearestNeighborDist(self):\n # # Get MNIST\n # (x_train, y_train), (_, _), _, _ = load_mnist()\n # x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]\n #\n # # Get classifier\n # classifier = self._cnn_mnist_k([28, 28, 1])\n # classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)\n #\n # dist = nearest_neighbour_dist(classifier, x_train, x_train, str('fgsm'))\n # self.assertGreaterEqual(dist, 0)\n\n @staticmethod\n def _cnn_mnist_k(input_shape):\n # Create simple CNN\n model = Sequential()\n model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n model.add(Dense(10, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),\n metrics=['accuracy'])\n\n classifier = KerasClassifier((0, 1), model, use_logits=False)\n return classifier\n\n#########################################\n# This part is the unit test for Clever.#\n#########################################\n\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.conv = nn.Conv2d(1, 16, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.fc = nn.Linear(2304, 10)\n\n def forward(self, x):\n x = self.pool(f.relu(self.conv(x)))\n x = x.view(-1, 2304)\n logit_output = self.fc(x)\n\n return logit_output\n\n\nclass TestClever(unittest.TestCase):\n \"\"\"\n Unittest for Clever metrics.\n \"\"\"\n @staticmethod\n def _create_tfclassifier():\n \"\"\"\n To create a simple TFClassifier for testing.\n :return:\n \"\"\"\n # Define input 
and output placeholders\n input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])\n output_ph = tf.placeholder(tf.int32, shape=[None, 10])\n\n # Define the tensorflow graph\n conv = tf.layers.conv2d(input_ph, 4, 5, activation=tf.nn.relu)\n conv = tf.layers.max_pooling2d(conv, 2, 2)\n fc = tf.contrib.layers.flatten(conv)\n\n # Logits layer\n logits = tf.layers.dense(fc, 10)\n\n # Train operator\n loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n train = optimizer.minimize(loss)\n\n # Tensorflow session and initialization\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Create the classifier\n tfc = TFClassifier((0, 1), input_ph, logits, output_ph, train, loss, None, sess)\n\n return tfc\n\n @staticmethod\n def _create_krclassifier():\n \"\"\"\n To create a simple KerasClassifier for testing.\n :return:\n \"\"\"\n # Initialize a tf session\n session = tf.Session()\n k.set_session(session)\n\n # Create simple CNN\n model = Sequential()\n model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=(28, 28, 1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Flatten())\n model.add(Dense(10, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),\n metrics=['accuracy'])\n\n # Get the classifier\n krc = KerasClassifier((0, 1), model, use_logits=False)\n\n return krc\n\n @staticmethod\n def _create_ptclassifier():\n \"\"\"\n To create a simple PyTorchClassifier for testing.\n :return:\n \"\"\"\n # Define the network\n model = Model()\n\n # Define a loss function and optimizer\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n\n # Get classifier\n ptc = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10)\n\n return ptc\n\n def test_clever_tf(self):\n \"\"\"\n Test with tensorflow.\n :return:\n \"\"\"\n # Get MNIST\n batch_size, nb_train, nb_test = 100, 1000, 10\n (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n x_train, y_train = x_train[:nb_train], y_train[:nb_train]\n x_test, y_test = x_test[:nb_test], y_test[:nb_test]\n\n # Get the classifier\n tfc = self._create_tfclassifier()\n tfc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=1)\n\n # TODO Need to configure r \n # Test targeted clever\n res0 = clever_t(tfc, x_test[-1], 2, 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_t(tfc, x_test[-1], 2, 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_t(tfc, x_test[-1], 2, 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Target tf: \", res0, res1, res2)\n self.assertFalse(res0 == res1)\n self.assertFalse(res1 == res2)\n self.assertFalse(res2 == res0)\n\n # Test untargeted clever\n res0 = clever_u(tfc, x_test[-1], 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_u(tfc, x_test[-1], 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_u(tfc, x_test[-1], 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Untarget tf: \", res0, res1, res2)\n self.assertFalse(res0 == res1)\n self.assertFalse(res1 == res2)\n self.assertFalse(res2 == res0)\n\n def test_clever_kr(self):\n \"\"\"\n Test with keras.\n :return:\n \"\"\"\n # Get MNIST\n batch_size, nb_train, nb_test = 100, 1000, 10\n (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n x_train, y_train = x_train[:nb_train], y_train[:nb_train]\n x_test, y_test = x_test[:nb_test], y_test[:nb_test]\n\n # Get the classifier\n krc = 
self._create_krclassifier()\n krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=1)\n\n # Test targeted clever\n res0 = clever_t(krc, x_test[-1], 2, 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_t(krc, x_test[-1], 2, 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_t(krc, x_test[-1], 2, 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Target kr: \", res0, res1, res2)\n self.assertNotEqual(res0, res1)\n self.assertNotEqual(res1, res2)\n self.assertNotEqual(res2, res0)\n\n # Test untargeted clever\n res0 = clever_u(krc, x_test[-1], 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_u(krc, x_test[-1], 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_u(krc, x_test[-1], 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Untarget kr: \", res0, res1, res2)\n self.assertNotEqual(res0, res1)\n self.assertNotEqual(res1, res2)\n self.assertNotEqual(res2, res0)\n\n def test_clever_pt(self):\n \"\"\"\n Test with pytorch.\n :return:\n \"\"\"\n # Get MNIST\n batch_size, nb_train, nb_test = 100, 1000, 10\n (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n x_train, y_train = x_train[:nb_train], y_train[:nb_train]\n x_test, y_test = x_test[:nb_test], y_test[:nb_test]\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n\n # Get the classifier\n ptc = self._create_ptclassifier()\n ptc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=1)\n\n # Test targeted clever\n res0 = clever_t(ptc, x_test[-1], 2, 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_t(ptc, x_test[-1], 2, 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_t(ptc, x_test[-1], 2, 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Target pt: \", res0, res1, res2)\n self.assertFalse(res0 == res1)\n self.assertFalse(res1 == res2)\n self.assertFalse(res2 == res0)\n\n # Test untargeted clever\n res0 = clever_u(ptc, x_test[-1], 10, 5, r_l1, norm=1, pool_factor=3)\n res1 = clever_u(ptc, x_test[-1], 10, 5, r_l2, norm=2, pool_factor=3)\n res2 = clever_u(ptc, x_test[-1], 10, 5, r_li, norm=np.inf, pool_factor=3)\n print(\"Untarget pt: \", res0, res1, res2)\n self.assertFalse(res0 == res1)\n self.assertFalse(res1 == res2)\n self.assertFalse(res2 == res0)\n\n # def test_clever_l2_no_target(self):\n # batch_size, nb_train, nb_test = 100, 500, 10\n # (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n #\n # # Get the classifier\n # krc = self._create_krclassifier()\n # krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=2)\n #\n # scores = clever(krc, x_test[0], 5, 5, 3, 2, target=None, c_init=1, pool_factor=10)\n # print(\"Clever Scores for n-1 classes\", scores, scores.shape)\n # self.assertTrue(scores.shape == (krc.nb_classes-1,))\n #\n # def test_clever_l2_no_target_sorted(self):\n # batch_size, nb_train, nb_test = 100, 500, 10\n # (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n #\n # # Get the classifier\n # krc = self._create_krclassifier()\n # krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=2)\n #\n # scores = clever(krc, x_test[0], 5, 5, 3, 2, target=None, target_sort=True, c_init=1, pool_factor=10)\n # print(\"Clever scores for n-1 classes\", scores, scores.shape)\n # # Should approx. 
be in decreasing value\n # self.assertTrue(scores.shape == (krc.nb_classes-1,))\n #\n # def test_clever_l2_same_target(self):\n # batch_size, nb_train, nb_test = 100, 500, 10\n # (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n #\n # # Get the classifier\n # krc = self._create_krclassifier()\n # krc.fit(x_train, y_train, batch_size=batch_size, nb_epochs=2)\n #\n # scores = clever(krc, x_test[0], 5, 5, 3, 2, target=np.argmax(krc.predict(x_test[:1])), c_init=1, pool_factor=10)\n # self.assertIsNone(scores[0], msg='Clever scores for the predicted class should be `None`.')\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.nn.Linear", "tensorflow.train.AdamOptimizer", "torch.nn.MaxPool2d", "tensorflow.layers.max_pooling2d", "tensorflow.Session", "tensorflow.losses.softmax_cross_entropy", "torch.nn.Conv2d", "numpy.swapaxes", "tensorflow.layers.conv2d", "tensorflow.placeholder", "tensorflow.layers.dense", "tensorflow.contrib.layers.flatten", "tensorflow.global_variables_initializer", "torch.nn.CrossEntropyLoss" ] ]
Jichao-Wang/MDOAU-net
[ "d16bde0386a3c5996c778ea6f5ba282f7a03b391" ]
[ "wjc_core.py" ]
[ "import torch\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport PIL.Image as Image\r\nfrom torch.utils.data import DataLoader\r\nfrom torch import nn, optim\r\nfrom torchvision.transforms import transforms\r\nfrom dataset import Train_Dataset, Validation_Dataset, Test_Dataset\r\nimport skimage.io as io\r\nimport shutil\r\n\r\nthreshold = 0.5 # 二分类阈值\r\n# 是否使用cuda\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nx_transforms = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.5], [0.5])\r\n])\r\n\r\n# mask只需要转换为tensor\r\ny_transforms = transforms.ToTensor()\r\n\r\n\r\ndef makedir(new_path):\r\n folder = os.path.exists(new_path)\r\n if not folder:\r\n os.makedirs(new_path)\r\n else:\r\n shutil.rmtree(new_path)\r\n os.makedirs(new_path)\r\n\r\n\r\ndef init_work_space(args):\r\n makedir('./' + args.model_name + '/results')\r\n makedir(args.ckpt)\r\n makedir('./' + args.model_name + '/runs')\r\n\r\n\r\ndef train_model(args, writer, model, criterion, optimizer, dataload, regular=''):\r\n save_epoch, best_val_acc = 0, -0.1\r\n for epoch in range(args.epoch):\r\n print('Epoch {}/{}'.format(epoch, args.epoch - 1))\r\n print('-' * 10)\r\n dt_size = len(dataload.dataset)\r\n epoch_loss = 0\r\n epoch_correct_pixels, epoch_total_pixels = [], []\r\n step = 0\r\n for x, y in dataload:\r\n step += 1\r\n inputs = x.to(device)\r\n labels = y.to(device)\r\n # zero the parameter gradients\r\n optimizer.zero_grad()\r\n\r\n # forward\r\n outputs = model(inputs).to(device)\r\n del inputs\r\n loss = criterion(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # calculate accuracy\r\n predicted = outputs.detach().numpy()\r\n predicted[predicted >= threshold] = 1\r\n predicted[predicted < threshold] = 0\r\n correct = (predicted == labels.detach().numpy()).sum()\r\n del predicted\r\n pixel_num = 1.0\r\n for i in range(len(labels.size())):\r\n pixel_num *= labels.size()[i]\r\n\r\n epoch_correct_pixels.append(correct)\r\n epoch_total_pixels.append(pixel_num)\r\n epoch_loss += float(loss.item())\r\n del labels\r\n del loss\r\n val_accuracy = validation(args, model, method='train')\r\n epoch_loss = epoch_loss / step\r\n epoch_train_accuracy = np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels)\r\n print(\"epoch %d loss:%0.3f train accuracy:%0.3f val accuracy:%0.3f\" % (\r\n epoch, epoch_loss, epoch_train_accuracy, val_accuracy))\r\n writer.add_scalar('loss', epoch_loss / step, global_step=epoch)\r\n writer.add_scalar('train accuracy', epoch_train_accuracy, global_step=epoch)\r\n writer.add_scalar('validated accuracy', val_accuracy, global_step=epoch)\r\n writer.add_scalars('accuracy/group',\r\n {'train_accuracy': epoch_train_accuracy, 'validated accuracy': val_accuracy},\r\n global_step=epoch)\r\n if best_val_acc < val_accuracy:\r\n save_epoch = epoch\r\n torch.save(model, args.ckpt + '/' + args.model_name + '.pth')\r\n best_val_acc = val_accuracy\r\n print(\"Model:\", args.model_name)\r\n print(\"Dataset:\", args.data_file)\r\n print(\"Best epoch is\" + str(save_epoch))\r\n print(\"Best val acc is \" + str(best_val_acc))\r\n return model\r\n\r\n\r\n# 训练模型\r\ndef train(args, writer, model, regular=''):\r\n model.to(device)\r\n criterion = nn.BCELoss()\r\n optimizer = optim.Adam(model.parameters(), )\r\n liver_dataset = Train_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n dataloaders = DataLoader(liver_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\r\n 
train_model(args, writer, model, criterion, optimizer, dataloaders, regular)\r\n\r\n\r\n# Evaluate the model on data that has both images and labels\r\ndef validation(args, model, print_each=False, method='train'):\r\n liver_dataset = Validation_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n if method == 'train':\r\n dataloaders = DataLoader(liver_dataset, batch_size=8)\r\n model.eval()\r\n epoch_correct_pixels, epoch_total_pixels = [], []\r\n with torch.no_grad():\r\n for x, y, x_path in dataloaders:\r\n inputs = x.to(device)\r\n labels = y.to(device)\r\n predicted = model(inputs).detach().cpu().numpy()\r\n predicted[predicted >= threshold] = 1\r\n predicted[predicted < threshold] = 0\r\n correct = (predicted == labels.detach().cpu().numpy()).sum()\r\n del predicted\r\n pixel_num = 1.0\r\n for i in range(len(labels.size())):\r\n pixel_num *= labels.size()[i]\r\n epoch_correct_pixels.append(correct)\r\n epoch_total_pixels.append(pixel_num)\r\n if print_each:\r\n print(x_path, 'acc', correct / pixel_num)\r\n return np.mean(epoch_correct_pixels) / np.mean(epoch_total_pixels)\r\n\r\n\r\n# Run inference on data that has images but no labels\r\ndef test(args, save_gray=False, manual=False, weight_path=''):\r\n model = None\r\n if not manual:\r\n model = torch.load(args.ckpt + '/' + args.model_name + '.pth', map_location='cpu')\r\n if manual:\r\n model = torch.load(weight_path, map_location='cpu') # load a user-specified checkpoint\r\n\r\n liver_dataset = Test_Dataset(args.data_file, transform=x_transforms, target_transform=y_transforms)\r\n\r\n dataloaders = DataLoader(liver_dataset, batch_size=1)\r\n\r\n model.eval()\r\n with torch.no_grad():\r\n for x, pic_name_i in dataloaders:\r\n pic_name_i = pic_name_i[0]\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_x.png\", torch.squeeze(x).numpy())\r\n predict = model(x)\r\n predict = torch.squeeze(predict).detach().numpy()\r\n if save_gray:\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_gray_pre.png\", predict)\r\n\r\n predict[predict >= threshold] = 1\r\n predict[predict < threshold] = 0\r\n io.imsave(args.model_name + \"/results/\" + pic_name_i.split('.')[0] + \"_label_pre.png\", predict)\r\n\r\n\r\nclass SaveOutput:\r\n def __init__(self):\r\n self.outputs = []\r\n\r\n def __call__(self, module, module_in, module_out):\r\n self.outputs.append(module_out)\r\n\r\n def clear(self):\r\n self.outputs = []\r\n\r\n\r\ndef model_forward_visualization(image_path, weight_path, model_name=''):\r\n \"\"\"Take a test image and trained model weights, and visualize the output of every convolution step.\"\"\"\r\n model = torch.load(weight_path, map_location='cpu') # load trained model\r\n\r\n save_output = SaveOutput() # register hooks for each layer\r\n hook_handles, k1, k2 = [], 0, 0\r\n for layer in model.modules():\r\n k1 += 1\r\n if isinstance(layer, torch.nn.modules.conv.Conv2d):\r\n k2 += 1\r\n handle = layer.register_forward_hook(save_output)\r\n hook_handles.append(handle)\r\n\r\n x = x_transforms(Image.open(image_path).convert('L').resize(size=(512, 512))).unsqueeze(0)\r\n print(x, x.dtype)\r\n y = model(x)\r\n\r\n def module_output_to_numpy(tensor):\r\n return tensor.detach().to('cpu').numpy()\r\n\r\n for layer_idx in range(len(save_output.outputs)):\r\n images = module_output_to_numpy(save_output.outputs[layer_idx])\r\n # the 0 below refers to reading the first sample of this layer's output\r\n\r\n print(type(images))\r\n print(images.shape)\r\n mid_1 = images.shape[1]\r\n mid_idx = 0\r\n while mid_idx < mid_1:\r\n # mid_idx is the index of feature\r\n with 
plt.style.context(\"seaborn-white\"):\r\n plt.figure(frameon=False)\r\n for idx in range(64):\r\n # idx is the index of subplot\r\n if mid_idx == mid_1:\r\n break\r\n plt.subplot(8, 8, idx + 1)\r\n plt.imshow(images[0, mid_idx])\r\n mid_idx += 1\r\n plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\r\n plt.savefig(\r\n './model_visualization/' + model_name + '/layer_' + str(layer_idx) + '_mid_' + str(mid_idx) + '.png')\r\n plt.cla()\r\n plt.close('all')\r\n\r\n\r\ndef model_print(model):\r\n print(sum(p.numel() for p in model.parameters()))\r\n" ]
[ [ "matplotlib.pyplot.style.context", "torch.no_grad", "torch.save", "matplotlib.pyplot.close", "numpy.mean", "matplotlib.pyplot.cla", "matplotlib.pyplot.figure", "torch.squeeze", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.load", "matplotlib.pyplot.gcf", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplot" ] ]
Manik2000/drinker_fate_analysis
[ "7c4a54eabe556cc9727610d1151d85ef69e1971b" ]
[ "utils.py" ]
[ "import numpy as np\n\n\nclass Drinker:\n \"\"\"Class implementation of a drinker.\"\"\"\n\n def __init__(self, v):\n \"\"\"Initialize class object's attributes.\"\"\"\n self.x = 0\n self.y = 0\n self.v = v\n self.trajectory = [(0, 0)]\n\n def move(self):\n \"\"\"Move drinker to a new position.\"\"\"\n self.x, self.y = self.x + np.random.randn() + self.v, self.y + np.random.randn()\n self.x = abs(self.x)\n self.y = abs(self.y)\n if self.y > 50:\n self.y = 100 - self.y\n self.trajectory.append((self.x, self.y))\n\n\nclass Car:\n \"\"\"Class implementing car object.\"\"\"\n\n def __init__(self, x, y, v, direction):\n \"\"\"Initialize class object's attributes.\"\"\"\n self.x = x\n self.y = y\n self.v = v\n self.direction = direction\n\n def move(self):\n \"\"\"Move car to a new position.\"\"\"\n self.x += self.direction * self.v\n\n def get_position(self):\n \"\"\"Return car's position, and direction of its drive.\"\"\"\n return self.x, self.y, self.direction\n\n\ndef mixed_poisson(Λ, T):\n \"\"\"\n Return an array with moments of mixed Poisson process jumps moments.\n :param Λ: a probabilistic distribution\n :param T: time horizon\n \"\"\"\n S = []\n λ = abs(Λ.rvs())\n U = np.random.rand()\n t = - 1 / λ * np.log(U)\n while t < T:\n S.append(t)\n U = np.random.rand()\n t = t - 1 / λ * np.log(U)\n return np.array(S)\n\n\ndef generate_arrivals(Λ, T, scale=10):\n \"\"\"\n Return rounded and scaled mixed Poisson jumps moments.\n :param Λ: probabilistic distribution\n :param T: time horizon\n :param scale: scalar scaling the moments of jumps\n \"\"\"\n arr = np.round(scale * mixed_poisson(Λ, T))\n return arr[arr < T]\n\n\nclass Unif:\n \"\"\"Class implementation of a uniform distribution.\"\"\"\n\n def __init__(self, a, b):\n \"\"\"Initialize the class object's attributes.\"\"\"\n self.a = a\n self.b = b\n\n def rvs(self):\n \"\"\"Return a number from uniform U(a, b) distribution.\"\"\"\n return np.random.uniform(self.a, self.b)\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.log", "numpy.random.randn", "numpy.random.uniform" ] ]